
eval.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test ShuffleNetV1"""
import argparse
import time

from mindspore import context, nn
from mindspore.train.model import Model
from mindspore.common import set_seed
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from src.shufflenetv1 import ShuffleNetV1 as shufflenetv1
from src.config import config
from src.dataset import create_dataset
from src.crossentropysmooth import CrossEntropySmooth

set_seed(1)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Image classification')
    parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
    parser.add_argument('--device_id', type=int, default=0, help='Device id')
    parser.add_argument('--checkpoint_path', type=str, default='', help='Checkpoint file path')
    parser.add_argument('--dataset_path', type=str, default='', help='Dataset path')
    parser.add_argument('--model_size', type=str, default='2.0x', help='ShuffleNetV1 model size',
                        choices=['2.0x', '1.5x', '1.0x', '0.5x'])
    args_opt = parser.parse_args()

    context.set_context(mode=context.GRAPH_MODE, device_target=args_opt.device_target, save_graphs=False,
                        device_id=args_opt.device_id)

    # create dataset
    dataset = create_dataset(args_opt.dataset_path, do_train=False, device_num=1, rank=0)
    step_size = dataset.get_dataset_size()

    # define net
    net = shufflenetv1(model_size=args_opt.model_size)

    # load checkpoint
    param_dict = load_checkpoint(args_opt.checkpoint_path)
    load_param_into_net(net, param_dict)
    net.set_train(False)

    # define loss
    loss = CrossEntropySmooth(sparse=True, reduction="mean", smooth_factor=config.label_smooth_factor,
                              num_classes=config.num_classes)

    # define model
    eval_metrics = {'Loss': nn.Loss(), 'Top_1_Acc': nn.Top1CategoricalAccuracy(),
                    'Top_5_Acc': nn.Top5CategoricalAccuracy()}
    model = Model(net, loss_fn=loss, metrics=eval_metrics)

    # eval model
    start_time = time.time()
    res = model.eval(dataset, dataset_sink_mode=True)
    log = "result:" + str(res) + ", ckpt:'" + args_opt.checkpoint_path + "', time: " + str(
        (time.time() - start_time) * 1000)
    print(log)

    filename = './eval_log.txt'
    with open(filename, 'a') as file_object:
        file_object.write(log + '\n')
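For reference, a typical invocation of this script looks like the sketch below. The checkpoint and dataset paths are placeholders, not paths from this repository; point --dataset_path at whatever validation data layout src.dataset.create_dataset expects, and --checkpoint_path at a trained ShuffleNetV1 checkpoint.

    python eval.py --device_target=Ascend --device_id=0 --model_size=2.0x \
        --checkpoint_path=/path/to/shufflenetv1.ckpt --dataset_path=/path/to/val_dataset

The script prints the Loss, Top_1_Acc, and Top_5_Acc metrics returned by model.eval together with the elapsed time in milliseconds, and appends the same line to ./eval_log.txt.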