You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

eval.py 4.0 kB

5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
######################## eval alexnet example ########################
eval alexnet according to model file:
python eval.py --data_path /YourDataPath --ckpt_path Your.ckpt
"""
  20. import ast
  21. import argparse
  22. from src.config import alexnet_cifar10_cfg, alexnet_imagenet_cfg
  23. from src.dataset import create_dataset_cifar10, create_dataset_imagenet
  24. from src.alexnet import AlexNet
  25. import mindspore.nn as nn
  26. from mindspore import context
  27. from mindspore.train.serialization import load_checkpoint, load_param_into_net
  28. from mindspore.train import Model
  29. from mindspore.nn.metrics import Accuracy
  30. if __name__ == "__main__":
  31. parser = argparse.ArgumentParser(description='MindSpore AlexNet Example')
  32. parser.add_argument('--dataset_name', type=str, default='cifar10', choices=['imagenet', 'cifar10'],
  33. help='dataset name.')
  34. parser.add_argument('--device_target', type=str, default="Ascend", choices=['Ascend', 'GPU'],
  35. help='device where the code will be implemented (default: Ascend)')
  36. parser.add_argument('--data_path', type=str, default="./", help='path where the dataset is saved')
  37. parser.add_argument('--ckpt_path', type=str, default="./ckpt", help='if is test, must provide\
  38. path where the trained ckpt file')
  39. parser.add_argument('--dataset_sink_mode', type=ast.literal_eval,
  40. default=True, help='dataset_sink_mode is False or True')
  41. parser.add_argument('--device_id', type=int, default=0, help='device id of GPU or Ascend. (Default: 0)')
  42. args = parser.parse_args()
  43. context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target)
  44. print("============== Starting Testing ==============")
  45. if args.dataset_name == 'cifar10':
  46. cfg = alexnet_cifar10_cfg
  47. network = AlexNet(cfg.num_classes, phase='test')
  48. loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
  49. opt = nn.Momentum(network.trainable_params(), cfg.learning_rate, cfg.momentum)
  50. ds_eval = create_dataset_cifar10(args.data_path, cfg.batch_size, status="test", target=args.device_target)
  51. param_dict = load_checkpoint(args.ckpt_path)
  52. print("load checkpoint from [{}].".format(args.ckpt_path))
  53. load_param_into_net(network, param_dict)
  54. network.set_train(False)
  55. model = Model(network, loss, opt, metrics={"Accuracy": Accuracy()})
  56. elif args.dataset_name == 'imagenet':
  57. cfg = alexnet_imagenet_cfg
  58. network = AlexNet(cfg.num_classes, phase='test')
  59. loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
  60. ds_eval = create_dataset_imagenet(args.data_path, cfg.batch_size, training=False)
  61. param_dict = load_checkpoint(args.ckpt_path)
  62. print("load checkpoint from [{}].".format(args.ckpt_path))
  63. load_param_into_net(network, param_dict)
  64. network.set_train(False)
  65. model = Model(network, loss_fn=loss, metrics={'top_1_accuracy', 'top_5_accuracy'})
  66. else:
  67. raise ValueError("Unsupported dataset.")
  68. if ds_eval.get_dataset_size() == 0:
  69. raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
  70. result = model.eval(ds_eval, dataset_sink_mode=args.dataset_sink_mode)
  71. print("result : {}".format(result))