You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test.py 4.0 kB

4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """train_imagenet."""
  16. import os
  17. import sys
  18. import argparse
  19. import random
  20. import math
  21. import numpy as np
  22. from test_dataset import create_dataset
  23. from config import config
  24. from mindspore import context
  25. from mindspore.nn.dynamic_lr import piecewise_constant_lr, warmup_lr
  26. import mindspore.dataset.engine as de
  27. from mindspore.train.serialization import load_checkpoint
  28. from model.model import resnet50, TrainStepWrap, NetWithLossClass
  29. from utils.distance import compute_dist, compute_score
  30. random.seed(1)
  31. np.random.seed(1)
  32. de.config.set_seed(1)
  33. parser = argparse.ArgumentParser(description='Image classification')
  34. parser.add_argument('--data_url', type=str, default=None, help='Dataset path')
  35. parser.add_argument('--train_url', type=str, default=None, help='Train output path')
  36. args_opt = parser.parse_args()
  37. context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False)
  38. local_data_url = 'data'
  39. local_train_url = 'ckpt'
  40. class Logger():
  41. '''Log'''
  42. def __init__(self, logFile="log_max.txt"):
  43. self.terminal = sys.stdout
  44. self.log = open(logFile, 'a')
  45. def write(self, message):
  46. self.terminal.write(message)
  47. self.log.write(message)
  48. self.log.flush()
  49. def flush(self):
  50. pass
  51. sys.stdout = Logger("log/log.txt")
  52. if __name__ == '__main__':
  53. query_dataset = create_dataset(data_dir=os.path.join('/home/dingfeifei/datasets', \
  54. 'test/query'), p=config.p, k=config.k)
  55. gallery_dataset = create_dataset(data_dir=os.path.join('/home/dingfeifei/datasets', \
  56. 'test/gallery'), p=config.p, k=config.k)
  57. epoch_size = config.epoch_size
  58. net = resnet50(class_num=config.class_num, is_train=False)
  59. loss_net = NetWithLossClass(net, is_train=False)
  60. base_lr = config.learning_rate
  61. warm_up_epochs = config.lr_warmup_epochs
  62. lr_decay_epochs = config.lr_decay_epochs
  63. lr_decay_factor = config.lr_decay_factor
  64. step_size = math.ceil(config.class_num / config.p)
  65. lr_decay_steps = []
  66. lr_decay = []
  67. for i, v in enumerate(lr_decay_epochs):
  68. lr_decay_steps.append(v * step_size)
  69. lr_decay.append(base_lr * lr_decay_factor ** i)
  70. lr_1 = warmup_lr(base_lr, step_size*warm_up_epochs, step_size, warm_up_epochs)
  71. lr_2 = piecewise_constant_lr(lr_decay_steps, lr_decay)
  72. lr = lr_1 + lr_2
  73. train_net = TrainStepWrap(loss_net, lr, config.momentum, is_train=False)
  74. load_checkpoint("checkpoints/40.ckpt", net=train_net)
  75. q_feats, q_labels, g_feats, g_labels = [], [], [], []
  76. for data, gt_classes, theta in query_dataset:
  77. output = train_net(data, gt_classes, theta)
  78. output = output.asnumpy()
  79. label = gt_classes.asnumpy()
  80. q_feats.append(output)
  81. q_labels.append(label)
  82. q_feats = np.vstack(q_feats)
  83. q_labels = np.hstack(q_labels)
  84. for data, gt_classes, theta in gallery_dataset:
  85. output = train_net(data, gt_classes, theta)
  86. output = output.asnumpy()
  87. label = gt_classes.asnumpy()
  88. g_feats.append(output)
  89. g_labels.append(label)
  90. g_feats = np.vstack(g_feats)
  91. g_labels = np.hstack(g_labels)
  92. q_g_dist = compute_dist(q_feats, g_feats, dis_type='cosine')
  93. mAP, cmc_scores = compute_score(q_g_dist, q_labels, g_labels)
  94. print(mAP, cmc_scores)