
eval.py 6.2 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""FastText for Evaluation"""
import argparse

import numpy as np

import mindspore.nn as nn
import mindspore.common.dtype as mstype
import mindspore.ops.operations as P
from mindspore.common.tensor import Tensor
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as deC
from mindspore import context

from src.fasttext_model import FastText
parser = argparse.ArgumentParser(description='fasttext')
parser.add_argument('--data_path', type=str, required=True,
                    help='path of the directory containing the inference MindRecord files.')
parser.add_argument('--data_name', type=str, required=True, default='ag',
                    help='dataset name, e.g. ag, dbpedia, yelp_p.')
parser.add_argument('--model_ckpt', type=str, required=True,
                    help='path of an existing checkpoint file.')
parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU'],
                    help='device where the code will be executed (default: Ascend).')
args = parser.parse_args()
if args.data_name == "ag":
    from src.config import config_ag as config_ascend
    from src.config import config_ag_gpu as config_gpu
    target_label1 = ['0', '1', '2', '3']
elif args.data_name == 'dbpedia':
    from src.config import config_db as config_ascend
    from src.config import config_db_gpu as config_gpu
    target_label1 = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']
elif args.data_name == 'yelp_p':
    from src.config import config_yelpp as config_ascend
    from src.config import config_yelpp_gpu as config_gpu
    target_label1 = ['0', '1']
else:
    raise ValueError("unsupported dataset name: %s, expected one of ag, dbpedia, yelp_p." % args.data_name)

context.set_context(
    mode=context.GRAPH_MODE,
    save_graphs=False,
    device_target=args.device_target)

config = config_ascend if args.device_target == 'Ascend' else config_gpu
class FastTextInferCell(nn.Cell):
    """
    Encapsulation class for FastText network inference.

    Args:
        network (nn.Cell): FastText model.

    Returns:
        Tensor, predicted class indices.
    """

    def __init__(self, network):
        super(FastTextInferCell, self).__init__(auto_prefix=False)
        self.network = network
        self.argmax = P.ArgMaxWithValue(axis=1, keep_dims=True)
        self.log_softmax = nn.LogSoftmax(axis=1)

    def construct(self, src_tokens, src_tokens_lengths):
        """Run the network and return the index of the most probable class."""
        prediction = self.network(src_tokens, src_tokens_lengths)
        predicted_idx = self.log_softmax(prediction)
        # ArgMaxWithValue returns (indices, values); only the indices are kept.
        predicted_idx, _ = self.argmax(predicted_idx)
        return predicted_idx
def load_infer_dataset(batch_size, datafile, bucket):
    """Data loader for inference."""
    def batch_per_bucket(bucket_length, input_file):
        if not input_file:
            raise FileNotFoundError("input file parameter must not be empty.")
        input_file = input_file + '/test_dataset_bs_' + str(bucket_length) + '.mindrecord'
        data_set = ds.MindDataset(input_file,
                                  columns_list=['src_tokens', 'src_tokens_length', 'label_idx'])
        type_cast_op = deC.TypeCast(mstype.int32)
        data_set = data_set.map(operations=type_cast_op, input_columns="src_tokens")
        data_set = data_set.map(operations=type_cast_op, input_columns="src_tokens_length")
        data_set = data_set.map(operations=type_cast_op, input_columns="label_idx")
        data_set = data_set.batch(batch_size, drop_remainder=False)
        return data_set

    for i, _ in enumerate(bucket):
        bucket_len = bucket[i]
        ds_per = batch_per_bucket(bucket_len, datafile)
        if i == 0:
            data_set = ds_per
        else:
            data_set = data_set + ds_per
    return data_set
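
# Note: the loader above expects one MindRecord file per bucket length under
# --data_path, named 'test_dataset_bs_<bucket>.mindrecord'; the per-bucket
# datasets are concatenated with '+' into a single evaluation dataset.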
def run_fasttext_infer():
    """Run inference with FastText and report accuracy."""
    dataset = load_infer_dataset(batch_size=config.batch_size, datafile=args.data_path, bucket=config.test_buckets)
    fasttext_model = FastText(config.vocab_size, config.embedding_dims, config.num_class)

    parameter_dict = load_checkpoint(args.model_ckpt)
    load_param_into_net(fasttext_model, parameter_dict=parameter_dict)

    ft_infer = FastTextInferCell(fasttext_model)
    model = Model(ft_infer)

    predictions = []
    target_sens = []
    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
        target_sens.append(batch['label_idx'])
        src_tokens = Tensor(batch['src_tokens'], mstype.int32)
        src_tokens_length = Tensor(batch['src_tokens_length'], mstype.int32)
        predicted_idx = model.predict(src_tokens, src_tokens_length)
        predictions.append(predicted_idx.asnumpy())

    from sklearn.metrics import accuracy_score, classification_report
    # Merge the per-batch label and prediction arrays into flat lists for sklearn.
    target_sens = np.array(target_sens).flatten()
    merge_target_sens = []
    for target_sen in target_sens:
        merge_target_sens.extend(target_sen)
    target_sens = merge_target_sens
    predictions = np.array(predictions).flatten()
    merge_predictions = []
    for prediction in predictions:
        merge_predictions.extend(prediction)
    predictions = merge_predictions
    acc = accuracy_score(target_sens, predictions)
    result_report = classification_report(target_sens, predictions, target_names=target_label1)
    print("********Accuracy: ", acc)
    print(result_report)
if __name__ == '__main__':
    run_fasttext_infer()
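
For reference, a typical invocation of this evaluation script might look like the following; the paths are placeholders, and the actual dataset and checkpoint locations depend on how the MindRecord files and the trained model were prepared:

python eval.py --data_path /path/to/test_mindrecord_dir --data_name ag --model_ckpt /path/to/fasttext.ckpt --device_target Ascend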