You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

eval.py 4.1 kB

4 years ago
4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """GRU evaluation script."""
  16. import os
  17. import argparse
  18. import mindspore.common.dtype as mstype
  19. from mindspore.common.tensor import Tensor
  20. from mindspore.train.model import Model
  21. from mindspore.train.serialization import load_checkpoint, load_param_into_net
  22. from mindspore import context
  23. from src.dataset import create_gru_dataset
  24. from src.seq2seq import Seq2Seq
  25. from src.gru_for_infer import GRUInferCell
  26. from src.config import config
  27. def run_gru_eval():
  28. """
  29. Transformer evaluation.
  30. """
  31. parser = argparse.ArgumentParser(description='GRU eval')
  32. parser.add_argument("--device_target", type=str, default="Ascend",
  33. help="device where the code will be implemented, default is Ascend")
  34. parser.add_argument('--device_id', type=int, default=0, help='device id of GPU or Ascend, default is 0')
  35. parser.add_argument('--device_num', type=int, default=1, help='Use device nums, default is 1')
  36. parser.add_argument('--ckpt_file', type=str, default="", help='ckpt file path')
  37. parser.add_argument("--dataset_path", type=str, default="",
  38. help="Dataset path, default: f`sns.")
  39. args = parser.parse_args()
  40. context.set_context(mode=context.GRAPH_MODE, device_target=args.device_target, reserve_class_name_in_scope=False, \
  41. device_id=args.device_id, save_graphs=False)
  42. prefix = "multi30k_test_mindrecord_32"
  43. mindrecord_file = os.path.join(args.dataset_path, prefix)
  44. if not os.path.exists(mindrecord_file):
  45. print("dataset file {} not exists, please check!".format(mindrecord_file))
  46. raise ValueError(mindrecord_file)
  47. dataset = create_gru_dataset(epoch_count=config.num_epochs, batch_size=config.eval_batch_size, \
  48. dataset_path=mindrecord_file, rank_size=args.device_num, rank_id=0, do_shuffle=False, is_training=False)
  49. dataset_size = dataset.get_dataset_size()
  50. print("dataset size is {}".format(dataset_size))
  51. network = Seq2Seq(config, is_training=False)
  52. network = GRUInferCell(network)
  53. network.set_train(False)
  54. if args.ckpt_file != "":
  55. parameter_dict = load_checkpoint(args.ckpt_file)
  56. load_param_into_net(network, parameter_dict)
  57. model = Model(network)
  58. predictions = []
  59. source_sents = []
  60. target_sents = []
  61. eval_text_len = 0
  62. for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
  63. source_sents.append(batch["source_ids"])
  64. target_sents.append(batch["target_ids"])
  65. source_ids = Tensor(batch["source_ids"], mstype.int32)
  66. target_ids = Tensor(batch["target_ids"], mstype.int32)
  67. predicted_ids = model.predict(source_ids, target_ids)
  68. print("predicts is ", predicted_ids.asnumpy())
  69. print("target_ids is ", target_ids)
  70. predictions.append(predicted_ids.asnumpy())
  71. eval_text_len = eval_text_len + 1
  72. f_output = open(config.output_file, 'w')
  73. f_target = open(config.target_file, "w")
  74. for batch_out, true_sentence in zip(predictions, target_sents):
  75. for i in range(config.eval_batch_size):
  76. target_ids = [str(x) for x in true_sentence[i].tolist()]
  77. f_target.write(" ".join(target_ids) + "\n")
  78. token_ids = [str(x) for x in batch_out[i].tolist()]
  79. f_output.write(" ".join(token_ids) + "\n")
  80. f_output.close()
  81. f_target.close()
  82. if __name__ == "__main__":
  83. run_gru_eval()