You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

eval.py 3.8 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Using for eval the model checkpoint"""
  16. import os
  17. import argparse
  18. from absl import logging
  19. from mindspore.train.serialization import load_checkpoint, load_param_into_net
  20. from mindspore import context, Model
  21. import src.constants as rconst
  22. from src.dataset import create_dataset
  23. from src.metrics import NCFMetric
  24. from src.ncf import NCFModel, NetWithLossClass, TrainStepWrap, PredictWithSigmoid
  25. from src.config import cfg
  26. logging.set_verbosity(logging.INFO)
  27. parser = argparse.ArgumentParser(description='NCF')
  28. parser.add_argument("--data_path", type=str, default="./dataset/") # The location of the input data.
  29. parser.add_argument("--dataset", type=str, default="ml-1m", choices=["ml-1m", "ml-20m"]) # Dataset to be trained and evaluated. ["ml-1m", "ml-20m"]
  30. parser.add_argument("--output_path", type=str, default="./output/") # The location of the output file.
  31. parser.add_argument("--eval_file_name", type=str, default="eval.log") # Eval output file.
  32. parser.add_argument("--checkpoint_file_path", type=str, default="./checkpoint/NCF-14_19418.ckpt") # The location of the checkpoint file.
  33. args, _ = parser.parse_known_args()
  34. def test_eval():
  35. """eval method"""
  36. if not os.path.exists(args.output_path):
  37. os.makedirs(args.output_path)
  38. layers = cfg.layers
  39. num_factors = cfg.num_factors
  40. topk = rconst.TOP_K
  41. num_eval_neg = rconst.NUM_EVAL_NEGATIVES
  42. ds_eval, num_eval_users, num_eval_items = create_dataset(test_train=False, data_dir=args.data_path,
  43. dataset=args.dataset, train_epochs=0,
  44. eval_batch_size=cfg.eval_batch_size)
  45. print("ds_eval.size: {}".format(ds_eval.get_dataset_size()))
  46. ncf_net = NCFModel(num_users=num_eval_users,
  47. num_items=num_eval_items,
  48. num_factors=num_factors,
  49. model_layers=layers,
  50. mf_regularization=0,
  51. mlp_reg_layers=[0.0, 0.0, 0.0, 0.0],
  52. mf_dim=16)
  53. param_dict = load_checkpoint(args.checkpoint_file_path)
  54. load_param_into_net(ncf_net, param_dict)
  55. loss_net = NetWithLossClass(ncf_net)
  56. train_net = TrainStepWrap(loss_net)
  57. # train_net.set_train()
  58. eval_net = PredictWithSigmoid(ncf_net, topk, num_eval_neg)
  59. ncf_metric = NCFMetric()
  60. model = Model(train_net, eval_network=eval_net, metrics={"ncf": ncf_metric})
  61. ncf_metric.clear()
  62. out = model.eval(ds_eval)
  63. eval_file_path = os.path.join(args.output_path, args.eval_file_name)
  64. eval_file = open(eval_file_path, "a+")
  65. eval_file.write("EvalCallBack: HR = {}, NDCG = {}\n".format(out['ncf'][0], out['ncf'][1]))
  66. eval_file.close()
  67. print("EvalCallBack: HR = {}, NDCG = {}".format(out['ncf'][0], out['ncf'][1]))
  68. if __name__ == '__main__':
  69. devid = int(os.getenv('DEVICE_ID'))
  70. context.set_context(mode=context.GRAPH_MODE,
  71. device_target="Davinci",
  72. save_graphs=True,
  73. device_id=devid)
  74. test_eval()