
metrics.py

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
  15. """
  16. Area under cure metric
  17. """
  18. from sklearn.metrics import roc_auc_score
  19. from mindspore import context
  20. from mindspore.nn.metrics import Metric
  21. from mindspore.communication.management import get_rank, get_group_size
  22. class AUCMetric(Metric):
  23. """
  24. Area under cure metric
  25. """
  26. def __init__(self):
  27. super(AUCMetric, self).__init__()
  28. self.clear()
  29. self.full_batch = context.get_auto_parallel_context("full_batch")
  30. def clear(self):
  31. """Clear the internal evaluation result."""
  32. self.true_labels = []
  33. self.pred_probs = []
  34. def update(self, *inputs): # inputs
  35. """Update list of predicts and labels."""
  36. all_predict = inputs[1].asnumpy().flatten().tolist() # predict
  37. all_label = inputs[2].asnumpy().flatten().tolist() # label
  38. self.pred_probs.extend(all_predict)
  39. if self.full_batch:
  40. rank_id = get_rank()
  41. group_size = get_group_size()
  42. gap = len(all_label) // group_size
  43. self.true_labels.extend(all_label[rank_id*gap: (rank_id+1)*gap])
  44. else:
  45. self.true_labels.extend(all_label)
  46. def eval(self):
  47. if len(self.true_labels) != len(self.pred_probs):
  48. raise RuntimeError(
  49. 'true_labels.size is not equal to pred_probs.size()')
  50. auc = roc_auc_score(self.true_labels, self.pred_probs)
  51. print("====" * 20 + " auc_metric end")
  52. print("====" * 20 + " auc: {}".format(auc))
  53. return auc
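
For reference, a minimal usage sketch of AUCMetric follows. The toy tensors and the commented-out Model wiring are illustrative assumptions, not part of this file: the standalone call assumes the (unused, predict, label) input layout that update expects, and runs in the default non-parallel context where full_batch is False, so no distributed setup is needed.

import numpy as np
from mindspore import Tensor

# Standalone check of the metric's update/eval cycle with toy data.
metric = AUCMetric()
metric.clear()
preds = Tensor(np.array([0.1, 0.9, 0.8, 0.3], dtype=np.float32))
labels = Tensor(np.array([0.0, 1.0, 1.0, 0.0], dtype=np.float32))
placeholder = Tensor(np.array([0.0], dtype=np.float32))  # unused first input
metric.update(placeholder, preds, labels)
print(metric.eval())  # 1.0: every positive is ranked above every negative

# Hypothetical wiring into model evaluation; `net`, `loss_fn`, and
# `eval_dataset` stand in for a real network, loss function, and dataset.
# from mindspore import Model
# model = Model(net, loss_fn=loss_fn, metrics={"auc": AUCMetric()})
# print(model.eval(eval_dataset)["auc"])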