You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

test_callbacks.py 3.1 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778
  1. import unittest
  2. import numpy as np
  3. from fastNLP.core.callback import EchoCallback, EarlyStopCallback, GradientClipCallback
  4. from fastNLP.core.dataset import DataSet
  5. from fastNLP.core.instance import Instance
  6. from fastNLP.core.losses import BCELoss
  7. from fastNLP.core.metrics import AccuracyMetric
  8. from fastNLP.core.optimizer import SGD
  9. from fastNLP.core.trainer import Trainer
  10. from fastNLP.models.base_model import NaiveClassifier
  11. def prepare_env():
  12. def prepare_fake_dataset():
  13. mean = np.array([-3, -3])
  14. cov = np.array([[1, 0], [0, 1]])
  15. class_A = np.random.multivariate_normal(mean, cov, size=(1000,))
  16. mean = np.array([3, 3])
  17. cov = np.array([[1, 0], [0, 1]])
  18. class_B = np.random.multivariate_normal(mean, cov, size=(1000,))
  19. data_set = DataSet([Instance(x=[float(item[0]), float(item[1])], y=[0.0]) for item in class_A] +
  20. [Instance(x=[float(item[0]), float(item[1])], y=[1.0]) for item in class_B])
  21. return data_set
  22. data_set = prepare_fake_dataset()
  23. data_set.set_input("x")
  24. data_set.set_target("y")
  25. model = NaiveClassifier(2, 1)
  26. return data_set, model
  27. class TestCallback(unittest.TestCase):
  28. def test_echo_callback(self):
  29. data_set, model = prepare_env()
  30. trainer = Trainer(data_set, model,
  31. loss=BCELoss(pred="predict", target="y"),
  32. n_epochs=2,
  33. batch_size=32,
  34. print_every=50,
  35. optimizer=SGD(lr=0.1),
  36. check_code_level=2,
  37. use_tqdm=False,
  38. callbacks=[EchoCallback()])
  39. trainer.train()
  40. def test_gradient_clip(self):
  41. data_set, model = prepare_env()
  42. trainer = Trainer(data_set, model,
  43. loss=BCELoss(pred="predict", target="y"),
  44. n_epochs=30,
  45. batch_size=32,
  46. print_every=50,
  47. optimizer=SGD(lr=0.1),
  48. check_code_level=2,
  49. use_tqdm=False,
  50. dev_data=data_set,
  51. metrics=AccuracyMetric(pred="predict", target="y"),
  52. callbacks=[GradientClipCallback(model.parameters(), clip_value=2)])
  53. trainer.train()
  54. def test_early_stop(self):
  55. data_set, model = prepare_env()
  56. trainer = Trainer(data_set, model,
  57. loss=BCELoss(pred="predict", target="y"),
  58. n_epochs=50,
  59. batch_size=32,
  60. print_every=50,
  61. optimizer=SGD(lr=0.01),
  62. check_code_level=2,
  63. use_tqdm=False,
  64. dev_data=data_set,
  65. metrics=AccuracyMetric(pred="predict", target="y"),
  66. callbacks=[EarlyStopCallback(5)])
  67. trainer.train()