You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

example.py 2.6 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273
  1. # coding: utf-8
  2. # ================================================================#
  3. # Copyright (C) 2021 Freecss All rights reserved.
  4. #
  5. # File Name :share_example.py
  6. # Author :freecss
  7. # Email :karlfreecss@gmail.com
  8. # Created Date :2021/06/07
  9. # Description :
  10. #
  11. # ================================================================#
  12. from utils.plog import logger, INFO
  13. import torch.nn as nn
  14. import torch
  15. from models.nn import LeNet5, SymbolNet
  16. from models.basic_model import BasicModel, BasicDataset
  17. from models.wabl_models import DecisionTree, WABLBasicModel
  18. from multiprocessing import Pool
  19. from abducer.abducer_base import AbducerBase
  20. from abducer.kb import add_KB, HWF_KB, HED_prolog_KB
  21. from datasets.mnist_add.get_mnist_add import get_mnist_add
  22. from datasets.hwf.get_hwf import get_hwf
  23. from datasets.hed.get_hed import get_hed, split_equation
  24. import framework_hed
  25. def run_test():
  26. # kb = add_KB(True)
  27. # kb = HWF_KB(True)
  28. # abducer = AbducerBase(kb)
  29. kb = HED_prolog_KB()
  30. abducer = AbducerBase(kb, zoopt=True, multiple_predictions=True)
  31. recorder = logger()
  32. total_train_data = get_hed(train=True)
  33. train_data, val_data = split_equation(total_train_data, 3, 1)
  34. test_data = get_hed(train=False)
  35. # cls = LeNet5(num_classes=len(kb.pseudo_label_list), image_size=(train_data[0][0][0].shape[1:]))
  36. cls = SymbolNet(num_classes=len(kb.pseudo_label_list))
  37. device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
  38. framework_hed.hed_pretrain(kb, cls, recorder)
  39. criterion = nn.CrossEntropyLoss()
  40. optimizer = torch.optim.RMSprop(cls.parameters(), lr=0.001, weight_decay=1e-6)
  41. # optimizer = torch.optim.Adam(cls.parameters(), lr=0.00001, betas=(0.9, 0.99))
  42. base_model = BasicModel(cls, criterion, optimizer, device, save_interval=1, save_dir=recorder.save_dir, batch_size=32, num_epochs=10, recorder=recorder)
  43. model = WABLBasicModel(base_model, kb.pseudo_label_list)
  44. # train_X, train_Z, train_Y = get_mnist_add(train = True, get_pseudo_label = True)
  45. # test_X, test_Z, test_Y = get_mnist_add(train = False, get_pseudo_label = True)
  46. # train_data = get_hwf(train = True, get_pseudo_label = True)
  47. # test_data = get_hwf(train = False, get_pseudo_label = True)
  48. model, mapping = framework_hed.train_with_rule(model, abducer, train_data, val_data, select_num=10, min_len=5, max_len=8)
  49. framework_hed.hed_test(model, abducer, mapping, train_data, test_data, min_len=5, max_len=8)
  50. recorder.dump()
  51. return True
  52. if __name__ == "__main__":
  53. run_test()

An efficient Python toolkit for Abductive Learning (ABL), a novel paradigm that integrates machine learning and logical reasoning in a unified framework.