You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

graph_classifier.py 31 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797
  1. """
Auto Classifier for Graph Classification
  3. """
  4. import time
  5. import json
  6. from copy import deepcopy
  7. from typing import Sequence
  8. import torch
  9. import numpy as np
  10. import yaml
  11. from .base import BaseClassifier
  12. from ...module.feature import FEATURE_DICT
  13. from ...module.train import TRAINER_DICT, get_feval, BaseGraphClassificationTrainer
  14. from ..base import _initialize_single_model, _parse_hp_space, _parse_model_hp
  15. from ..utils import LeaderBoard, get_dataset_labels, set_seed, get_graph_from_dataset, get_graph_node_features, convert_dataset
  16. from ...datasets import utils
  17. from ..utils import get_logger
  18. from ...backend import DependentBackend
# Module-level logger for all graph-classification solver messages.
LOGGER = get_logger("GraphClassifier")
# Name of the active graph-library backend; code in fit() branches on
# BACKEND == 'pyg' (other values fall to the generic dataset-dict path).
BACKEND = DependentBackend.get_backend_name()
class AutoGraphClassifier(BaseClassifier):
    """
    Auto Multi-class Graph Classifier.

    Used to automatically solve the graph classification problems.

    Parameters
    ----------
    feature_module: autogl.module.feature.BaseFeatureEngineer or str or None
        The (name of) auto feature engineer used to process the given dataset.
        Disable feature engineer by setting it to ``None``. Default ``None``.
    graph_models: list of autogl.module.model.BaseModel or list of str
        The (name of) models to be optimized as backbone.
        Default ``("gin", "topkpool")``.
    hpo_module: autogl.module.hpo.BaseHPOptimizer or str or None
        The (name of) hpo module used to search for best hyper parameters.
        Disable hpo by setting it to ``None``. Default ``anneal``.
    ensemble_module: autogl.module.ensemble.BaseEnsembler or str or None
        The (name of) ensemble module used to ensemble the multi-models found.
        Disable ensemble by setting it to ``None``. Default ``voting``.
    max_evals: int (Optional)
        If given, will set the number eval times the hpo module will use.
        Only be effective when hpo_module is ``str``. Default ``50``.
    default_trainer: str (Optional)
        The (name of) the trainer used in this solver.
        Default ``GraphClassificationFull``.
    trainer_hp_space: Iterable[dict] (Optional)
        trainer hp space or list of trainer hp spaces configuration.
        If a single trainer hp is given, will specify the hp space of trainer for
        every model. If a list of trainer hp is given, will specify every model
        with corresponding trainer hp space. Default ``None``.
    model_hp_spaces: list of list of dict (Optional)
        model hp space configuration.
        If given, will specify every hp space of every passed model. Default ``None``.
        If the encoder(-decoder) is passed, the space should be a dict containing
        keys "encoder" and "decoder", specifying the detailed encoder decoder
        hp spaces.
    size: int (Optional)
        The max models ensemble module will use. Default ``4``.
    device: torch.device or str
        The device where model will be running on. If set to ``auto``, will use gpu
        when available. You can also specify the device by directly giving ``gpu`` or
        ``cuda:0``, etc. Default ``auto``.
    """

    # pylint: disable=W0102
    def __init__(
        self,
        feature_module=None,
        graph_models=("gin", "topkpool"),
        hpo_module="anneal",
        ensemble_module="voting",
        max_evals=50,
        default_trainer="GraphClassificationFull",
        trainer_hp_space=None,
        model_hp_spaces=None,
        size=4,
        device="auto",
    ):
        super().__init__(
            feature_module=feature_module,
            graph_models=graph_models,
            nas_algorithms=None,
            nas_spaces=None,
            nas_estimators=None,
            hpo_module=hpo_module,
            ensemble_module=ensemble_module,
            max_evals=max_evals,
            default_trainer=default_trainer,
            trainer_hp_space=trainer_hp_space,
            model_hp_spaces=model_hp_spaces,
            size=size,
            device=device,
        )
        # Processed dataset cached by fit(); reused by predict()/predict_proba()
        # when no dataset argument is given.
        self.dataset = None
  90. def _init_graph_module(
  91. self,
  92. graph_models,
  93. num_classes,
  94. num_features,
  95. feval,
  96. device,
  97. loss,
  98. num_graph_features,
  99. ) -> "AutoGraphClassifier":
  100. # load graph network module
  101. self.graph_model_list = []
  102. for i, model in enumerate(graph_models):
  103. # init the trainer
  104. if not isinstance(model, BaseGraphClassificationTrainer):
  105. trainer = (
  106. self._default_trainer if not isinstance(self._default_trainer, (tuple, list))
  107. else self._default_trainer[i]
  108. )
  109. if isinstance(trainer, str):
  110. if trainer not in TRAINER_DICT:
  111. raise KeyError(f"Does not support trainer {trainer}")
  112. trainer = TRAINER_DICT[trainer]()
  113. if isinstance(model, (tuple, list)):
  114. trainer.encoder = model[0]
  115. trainer.decoder = model[1]
  116. else:
  117. trainer.encoder = model
  118. else:
  119. trainer = model
  120. # set model hp space
  121. if self._model_hp_spaces is not None:
  122. if self._model_hp_spaces[i] is not None:
  123. if isinstance(self._model_hp_spaces[i], dict):
  124. encoder_hp_space = self._model_hp_spaces[i].get('encoder', None)
  125. decoder_hp_space = self._model_hp_spaces[i].get('decoder', None)
  126. else:
  127. encoder_hp_space = self._model_hp_spaces[i]
  128. decoder_hp_space = None
  129. if encoder_hp_space is not None:
  130. trainer.encoder.hyper_parameter_space = encoder_hp_space
  131. if decoder_hp_space is not None:
  132. trainer.decoder.hyper_parameter_space = decoder_hp_space
  133. # set trainer hp space
  134. if self._trainer_hp_space is not None:
  135. if isinstance(self._trainer_hp_space[0], list):
  136. current_hp_for_trainer = self._trainer_hp_space[i]
  137. else:
  138. current_hp_for_trainer = self._trainer_hp_space
  139. trainer.hyper_parameter_space = current_hp_for_trainer
  140. trainer.num_features = num_features
  141. trainer.num_classes = num_classes
  142. trainer.num_graph_features = num_graph_features
  143. trainer.loss = loss
  144. trainer.feval = feval
  145. trainer.to(device)
  146. self.graph_model_list.append(trainer)
  147. return self
  148. # pylint: disable=arguments-differ
  149. def fit(
  150. self,
  151. dataset,
  152. time_limit=-1,
  153. inplace=False,
  154. train_split=None,
  155. val_split=None,
  156. evaluation_method="infer",
  157. seed=None,
  158. ) -> "AutoGraphClassifier":
  159. """
  160. Fit current solver on given dataset.
  161. Parameters
  162. ----------
  163. dataset: autogl.data.dataset
  164. The multi-graph dataset needed to fit on.
  165. time_limit: int
  166. The time limit of the whole fit process (in seconds). If set below 0, will ignore
  167. time limit. Default ``-1``.
  168. inplace: bool
  169. Whether we process the given dataset in inplace manner. Default ``False``.
  170. Set it to True if you want to save memory by modifying the given dataset directly.
  171. train_split: float or int (Optional)
  172. The train ratio (in ``float``) or number (in ``int``) of dataset. If you want to use
  173. default train/val/test split in dataset, please set this to ``None``.
  174. Default ``None``.
  175. val_split: float or int (Optional)
  176. The validation ratio (in ``float``) or number (in ``int``) of dataset. If you want to
  177. use default train/val/test split in dataset, please set this to ``None``.
  178. Default ``None``.
  179. evaluation_method: (list of) str autogl.module.train.evaluation
  180. A (list of) evaluation method for current solver. If ``infer``, will automatically
  181. determine. Default ``infer``.
  182. seed: int (Optional)
  183. The random seed. If set to ``None``, will run everything at random.
  184. Default ``None``.
  185. Returns
  186. -------
  187. self: autogl.solver.AutoGraphClassifier
  188. A reference of current solver.
  189. """
  190. set_seed(seed)
  191. num_classes = get_dataset_labels(dataset).max().item() + 1
  192. if time_limit < 0:
  193. time_limit = 3600 * 24
  194. time_begin = time.time()
  195. # initialize leaderboard
  196. if evaluation_method == "infer":
  197. if hasattr(dataset, "metric"):
  198. evaluation_method = [dataset.metric]
  199. else:
  200. if num_classes == 2:
  201. evaluation_method = ["auc"]
  202. else:
  203. evaluation_method = ["acc"]
  204. assert isinstance(evaluation_method, list)
  205. evaluator_list = get_feval(evaluation_method)
  206. self.leaderboard = LeaderBoard(
  207. [e.get_eval_name() for e in evaluator_list],
  208. {e.get_eval_name(): e.is_higher_better() for e in evaluator_list},
  209. )
  210. # set up the dataset
  211. if train_split is None and val_split is None:
  212. assert hasattr(dataset, "train_split") and hasattr(dataset, "val_split"), (
  213. "The dataset has no default train/val split! "
  214. "Please manually pass train and val ratio."
  215. )
  216. LOGGER.info("Use the default train/val/test ratio in given dataset")
  217. # if hasattr(dataset.train_split, "n_splits"):
  218. # cross_validation = True
  219. elif train_split is not None and val_split is not None:
  220. utils.graph_random_splits(dataset, train_split, val_split, seed=seed)
  221. else:
  222. LOGGER.error(
  223. "Please set both train_split and val_split explicitly. Detect %s is None.",
  224. "train_split" if train_split is None else "val_split",
  225. )
  226. raise ValueError(
  227. "In consistent setting of train/val split. Detect {} is None.".format(
  228. "train_split" if train_split is None else "val_split"
  229. )
  230. )
  231. # feature engineering
  232. if self.feature_module is not None:
  233. self.feature_module.fit(dataset.train_split)
  234. dataset = self.feature_module.transform(dataset, inplace=inplace)
  235. self.dataset = dataset
  236. # check whether the dataset has features.
  237. # currently we only support graph classification with features.
  238. feat = get_graph_node_features(get_graph_from_dataset(dataset))
  239. assert feat is not None, (
  240. "Does not support fit on non node-feature dataset!"
  241. " Please add node features to dataset or specify feature engineers that generate"
  242. " node features."
  243. )
  244. num_features = feat.size(-1)
  245. # initialize graph networks
  246. self._init_graph_module(
  247. self.gml,
  248. num_features=num_features,
  249. num_classes=num_classes,
  250. feval=evaluator_list,
  251. device=self.runtime_device,
  252. loss="cross_entropy" if not hasattr(dataset, "loss") else dataset.loss,
  253. num_graph_features=(0
  254. if not hasattr(dataset[0], "gf")
  255. else dataset[0].gf.size(1)) if BACKEND == 'pyg' else
  256. (0 if 'gf' not in dataset[0].data else dataset[0].data['gf'].size(1)),
  257. )
  258. # train the models and tune hpo
  259. result_valid = []
  260. names = []
  261. for idx, model in enumerate(self.graph_model_list):
  262. if time_limit < 0:
  263. time_for_each_model = None
  264. else:
  265. time_for_each_model = (time_limit - time.time() + time_begin) / (
  266. len(self.graph_model_list) - idx
  267. )
  268. if self.hpo_module is None:
  269. model.initialize()
  270. model.train(convert_dataset(dataset), True)
  271. optimized = model
  272. else:
  273. optimized, _ = self.hpo_module.optimize(
  274. trainer=model, dataset=convert_dataset(dataset), time_limit=time_for_each_model
  275. )
  276. # to save memory, all the trainer derived will be mapped to cpu
  277. optimized.to(torch.device("cpu"))
  278. name = str(optimized) + "_idx%d" % (idx)
  279. names.append(name)
  280. performance_on_valid, _ = optimized.get_valid_score(return_major=False)
  281. result_valid.append(
  282. optimized.get_valid_predict_proba().detach().cpu().numpy()
  283. )
  284. self.leaderboard.insert_model_performance(
  285. name,
  286. dict(
  287. zip(
  288. [e.get_eval_name() for e in evaluator_list],
  289. performance_on_valid,
  290. )
  291. ),
  292. )
  293. self.trained_models[name] = optimized
  294. # fit the ensemble model
  295. if self.ensemble_module is not None:
  296. performance = self.ensemble_module.fit(
  297. result_valid,
  298. get_dataset_labels(dataset)[dataset.val_index].cpu().numpy(),
  299. names,
  300. evaluator_list,
  301. n_classes=num_classes,
  302. )
  303. self.leaderboard.insert_model_performance(
  304. "ensemble",
  305. dict(zip([e.get_eval_name() for e in evaluator_list], performance)),
  306. )
  307. return self
  308. def fit_predict(
  309. self,
  310. dataset,
  311. time_limit=-1,
  312. inplace=False,
  313. train_split=None,
  314. val_split=None,
  315. evaluation_method="infer",
  316. seed=None,
  317. use_ensemble=True,
  318. use_best=True,
  319. name=None,
  320. ) -> np.ndarray:
  321. """
  322. Fit current solver on given dataset and return the predicted value.
  323. Parameters
  324. ----------
  325. dataset: torch_geometric.data.dataset.Dataset
  326. The dataset needed to fit on. This dataset must have only one graph.
  327. time_limit: int
  328. The time limit of the whole fit process (in seconds). If set below 0, will
  329. ignore time limit. Default ``-1``.
  330. inplace: bool
  331. Whether we process the given dataset in inplace manner. Default ``False``.
  332. Set it to True if you want to save memory by modifying the given dataset directly.
  333. train_split: float or int (Optional)
  334. The train ratio (in ``float``) or number (in ``int``) of dataset. If you want to
  335. use default train/val/test split in dataset, please set this to ``None``.
  336. Default ``None``.
  337. val_split: float or int (Optional)
  338. The validation ratio (in ``float``) or number (in ``int``) of dataset. If you want
  339. to use default train/val/test split in dataset, please set this to ``None``.
  340. Default ``None``.
  341. evaluation_method: (list of) str or autogl.module.train.evaluation
  342. A (list of) evaluation method for current solver. If ``infer``, will automatically
  343. determine. Default ``infer``.
  344. seed: int (Optional)
  345. The random seed. If set to ``None``, will run everything at random.
  346. Default ``None``.
  347. use_ensemble: bool
  348. Whether to use ensemble to do the predict. Default ``True``.
  349. use_best: bool
  350. Whether to use the best single model to do the predict. Will only be effective when
  351. ``use_ensemble`` is ``False``. Default ``True``.
  352. name: str or None
  353. The name of model used to predict. Will only be effective when ``use_ensemble`` and
  354. ``use_best`` both are ``False``. Default ``None``.
  355. Returns
  356. -------
  357. result: np.ndarray
  358. An array of shape ``(N,)``, where ``N`` is the number of test nodes. The prediction
  359. on given dataset.
  360. """
  361. self.fit(
  362. dataset=dataset,
  363. time_limit=time_limit,
  364. inplace=inplace,
  365. train_split=train_split,
  366. val_split=val_split,
  367. evaluation_method=evaluation_method,
  368. seed=seed,
  369. )
  370. return self.predict(
  371. dataset=dataset,
  372. inplaced=inplace,
  373. inplace=inplace,
  374. use_ensemble=use_ensemble,
  375. use_best=use_best,
  376. name=name,
  377. )
  378. def predict_proba(
  379. self,
  380. dataset=None,
  381. inplaced=False,
  382. inplace=False,
  383. use_ensemble=True,
  384. use_best=True,
  385. name=None,
  386. mask="test",
  387. ) -> np.ndarray:
  388. """
  389. Predict the node probability.
  390. Parameters
  391. ----------
  392. dataset: autogl.data.Dataset or None
  393. The dataset needed to predict. If ``None``, will use the processed dataset
  394. passed to ``fit()`` instead. Default ``None``.
  395. inplaced: bool
  396. Whether the given dataset is processed. Only be effective when ``dataset``
  397. is not ``None``. If you pass the dataset to ``fit()`` with ``inplace=True``,
  398. and you pass the dataset again to this method, you should set this argument
  399. to ``True``. Otherwise ``False``. Default ``False``.
  400. inplace: bool
  401. Whether we process the given dataset in inplace manner. Default ``False``.
  402. Set it to True if you want to save memory by modifying the given dataset directly.
  403. use_ensemble: bool
  404. Whether to use ensemble to do the predict. Default ``True``.
  405. use_best: bool
  406. Whether to use the best single model to do the predict. Will only be effective when
  407. ``use_ensemble`` is ``False``. Default ``True``.
  408. name: str or None
  409. The name of model used to predict. Will only be effective when ``use_ensemble`` and
  410. ``use_best`` both are ``False``. Default ``None``.
  411. mask: str
  412. The data split to give prediction on. Default ``test``.
  413. Returns
  414. -------
  415. result: np.ndarray
  416. An array of shape ``(N,C,)``, where ``N`` is the number of test nodes and ``C`` is
  417. the number of classes. The prediction on given dataset.
  418. """
  419. if dataset is None:
  420. dataset = self.dataset
  421. elif not inplaced:
  422. if self.feature_module is not None:
  423. dataset = self.feature_module.transform(dataset, inplace=inplace)
  424. if use_ensemble:
  425. LOGGER.info("Ensemble argument on, will try using ensemble model.")
  426. if not use_ensemble and use_best:
  427. LOGGER.info(
  428. "Ensemble argument off and best argument on, will try using best model."
  429. )
  430. if (use_ensemble and self.ensemble_module is not None) or (
  431. not use_best and name == "ensemble"
  432. ):
  433. # we need to get all the prediction of every model trained
  434. predict_result = []
  435. names = []
  436. for model_name in self.trained_models:
  437. predict_result.append(
  438. self._predict_proba_by_name(dataset, model_name, mask)
  439. )
  440. names.append(model_name)
  441. return self.ensemble_module.ensemble(predict_result, names)
  442. if use_ensemble and self.ensemble_module is None:
  443. LOGGER.warning(
  444. "Cannot use ensemble because no ensebmle module is given. "
  445. "Will use best model instead."
  446. )
  447. if use_best or (use_ensemble and self.ensemble_module is None):
  448. # just return the best model we have found
  449. best_model_name = self.leaderboard.get_best_model()
  450. return self._predict_proba_by_name(dataset, best_model_name, mask)
  451. if name is not None:
  452. # return model performance by name
  453. return self._predict_proba_by_name(dataset, name, mask)
  454. LOGGER.error(
  455. "No model name is given while ensemble and best arguments are off."
  456. )
  457. raise ValueError(
  458. "You need to specify a model name if you do not want use ensemble and best model."
  459. )
  460. def _predict_proba_by_name(self, dataset, name, mask):
  461. self.trained_models[name].to(self.runtime_device)
  462. predicted = (
  463. self.trained_models[name]
  464. .predict_proba(convert_dataset(dataset), mask=mask)
  465. .detach()
  466. .cpu()
  467. .numpy()
  468. )
  469. self.trained_models[name].to(torch.device("cpu"))
  470. return predicted
  471. def predict(
  472. self,
  473. dataset=None,
  474. inplaced=False,
  475. inplace=False,
  476. use_ensemble=True,
  477. use_best=True,
  478. name=None,
  479. mask="test",
  480. ) -> np.ndarray:
  481. """
  482. Predict the node class number.
  483. Parameters
  484. ----------
  485. dataset: autogl.data.Dataset or None
  486. The dataset needed to predict. If ``None``, will use the processed dataset passed
  487. to ``fit()`` instead. Default ``None``.
  488. inplaced: bool
  489. Whether the given dataset is processed. Only be effective when ``dataset``
  490. is not ``None``. If you pass the dataset to ``fit()`` with ``inplace=True``, and
  491. you pass the dataset again to this method, you should set this argument to ``True``.
  492. Otherwise ``False``. Default ``False``.
  493. inplace: bool
  494. Whether we process the given dataset in inplace manner. Default ``False``.
  495. Set it to True if you want to save memory by modifying the given dataset directly.
  496. use_ensemble: bool
  497. Whether to use ensemble to do the predict. Default ``True``.
  498. use_best: bool
  499. Whether to use the best single model to do the predict. Will only be effective
  500. when ``use_ensemble`` is ``False``. Default ``True``.
  501. name: str or None
  502. The name of model used to predict. Will only be effective when ``use_ensemble``
  503. and ``use_best`` both are ``False``. Default ``None``.
  504. Returns
  505. -------
  506. result: np.ndarray
  507. An array of shape ``(N,)``, where ``N`` is the number of test nodes.
  508. The prediction on given dataset.
  509. """
  510. proba = self.predict_proba(
  511. dataset, inplaced, inplace, use_ensemble, use_best, name, mask
  512. )
  513. return np.argmax(proba, axis=1)
  514. def evaluate(self, dataset=None,
  515. inplaced=False,
  516. inplace=False,
  517. use_ensemble=True,
  518. use_best=True,
  519. name=None,
  520. mask="test",
  521. label=None,
  522. metric="acc"
  523. ):
  524. """
  525. Evaluate the given dataset.
  526. Parameters
  527. ----------
  528. dataset: torch_geometric.data.dataset.Dataset or None
  529. The dataset needed to predict. If ``None``, will use the processed dataset passed
  530. to ``fit()`` instead. Default ``None``.
  531. inplaced: bool
  532. Whether the given dataset is processed. Only be effective when ``dataset``
  533. is not ``None``. If you pass the dataset to ``fit()`` with ``inplace=True``, and
  534. you pass the dataset again to this method, you should set this argument to ``True``.
  535. Otherwise ``False``. Default ``False``.
  536. inplace: bool
  537. Whether we process the given dataset in inplace manner. Default ``False``. Set it to
  538. True if you want to save memory by modifying the given dataset directly.
  539. use_ensemble: bool
  540. Whether to use ensemble to do the predict. Default ``True``.
  541. use_best: bool
  542. Whether to use the best single model to do the predict. Will only be effective when
  543. ``use_ensemble`` is ``False``. Default ``True``.
  544. name: str or None
  545. The name of model used to predict. Will only be effective when ``use_ensemble`` and
  546. ``use_best`` both are ``False``. Default ``None``.
  547. mask: str
  548. The data split to give prediction on. Default ``test``.
  549. label: torch.Tensor (Optional)
  550. The groud truth label of the given predicted dataset split. If not passed, will extract
  551. labels from the input dataset.
  552. metric: str
  553. The metric to be used for evaluating the model. Default ``acc``.
  554. Returns
  555. -------
  556. score(s): (list of) evaluation scores
  557. the evaluation results according to the evaluator passed.
  558. """
  559. predicted = self.predict_proba(dataset, inplaced, inplace, use_ensemble, use_best, name, mask)
  560. if dataset is None:
  561. dataset = self.dataset
  562. if label is None:
  563. if mask == "all":
  564. masked_dataset = dataset
  565. else:
  566. masked_dataset = utils.graph_get_split(dataset, mask, False)
  567. label = get_dataset_labels(masked_dataset)
  568. evaluator = get_feval(metric)
  569. if isinstance(evaluator, Sequence):
  570. return [evals.evaluate(predicted, label) for evals in evaluator]
  571. return evaluator.evaluate(predicted, label)
  572. @classmethod
  573. def from_config(cls, path_or_dict, filetype="auto") -> "AutoGraphClassifier":
  574. """
  575. Load solver from config file.
  576. You can use this function to directly load a solver from predefined config dict
  577. or config file path. Currently, only support file type of ``json`` or ``yaml``,
  578. if you pass a path.
  579. Parameters
  580. ----------
  581. path_or_dict: str or dict
  582. The path to the config file or the config dictionary object
  583. filetype: str
  584. The filetype the given file if the path is specified. Currently only support
  585. ``json`` or ``yaml``. You can set to ``auto`` to automatically detect the file
  586. type (from file name). Default ``auto``.
  587. Returns
  588. -------
  589. solver: autogl.solver.AutoGraphClassifier
  590. The solver that is created from given file or dictionary.
  591. """
  592. assert filetype in ["auto", "yaml", "json"], (
  593. "currently only support yaml file or json file type, but get type "
  594. + filetype
  595. )
  596. if isinstance(path_or_dict, str):
  597. if filetype == "auto":
  598. if path_or_dict.endswith(".yaml") or path_or_dict.endswith(".yml"):
  599. filetype = "yaml"
  600. elif path_or_dict.endswith(".json"):
  601. filetype = "json"
  602. else:
  603. LOGGER.error(
  604. "cannot parse the type of the given file name, "
  605. "please manually set the file type"
  606. )
  607. raise ValueError(
  608. "cannot parse the type of the given file name, "
  609. "please manually set the file type"
  610. )
  611. if filetype == "yaml":
  612. path_or_dict = yaml.load(
  613. open(path_or_dict, "r").read(), Loader=yaml.FullLoader
  614. )
  615. else:
  616. path_or_dict = json.load(open(path_or_dict, "r"))
  617. # load the dictionary
  618. path_or_dict = deepcopy(path_or_dict)
  619. solver = cls(None, [], None, None)
  620. fe_list = path_or_dict.pop("feature", None)
  621. if fe_list is not None:
  622. fe_list_ele = []
  623. for feature_engineer in fe_list:
  624. name = feature_engineer.pop("name")
  625. if name is not None:
  626. fe_list_ele.append(FEATURE_DICT[name](**feature_engineer))
  627. if fe_list_ele != []:
  628. solver.set_feature_module(fe_list_ele)
  629. models = path_or_dict.pop("models", [{"name": "gcn"}, {"name": "gat"}, {"name": "sage"}, {"name": "gin"}])
  630. # models should be a list of model
  631. # with each element in two cases
  632. # * a dict describing a certain model
  633. # * a dict containing {"encoder": encoder, "decoder": decoder}
  634. model_hp_space = [
  635. _parse_model_hp(model) for model in models
  636. ]
  637. model_list = [
  638. _initialize_single_model(model) for model in models
  639. ]
  640. trainer = path_or_dict.pop("trainer", None)
  641. default_trainer = "GraphClassificationFull"
  642. trainer_space = None
  643. if isinstance(trainer, dict):
  644. # global default
  645. default_trainer = trainer.pop("name", "GraphClassificationFull")
  646. trainer_space = _parse_hp_space(trainer.pop("hp_space", None))
  647. default_kwargs = {"num_features": None, "num_classes": None}
  648. default_kwargs.update(trainer)
  649. default_kwargs["init"] = False
  650. for i in range(len(model_list)):
  651. model = model_list[i]
  652. trainer_wrapper = TRAINER_DICT[default_trainer](
  653. model=model, **default_kwargs
  654. )
  655. model_list[i] = trainer_wrapper
  656. elif isinstance(trainer, list):
  657. # sequential trainer definition
  658. assert len(trainer) == len(
  659. model_list
  660. ), "The number of trainer and model does not match"
  661. trainer_space = []
  662. for i in range(len(model_list)):
  663. train, model = trainer[i], model_list[i]
  664. default_trainer = train.pop("name", "GraphClassificationFull")
  665. trainer_space.append(_parse_hp_space(train.pop("hp_space", None)))
  666. default_kwargs = {"num_features": None, "num_classes": None}
  667. default_kwargs.update(train)
  668. default_kwargs["init"] = False
  669. trainer_wrap = TRAINER_DICT[default_trainer](
  670. model=model, **default_kwargs
  671. )
  672. model_list[i] = trainer_wrap
  673. solver.set_graph_models(
  674. model_list, default_trainer, trainer_space, model_hp_space
  675. )
  676. hpo_dict = path_or_dict.pop("hpo", {"name": "anneal"})
  677. if hpo_dict is not None:
  678. name = hpo_dict.pop("name")
  679. solver.set_hpo_module(name, **hpo_dict)
  680. ensemble_dict = path_or_dict.pop("ensemble", {"name": "voting"})
  681. if ensemble_dict is not None:
  682. name = ensemble_dict.pop("name")
  683. solver.set_ensemble_module(name, **ensemble_dict)
  684. return solver