You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they may include dashes ('-') and can be up to 35 characters long.

node_classifier.py 34 kB

5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854
"""
Auto Classifier for Node Classification
"""
import time
import json
from copy import deepcopy
import torch
import numpy as np
import yaml
from .base import BaseClassifier
from ..base import _parse_hp_space, _initialize_single_model
from ...module.feature import FEATURE_DICT
from ...module.model import MODEL_DICT, BaseModel
from ...module.train import TRAINER_DICT, BaseNodeClassificationTrainer
from ...module.train import get_feval
from ...module.nas.space import NAS_SPACE_DICT
from ...module.nas.algorithm import NAS_ALGO_DICT
from ...module.nas.estimator import NAS_ESTIMATOR_DICT, BaseEstimator
from ..utils import LeaderBoard, get_graph_from_dataset, get_graph_labels, get_graph_masks, get_graph_node_features, get_graph_node_number, set_seed, convert_dataset
from ...datasets import utils
from ...utils import get_logger

# Module-level logger shared by the whole solver.
LOGGER = get_logger("NodeClassifier")
class AutoNodeClassifier(BaseClassifier):
    """
    Auto Multi-class Graph Node Classifier.

    Used to automatically solve the node classification problems.

    Parameters
    ----------
    feature_module: autogl.module.feature.BaseFeatureEngineer or str or None
        The (name of) auto feature engineer used to process the given dataset. Default ``deepgl``.
        Disable feature engineer by setting it to ``None``.
    graph_models: list of autogl.module.model.BaseModel or list of str
        The (name of) models to be optimized as backbone. Default ``['gat', 'gcn']``.
    nas_algorithms: (list of) autogl.module.nas.algorithm.BaseNAS or str (Optional)
        The (name of) nas algorithms used. Default ``None``.
    nas_spaces: (list of) autogl.module.nas.space.BaseSpace or str (Optional)
        The (name of) nas spaces used. Default ``None``.
    nas_estimators: (list of) autogl.module.nas.estimator.BaseEstimator or str (Optional)
        The (name of) nas estimators used. Default ``None``.
    hpo_module: autogl.module.hpo.BaseHPOptimizer or str or None
        The (name of) hpo module used to search for best hyper parameters. Default ``anneal``.
        Disable hpo by setting it to ``None``.
    ensemble_module: autogl.module.ensemble.BaseEnsembler or str or None
        The (name of) ensemble module used to ensemble the multi-models found. Default ``voting``.
        Disable ensemble by setting it to ``None``.
    max_evals: int (Optional)
        If given, will set the number eval times the hpo module will use.
        Only be effective when hpo_module is ``str``. Default ``None``.
    default_trainer: str or list of str (Optional)
        The (name of) trainer(s) used to wrap the given models. Falls back to
        ``NodeClassificationFull`` when ``None``.
    trainer_hp_space: list of dict (Optional)
        trainer hp space or list of trainer hp spaces configuration.
        If a single trainer hp is given, will specify the hp space of trainer for every model.
        If a list of trainer hp is given, will specify every model with corresponding
        trainer hp space.
        Default ``None``.
    model_hp_spaces: list of list of dict (Optional)
        model hp space configuration.
        If given, will specify every hp space of every passed model. Default ``None``.
    size: int (Optional)
        The max models ensemble module will use. Default ``None``.
    device: torch.device or str
        The device where model will be running on. If set to ``auto``, will use gpu when available.
        You can also specify the device by directly giving ``gpu`` or ``cuda:0``, etc.
        Default ``auto``.
    """

    def __init__(
        self,
        feature_module=None,
        graph_models=("gat", "gcn"),
        nas_algorithms=None,
        nas_spaces=None,
        nas_estimators=None,
        hpo_module="anneal",
        ensemble_module="voting",
        max_evals=50,
        default_trainer=None,
        trainer_hp_space=None,
        model_hp_spaces=None,
        size=4,
        device="auto",
    ):
        # All heavy lifting (module resolution, device selection) is done by the
        # shared BaseClassifier constructor; this class only pins the default
        # trainer to node classification.
        super().__init__(
            feature_module=feature_module,
            graph_models=graph_models,
            nas_algorithms=nas_algorithms,
            nas_spaces=nas_spaces,
            nas_estimators=nas_estimators,
            hpo_module=hpo_module,
            ensemble_module=ensemble_module,
            max_evals=max_evals,
            default_trainer=default_trainer or "NodeClassificationFull",
            trainer_hp_space=trainer_hp_space,
            model_hp_spaces=model_hp_spaces,
            size=size,
            device=device,
        )
        # data to be kept when fit; predict()/predict_proba() fall back to this
        # remembered dataset when none is passed.
        self.dataset = None
    def _init_graph_module(
        self, graph_models, num_classes, num_features, feval, device, loss
    ) -> "AutoNodeClassifier":
        """
        Build ``self.graph_model_list`` from the user supplied specification.

        Each entry of ``graph_models`` may be one of four things:
        a model name (looked up in ``MODEL_DICT``), a ``BaseModel`` subclass,
        a ``BaseModel`` instance, or a ready ``BaseNodeClassificationTrainer``.
        Bare models are afterwards wrapped with the configured default trainer,
        and model/trainer hyper-parameter spaces are attached when given.
        Returns ``self`` for chaining.
        """
        # load graph network module
        self.graph_model_list = []
        if isinstance(graph_models, (list, tuple)):
            for model in graph_models:
                if isinstance(model, str):
                    # registered model name; constructed lazily (init=False)
                    if model in MODEL_DICT:
                        self.graph_model_list.append(
                            MODEL_DICT[model](
                                num_classes=num_classes,
                                num_features=num_features,
                                device=device,
                                init=False,
                            )
                        )
                    else:
                        raise KeyError("cannot find model %s" % (model))
                elif isinstance(model, type) and issubclass(model, BaseModel):
                    # model class passed directly; instantiate it ourselves
                    self.graph_model_list.append(
                        model(
                            num_classes=num_classes,
                            num_features=num_features,
                            device=device,
                            init=False,
                        )
                    )
                elif isinstance(model, BaseModel):
                    # setup the hp of num_classes and num_features
                    model.set_num_classes(num_classes)
                    model.set_num_features(num_features)
                    self.graph_model_list.append(model.to(device))
                elif isinstance(model, BaseNodeClassificationTrainer):
                    # receive a trainer list, put trainer to list
                    assert (
                        model.get_model() is not None
                    ), "Passed trainer should contain a model"
                    model.model.set_num_classes(num_classes)
                    model.model.set_num_features(num_features)
                    model.update_parameters(
                        num_classes=num_classes,
                        num_features=num_features,
                        loss=loss,
                        feval=feval,
                        device=device,
                    )
                    self.graph_model_list.append(model)
                else:
                    raise KeyError("cannot find graph network %s." % (model))
        else:
            raise ValueError(
                "need graph network to be (list of) str or a BaseModel class/instance, get",
                graph_models,
                "instead.",
            )
        # wrap all model_cls with specified trainer
        for i, model in enumerate(self.graph_model_list):
            # set model hp space
            # NOTE(review): assumes _model_hp_spaces (when given) has one entry
            # per model — a shorter list would raise IndexError here.
            if self._model_hp_spaces is not None:
                if self._model_hp_spaces[i] is not None:
                    if isinstance(model, BaseNodeClassificationTrainer):
                        model.model.hyper_parameter_space = self._model_hp_spaces[i]
                    else:
                        model.hyper_parameter_space = self._model_hp_spaces[i]
            # initialize trainer if needed
            if isinstance(model, BaseModel):
                # _default_trainer may be a single name or one name per model
                name = (
                    self._default_trainer
                    if isinstance(self._default_trainer, str)
                    else self._default_trainer[i]
                )
                model = TRAINER_DICT[name](
                    model=model,
                    num_features=num_features,
                    num_classes=num_classes,
                    loss=loss,
                    feval=feval,
                    device=device,
                    init=False,
                )
            # set trainer hp space
            if self._trainer_hp_space is not None:
                # a list of lists means per-model spaces; otherwise one shared space
                if isinstance(self._trainer_hp_space[0], list):
                    current_hp_for_trainer = self._trainer_hp_space[i]
                else:
                    current_hp_for_trainer = self._trainer_hp_space
                model.hyper_parameter_space = current_hp_for_trainer
            self.graph_model_list[i] = model
        return self
  188. def _init_nas_module(self, num_features, num_classes, feval, device, loss):
  189. for algo, space, estimator in zip(
  190. self.nas_algorithms, self.nas_spaces, self.nas_estimators
  191. ):
  192. estimator: BaseEstimator
  193. algo.to(device)
  194. space.instantiate(input_dim=num_features, output_dim=num_classes)
  195. estimator.setEvaluation(feval)
  196. estimator.setLossFunction(loss)
    # pylint: disable=arguments-differ
    def fit(
        self,
        dataset,
        time_limit=-1,
        inplace=False,
        train_split=None,
        val_split=None,
        balanced=True,
        evaluation_method="infer",
        seed=None,
    ) -> "AutoNodeClassifier":
        """
        Fit current solver on given dataset.

        Parameters
        ----------
        dataset: autogl.data.Dataset
            The dataset needed to fit on. This dataset must have only one graph.
        time_limit: int
            The time limit of the whole fit process (in seconds). If set below 0,
            will ignore time limit. Default ``-1``.
        inplace: bool
            Whether we process the given dataset in inplace manner. Default ``False``.
            Set it to True if you want to save memory by modifying the given dataset directly.
        train_split: float or int (Optional)
            The train ratio (in ``float``) or number (in ``int``) of dataset. If you want to
            use default train/val/test split in dataset, please set this to ``None``.
            Default ``None``.
        val_split: float or int (Optional)
            The validation ratio (in ``float``) or number (in ``int``) of dataset. If you want
            to use default train/val/test split in dataset, please set this to ``None``.
            Default ``None``.
        balanced: bool
            Whether to create the train/valid/test split in a balanced way.
            If set to ``True``, the train/valid will have the same number of different classes.
            Default ``True``.
        evaluation_method: (list of) str or autogl.module.train.evaluation
            A (list of) evaluation method for current solver. If ``infer``, will automatically
            determine. Default ``infer``.
        seed: int (Optional)
            The random seed. If set to ``None``, will run everything at random.
            Default ``None``.

        Returns
        -------
        self: autogl.solver.AutoNodeClassifier
            A reference of current solver.
        """
        set_seed(seed)
        # a negative limit means "no limit"; substitute one full day
        if time_limit < 0:
            time_limit = 3600 * 24
        time_begin = time.time()
        graph_data = get_graph_from_dataset(dataset, 0)
        all_labels = get_graph_labels(graph_data)
        # assumes labels are 0-based consecutive class ids, so max + 1 == #classes
        num_classes = all_labels.max().item() + 1
        # initialize leaderboard
        if evaluation_method == "infer":
            if hasattr(dataset, "metric"):
                evaluation_method = [dataset.metric]
            else:
                num_of_label = num_classes
                # binary problems are scored with auc, multi-class with accuracy
                if num_of_label == 2:
                    evaluation_method = ["auc"]
                else:
                    evaluation_method = ["acc"]
        assert isinstance(evaluation_method, list)
        evaluator_list = get_feval(evaluation_method)
        self.leaderboard = LeaderBoard(
            [e.get_eval_name() for e in evaluator_list],
            {e.get_eval_name(): e.is_higher_better() for e in evaluator_list},
        )
        # set up the dataset
        if train_split is not None and val_split is not None:
            size = get_graph_node_number(graph_data)
            if balanced:
                # values <= 1 are treated as ratios and converted to node counts
                train_split = (
                    train_split if train_split > 1 else int(train_split * size)
                )
                val_split = val_split if val_split > 1 else int(val_split * size)
                utils.random_splits_mask_class(
                    dataset,
                    num_train_per_class=train_split // num_classes,
                    num_val_per_class=val_split // num_classes,
                    seed=seed,
                )
            else:
                # values >= 1 are treated as node counts and converted to ratios
                train_split = train_split if train_split < 1 else train_split / size
                val_split = val_split if val_split < 1 else val_split / size
                utils.random_splits_mask(
                    dataset, train_ratio=train_split, val_ratio=val_split
                )
        else:
            assert get_graph_masks(graph_data, 'train') is not None and get_graph_masks(graph_data, 'val') is not None, (
                "The dataset has no default train/val split! Please manually pass "
                "train and val ratio."
            )
            LOGGER.info("Use the default train/val/test ratio in given dataset")
        # feature engineering
        if self.feature_module is not None:
            dataset = self.feature_module.fit_transform(dataset, inplace=inplace)
        # remember the (possibly transformed) dataset for later predict() calls
        self.dataset = dataset
        # check whether the dataset has features.
        # currently we only support graph classification with features.
        feat = get_graph_node_features(graph_data)
        assert feat is not None, (
            "Does not support fit on non node-feature dataset!"
            " Please add node features to dataset or specify feature engineers that generate"
            " node features."
        )
        num_features = feat.size(-1)
        # initialize graph networks
        self._init_graph_module(
            self.gml,
            num_features=num_features,
            num_classes=num_classes,
            feval=evaluator_list,
            device=self.runtime_device,
            loss="nll_loss" if not hasattr(dataset, "loss") else self.dataset.loss,
        )
        if self.nas_algorithms is not None:
            # perform neural architecture search
            self._init_nas_module(
                num_features=num_features,
                num_classes=num_classes,
                feval=evaluator_list,
                device=self.runtime_device,
                loss="nll_loss" if not hasattr(dataset, "loss") else dataset.loss,
            )
            # when a list of trainers is given, it must cover both the plain
            # graph models and the models produced by NAS
            assert not isinstance(self._default_trainer, list) or len(
                self.nas_algorithms
            ) == len(self._default_trainer) - len(
                self.graph_model_list
            ), "length of default trainer should match total graph models and nas models passed"
            # perform nas and add them to model list
            idx_trainer = len(self.graph_model_list)
            for algo, space, estimator in zip(
                self.nas_algorithms, self.nas_spaces, self.nas_estimators
            ):
                model = algo.search(space, convert_dataset(self.dataset), estimator)
                # insert model into default trainer
                if isinstance(self._default_trainer, list):
                    train_name = self._default_trainer[idx_trainer]
                    idx_trainer += 1
                else:
                    train_name = self._default_trainer
                if isinstance(train_name, str):
                    trainer = TRAINER_DICT[train_name](
                        model=model,
                        num_features=num_features,
                        num_classes=num_classes,
                        loss="nll_loss"
                        if not hasattr(dataset, "loss")
                        else dataset.loss,
                        feval=evaluator_list,
                        device=self.runtime_device,
                        init=False,
                    )
                else:
                    # a trainer instance was supplied; re-target it at the searched model
                    trainer = train_name
                    trainer.model = model
                    trainer.update_parameters(
                        num_features=num_features,
                        num_classes=num_classes,
                        loss="nll_loss"
                        if not hasattr(dataset, "loss")
                        else dataset.loss,
                        feval=evaluator_list,
                        device=self.runtime_device,
                    )
                self.graph_model_list.append(trainer)
        # train the models and tune hpo
        result_valid = []
        names = []
        for idx, model in enumerate(self.graph_model_list):
            # split the remaining time budget evenly among the models left to train
            time_for_each_model = (time_limit - time.time() + time_begin) / (
                len(self.graph_model_list) - idx
            )
            if self.hpo_module is None:
                model.initialize()
                model.train(convert_dataset(self.dataset), True)
                optimized = model
            else:
                optimized, _ = self.hpo_module.optimize(
                    trainer=model, dataset=convert_dataset(self.dataset), time_limit=time_for_each_model
                )
            # to save memory, all the trainer derived will be mapped to cpu
            optimized.to(torch.device("cpu"))
            name = str(optimized) + "_idx%d" % (idx)
            names.append(name)
            performance_on_valid, _ = optimized.get_valid_score(return_major=False)
            result_valid.append(optimized.get_valid_predict_proba().cpu().numpy())
            self.leaderboard.insert_model_performance(
                name,
                dict(
                    zip(
                        [e.get_eval_name() for e in evaluator_list],
                        performance_on_valid,
                    )
                ),
            )
            self.trained_models[name] = optimized
        # fit the ensemble model
        if self.ensemble_module is not None:
            performance = self.ensemble_module.fit(
                result_valid,
                all_labels[get_graph_masks(graph_data, 'val')].cpu().numpy(),
                names,
                evaluator_list,
                n_classes=num_classes,
            )
            self.leaderboard.insert_model_performance(
                "ensemble",
                dict(zip([e.get_eval_name() for e in evaluator_list], performance)),
            )
        return self
    def fit_predict(
        self,
        dataset,
        time_limit=-1,
        inplace=False,
        train_split=None,
        val_split=None,
        balanced=True,
        evaluation_method="infer",
        use_ensemble=True,
        use_best=True,
        name=None,
    ) -> np.ndarray:
        """
        Fit current solver on given dataset and return the predicted value.

        Parameters
        ----------
        dataset: torch_geometric.data.dataset.Dataset
            The dataset needed to fit on. This dataset must have only one graph.
        time_limit: int
            The time limit of the whole fit process (in seconds).
            If set below 0, will ignore time limit. Default ``-1``.
        inplace: bool
            Whether we process the given dataset in inplace manner. Default ``False``.
            Set it to True if you want to save memory by modifying the given dataset directly.
        train_split: float or int (Optional)
            The train ratio (in ``float``) or number (in ``int``) of dataset. If you want to
            use default train/val/test split in dataset, please set this to ``None``.
            Default ``None``.
        val_split: float or int (Optional)
            The validation ratio (in ``float``) or number (in ``int``) of dataset. If you want
            to use default train/val/test split in dataset, please set this to ``None``.
            Default ``None``.
        balanced: bool
            Whether to create the train/valid/test split in a balanced way.
            If set to ``True``, the train/valid will have the same number of different classes.
            Default ``True``.
        evaluation_method: (list of) str or autogl.module.train.evaluation
            A (list of) evaluation method for current solver. If ``infer``, will automatically
            determine. Default ``infer``.
        use_ensemble: bool
            Whether to use ensemble to do the predict. Default ``True``.
        use_best: bool
            Whether to use the best single model to do the predict. Will only be effective when
            ``use_ensemble`` is ``False``.
            Default ``True``.
        name: str or None
            The name of model used to predict. Will only be effective when ``use_ensemble`` and
            ``use_best`` both are ``False``.
            Default ``None``.

        Returns
        -------
        result: np.ndarray
            An array of shape ``(N,)``, where ``N`` is the number of test nodes. The prediction
            on given dataset.
        """
        self.fit(
            dataset=dataset,
            time_limit=time_limit,
            inplace=inplace,
            train_split=train_split,
            val_split=val_split,
            balanced=balanced,
            evaluation_method=evaluation_method,
        )
        return self.predict(
            dataset=dataset,
            # fit() has already run feature engineering on this dataset, so it
            # is passed to predict() as "already processed"
            inplaced=inplace,
            inplace=inplace,
            use_ensemble=use_ensemble,
            use_best=use_best,
            name=name,
        )
    def predict_proba(
        self,
        dataset=None,
        inplaced=False,
        inplace=False,
        use_ensemble=True,
        use_best=True,
        name=None,
        mask="test",
    ) -> np.ndarray:
        """
        Predict the node probability.

        Parameters
        ----------
        dataset: torch_geometric.data.dataset.Dataset or None
            The dataset needed to predict. If ``None``, will use the processed dataset passed
            to ``fit()`` instead. Default ``None``.
        inplaced: bool
            Whether the given dataset is processed. Only be effective when ``dataset``
            is not ``None``. If you pass the dataset to ``fit()`` with ``inplace=True``, and
            you pass the dataset again to this method, you should set this argument to ``True``.
            Otherwise ``False``. Default ``False``.
        inplace: bool
            Whether we process the given dataset in inplace manner. Default ``False``. Set it to
            True if you want to save memory by modifying the given dataset directly.
        use_ensemble: bool
            Whether to use ensemble to do the predict. Default ``True``.
        use_best: bool
            Whether to use the best single model to do the predict. Will only be effective when
            ``use_ensemble`` is ``False``. Default ``True``.
        name: str or None
            The name of model used to predict. Will only be effective when ``use_ensemble`` and
            ``use_best`` both are ``False``. Default ``None``.
        mask: str
            The data split to give prediction on. Default ``test``.

        Returns
        -------
        result: np.ndarray
            An array of shape ``(N,C,)``, where ``N`` is the number of test nodes and ``C`` is
            the number of classes. The prediction on given dataset.
        """
        if dataset is None:
            # fall back to the dataset remembered by fit()
            dataset = self.dataset
            assert dataset is not None, (
                "Please execute fit() first before" " predicting on remembered dataset"
            )
        elif not inplaced and self.feature_module is not None:
            # a raw dataset was given; run the fitted feature pipeline on it
            dataset = self.feature_module.transform(dataset, inplace=inplace)
        if use_ensemble:
            LOGGER.info("Ensemble argument on, will try using ensemble model.")
        if not use_ensemble and use_best:
            LOGGER.info(
                "Ensemble argument off and best argument on, will try using best model."
            )
        # branch order matters: ensemble first, then best model, then by name
        if (use_ensemble and self.ensemble_module is not None) or (
            not use_best and name == "ensemble"
        ):
            # we need to get all the prediction of every model trained
            predict_result = []
            names = []
            for model_name in self.trained_models:
                predict_result.append(
                    self._predict_proba_by_name(dataset, model_name, mask)
                )
                names.append(model_name)
            return self.ensemble_module.ensemble(predict_result, names)
        if use_ensemble and self.ensemble_module is None:
            LOGGER.warning(
                "Cannot use ensemble because no ensebmle module is given."
                " Will use best model instead."
            )
        if use_best or (use_ensemble and self.ensemble_module is None):
            # just return the best model we have found
            name = self.leaderboard.get_best_model()
            return self._predict_proba_by_name(dataset, name, mask)
        if name is not None:
            # return model performance by name
            return self._predict_proba_by_name(dataset, name, mask)
        LOGGER.error(
            "No model name is given while ensemble and best arguments are off."
        )
        raise ValueError(
            "You need to specify a model name if you do not want use ensemble and best model."
        )
  568. def _predict_proba_by_name(self, dataset, name, mask="test"):
  569. self.trained_models[name].to(self.runtime_device)
  570. predicted = (
  571. self.trained_models[name].predict_proba(convert_dataset(dataset), mask=mask).cpu().numpy()
  572. )
  573. self.trained_models[name].to(torch.device("cpu"))
  574. return predicted
  575. def predict(
  576. self,
  577. dataset=None,
  578. inplaced=False,
  579. inplace=False,
  580. use_ensemble=True,
  581. use_best=True,
  582. name=None,
  583. mask="test",
  584. ) -> np.ndarray:
  585. """
  586. Predict the node class number.
  587. Parameters
  588. ----------
  589. dataset: torch_geometric.data.dataset.Dataset or None
  590. The dataset needed to predict. If ``None``, will use the processed dataset passed
  591. to ``fit()`` instead. Default ``None``.
  592. inplaced: bool
  593. Whether the given dataset is processed. Only be effective when ``dataset``
  594. is not ``None``. If you pass the dataset to ``fit()`` with ``inplace=True``,
  595. and you pass the dataset again to this method, you should set this argument
  596. to ``True``. Otherwise ``False``. Default ``False``.
  597. inplace: bool
  598. Whether we process the given dataset in inplace manner. Default ``False``.
  599. Set it to True if you want to save memory by modifying the given dataset directly.
  600. use_ensemble: bool
  601. Whether to use ensemble to do the predict. Default ``True``.
  602. use_best: bool
  603. Whether to use the best single model to do the predict. Will only be effective
  604. when ``use_ensemble`` is ``False``. Default ``True``.
  605. name: str or None
  606. The name of model used to predict. Will only be effective when ``use_ensemble``
  607. and ``use_best`` both are ``False``. Default ``None``.
  608. mask: str
  609. The data split to give prediction on. Default ``test``.
  610. Returns
  611. -------
  612. result: np.ndarray
  613. An array of shape ``(N,)``, where ``N`` is the number of test nodes.
  614. The prediction on given dataset.
  615. """
  616. proba = self.predict_proba(
  617. dataset, inplaced, inplace, use_ensemble, use_best, name, mask
  618. )
  619. return np.argmax(proba, axis=1)
  620. @classmethod
  621. def from_config(cls, path_or_dict, filetype="auto") -> "AutoNodeClassifier":
  622. """
  623. Load solver from config file.
  624. You can use this function to directly load a solver from predefined config dict
  625. or config file path. Currently, only support file type of ``json`` or ``yaml``,
  626. if you pass a path.
  627. Parameters
  628. ----------
  629. path_or_dict: str or dict
  630. The path to the config file or the config dictionary object
  631. filetype: str
  632. The filetype the given file if the path is specified. Currently only support
  633. ``json`` or ``yaml``. You can set to ``auto`` to automatically detect the file
  634. type (from file name). Default ``auto``.
  635. Returns
  636. -------
  637. solver: autogl.solver.AutoGraphClassifier
  638. The solver that is created from given file or dictionary.
  639. """
  640. assert filetype in ["auto", "yaml", "json"], (
  641. "currently only support yaml file or json file type, but get type "
  642. + filetype
  643. )
  644. if isinstance(path_or_dict, str):
  645. if filetype == "auto":
  646. if path_or_dict.endswith(".yaml") or path_or_dict.endswith(".yml"):
  647. filetype = "yaml"
  648. elif path_or_dict.endswith(".json"):
  649. filetype = "json"
  650. else:
  651. LOGGER.error(
  652. "cannot parse the type of the given file name, "
  653. "please manually set the file type"
  654. )
  655. raise ValueError(
  656. "cannot parse the type of the given file name, "
  657. "please manually set the file type"
  658. )
  659. if filetype == "yaml":
  660. path_or_dict = yaml.load(
  661. open(path_or_dict, "r").read(), Loader=yaml.FullLoader
  662. )
  663. else:
  664. path_or_dict = json.load(open(path_or_dict, "r"))
  665. path_or_dict = deepcopy(path_or_dict)
  666. solver = cls(None, [], None, None)
  667. fe_list = path_or_dict.pop("feature", None)
  668. if fe_list is not None:
  669. fe_list_ele = []
  670. for feature_engineer in fe_list:
  671. name = feature_engineer.pop("name")
  672. if name is not None:
  673. fe_list_ele.append(FEATURE_DICT[name](**feature_engineer))
  674. if fe_list_ele != []:
  675. solver.set_feature_module(fe_list_ele)
  676. models = path_or_dict.pop("models", [{"name": "gcn"}, {"name": "gat"}])
  677. model_hp_space = [
  678. _parse_hp_space(model.pop("hp_space", None)) for model in models
  679. ]
  680. model_list = [
  681. _initialize_single_model(model.pop("name"), model) for model in models
  682. ]
  683. trainer = path_or_dict.pop("trainer", None)
  684. default_trainer = "NodeClassificationFull"
  685. trainer_space = None
  686. if isinstance(trainer, dict):
  687. # global default
  688. default_trainer = trainer.pop("name", "NodeClassificationFull")
  689. trainer_space = _parse_hp_space(trainer.pop("hp_space", None))
  690. default_kwargs = {"num_features": None, "num_classes": None}
  691. default_kwargs.update(trainer)
  692. default_kwargs["init"] = False
  693. for i in range(len(model_list)):
  694. model = model_list[i]
  695. trainer_wrap = TRAINER_DICT[default_trainer](
  696. model=model, **default_kwargs
  697. )
  698. model_list[i] = trainer_wrap
  699. elif isinstance(trainer, list):
  700. # sequential trainer definition
  701. assert len(trainer) == len(
  702. model_list
  703. ), "The number of trainer and model does not match"
  704. trainer_space = []
  705. for i in range(len(model_list)):
  706. train, model = trainer[i], model_list[i]
  707. default_trainer = train.pop("name", "NodeClassificationFull")
  708. trainer_space.append(_parse_hp_space(train.pop("hp_space", None)))
  709. default_kwargs = {"num_features": None, "num_classes": None}
  710. default_kwargs.update(train)
  711. default_kwargs["init"] = False
  712. trainer_wrap = TRAINER_DICT[default_trainer](
  713. model=model, **default_kwargs
  714. )
  715. model_list[i] = trainer_wrap
  716. solver.set_graph_models(
  717. model_list, default_trainer, trainer_space, model_hp_space
  718. )
  719. hpo_dict = path_or_dict.pop("hpo", {"name": "anneal"})
  720. if hpo_dict is not None:
  721. name = hpo_dict.pop("name")
  722. solver.set_hpo_module(name, **hpo_dict)
  723. ensemble_dict = path_or_dict.pop("ensemble", {"name": "voting"})
  724. if ensemble_dict is not None:
  725. name = ensemble_dict.pop("name")
  726. solver.set_ensemble_module(name, **ensemble_dict)
  727. nas_dict = path_or_dict.pop("nas", None)
  728. if nas_dict is not None:
  729. keys: set = set(nas_dict.keys())
  730. needed = {"space", "algorithm", "estimator"}
  731. if keys != needed:
  732. LOGGER.error("Key mismatch, we need %s, you give %s", needed, keys)
  733. raise KeyError("Key mismatch, we need %s, you give %s" % (needed, keys))
  734. spaces, algorithms, estimators = [], [], []
  735. for container, indexer, k in zip(
  736. [spaces, algorithms, estimators],
  737. [NAS_SPACE_DICT, NAS_ALGO_DICT, NAS_ESTIMATOR_DICT],
  738. ["space", "algorithm", "estimator"],
  739. ):
  740. configs = nas_dict[k]
  741. if isinstance(configs, list):
  742. for item in configs:
  743. container.append(indexer[item.pop("name")](**item))
  744. else:
  745. container.append(indexer[configs.pop("name")](**configs))
  746. solver.set_nas_module(algorithms, spaces, estimators)
  747. return solver