
enas_controller.py 8.2 kB

# Code Modified from https://github.com/carpedm20/ENAS-pytorch

"""A module with NAS controller-related code."""

import collections
import os

import torch
import torch.nn.functional as F

import fastNLP.automl.enas_utils as utils
from fastNLP.automl.enas_utils import Node


def _construct_dags(prev_nodes, activations, func_names, num_blocks):
    """Constructs a set of DAGs based on the actions, i.e., previous nodes and
    activation functions, sampled from the controller/policy pi.

    Args:
        prev_nodes: Previous node actions from the policy.
        activations: Activations sampled from the policy.
        func_names: Mapping from activation function names to functions.
        num_blocks: Number of blocks in the target RNN cell.

    Returns:
        A list of DAGs defined by the inputs.

    RNN cell DAGs are represented in the following way:

    1. Each element (node) in a DAG is a list of `Node`s.

    2. The `Node`s in the list dag[i] correspond to the subsequent nodes
       that take the output from node i as their own input.

    3. dag[-1] is the node that takes input from x^{(t)} and h^{(t - 1)}.
       dag[-1] always feeds dag[0].
       dag[-1] acts as if `w_xc`, `w_hc`, `w_xh` and `w_hh` are its
       weights.

    4. dag[N - 1] is the node that produces the hidden state passed to
       the next timestep. dag[N - 1] is also always a leaf node, and
       therefore is always averaged with the other leaf nodes and fed to
       the output decoder.
    """
    dags = []
    for nodes, func_ids in zip(prev_nodes, activations):
        dag = collections.defaultdict(list)

        # add first node
        dag[-1] = [Node(0, func_names[func_ids[0]])]
        dag[-2] = [Node(0, func_names[func_ids[0]])]
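        # The keys -1 and -2 are the two cell inputs, x^{(t)} and h^{(t-1)};
        # both point at block 0 with the same sampled activation.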

        # add following nodes
        for jdx, (idx, func_id) in enumerate(zip(nodes, func_ids[1:])):
            dag[utils.to_item(idx)].append(Node(jdx + 1, func_names[func_id]))

        leaf_nodes = set(range(num_blocks)) - dag.keys()

        # merge with avg
        for idx in leaf_nodes:
            dag[idx] = [Node(num_blocks, 'avg')]

        # This is actually y^{(t)}. h^{(t)} is node N - 1 in
        # the graph, where N is the number of nodes. I.e., h^{(t)} takes
        # only one other node as its input.
        # last h[t] node
        last_node = Node(num_blocks + 1, 'h[t]')
        dag[num_blocks] = [last_node]
        dags.append(dag)

    return dags
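
# A rough illustration of the structure `_construct_dags` returns (the sample
# values below are made up for this comment, not taken from the original
# file): with num_blocks=4, func_names=['tanh', 'ReLU', 'identity', 'sigmoid'],
# prev_nodes=[[0, 0, 1]] and activations=[[0, 1, 2, 3]], the single DAG is
#     dag[-1] = dag[-2] = [Node(0, 'tanh')]              # x_t / h_{t-1} -> block 0
#     dag[0]  = [Node(1, 'ReLU'), Node(2, 'identity')]   # block 0 feeds blocks 1 and 2
#     dag[1]  = [Node(3, 'sigmoid')]
#     dag[2]  = [Node(4, 'avg')]; dag[3] = [Node(4, 'avg')]   # leaves averaged
#     dag[4]  = [Node(5, 'h[t]')]                        # average becomes h_t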


class Controller(torch.nn.Module):
    """Based on
    https://github.com/pytorch/examples/blob/master/word_language_model/model.py

    RL controllers do not necessarily have much to do with
    language models.

    Base the controller RNN on the GRU from:
    https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
    """
    def __init__(self, num_blocks=4, controller_hid=100, cuda=False):
        torch.nn.Module.__init__(self)

        # `num_tokens` here is just the activation function
        # for every even step,
        self.shared_rnn_activations = ['tanh', 'ReLU', 'identity', 'sigmoid']
        self.num_tokens = [len(self.shared_rnn_activations)]
        self.controller_hid = controller_hid
        self.use_cuda = cuda
        self.num_blocks = num_blocks
        for idx in range(num_blocks):
            self.num_tokens += [idx + 1, len(self.shared_rnn_activations)]
        self.func_names = self.shared_rnn_activations

        num_total_tokens = sum(self.num_tokens)
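        # For the default num_blocks=4, num_tokens is [4, 1, 4, 2, 4, 3, 4, 4, 4]:
        # decoder sizes alternating between the 4 activation choices and the
        # number of candidate previous nodes (1, 2, 3, 4), so the shared
        # embedding has num_total_tokens == 30 rows.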

        self.encoder = torch.nn.Embedding(num_total_tokens,
                                          controller_hid)
        self.lstm = torch.nn.LSTMCell(controller_hid, controller_hid)

        # Perhaps these weights in the decoder should be
        # shared? At least for the activation functions, which all have the
        # same size.
        self.decoders = []
        for idx, size in enumerate(self.num_tokens):
            decoder = torch.nn.Linear(controller_hid, size)
            self.decoders.append(decoder)

        self._decoders = torch.nn.ModuleList(self.decoders)

        self.reset_parameters()
        self.static_init_hidden = utils.keydefaultdict(self.init_hidden)

        def _get_default_hidden(key):
            return utils.get_variable(
                torch.zeros(key, self.controller_hid),
                self.use_cuda,
                requires_grad=False)

        self.static_inputs = utils.keydefaultdict(_get_default_hidden)

    def reset_parameters(self):
        init_range = 0.1
        for param in self.parameters():
            param.data.uniform_(-init_range, init_range)
        for decoder in self.decoders:
            decoder.bias.data.fill_(0)

    def forward(self,  # pylint:disable=arguments-differ
                inputs,
                hidden,
                block_idx,
                is_embed):
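        """Runs one step of the controller LSTM and decodes the logits for
        decision `block_idx`; decisions alternate between activation-function
        and previous-node choices."""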
        if not is_embed:
            embed = self.encoder(inputs)
        else:
            embed = inputs

        hx, cx = self.lstm(embed, hidden)
        logits = self.decoders[block_idx](hx)

        logits /= 5.0

        # # exploration
        # if self.args.mode == 'train':
        #     logits = (2.5 * F.tanh(logits))

        return logits, (hx, cx)

    def sample(self, batch_size=1, with_details=False, save_dir=None):
        """Samples a set of `num_blocks` many computational nodes from the
        controller, where each node is made up of an activation function, and
        each node except the last also includes a previous node.
        """
        if batch_size < 1:
            raise Exception(f'Wrong batch_size: {batch_size} < 1')

        # [B, L, H]
        inputs = self.static_inputs[batch_size]
        hidden = self.static_init_hidden[batch_size]

        activations = []
        entropies = []
        log_probs = []
        prev_nodes = []

        # The RNN controller alternately outputs an activation,
        # followed by a previous node, for each block except the last one,
        # which only gets an activation function. The last node is the output
        # node, and its previous node is the average of all leaf nodes.
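        # With num_blocks=4 this loop makes 2*(4-1)+1 = 7 decisions
        # (block_idx 0..6): an activation for node 0, then a
        # (previous node, activation) pair for each of nodes 1-3.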
        for block_idx in range(2*(self.num_blocks - 1) + 1):
            logits, hidden = self.forward(inputs,
                                          hidden,
                                          block_idx,
                                          is_embed=(block_idx == 0))

            probs = F.softmax(logits, dim=-1)
            log_prob = F.log_softmax(logits, dim=-1)
            # .mean() for entropy?
            entropy = -(log_prob * probs).sum(1, keepdim=False)

            action = probs.multinomial(num_samples=1).data
            selected_log_prob = log_prob.gather(
                1, utils.get_variable(action, requires_grad=False))

            # why the [:, 0] here? Should it be .squeeze(), or
            # .view()? Same below with `action`.
            entropies.append(entropy)
            log_probs.append(selected_log_prob[:, 0])

            # 0: function, 1: previous node
            mode = block_idx % 2
            inputs = utils.get_variable(
                action[:, 0] + sum(self.num_tokens[:mode]),
                requires_grad=False)

            if mode == 0:
                activations.append(action[:, 0])
            elif mode == 1:
                prev_nodes.append(action[:, 0])

        prev_nodes = torch.stack(prev_nodes).transpose(0, 1)
        activations = torch.stack(activations).transpose(0, 1)

        dags = _construct_dags(prev_nodes,
                               activations,
                               self.func_names,
                               self.num_blocks)

        if save_dir is not None:
            for idx, dag in enumerate(dags):
                utils.draw_network(dag,
                                   os.path.join(save_dir, f'graph{idx}.png'))

        if with_details:
            return dags, torch.cat(log_probs), torch.cat(entropies)

        return dags

    def init_hidden(self, batch_size):
        zeros = torch.zeros(batch_size, self.controller_hid)
        return (utils.get_variable(zeros, self.use_cuda, requires_grad=False),
                utils.get_variable(zeros.clone(), self.use_cuda, requires_grad=False))
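

# Usage sketch (illustrative; not part of the original file). Assumes
# fastNLP's automl utilities are importable. The REINFORCE update that would
# consume `log_probs` and `entropies` is omitted here.
if __name__ == '__main__':
    controller = Controller(num_blocks=4, controller_hid=100, cuda=False)
    dags, log_probs, entropies = controller.sample(batch_size=2,
                                                   with_details=True)
    print(f'sampled {len(dags)} candidate DAGs')     # one DAG per batch element
    print(f'log_probs: {tuple(log_probs.shape)}')    # concatenated over decisions and batch
    print(f'entropies: {tuple(entropies.shape)}')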