
model.py
import torch
import torch.nn as nn


class HAN(nn.Module):
    """Hierarchical Attention Network: a word-level attention encoder,
    a sentence-level attention encoder, and a linear classifier."""

    def __init__(self, input_size, output_size,
                 word_hidden_size, word_num_layers, word_context_size,
                 sent_hidden_size, sent_num_layers, sent_context_size):
        super(HAN, self).__init__()
        self.word_layer = AttentionNet(input_size,
                                       word_hidden_size,
                                       word_num_layers,
                                       word_context_size)
        # The word-level GRU is bidirectional, so each sentence vector
        # has dimension 2 * word_hidden_size.
        self.sent_layer = AttentionNet(2 * word_hidden_size,
                                       sent_hidden_size,
                                       sent_num_layers,
                                       sent_context_size)
        self.output_layer = nn.Linear(2 * sent_hidden_size, output_size)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x, level='w'):
        # The input is a sequence of vectors:
        # level == 'w': a sequence of word vectors (one sentence);
        # level == 's': a sequence of sentence vectors (one document).
        if level == 's':
            v = self.sent_layer(x)
            output = self.softmax(self.output_layer(v))
            return output
        elif level == 'w':
            s = self.word_layer(x)
            return s
        else:
            raise ValueError("unknown level: expected 'w' or 's'")


class AttentionNet(nn.Module):
    """Bidirectional GRU encoder with a learned context vector for attention."""

    def __init__(self, input_size, gru_hidden_size, gru_num_layers, context_vec_size):
        super(AttentionNet, self).__init__()
        self.input_size = input_size
        self.gru_hidden_size = gru_hidden_size
        self.gru_num_layers = gru_num_layers
        self.context_vec_size = context_vec_size
        # Encoder
        self.gru = nn.GRU(input_size=input_size,
                          hidden_size=gru_hidden_size,
                          num_layers=gru_num_layers,
                          batch_first=False,
                          bidirectional=True)
        # Attention
        self.fc = nn.Linear(2 * gru_hidden_size, context_vec_size)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=0)  # normalize over the sequence dimension
        # Context vector, learned jointly with the rest of the network
        self.context_vec = nn.Parameter(torch.Tensor(context_vec_size, 1))
        self.context_vec.data.uniform_(-0.1, 0.1)

    def forward(self, inputs):
        # inputs: (seq_len, input_size); add a batch dimension of 1
        inputs = torch.unsqueeze(inputs, 1)                  # (seq_len, 1, input_size)
        h_t, hidden = self.gru(inputs)                       # (seq_len, 1, 2 * gru_hidden_size)
        h_t = torch.squeeze(h_t, 1)                          # (seq_len, 2 * gru_hidden_size)
        u = self.tanh(self.fc(h_t))                          # (seq_len, context_vec_size)
        alpha = self.softmax(torch.mm(u, self.context_vec))  # (seq_len, 1) attention weights
        output = torch.mm(alpha.t(), h_t)                    # (1, 2 * gru_hidden_size) weighted sum
        return output
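For context, here is a minimal sketch of how the two levels compose at inference time: each sentence is first encoded word by word at level 'w', the resulting sentence vectors are stacked, and the stack is classified at level 's'. The model sizes and the toy document below are assumptions for illustration only; the file above does not include a driver loop.

import torch

# Hypothetical sizes, chosen only to make the shapes line up.
model = HAN(input_size=200, output_size=5,
            word_hidden_size=50, word_num_layers=1, word_context_size=100,
            sent_hidden_size=50, sent_num_layers=1, sent_context_size=100)

# A toy document: 3 sentences of 7 word vectors each.
doc = [torch.randn(7, 200) for _ in range(3)]

# Word level: encode each sentence into a (1, 2 * word_hidden_size) vector.
sent_vecs = torch.cat([model(sent, level='w') for sent in doc], dim=0)

# Sentence level: attend over the sentence vectors and classify the document.
probs = model(sent_vecs, level='s')  # shape (1, output_size)

Note that the model processes one document at a time with an implicit batch size of 1, which is why the word-level outputs can simply be concatenated along dimension 0 before the sentence-level pass.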

A lightweight natural language processing (NLP) toolkit that aims to reduce the engineering boilerplate in user projects, such as data-processing loops, training loops, and multi-GPU execution.