
test_seq2seq_decoder.py: unit tests for fastNLP's TransformerSeq2SeqDecoder and LSTMSeq2SeqDecoder.

import unittest

import torch

from fastNLP import Vocabulary, seq_len_to_mask
from fastNLP.embeddings import StaticEmbedding
from fastNLP.modules import LSTMSeq2SeqDecoder, TransformerSeq2SeqDecoder


class TestTransformerSeq2SeqDecoder(unittest.TestCase):
    def test_case(self):
        vocab = Vocabulary().add_word_lst("This is a test .".split())
        vocab.add_word_lst("Another test !".split())
        # random embeddings (model_dir_or_name=None), as in the LSTM test below
        embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=10)

        encoder_output = torch.randn(2, 3, 10)  # (batch, src_len, d_model)
        src_seq_len = torch.LongTensor([3, 2])
        encoder_mask = seq_len_to_mask(src_seq_len)

        for flag in [True, False]:
            with self.subTest(bind_decoder_input_output_embed=flag):
                decoder = TransformerSeq2SeqDecoder(embed=embed, pos_embed=None,
                                                    d_model=10, num_layers=2, n_head=5,
                                                    dim_ff=20, dropout=0.1,
                                                    bind_decoder_input_output_embed=flag)
                state = decoder.init_state(encoder_output, encoder_mask)
                output = decoder(tokens=torch.randint(0, len(vocab), size=(2, 4)), state=state)
                self.assertEqual(output.size(), (2, 4, len(vocab)))


class TestLSTMDecoder(unittest.TestCase):
    def test_case(self):
        vocab = Vocabulary().add_word_lst("This is a test .".split())
        vocab.add_word_lst("Another test !".split())
        embed = StaticEmbedding(vocab, model_dir_or_name=None, embedding_dim=10)

        encoder_output = torch.randn(2, 3, 10)  # (batch, src_len, hidden_size)
        tgt_words_idx = torch.LongTensor([[1, 2, 3, 4], [2, 3, 0, 0]])  # 0 pads the shorter target
        src_seq_len = torch.LongTensor([3, 2])
        encoder_mask = seq_len_to_mask(src_seq_len)

        # cover every combination of weight tying and attention
        for flag in [True, False]:
            for attention in [True, False]:
                with self.subTest(bind_decoder_input_output_embed=flag, attention=attention):
                    decoder = LSTMSeq2SeqDecoder(embed=embed, num_layers=2, hidden_size=10,
                                                 dropout=0.3,
                                                 bind_decoder_input_output_embed=flag,
                                                 attention=attention)
                    state = decoder.init_state(encoder_output, encoder_mask)
                    output = decoder(tgt_words_idx, state)
                    self.assertEqual(tuple(output.size()), (2, 4, len(vocab)))
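
Both test cases can be run with the standard library runner, e.g. `python -m unittest test_seq2seq_decoder.py`.

Beyond the single forward pass the tests assert on, the same two calls (init_state plus the forward) support step-wise greedy decoding. The sketch below is illustrative and not part of the test file: it reuses a decoder, encoder_output, and encoder_mask built as in either test above, assumes token id 0 stands in for a start symbol, and relies on the fastNLP convention that the forward pass receives the full token prefix while the State object caches already-decoded steps.

state = decoder.init_state(encoder_output, encoder_mask)
tokens = torch.zeros(2, 1, dtype=torch.long)  # hypothetical <sos> column, one per batch row
for _ in range(4):  # fixed number of steps, for illustration only
    # scores cover the positions the state has not cached yet; the last row is the newest step
    scores = decoder(tokens=tokens, state=state)
    next_token = scores[:, -1].argmax(dim=-1, keepdim=True)  # greedy pick at the newest step
    tokens = torch.cat([tokens, next_token], dim=1)  # grow the prefix and feed it back in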