
eval.py

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""
Eval DeepSpeech2
"""
import argparse
import json
import pickle
import numpy as np
from src.config import eval_config
from src.deepspeech2 import DeepSpeechModel, PredictWithSoftmax
from src.dataset import create_dataset
from src.greedydecoder import MSGreedyDecoder
from mindspore import context
from mindspore.train.serialization import load_checkpoint, load_param_into_net

context.set_context(mode=context.GRAPH_MODE, device_target="GPU", save_graphs=False)

parser = argparse.ArgumentParser(description='DeepSpeech evaluation')
parser.add_argument('--bidirectional', action="store_false", default=True, help='Use bidirectional RNN')
parser.add_argument('--pretrain_ckpt', type=str, default='', help='Pretrained checkpoint path')
args = parser.parse_args()
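
# Usage sketch (the checkpoint path is an illustrative placeholder):
#   python eval.py --pretrain_ckpt /path/to/deepspeech2.ckpt
# Dataset, model and decoder settings come from eval_config in src/config.py.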

if __name__ == '__main__':
    config = eval_config
    with open(config.DataConfig.labels_path) as label_file:
        labels = json.load(label_file)
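
    # Build the DeepSpeech2 acoustic model and wrap it with a softmax prediction head.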
    model = PredictWithSoftmax(DeepSpeechModel(batch_size=config.DataConfig.batch_size,
                                               rnn_hidden_size=config.ModelConfig.hidden_size,
                                               nb_layers=config.ModelConfig.hidden_layers,
                                               labels=labels,
                                               rnn_type=config.ModelConfig.rnn_type,
                                               audio_conf=config.DataConfig.SpectConfig,
                                               bidirectional=args.bidirectional))
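
    # Build the evaluation dataset from the test manifest (single device: rank 0, group size 1).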
    ds_eval = create_dataset(audio_conf=config.DataConfig.SpectConfig,
                             manifest_filepath=config.DataConfig.test_manifest,
                             labels=labels, normalize=True, train_mode=False,
                             batch_size=config.DataConfig.batch_size, rank=0, group_size=1)
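
    # Load the pretrained weights from the given checkpoint into the network.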
    param_dict = load_checkpoint(args.pretrain_ckpt)
    load_param_into_net(model, param_dict)
    print('Successfully loaded the pre-trained model')

    if config.LMConfig.decoder_type == 'greedy':
        decoder = MSGreedyDecoder(labels=labels, blank_index=labels.index('_'))
    else:
        raise NotImplementedError("Only greedy decoder is supported now")
    target_decoder = MSGreedyDecoder(labels, blank_index=labels.index('_'))

    model.set_train(False)
    total_cer, total_wer, num_tokens, num_chars = 0, 0, 0, 0
    output_data = []
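
    # Accumulate error counts over the whole evaluation set, one batch at a time.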
    for data in ds_eval.create_dict_iterator():
        inputs, input_length, target_indices, targets = data['inputs'], data['input_length'], \
                                                         data['target_indices'], data['label_values']
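
        # Regroup the flattened label values into one target sequence per utterance,
        # using the utterance index stored in the first column of target_indices.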
        split_targets = []
        start, count, last_id = 0, 0, 0
        target_indices, targets = target_indices.asnumpy(), targets.asnumpy()
        for i in range(np.shape(targets)[0]):
            if target_indices[i, 0] == last_id:
                count += 1
            else:
                split_targets.append(list(targets[start:count]))
                last_id += 1
                start = count
                count += 1
        split_targets.append(list(targets[start:]))  # do not drop the last utterance's labels

        out, output_sizes = model(inputs, input_length)
        decoded_output, _ = decoder.decode(out, output_sizes)
        target_strings = target_decoder.convert_to_strings(split_targets)
        if config.save_output is not None:
            output_data.append((out.asnumpy(), output_sizes.asnumpy(), target_strings))
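
        # Score each decoded transcript against its reference transcription.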
        for doutput, toutput in zip(decoded_output, target_strings):
            transcript, reference = doutput[0], toutput[0]
            wer_inst = decoder.wer(transcript, reference)
            cer_inst = decoder.cer(transcript, reference)
            total_wer += wer_inst
            total_cer += cer_inst
            num_tokens += len(reference.split())
            num_chars += len(reference.replace(' ', ''))
            if config.verbose:
                print("Ref:", reference.lower())
                print("Hyp:", transcript.lower())
                print("WER:", float(wer_inst) / len(reference.split()),
                      "CER:", float(cer_inst) / len(reference.replace(' ', '')), "\n")
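
    # Report corpus-level error rates as percentages.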
    wer = float(total_wer) / num_tokens
    cer = float(total_cer) / num_chars
    print('Test Summary \t'
          'Average WER {wer:.3f}\t'
          'Average CER {cer:.3f}\t'.format(wer=wer * 100, cer=cer * 100))

    if config.save_output is not None:
        with open(config.save_output + '.bin', 'wb') as output:
            pickle.dump(output_data, output)