
test_softmax_stitch.py

# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
from mindspore import Tensor
import mindspore.nn as nn
from mindspore.nn import Cell
from mindspore.ops import operations as P
import mindspore.ops.functional as F
import pytest

context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
# enable graph kernel optimization.
context.set_context(enable_graph_kernel=True)


class BertAttentionPiece(Cell):
    def __init__(self):
        super(BertAttentionPiece, self).__init__()
        self.add = P.Add()
        self.dropout = nn.Dropout(1 - 0.1)
        self.softmax = nn.Softmax()
        self.multiply_data = -10000.0
        self.sub = P.Sub()
        self.multiply = P.Mul()
        self.get_dtype = P.DType()
        self.cast = P.Cast()

    def construct(self, attention_mask, attention_scores):
        multiply_out = self.sub(self.cast(F.tuple_to_array((1.0,)), self.get_dtype(attention_scores)),
                                self.cast(attention_mask, self.get_dtype(attention_scores)))
        adder = self.multiply(multiply_out, self.multiply_data)
        attention_scores = self.add(adder, attention_scores)
        attention_probs = self.softmax(attention_scores)
        attention_probs = self.dropout(attention_probs)
        return attention_probs
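

# Reference sketch: a minimal NumPy version of what BertAttentionPiece.construct
# computes, assuming nn.Dropout acts as an identity when the cell is not in training.
# reference_attention_probs is illustrative only and is not used by the tests below.
def reference_attention_probs(attention_mask, attention_scores):
    # adder = (1 - mask) * -10000.0, then added onto the raw attention scores
    adder = (1.0 - attention_mask.astype(attention_scores.dtype)) * -10000.0
    scores = attention_scores + adder
    # numerically stable softmax over the last axis, mirroring nn.Softmax()'s default axis
    e = np.exp(scores - scores.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)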


def get_rtol_atol(dtype):
    if dtype == np.float16:
        return 1.e-3, 1.e-3
    return 1.e-4, 1.e-4


def compare_result(expect, output, dtype):
    rtol, atol = get_rtol_atol(dtype)
    if isinstance(expect, (list, tuple)):
        assert isinstance(output, (list, tuple)) and len(expect) == len(output)
        expect_list = list(expect)
        output_list = list(output)
        for e, o in zip(expect_list, output_list):
            assert np.allclose(e.asnumpy(), o.asnumpy(), rtol, atol, equal_nan=True)
    else:
        assert np.allclose(expect.asnumpy(), output.asnumpy(), rtol, atol, equal_nan=True)


def get_softmax_output(x, y, enable_stitch_fusion):
    # enable graph kernel stitch fusion.
    if enable_stitch_fusion:
        context.set_context(graph_kernel_flags="--enable_stitch_fusion=true")
    net = BertAttentionPiece()
    result = net(x, y)
    return result


def test_softmax(shape, dtype):
    """compare BertAttentionPiece outputs with and without stitch fusion"""
    np.random.seed(0)
    x = Tensor(np.random.normal(0, 1, shape).astype(dtype))
    y = Tensor(np.random.normal(0, 1, shape).astype(dtype))
    expect = get_softmax_output(x, y, False)
    output = get_softmax_output(x, y, True)
    compare_result(expect, output, dtype)


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_softmax_gpu():
    """test softmax stitch fusion on GPU in graph mode"""
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    test_softmax([64, 12, 128, 128], np.float16)
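

# Usage note: assuming a GPU build of MindSpore and pytest are installed, this case
# can be run directly, for example with
#     pytest -v test_softmax_stitch.py::test_softmax_gpu
# It passes when the stitch-fused output matches the unfused baseline within the
# tolerances returned by get_rtol_atol.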