You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_batchtospace_op.py 3.0 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384
  1. # Copyright 2021 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. import numpy as np
  16. import pytest
  17. import mindspore.context as context
  18. import mindspore.nn as nn
  19. import mindspore.ops.operations.array_ops as P
  20. from mindspore import Tensor
  21. from mindspore.common.api import ms_function
  22. from mindspore.common.initializer import initializer
  23. from mindspore.common.parameter import Parameter
  24. class BatchToSpaceNet(nn.Cell):
  25. def __init__(self, nptype, block_size=2, input_shape=(4, 1, 2, 2)):
  26. super(BatchToSpaceNet, self).__init__()
  27. self.BatchToSpace = P.BatchToSpace(block_size=block_size, crops=[[0, 0], [0, 0]])
  28. input_size = 1
  29. for i in input_shape:
  30. input_size = input_size*i
  31. data_np = np.arange(input_size).reshape(input_shape).astype(nptype)
  32. self.x1 = Parameter(initializer(Tensor(data_np), input_shape), name='x1')
  33. @ms_function
  34. def construct(self):
  35. y1 = self.BatchToSpace(self.x1)
  36. return y1
  37. def BatchToSpace(nptype, block_size=2, input_shape=(4, 1, 2, 2)):
  38. context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
  39. input_size = 1
  40. for i in input_shape:
  41. input_size = input_size*i
  42. expect = np.array([[[[0, 4, 1, 5],
  43. [8, 12, 9, 13],
  44. [2, 6, 3, 7],
  45. [10, 14, 11, 15]]]]).astype(nptype)
  46. dts = BatchToSpaceNet(nptype, block_size, input_shape)
  47. output = dts()
  48. assert (output.asnumpy() == expect).all()
  49. def BatchToSpace_pynative(nptype, block_size=2, input_shape=(4, 1, 2, 2)):
  50. context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
  51. input_size = 1
  52. for i in input_shape:
  53. input_size = input_size*i
  54. expect = np.array([[[[0, 4, 1, 5],
  55. [8, 12, 9, 13],
  56. [2, 6, 3, 7],
  57. [10, 14, 11, 15]]]]).astype(nptype)
  58. dts = P.BatchToSpace(block_size=block_size, crops=[[0, 0], [0, 0]])
  59. arr_input = Tensor(np.arange(input_size).reshape(input_shape).astype(nptype))
  60. output = dts(arr_input)
  61. assert (output.asnumpy() == expect).all()
  62. @pytest.mark.level0
  63. @pytest.mark.platform_x86_gpu_training
  64. @pytest.mark.env_onecard
  65. def test_batchtospace_graph_float32():
  66. BatchToSpace(np.float32)
  67. @pytest.mark.level0
  68. @pytest.mark.platform_x86_gpu_training
  69. @pytest.mark.env_onecard
  70. def test_batchtospace_graph_float16():
  71. BatchToSpace(np.float16)