You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

test_row_tensor.py 18 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """
  16. @File : test_row_tensor.py
  17. @Author:
  18. @Date : 2020-06-08
  19. @Desc : test mindspore row_tensor's operation
  20. """
  21. import numpy as np
  22. import pytest
  23. import mindspore as ms
  24. import mindspore.nn as nn
  25. from mindspore.ops import composite as C
  26. from mindspore.ops import functional as F
  27. from mindspore.ops import operations as P
  28. from mindspore.ops.composite.multitype_ops.zeros_like_impl import zeros_like
  29. from mindspore.ops.primitive import constexpr, PrimitiveWithInfer, prim_attr_register
  30. from mindspore.ops._grad.grad_base import bprop_getters
  31. from mindspore import Tensor, RowTensor, context
  32. from mindspore.common.parameter import Parameter, ParameterTuple
  33. from mindspore.common import dtype as mstype
  34. from mindspore._checkparam import Validator as validator
  35. from mindspore._checkparam import Rel
  36. from mindspore.nn import Optimizer
  37. from mindspore.nn import TrainOneStepCell, WithLossCell
  38. from mindspore.nn.optim import Momentum
  39. from mindspore.train import Model
  40. from ....dataset_mock import MindData
@pytest.fixture(scope="module", autouse=True)
def setup_teardown():
    # Run every test in this module in graph mode with sparse support enabled,
    # then restore enable_sparse=False once the module finishes.
    context.set_context(mode=context.GRAPH_MODE, enable_sparse=True)
    yield
    context.set_context(enable_sparse=False)
# Shared primitive instances used by get_axis and the MySparseGatherV2 bprop below.
reduce_sum = P.ReduceSum()
unsorted_segment_sum = P.UnsortedSegmentSum()
transpose = P.Transpose()
shape_op = P.Shape()
reshape = P.Reshape()
size_op = P.Size()
invert_permutation = P.InvertPermutation()
logical_and = P.LogicalAnd()
  54. def get_axis(x):
  55. shape = shape_op(x)
  56. length = F.tuple_len(shape)
  57. perm = F.make_range(0, length)
  58. return perm
  59. class MSELoss(nn.Cell):
  60. def __init__(self):
  61. super(MSELoss, self).__init__()
  62. self.reduce_sum = P.ReduceSum()
  63. self.square = P.Square()
  64. self.reduce_mean = P.ReduceMean()
  65. def construct(self, data, label):
  66. diff = data - label
  67. return self.reduce_mean(self.square(diff), get_axis(diff))
  68. class MindDataSet(MindData):
  69. def __init__(self, dataset_types, dataset_shapes):
  70. super(MindDataSet, self).__init__(size=2, batch_size=32,
  71. np_types=dataset_types,
  72. output_shapes=dataset_shapes,
  73. input_indexs=(0, 1))
  74. def __next__(self):
  75. if self._size < self._iter_num:
  76. raise StopIteration
  77. self._iter_num += 1
  78. lst = []
  79. for shape_, type_ in zip(self._output_shapes, self._np_types):
  80. lst.append(Tensor(np.ones(shape_).astype(type_)))
  81. return tuple(lst)
  82. @constexpr
  83. def _generate_shape_index(out_shape, indices_shape, axis):
  84. out_rank = len(out_shape)
  85. ind_rank = len(indices_shape)
  86. if axis < 0:
  87. axis += out_rank - ind_rank + 1
  88. perm_part1 = tuple(range(axis, axis + ind_rank))
  89. index = tuple(range(out_rank))
  90. perm = perm_part1 + index[:axis] + index[axis + ind_rank:]
  91. return perm
  92. @constexpr
  93. def _generate_inverse_index(x_shape, axis):
  94. x_rank = len(x_shape)
  95. index = tuple(range(x_rank))
  96. if axis < 0:
  97. axis += x_rank
  98. perm = index[1:1 + axis] + (0,) + index[1 + axis:]
  99. return perm
# pylint: disable=W0231
class MySparseGatherV2(PrimitiveWithInfer):
    """
    For test
    """
    @prim_attr_register
    def __init__(self):
        """init index_select"""
        self.init_prim_io_names(inputs=['params', 'indices', 'axis'], outputs=['output'])

    def __infer__(self, params, indices, axis):
        # params must be a tensor, indices an integer tensor, and axis a
        # compile-time scalar int.
        validator.check_subclass("params", params['dtype'], mstype.tensor, self.name)
        validator.check_tensor_dtype_valid("indices", indices['dtype'], mstype.int_type, self.name)
        validator.check_subclass("axis", axis['dtype'], mstype.int_, self.name)
        axis_v = axis['value']
        params_shp = params['shape']
        rank = len(params_shp)
        # axis must lie in [-rank, rank); normalize negative values afterwards.
        validator.check_int_range(axis_v, -rank, rank, Rel.INC_LEFT, "axis", self.name)
        if axis_v < 0:
            axis_v += rank
        # Output shape: params shape with the gathered axis replaced by the
        # indices shape.
        out_shape = params_shp[:axis_v] + indices['shape'] + params_shp[axis_v + 1:]
        out = {'shape': out_shape,
               'dtype': params['dtype'],
               'value': None}
        return out
@bprop_getters.register(MySparseGatherV2)
def get_bprop_sparse_gather_v2(self):
    """Generate bprop for MySparseGatherV2"""
    def bprop(x, indices, axis, out, dout):
        x_shp = shape_op(x)
        if axis == 0:
            # Sparse fast path for axis 0: the gradient wrt `x` is expressed
            # as a RowTensor holding the dout rows at the gathered indices.
            indices_size = (size_op(indices),)
            x_tail_shp = x_shp[1:]
            values_shape = indices_size + x_tail_shp
            values = reshape(dout, values_shape)
            indices = reshape(indices, indices_size)
            return RowTensor(indices, values, x_shp), zeros_like(indices), zeros_like(axis)
        # Dense path for axis != 0: scatter-add dout back along `axis`.
        # Promote scalars to rank-1 so transpose/segment-sum are well-formed.
        if F.rank(dout) == 0:
            dout = P.ExpandDims()(dout, -1)
        if F.rank(indices) == 0:
            indices = P.ExpandDims()(indices, -1)
        out_shp = shape_op(dout)
        ind_shp = shape_op(indices)
        # Example: out_shape:(3,2,3) axis 1 -> (1,0,2)
        perm_1 = _generate_shape_index(out_shp, ind_shp, axis)
        values_transpose = transpose(dout, perm_1)
        # Accumulate the transposed rows into the gathered axis of x.
        params_grad = unsorted_segment_sum(values_transpose, indices, shape_op(x)[axis])
        # Example: out_shape:(3,2,3) axis 2 -> (1,2,0)
        perm_2 = _generate_inverse_index(x_shp, axis)
        params_grad = transpose(params_grad, perm_2)
        return params_grad, zeros_like(indices), zeros_like(axis)
    return bprop
# Multitype graph that dispatches the Adam update on gradient type
# (dense Tensor vs. sparse RowTensor).
adam_opt_for_map = C.MultitypeFuncGraph("adam_opt_for_map")


@adam_opt_for_map.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
                           "Tensor", "Tensor", "Tensor", "RowTensor", "Bool")
def _update_run_op_for_map_row_tensor(beta1, beta2, eps, lr, weight_decay_tensor, param,
                                      m, v, gradient, decay_flag):
    # Sparse branch used for testing: skip the Adam update entirely and just
    # return the gradient's dense values.
    return gradient.values
@adam_opt_for_map.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
                           "Tensor", "Tensor", "Tensor", "Tensor", "Bool")
def _update_run_op_for_map_tensor(beta1, beta2, eps, lr, weight_decay_tensor, param,
                                  m, v, gradient, decay_flag):
    # Dense AdamWeightDecay update for one parameter. Updates param/m/v in
    # place via F.assign and returns the new second moment.
    op_mul = P.Mul()
    op_square = P.Square()
    op_sqrt = P.Sqrt()
    op_cast = P.Cast()
    op_reshape = P.Reshape()
    op_shape = P.Shape()
    # Compute in float32 regardless of the stored parameter dtype.
    param_fp32 = op_cast(param, mstype.float32)
    m_fp32 = op_cast(m, mstype.float32)
    v_fp32 = op_cast(v, mstype.float32)
    gradient_fp32 = op_cast(gradient, mstype.float32)
    # First and second moment estimates: m = b1*m + (1-b1)*g, v = b2*v + (1-b2)*g^2.
    next_m = op_mul(beta1, m_fp32) + op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32) - beta1, gradient_fp32)
    next_v = op_mul(beta2, v_fp32) + op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32)
                                            - beta2, op_square(gradient_fp32))
    update = next_m / (op_sqrt(next_v) + eps)
    if decay_flag:
        # Decoupled weight decay added to the update direction (AdamW style).
        update = update + op_mul(weight_decay_tensor, param_fp32)
    update_with_lr = op_mul(lr, update)
    next_param = param_fp32 - op_reshape(update_with_lr, op_shape(param_fp32))
    # Chain the assignments through F.depend so graph mode keeps their order.
    next_v = F.depend(next_v, F.assign(param, next_param))
    next_v = F.depend(next_v, F.assign(m, next_m))
    next_v = F.depend(next_v, F.assign(v, next_v))
    return next_v
  183. def _check_param_value(beta1, beta2, eps, weight_decay, prim_name):
  184. """Check the type of inputs."""
  185. validator.check_value_type("beta1", beta1, [float], prim_name)
  186. validator.check_value_type("beta2", beta2, [float], prim_name)
  187. validator.check_value_type("eps", eps, [float], prim_name)
  188. validator.check_value_type("weight_dacay", weight_decay, [float], prim_name)
  189. validator.check_float_range(beta1, 0.0, 1.0, Rel.INC_NEITHER, "beta1", prim_name)
  190. validator.check_float_range(beta2, 0.0, 1.0, Rel.INC_NEITHER, "beta2", prim_name)
  191. validator.check_positive_float(eps, "eps", prim_name)
  192. validator.check_non_negative_float(weight_decay, "weight_decay", prim_name)
class AdamWeightDecaySparse(Optimizer):
    """AdamWeightDecay variant whose per-parameter update is dispatched by
    gradient type: dense tensors take the full Adam(W) update, RowTensor
    gradients take the sparse branch registered on `adam_opt_for_map`."""

    def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0,
                 decay_filter=lambda x: 'beta' not in x.name and 'gamma' not in x.name):
        super(AdamWeightDecaySparse, self).__init__(learning_rate, params)
        if self.is_group:
            raise RuntimeError(f"The {self.cls_name} optimizer cannot support group setting.")
        _check_param_value(beta1, beta2, eps, weight_decay, self.cls_name)
        # Hyper-parameters stored as 1-element float32 tensors for graph kernels.
        self.beta1 = Tensor(np.array([beta1]).astype(np.float32))
        self.beta2 = Tensor(np.array([beta2]).astype(np.float32))
        self.eps = Tensor(np.array([eps]).astype(np.float32))
        self.weight_decay_tensor = Tensor(np.array([weight_decay]).astype(np.float32))
        self.params = self.parameters
        # First/second moment accumulators, one per parameter, zero-initialized.
        self.moments1 = self.params.clone(prefix="adam_m", init='zeros')
        self.moments2 = self.params.clone(prefix="adam_v", init='zeros')
        # Decay applies to every parameter whose name lacks 'beta'/'gamma'
        # (i.e. skip norm scale/shift parameters by convention).
        self.decay_flag = tuple(decay_filter(x) for x in self.params)
        self.map = C.Map()

    def construct(self, gradients):
        lr = self.get_lr()
        # Apply the multitype update across all (param, m, v, grad, flag) tuples.
        updated_velocity = self.map(F.partial(adam_opt_for_map, self.beta1, self.beta2, self.eps, lr,
                                              self.weight_decay_tensor),
                                    self.params, self.moments1, self.moments2, gradients, self.decay_flag)
        return updated_velocity
  215. def test_row_tensor_make_row_tensor():
  216. class MakeRowTensor(nn.Cell):
  217. def __init__(self):
  218. super(MakeRowTensor, self).__init__()
  219. self.dense_shape = (3, 2)
  220. def construct(self, indices, values):
  221. ret = (RowTensor(indices, values, self.dense_shape),)
  222. return ret[0]
  223. indices = Tensor([1, 2])
  224. values = Tensor([[0, 0], [1, 2]], dtype=ms.float32)
  225. MakeRowTensor()(indices, values)
  226. class RowTensorGetAttr(nn.Cell):
  227. def __init__(self, dense_shape):
  228. super(RowTensorGetAttr, self).__init__()
  229. self.dense_shape = dense_shape
  230. def construct(self, indices, values):
  231. x = RowTensor(indices, values, self.dense_shape)
  232. return x.values, x.indices, x.dense_shape
  233. def test_row_tensor_attr():
  234. indices = Tensor([0])
  235. values = Tensor([[1, 2]], dtype=ms.float32)
  236. RowTensorGetAttr((3, 2))(indices, values)
  237. def test_row_tensor_sparse_gatherv2_grad_all():
  238. grad_all = C.GradOperation(get_all=True)
  239. class GradWrap(nn.Cell):
  240. def __init__(self, network):
  241. super(GradWrap, self).__init__()
  242. self.network = network
  243. def construct(self, x, y):
  244. grad = grad_all(self.network)(x, y)
  245. return grad[0].indices, grad[0].values, grad[0].dense_shape
  246. class SparseGatherV2(nn.Cell):
  247. def __init__(self):
  248. super(SparseGatherV2, self).__init__()
  249. self.sparse_gatherv2 = MySparseGatherV2()
  250. self.axis = 0
  251. def construct(self, params, indices):
  252. return self.sparse_gatherv2(params, indices, self.axis)
  253. params = Tensor(np.ones([3, 1, 2]).astype(np.int32))
  254. indices = Tensor(np.array([0, 1]).astype(np.int32))
  255. GradWrap(SparseGatherV2())(params, indices)
  256. def test_row_tensor_sparse_gatherv2_grad_with_pram():
  257. grad_by_list = C.GradOperation(get_by_list=True)
  258. class GradWrap(nn.Cell):
  259. def __init__(self, network):
  260. super(GradWrap, self).__init__()
  261. self.network = network
  262. self.weights = ParameterTuple(filter(lambda x: x.requires_grad, network.get_parameters()))
  263. def construct(self, x):
  264. weights = self.weights
  265. grad = grad_by_list(self.network, weights)(x)
  266. x = grad[0]
  267. return x.values, x.indices, x.dense_shape
  268. class SparseGatherV2(nn.Cell):
  269. def __init__(self):
  270. super(SparseGatherV2, self).__init__()
  271. self.sparse_gatherv2 = MySparseGatherV2()
  272. self.axis = 0
  273. self.params = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.int32)), name="params")
  274. def construct(self, indices):
  275. return self.sparse_gatherv2(self.params, indices, self.axis)
  276. indices = Tensor(np.array([0, 1]).astype(np.int32))
  277. network = GradWrap(SparseGatherV2())
  278. network(indices)
  279. def test_row_tensor_env_get():
  280. class Loss(nn.Cell):
  281. def __init__(self):
  282. super(Loss, self).__init__()
  283. def construct(self, base, target):
  284. return base
  285. class NetWithSparseGatherV2(nn.Cell):
  286. def __init__(self):
  287. super(NetWithSparseGatherV2, self).__init__()
  288. self.w1 = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name="w1")
  289. self.w2 = Parameter(Tensor(np.ones([2, 1, 2]).astype(np.float32)), name="w2")
  290. self.gatherv2 = MySparseGatherV2()
  291. self.axis = 0
  292. def construct(self, indices):
  293. return self.gatherv2(self.w1, indices, self.axis) * self.w2
  294. inputs = Tensor(np.array([0, 1]).astype(np.int32))
  295. label = Tensor(np.zeros([2, 1, 2]).astype(np.float32))
  296. net = NetWithSparseGatherV2()
  297. net.set_train()
  298. loss = Loss()
  299. optimizer = AdamWeightDecaySparse(net.trainable_params())
  300. net_with_loss = WithLossCell(net, loss)
  301. train_network = TrainOneStepCell(net_with_loss, optimizer)
  302. train_network(inputs, label)
  303. def test_row_tensor_model_train():
  304. class Net(nn.Cell):
  305. def __init__(self, in_features, out_features):
  306. super(Net, self).__init__()
  307. self.weight = Parameter(Tensor(np.ones([out_features, in_features]).astype(np.float32)), name="weight")
  308. self.add = P.Add()
  309. self.cast = P.Cast()
  310. self.flag = True
  311. def construct(self, inputs, label):
  312. x = self.add(inputs, self.weight)
  313. if self.flag:
  314. x = self.cast(x, mstype.float32)
  315. return x
  316. dataset_types = (np.float32, np.float32)
  317. dataset_shapes = ((16, 16), (16, 16))
  318. dataset = MindDataSet(dataset_types, dataset_shapes)
  319. net = Net(16, 16)
  320. net.set_train()
  321. optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
  322. model = Model(net, optimizer=optimizer)
  323. model.train(2, dataset, dataset_sink_mode=False)
  324. def test_row_tensor_values_dim_greater_than_dense_shape_dim():
  325. indices = Tensor(np.array([0, 1], dtype=np.int32))
  326. values = Tensor(np.random.randn(2, 4, 5).astype(np.float32))
  327. dense_shape = (3, 4)
  328. with pytest.raises(TypeError):
  329. RowTensorGetAttr(dense_shape)(indices, values)
  330. def test_row_tensor_values_dim_less_than_dense_shape_dim():
  331. indices = Tensor(np.array([0, 1], dtype=np.int32))
  332. values = Tensor(np.random.randn(2, 4).astype(np.float32))
  333. dense_shape = (3, 4, 5)
  334. with pytest.raises(TypeError):
  335. RowTensorGetAttr(dense_shape)(indices, values)
  336. def test_row_tensor_value_and_dense_shape_illegal():
  337. indices = Tensor(np.array([0, 1], dtype=np.int32))
  338. values = Tensor(np.random.randn(2, 4).astype(np.float32))
  339. dense_shape = (3, 5)
  340. with pytest.raises(TypeError):
  341. RowTensorGetAttr(dense_shape)(indices, values)
  342. class RowTensorValuesDouble(nn.Cell):
  343. def __init__(self):
  344. super().__init__()
  345. def construct(self, x):
  346. indices = x.indices
  347. values = x.values * 2
  348. dense_shape = x.dense_shape
  349. return RowTensor(indices, values, dense_shape)
  350. class RowTensorValuesAdd2(nn.Cell):
  351. def __init__(self):
  352. super().__init__()
  353. def construct(self, x):
  354. indices = x.indices
  355. values = x.values + 2
  356. dense_shape = x.dense_shape
  357. return RowTensor(indices, values, dense_shape)
  358. class RowTensorWithControlIf(nn.Cell):
  359. def __init__(self, dense_shape):
  360. super().__init__()
  361. self.op1 = RowTensorValuesDouble()
  362. self.op2 = RowTensorValuesAdd2()
  363. self.dense_shape = dense_shape
  364. def construct(self, a, b, indices, values):
  365. x = RowTensor(indices, values, self.dense_shape)
  366. if a > b:
  367. x = self.op1(x)
  368. else:
  369. x = self.op2(x)
  370. return x.indices, x.values
  371. def test_row_tensor_with_control_flow_if():
  372. a = Tensor(np.array(0).astype(np.int32))
  373. b = Tensor(np.array(2).astype(np.int32))
  374. indices = Tensor(np.array([0, 2]).astype(np.int32))
  375. values = Tensor(np.ones([2, 2]).astype(np.float32))
  376. dense_shape = (5, 2)
  377. net = RowTensorWithControlIf(dense_shape)
  378. net(a, b, indices, values)
  379. class EmbeddingLookUpBnNet(nn.Cell):
  380. def __init__(self, vocab_size, embedding_size, target='CPU'):
  381. super().__init__()
  382. self.embedding_lookup = nn.EmbeddingLookup(vocab_size, embedding_size, param_init='ones', target=target)
  383. self.bn = nn.BatchNorm2d(num_features=3)
  384. self.mul = P.Mul()
  385. self.reshape = P.Reshape()
  386. self.relu = nn.PReLU()
  387. def construct(self, indices):
  388. x = self.embedding_lookup(indices)
  389. x = self.reshape(x, (2, 3, 2, 2))
  390. x = self.relu(x)
  391. x = self.bn(x)
  392. return x
  393. def test_embedding_lookup_with_mix_precision():
  394. data = Tensor(np.array([0, 1, 2]).astype(np.int32))
  395. label = Tensor(np.random.randn(*(2, 3, 2, 2)).astype(np.float32))
  396. net = EmbeddingLookUpBnNet(8, 8, target='CPU')
  397. criterion = nn.SoftmaxCrossEntropyWithLogits(reduction='mean')
  398. optimizer = nn.Adam(params=net.trainable_params(), learning_rate=0.1)
  399. optimizer.target = 'CPU'
  400. train_network = ms.amp.build_train_network(net, optimizer, criterion, level="O2")
  401. train_network.set_train()
  402. for _ in range(2):
  403. train_network(data, label)