# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test nn ops """
import functools
import numpy as np

import mindspore
import mindspore.nn as nn
import mindspore.context as context
import mindspore.common.dtype as mstype
from mindspore import Tensor, Parameter
from mindspore.common.initializer import initializer
from mindspore.ops import Primitive
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.ops import prim_attr_register, PrimitiveWithInfer
from mindspore.ops.primitive import constexpr

from ..ut_filter import non_graph_engine
from ....mindspore_test_framework.mindspore_test import mindspore_test
from ....mindspore_test_framework.pipeline.forward.compile_forward \
    import pipeline_for_compile_forward_ge_graph_for_case_by_case_config
from ....mindspore_test_framework.pipeline.forward.verify_exception \
    import pipeline_for_verify_exception_for_case_by_case_config

# NOTE(review): the original also had a duplicate `from mindspore import context`
# here; it rebinds the same module object already imported above, so it was removed.
context.set_context(mode=context.GRAPH_MODE, save_graphs=True)


class FakeOp(PrimitiveWithInfer):
    """Test-only primitive whose inferred attributes depend on its inputs.

    `infer_shape` stores the shape of the second input as a primitive
    attribute, so two applications with different second-input shapes yield
    distinguishable specialized instances — the property these tests exercise.
    """

    @prim_attr_register
    def __init__(self):
        """No construction-time attributes."""

    def infer_shape(self, x, y):
        # Record y both on the Python object and as a prim attr; the output
        # shape is simply the first input's shape.
        self.second_shape = y
        self.add_prim_attr("second_shape", y)
        return x

    def infer_dtype(self, x, y):
        # Output dtype follows the first input.
        return x


# test the normal case that should generate independent primitive because of
# different generated attributes after inference
def test_conv2d_same_primitive():
    """Two identically configured Conv2d cells applied to inputs of different widths."""
    class Conv2DSameNet(nn.Cell):
        def __init__(self):
            super(Conv2DSameNet, self).__init__()
            self.conv1 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)
            self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)

        def construct(self, x, y):
            r1 = self.conv1(x)
            r2 = self.conv2(y)
            return (r1, r2)

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    net = Conv2DSameNet()
    net(t1, t2)


# test cell as high order argument
# The graph with free variables used as argument is not supported yet
# because of the limit of inference specialize system
def Xtest_conv2d_op_with_arg():
    """Disabled: pass a Conv2d cell itself as a network argument."""
    class Conv2dNet(nn.Cell):
        def __init__(self):
            super(Conv2dNet, self).__init__()

        def construct(self, op, x):
            return op(x)

    class OpsNet(nn.Cell):
        def __init__(self, net):
            super(OpsNet, self).__init__()
            self.opnet = net
            self.conv2 = nn.Conv2d(16, 64, (1, 41), (1, 4), "same", 0, 1, has_bias=True)

        def construct(self, x, y):
            conv_op = self.conv2
            a = self.opnet(conv_op, x)
            b = self.opnet(conv_op, y)
            return (a, b)

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    net = OpsNet(Conv2dNet())
    net(t1, t2)


def test_conv2d_op_with_arg():
    """Pass a FakeOp-wrapping cell as a higher-order argument; two call sites
    with swapped inputs should specialize independently."""
    class FackOpNet(nn.Cell):
        def __init__(self):
            super(FackOpNet, self).__init__()
            self.op = FakeOp()

        def construct(self, x, y):
            return self.op(x, y)

    class OpNet(nn.Cell):
        def __init__(self):
            super(OpNet, self).__init__()

        def construct(self, op, x, y):
            return op(x, y)

    class OpsNet(nn.Cell):
        def __init__(self, net):
            super(OpsNet, self).__init__()
            self.opnet = net
            self.op = FackOpNet()

        def construct(self, x, y):
            op = self.op
            a = self.opnet(op, x, y)
            b = self.opnet(op, y, x)
            return (a, b)

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    net = OpsNet(OpNet())
    net(t1, t2)


def test_conv2d_op_with_arg_same_input():
    """Same as above but the first call site uses the same tensor twice."""
    class FackOpNet(nn.Cell):
        def __init__(self):
            super(FackOpNet, self).__init__()
            self.op = FakeOp()

        def construct(self, x, y):
            return self.op(x, y)

    class OpNet(nn.Cell):
        def __init__(self):
            super(OpNet, self).__init__()

        def construct(self, op, x, y):
            return op(x, y)

    class OpsNet(nn.Cell):
        def __init__(self, net):
            super(OpsNet, self).__init__()
            self.opnet = net
            self.op = FackOpNet()

        def construct(self, x, y):
            op = self.op
            a = self.opnet(op, x, x)
            b = self.opnet(op, y, x)
            return (a, b)

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    net = OpsNet(OpNet())
    net(t1, t2)
# test op with partial
def test_op_as_partial():
    """One F.partial of FakeOp reused on two differently shaped tensors."""
    class OpAsPartial(nn.Cell):
        def __init__(self):
            super(OpAsPartial, self).__init__()
            self.op = FakeOp()

        def construct(self, x, y, z):
            partial_op = F.partial(self.op, x)
            a = partial_op(y)
            b = partial_op(z)
            return a, b

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
    net = OpAsPartial()
    net(t1, t2, t3)


# test op with partial
def test_op_as_partial_inside():
    """Same partial-application case, but wrapped inside an outer cell."""
    class OpAsPartial(nn.Cell):
        def __init__(self):
            super(OpAsPartial, self).__init__()
            self.op = FakeOp()

        def construct(self, x, y, z):
            partial_op = F.partial(self.op, x)
            a = partial_op(y)
            b = partial_op(z)
            return a, b

    class OuterNet(nn.Cell):
        def __init__(self):
            super(OuterNet, self).__init__()
            self.net = OpAsPartial()

        def construct(self, x, y, z):
            a, b = self.net(x, y, z)
            return a, b

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
    net = OuterNet()
    net(t1, t2, t3)


# test op with partial case 2
def test_op_as_partial_independent():
    """Two independent F.partial applications of the same op and bound input."""
    class OpAsPartial(nn.Cell):
        def __init__(self):
            super(OpAsPartial, self).__init__()
            self.op = FakeOp()

        def construct(self, x, y, z):
            partial_op1 = F.partial(self.op, x)
            a = partial_op1(y)
            partial_op2 = F.partial(self.op, x)
            b = partial_op2(z)
            return a, b

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
    net = OpAsPartial()
    net(t1, t2, t3)
def test_nest_partial():
    """Nested partials: F.partial applied to the result of another F.partial."""
    class NestPartial(nn.Cell):
        def __init__(self):
            super(NestPartial, self).__init__()
            self.op = FakeOp()

        def construct(self, x, y, z):
            partial_op1 = F.partial(self.op)
            partial_op2 = F.partial(partial_op1, x)
            a = partial_op2(y)
            partial_op3 = F.partial(self.op)
            partial_op4 = F.partial(partial_op3, x)
            b = partial_op4(z)
            return a, b

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
    net = NestPartial()
    net(t1, t2, t3)


# high order argument
# op and op args as network arguments
def test_op_with_arg_as_input():
    """FakeOp passed as a network argument along with its operands."""
    class WithOpArgNet(nn.Cell):
        def __init__(self):
            super(WithOpArgNet, self).__init__()

        def construct(self, op, x, y):
            return op(x, y)

    class OpsNet(nn.Cell):
        def __init__(self, net):
            super(OpsNet, self).__init__()
            self.opnet = net
            self.op = FakeOp()

        def construct(self, x, y, z):
            op = self.op
            a = self.opnet(op, x, z)
            b = self.opnet(op, x, y)
            return (a, b)

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
    net = OpsNet(WithOpArgNet())
    net(t1, t2, t3)


# The partial application used as argument is not supported yet
# because of the limit of inference specialize system
def Xtest_partial_as_arg():
    """Disabled: pass a partial application as a network argument."""
    class PartialArgNet(nn.Cell):
        def __init__(self):
            super(PartialArgNet, self).__init__()

        def construct(self, partial_op, y):
            return partial_op(y)

    class OpsNet(nn.Cell):
        def __init__(self, net):
            super(OpsNet, self).__init__()
            self.partial_net = net
            self.op = FakeOp()

        def construct(self, x, y, z):
            partial_op = F.partial(self.op, x)
            a = self.partial_net(partial_op, z)
            b = self.partial_net(partial_op, y)
            return (a, b)

    t1 = Tensor(np.ones([1, 16, 1, 1918]).astype(np.float32))
    t2 = Tensor(np.ones([1, 16, 1, 3840]).astype(np.float32))
    t3 = Tensor(np.ones([1, 16, 1, 1234]).astype(np.float32))
    net = OpsNet(PartialArgNet())
    net(t1, t2, t3)