@@ -0,0 +1,65 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore import context
from mindspore import Tensor, nn
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")

def test_single_for():
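    """Single for loop: forward and gradient results must match between graph mode and PyNative mode."""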
    class SingleForNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.add = P.Add()
            self.mul = P.Mul()

        def construct(self, x, y, z):
            x = self.add(x, y)
            for _ in range(0, 3):
                z = self.add(z, x)
            y = self.mul(z, y)
            return y

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([2], mstype.int32)
    y = Tensor([5], mstype.int32)
    z = Tensor([4], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_net = SingleForNet()
    net = GradNet(for_net)
    graph_forward_res = for_net(x, y, z)
    graph_backward_res = net(x, y, z)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_net = SingleForNet()
    net = GradNet(for_net)
    pynative_forward_res = for_net(x, y, z)
    pynative_backward_res = net(x, y, z)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res
@@ -0,0 +1,70 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")


def test_for_in_if():
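    """For loop nested inside an if branch; compares graph-mode and PyNative-mode forward/backward results."""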
    class ForInIfNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.mul = P.Mul()
            self.add = P.Add()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 4, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')

        def construct(self, x):
            if self.param_a > self.param_b:
                x = self.mul(x, 2)
                for _ in range(0, 5):
                    x = self.add(x, x)
                    self.param_b += 1
            return x

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([10], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_in_if_net = ForInIfNet()
    net = GradNet(for_in_if_net)
    graph_forward_res = for_in_if_net(x)
    graph_backward_res = net(x)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_in_if_net = ForInIfNet()
    net = GradNet(for_in_if_net)
    pynative_forward_res = for_in_if_net(x)
    pynative_backward_res = net(x)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res
@@ -0,0 +1,75 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")


def test_for_in_while():
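    """For loop nested inside a while loop; compares graph-mode and PyNative-mode forward/backward results."""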
    class ForInWhileNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')

        def construct(self, x):
            self.assign(self.param_a, x + self.param_a)
            while self.param_a > self.param_b:
                x = self.mul(x, 2)
                for _ in range(0, 5):
                    x = self.add(x, x)
                    self.param_b = self.param_b + 1
                y = self.sub(x, self.param_b)
                self.assign(self.param_a, y)
            return x

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([2], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_in_while_net = ForInWhileNet()
    net = GradNet(for_in_while_net)
    graph_forward_res = for_in_while_net(x)
    graph_backward_res = net(x)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_in_while_net = ForInWhileNet()
    net = GradNet(for_in_while_net)
    pynative_forward_res = for_in_while_net(x)
    pynative_backward_res = net(x)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res
@@ -0,0 +1,75 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")


def test_for_in_for():
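    """For loop nested inside another for loop; compares graph-mode and PyNative-mode forward/backward results."""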
    class ForInForNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = nn.ReLU()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')

        def construct(self, x):
            self.assign(self.param_a, x + self.param_a)
            for _ in range(0, 10):
                x = self.mul(x, 2)
                for _ in range(0, 5):
                    x = self.add(x, x)
                    self.param_b += 1
            y = self.sub(x, self.param_b)
            z = self.relu(x + y)
            return z

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([2], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_in_for_net = ForInForNet()
    net = GradNet(for_in_for_net)
    graph_forward_res = for_in_for_net(x)
    graph_backward_res = net(x)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_in_for_net = ForInForNet()
    net = GradNet(for_in_for_net)
    pynative_forward_res = for_in_for_net(x)
    pynative_backward_res = net(x)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res
@@ -0,0 +1,77 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")


def test_if_after_for():
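    """If statement after a for loop; compares graph-mode and PyNative-mode forward/backward results."""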
    class IfAfterForNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = nn.ReLU()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')

        def construct(self, x):
            self.assign(self.param_a, x + self.param_a)
            y = self.add(x, self.param_b)
            for _ in range(0, 2):
                x = self.sub(x, 2)
                self.param_b = self.add(self.param_b, 2)
            if x < self.param_b:
                y = self.mul(x, self.param_a)
            z = self.relu(x + y)
            return z

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([7], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    if_after_for_net = IfAfterForNet()
    net = GradNet(if_after_for_net)
    graph_forward_res = if_after_for_net(x)
    graph_backward_res = net(x)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    if_after_for_net = IfAfterForNet()
    net = GradNet(if_after_for_net)
    pynative_forward_res = if_after_for_net(x)
    pynative_backward_res = net(x)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res
@@ -0,0 +1,79 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")


def test_for_after_for():
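    """For loop after another for loop; compares graph-mode and PyNative-mode forward/backward results."""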
    class ForAfterForNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = nn.ReLU()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.div = P.Div()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')

        def construct(self, x):
            self.assign(self.param_a, x + self.param_a)
            y = self.add(x, self.param_b)
            for _ in range(0, 2):
                x = self.sub(x, 3)
                y = x + self.param_b
            self.param_a = x + y
            for _ in range(0, 5):
                y = self.mul(x, self.param_a)
            z = self.relu(y + self.param_a)
            return z

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([7], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_after_for_net = ForAfterForNet()
    net = GradNet(for_after_for_net)
    graph_forward_res = for_after_for_net(x)
    graph_backward_res = net(x)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_after_for_net = ForAfterForNet()
    net = GradNet(for_after_for_net)
    pynative_forward_res = for_after_for_net(x)
    pynative_backward_res = net(x)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res
@@ -0,0 +1,92 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")


def test_for_after_while_in_if():
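    """For loop after a while loop nested in an if branch; compares graph-mode and PyNative-mode forward/backward results."""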
    class ForAfterWhileInIfNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = nn.ReLU()
            self.softmax = nn.Softmax()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.div = P.Div()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')
            param_c = np.full((1,), 16, dtype=np.float32)
            self.param_c = Parameter(Tensor(param_c), name='c')

        def construct(self, x, y):
            self.assign(self.param_a, x + self.param_a)
            y = self.add(y, self.param_b)
            if self.param_b == y - self.param_a:
                self.param_c = self.div(self.param_c, self.param_b)
                while self.param_a > x:
                    self.param_c = self.param_a + 2
                    x = x + 1
                y = self.softmax(self.param_c)
                self.param_b = self.sub(y, self.param_b)
            x = self.mul(self.param_b, self.param_c)
            for _ in range(0, 4):
                x = self.sub(x, 3)
                y = y + self.param_b
            self.param_a = x + y
            z = self.relu(y + self.param_a)
            return z

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([11], mstype.int32)
    y = Tensor([7], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_after_while_in_if_net = ForAfterWhileInIfNet()
    net = GradNet(for_after_while_in_if_net)
    graph_forward_res = for_after_while_in_if_net(x, y)
    graph_backward_res = net(x, y)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_after_while_in_if_net = ForAfterWhileInIfNet()
    net = GradNet(for_after_while_in_if_net)
    pynative_forward_res = for_after_while_in_if_net(x, y)
    pynative_backward_res = net(x, y)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res
@@ -0,0 +1,93 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")


def test_for_after_while_in_for():
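    """For loop after a while loop nested in a for loop; compares graph-mode and PyNative-mode forward/backward results."""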
    class ForAfterWhileInForNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = nn.ReLU()
            self.softmax = nn.Softmax()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.div = P.Div()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')
            param_c = np.full((1,), 16, dtype=np.float32)
            self.param_c = Parameter(Tensor(param_c), name='c')

        def construct(self, x, y):
            self.assign(self.param_a, x + self.param_a)
            y = self.add(y, self.param_b)
            for _ in range(0, 3):
                self.param_b = self.add(self.param_c, self.param_b)
                while self.param_c > x:
                    self.param_b = self.param_a + 2
                    x = x + 1
                y = self.softmax(self.param_c) + self.param_a
                self.param_b = self.sub(y, self.param_b)
            x = self.mul(self.param_b, self.param_c)
            for _ in range(0, 4):
                x = self.mul(x, 3)
                y = y + self.param_b
                x = self.relu(self.param_c)
            self.param_a = x - y
            z = y + self.param_b
            return z

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([11], mstype.int32)
    y = Tensor([7], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_after_while_in_for_net = ForAfterWhileInForNet()
    net = GradNet(for_after_while_in_for_net)
    graph_forward_res = for_after_while_in_for_net(x, y)
    graph_backward_res = net(x, y)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_after_while_in_for_net = ForAfterWhileInForNet()
    net = GradNet(for_after_while_in_for_net)
    pynative_forward_res = for_after_while_in_for_net(x, y)
    pynative_backward_res = net(x, y)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res
@@ -0,0 +1,88 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")


def test_for_after_for_in_while():
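    """For loop after a for loop nested in a while loop; compares graph-mode and PyNative-mode forward/backward results."""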
    class ForAfterForInWhileNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = nn.ReLU()
            self.softmax = nn.Softmax()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.div = P.Div()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')
            param_c = np.full((1,), 16, dtype=np.float32)
            self.param_c = Parameter(Tensor(param_c), name='c')

        def construct(self, x, y):
            while self.param_c > x:
                self.param_b = self.add(self.param_c, self.param_b)
                for _ in range(0, 20):
                    self.param_b = self.param_a + 2
                self.param_c = self.param_c - 1
                x = x + 2
            y = self.softmax(self.param_c) + self.param_a
            self.param_b = self.sub(y, self.param_b)
            x = self.mul(self.param_b, self.param_a)
            for _ in range(0, 4):
                x = self.mul(x, 3)
                y = y + self.param_b
                x = self.relu(self.param_c)
            self.param_a = x - y
            z = y + self.param_b
            return z

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([11], mstype.int32)
    y = Tensor([7], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_after_for_in_while_net = ForAfterForInWhileNet()
    net = GradNet(for_after_for_in_while_net)
    graph_forward_res = for_after_for_in_while_net(x, y)
    graph_backward_res = net(x, y)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_after_for_in_while_net = ForAfterForInWhileNet()
    net = GradNet(for_after_for_in_while_net)
    pynative_forward_res = for_after_for_in_while_net(x, y)
    pynative_backward_res = net(x, y)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res
@@ -0,0 +1,84 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
from mindspore import context
from mindspore import Tensor, nn
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype

grad_all = C.GradOperation(get_all=True)
context.set_context(device_target="Ascend")


def test_for_after_for_in_for():
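    """For loop after a for loop nested in another for loop; compares graph-mode and PyNative-mode forward/backward results."""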
    class ForAfterForInForNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.relu = nn.ReLU()
            self.softmax = nn.Softmax()
            self.mul = P.Mul()
            self.add = P.Add()
            self.sub = P.Sub()
            self.div = P.Div()
            self.assign = P.Assign()
            param_a = np.full((1,), 5, dtype=np.float32)
            self.param_a = Parameter(Tensor(param_a), name='a')
            param_b = np.full((1,), 2, dtype=np.float32)
            self.param_b = Parameter(Tensor(param_b), name='b')
            param_c = np.full((1,), 20, dtype=np.float32)
            self.param_c = Parameter(Tensor(param_c), name='c')

        def construct(self, x, y):
            for _ in range(0, 4):
                self.param_b = self.add(self.param_c, self.param_b)
                for j in range(0, 8):
                    self.param_b = self.param_a + j
            self.param_c = self.param_a * self.param_b
            for _ in range(0, 3):
                y = y + self.param_b
                x = self.relu(self.param_c * 3)
            self.param_a = x - y
            z = y + self.param_b
            return z

    class GradNet(nn.Cell):
        def __init__(self, net):
            super(GradNet, self).__init__()
            self.net = net

        def construct(self, *inputs):
            return grad_all(self.net)(*inputs)

    x = Tensor([11], mstype.int32)
    y = Tensor([7], mstype.int32)

    # graph mode
    context.set_context(mode=context.GRAPH_MODE)
    for_after_for_in_for_net = ForAfterForInForNet()
    net = GradNet(for_after_for_in_for_net)
    graph_forward_res = for_after_for_in_for_net(x, y)
    graph_backward_res = net(x, y)

    # pynative mode
    context.set_context(mode=context.PYNATIVE_MODE)
    for_after_for_in_for_net = ForAfterForInForNet()
    net = GradNet(for_after_for_in_for_net)
    pynative_forward_res = for_after_for_in_for_net(x, y)
    pynative_backward_res = net(x, y)

    assert graph_forward_res == pynative_forward_res
    assert graph_backward_res == pynative_backward_res