@@ -127,11 +127,6 @@ void CompileGraph::AddInput(const AnfNodePtr &node) {
     MS_LOG(DEBUG) << "Input node is null " << node->DebugString(true);
     (void)Ref(node);
     return;
-  } else if (node->isa<ValueNode>()) {
-    // Value node maybe reused in different graph or by different nodes,copy the value node to ensure stack correct.
-    auto copy_value_node = NewValueNode(node->cast<ValueNodePtr>()->value());
-    (void)Ref(copy_value_node);
-    return;
   }
   AddInst(Instruction::kInput, Ref(node));
   set_height(height_ + 1);
@@ -393,6 +388,13 @@ int64_t CompileGraph::AddCall(const FuncGraphPtr &graph, const CNodePtr &node) {
   MS_LOG(DEBUG) << "Call:" << Ref(fn) << ", " << height_ << ", " << (size - 1);
   AddInst(Instruction::kCall, Ref(fn));
   Ret(static_cast<int64_t>(size - 1));
+  for (size_t i = size - 1; i > 0; i--) {
+    const auto iter = slots_.find(inputs[i]);
+    if (iter != slots_.end() && iter->second >= height_) {
+      slots_.erase(inputs[i]);
+    }
+  }
   return RET_SUCCESS;
 }
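
Note: the loop added after `Ret` drops `slots_` entries for call arguments whose cached slot index is at or above the post-call stack height, so a later lookup cannot resolve to a stack slot the call has already popped. This also appears to be what makes the `ValueNode` copy removed from `AddInput` unnecessary: a reused value node's stale slot is now invalidated here rather than avoided by copying. A minimal Python sketch of the cleanup, with a plain dict standing in for `slots_` (toy model, not the actual MindSpore structures):

```python
# Toy model of the VM slot map: node -> stack slot index.
# After a call returns, the stack has shrunk to `height`, so any
# argument whose cached slot index is >= height points past the
# top of the stack and must be forgotten.
def erase_stale_slots(slots, call_args, height):
    """Drop slot entries invalidated by the call (sketch, toy types)."""
    for arg in reversed(call_args[1:]):  # skip index 0, the callee
        if slots.get(arg, -1) >= height:
            del slots[arg]
    return slots

# Example: after the call the stack height is 2, so the cached slots
# for "b" (2) and "c" (3) are stale, while "a" (1) survives.
slots = {"a": 1, "b": 2, "c": 3}
erase_stale_slots(slots, ["fn", "a", "b", "c"], height=2)
assert slots == {"a": 1}
```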
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
+import pytest
 from mindspore import context
 from mindspore import Tensor, nn
 from mindspore.common.parameter import Parameter
@@ -165,7 +166,7 @@ def test_single_for_03():
     assert graph_forward_res == pynative_forward_res
     assert graph_backward_res == pynative_backward_res


+@pytest.mark.skip(reason="not supported side effect")
 def test_single_for_04():
     class SingleForNet(nn.Cell):
         def __init__(self):
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
+import pytest
 from mindspore import context
 from mindspore import Tensor, nn
 from mindspore.ops import composite as C
@@ -143,11 +144,13 @@ def test_if_in_if():
     control_flow_if_in_if(IfInIfNet, x)


+@pytest.mark.skip(reason="not supported side effect")
 def test_if_in_if_01():
     x = Tensor(2, mstype.int32)
     control_flow_if_in_if(IfInIfNet1, x)


+@pytest.mark.skip(reason="not supported side effect")
 def test_if_in_if_02():
     x = Tensor(2, mstype.int32)
     control_flow_if_in_if(IfInIfNet2, x)
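
Note: the skipped cases exercise parameter assignment inside `construct` under control flow, which is the side effect the skip reason refers to (the same pattern `ForwardNetNoAssign` below uses when it writes `self.weight` inside an `if`). A minimal sketch of that pattern, with hypothetical cell and parameter names:

```python
# Sketch of the unsupported pattern: writing to a Parameter from
# inside construct() under control flow. Names are illustrative,
# not the actual test cells.
import numpy as np
from mindspore import nn, Tensor
from mindspore.common import dtype as mstype
from mindspore.common.parameter import Parameter


class SideEffectNet(nn.Cell):
    def __init__(self):
        super(SideEffectNet, self).__init__()
        self.weight = Parameter(Tensor(np.array(0), mstype.int32))

    def construct(self, x):
        if x > 1:
            self.weight = x  # parameter write inside control flow
        return x + self.weight
```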
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
+import pytest
 from mindspore.common import dtype as mstype
 from mindspore import nn
 from mindspore import Tensor
@@ -52,6 +53,7 @@ class BackwardNet(nn.Cell):
         return grads


+@pytest.mark.skip(reason="not supported side effect")
 def test_forward():
     x = Tensor(np.array(1), mstype.int32)
     y = Tensor(np.array(3), mstype.int32)
@@ -66,6 +68,7 @@ def test_forward():
     assert graph_mode_out == pynative_mode_out


+@pytest.mark.skip(reason="not supported side effect")
 def test_backward():
     x = Tensor(np.array(1), mstype.int32)
     y = Tensor(np.array(3), mstype.int32)
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
+import pytest
 from mindspore import context
 from mindspore import Tensor, nn
 from mindspore.common.parameter import Parameter
@@ -21,7 +22,7 @@ from mindspore.ops import operations as P
 from mindspore.common import dtype as mstype

 grad_all = C.GradOperation(get_all=True)
-context.set_context(device_target="Ascend")
+context.set_context(device_target="GPU")


 def test_for_in_for_01():
@@ -76,7 +77,9 @@ def test_for_in_for_01():
     assert graph_forward_res == pynative_forward_res
     assert graph_backward_res == pynative_backward_res


+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
 def test_for_in_for_02():
     class ForInForNet(nn.Cell):
         def __init__(self):
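
Note: `level0`, `platform_x86_gpu_training`, and `env_onecard` are custom markers the project's CI uses to select which tests run on which targets; stock pytest does not know them. If you run these files locally, registering the markers avoids unknown-marker warnings. A minimal `conftest.py` sketch (assumed local setup, not part of this change; the marker descriptions are guesses):

```python
# conftest.py -- register the CI markers so local pytest runs
# do not warn about unknown marks (descriptions are assumptions).
def pytest_configure(config):
    config.addinivalue_line("markers", "level0: highest-priority CI gate")
    config.addinivalue_line(
        "markers", "platform_x86_gpu_training: x86 host with a training GPU")
    config.addinivalue_line("markers", "env_onecard: single accelerator card")
```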
@@ -13,6 +13,7 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
+import pytest
 from mindspore.common import dtype as mstype
 from mindspore import nn
 from mindspore import Tensor
@@ -73,6 +74,7 @@ def test_forward():
     assert graph_mode_out == pynative_mode_out


+@pytest.mark.skip(reason="not supported side effect")
 def test_backward():
     x = Tensor(np.array(1), mstype.int32)
     y = Tensor(np.array(3), mstype.int32)
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
+import pytest
 from mindspore import context
 from mindspore import Tensor, nn
 from mindspore.ops import composite as C
@@ -19,7 +20,7 @@ from mindspore.common import dtype as mstype
 from mindspore.common.parameter import Parameter

 grad_all = C.GradOperation(get_all=True)
-context.set_context(device_target="Ascend")
+context.set_context(device_target="GPU")


 class IfAfterIfInIfNet(nn.Cell):
@@ -145,22 +146,27 @@ def control_flow_if_after_if_in_if(input_net, x):
     assert graph_forward_res == pynative_forward_res
     assert graph_backward_res == pynative_backward_res


+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
 def test_if_after_if_in_if():
     x = Tensor(2, mstype.int32)
     control_flow_if_after_if_in_if(IfAfterIfInIfNet, x)


+@pytest.mark.skip(reason="not supported side effect")
 def test_if_after_if_in_if_01():
     x = Tensor(2, mstype.int32)
     control_flow_if_after_if_in_if(IfAfterIfInIfNet1, x)


+@pytest.mark.skip(reason="not supported side effect")
 def test_if_after_if_in_if_02():
     x = Tensor(2, mstype.int32)
     control_flow_if_after_if_in_if(IfAfterIfInIfNet2, x)


+@pytest.mark.skip(reason="not supported side effect")
 def test_if_after_if_in_if_03():
     x = Tensor(2, mstype.int32)
     control_flow_if_after_if_in_if(IfAfterIfInIfNet3, x)
@@ -14,6 +14,7 @@
 # ============================================================================
 import numpy as np
+import pytest
 from mindspore.common import dtype as mstype
 from mindspore import nn
 from mindspore import Tensor
@@ -21,7 +22,7 @@ from mindspore.ops import composite as C
 from mindspore import context
 from mindspore.common.parameter import Parameter

-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="Ascend")
+context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")


 class ForwardNet(nn.Cell):
@@ -73,6 +74,7 @@ def test_forward():
     assert graph_mode_out == pynative_mode_out


+@pytest.mark.skip(reason="not supported side effect")
 def test_backward():
     x = Tensor(np.array(1), mstype.int32)
     y = Tensor(np.array(3), mstype.int32)
@@ -122,6 +124,9 @@ class BackwardNetNoAssign(nn.Cell):


 # This test case has a problem of evaluator endless loop.
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
 def test_backward_no_assign():
     x = Tensor(np.array(1), mstype.int32)
     y = Tensor(np.array(3), mstype.int32)
@@ -14,6 +14,7 @@
 # ============================================================================
 import numpy as np
+import pytest
 from mindspore.common import dtype as mstype
 from mindspore import nn
 from mindspore import Tensor
@@ -21,7 +22,7 @@ from mindspore.ops import composite as C
 from mindspore import context
 from mindspore.common.parameter import Parameter

-context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="Ascend")
+context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target="GPU")


 class ForwardNet(nn.Cell):
@@ -69,6 +70,7 @@ def test_forward():
     assert graph_mode_out == pynative_mode_out


+@pytest.mark.skip(reason="not supported side effect")
 def test_backward():
     x = Tensor(np.array(1), mstype.int32)
     y = Tensor(np.array(3), mstype.int32)
@@ -83,3 +85,52 @@ def test_backward():
     pynative_backward_net = BackwardNet(pynative_forward_net)
     pynative_mode_grads = pynative_backward_net(x, y)
     assert graph_mode_grads == pynative_mode_grads
+
+
+class ForwardNetNoAssign(nn.Cell):
+    def __init__(self, max_cycles=10):
+        super(ForwardNetNoAssign, self).__init__()
+        self.max_cycles = max_cycles
+        self.zero = Tensor(np.array(0), mstype.int32)
+        self.weight = Parameter(Tensor(np.array(0), mstype.int32))
+
+    def construct(self, x, y):
+        out = self.zero
+        for _ in range(0, self.max_cycles):
+            while x < y:
+                out = x * y + out
+                x = x + 1
+        #self.weight = x
+        if out > 20:
+            self.weight = out
+            out = out - 20
+        return out, self.weight
+
+
+class BackwardNetNoAssign(nn.Cell):
+    def __init__(self, net):
+        super(BackwardNetNoAssign, self).__init__(auto_prefix=False)
+        self.forward_net = net
+        self.grad = C.GradOperation(get_all=True)
+
+    def construct(self, *inputs):
+        grads = self.grad(self.forward_net)(*inputs)
+        return grads
+
+
+@pytest.mark.level0
+@pytest.mark.platform_x86_gpu_training
+@pytest.mark.env_onecard
+def test_backward_no_assign():
+    x = Tensor(np.array(1), mstype.int32)
+    y = Tensor(np.array(3), mstype.int32)
+    # Graph Mode
+    context.set_context(mode=context.GRAPH_MODE)
+    graph_forward_net = ForwardNetNoAssign(max_cycles=3)
+    graph_backward_net = BackwardNetNoAssign(graph_forward_net)
+    graph_mode_grads = graph_backward_net(x, y)
+    # Pynative Mode
+    context.set_context(mode=context.PYNATIVE_MODE)
+    pynative_forward_net = ForwardNetNoAssign(max_cycles=3)
+    pynative_backward_net = BackwardNetNoAssign(pynative_forward_net)
+    pynative_mode_grads = pynative_backward_net(x, y)
+    assert graph_mode_grads == pynative_mode_grads
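
Note: both backward cells wrap the forward net with `C.GradOperation(get_all=True)`, which returns a tuple holding the gradient with respect to every positional input rather than only the first; the test then runs the same wrapped net in graph and PyNative mode and asserts the tuples match. A minimal usage sketch of `get_all=True` on a toy cell (illustrative values):

```python
import numpy as np
from mindspore import nn, Tensor
from mindspore.common import dtype as mstype
from mindspore.ops import composite as C


class Mul(nn.Cell):
    def construct(self, x, y):
        return x * y


grad_all = C.GradOperation(get_all=True)

x = Tensor(np.array(2.0), mstype.float32)
y = Tensor(np.array(3.0), mstype.float32)
# d(x*y)/dx = y = 3.0 and d(x*y)/dy = x = 2.0: one gradient per input
gx, gy = grad_all(Mul())(x, y)
```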
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
+import pytest
 from mindspore import context
 from mindspore import Tensor, nn
 from mindspore.ops import composite as C
@@ -21,6 +22,7 @@ from mindspore.common.parameter import Parameter
 grad_all = C.GradOperation(get_all=True)
 context.set_context(device_target="Ascend")

+@pytest.mark.skip(reason="not supported side effect")
 def test_for_after_for_in_if():
     class ForAfterForInIfNet(nn.Cell):
         def __init__(self):