# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np

import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss


grad_all = C.GradOperation(get_all=True)


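# The wrapper cells below follow the usual pattern of these parallel UTs:
# VirtualLoss acts as a stand-in loss head, and grad_all (GradOperation with
# get_all=True) differentiates the wrapped network with respect to all of
# its inputs.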
class NetWithLossNoBias(nn.Cell):
    def __init__(self, network):
        super(NetWithLossNoBias, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y):
        predict = self.network(x, y)
        return self.loss(predict)


class NetWithLoss(nn.Cell):
    def __init__(self, network):
        super(NetWithLoss, self).__init__()
        self.loss = VirtualLoss()
        self.network = network

    def construct(self, x, y, b):
        predict = self.network(x, y, b)
        return self.loss(predict)


class GradWrapNoBias(nn.Cell):
    def __init__(self, network):
        super(GradWrapNoBias, self).__init__()
        self.network = network

    def construct(self, x, y):
        return grad_all(self.network)(x, y)


class GradWrap(nn.Cell):
    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, b):
        return grad_all(self.network)(x, y, b)


def compile_net_no_bias(net, x, y):
    net.set_auto_parallel()
    _executor.compile(net, x, y)


def compile_net(net, x, y, b):
    net.set_auto_parallel()
    _executor.compile(net, x, y, b)


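# A shard strategy holds one tuple per operator input; each entry is the
# number of slices along the matching dimension (in this file the product of
# the entries always divides the configured device_num). For example,
# ((1, 1, 8), (1, 1, 8)) splits both 3-D inputs of Mul eight ways along the
# last axis.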
# model_parallel test
def test_sum_mul():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_sum = P.ReduceSum(keep_dims=False).shard(strategy2)
            self.mul2 = P.Mul().shard(strategy3)

        def construct(self, x, y, b):
            out = self.mul1(x, y)
            out = self.reduce_sum(out, (1,))
            out = self.mul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 1, 8), (1, 1, 8))
    strategy2 = ((4, 1, 2),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


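# As above, but with 4-D inputs and a ReduceSum over two axes at once.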
def test_sum_mul2():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_sum = P.ReduceSum(keep_dims=False).shard(strategy2)
            self.mul2 = P.Mul().shard(strategy3)

        def construct(self, x, y, b):
            out = self.mul1(x, y)
            out = self.reduce_sum(out, (0, 1))
            out = self.mul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 1, 4, 2), (1, 1, 4, 2))
    strategy2 = ((2, 4, 1, 1),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 128, 64, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 128, 64, 64]), dtype=ms.float32)
    b = Tensor(np.ones([64, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


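# The reduction axis can also be passed as a single (negative) integer.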
def test_sum_mul3():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_sum = P.ReduceSum(keep_dims=False).shard(strategy2)
            self.mul2 = P.Mul().shard(strategy3)

        def construct(self, x, y, b):
            out = self.mul1(x, y)
            out = self.reduce_sum(out, -1)
            out = self.mul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 2, 1),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 32]), dtype=ms.float32)
    compile_net(net, x, y, b)


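# keep_dims=True keeps the reduced axis as size 1, hence b has shape [128, 32, 1].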
def test_sum_mul4():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_sum = P.ReduceSum(keep_dims=True).shard(strategy2)
            self.mul2 = P.Mul().shard(strategy3)

        def construct(self, x, y, b):
            out = self.mul1(x, y)
            out = self.reduce_sum(out, -1)
            out = self.mul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((2, 2, 2),)
    strategy3 = ((4, 2, 1), (4, 2, 1))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 32, 1]), dtype=ms.float32)
    compile_net(net, x, y, b)


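# test_sum_mul5/6/7: 64-device layouts, keep_dims=True, and no bias input, so
# the *_no_bias wrappers and compile helper are used.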
def test_sum_mul5():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_sum = P.ReduceSum(keep_dims=True).shard(strategy2)

        def construct(self, x, y):
            out = self.mul1(x, y)
            out = self.reduce_sum(out, 0)
            return out

    context.set_auto_parallel_context(device_num=64, global_rank=0)
    strategy1 = ((1, 8, 8), (1, 8, 8))
    strategy2 = ((2, 4, 1),)
    net = GradWrapNoBias(NetWithLossNoBias(Net(strategy1, strategy2)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    compile_net_no_bias(net, x, y)


def test_sum_mul6():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_sum = P.ReduceSum(keep_dims=True).shard(strategy2)

        def construct(self, x, y):
            out = self.mul1(x, y)
            out = self.reduce_sum(out, 1)
            return out

    context.set_auto_parallel_context(device_num=64, global_rank=0)
    strategy1 = ((1, 8, 8), (1, 8, 8))
    strategy2 = ((2, 1, 4),)
    net = GradWrapNoBias(NetWithLossNoBias(Net(strategy1, strategy2)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    compile_net_no_bias(net, x, y)


def test_sum_mul7():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_sum = P.ReduceSum(keep_dims=True).shard(strategy2)

        def construct(self, x, y):
            out = self.mul1(x, y)
            out = self.reduce_sum(out, (0, 1))
            return out

    context.set_auto_parallel_context(device_num=64, global_rank=0)
    strategy1 = ((1, 8, 8), (1, 8, 8))
    strategy2 = ((2, 4, 1),)
    net = GradWrapNoBias(NetWithLossNoBias(Net(strategy1, strategy2)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    compile_net_no_bias(net, x, y)


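# The same mul -> reduce -> mul pattern, exercised with ReduceMax, ReduceMin
# and ReduceMean.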
def test_max_mul():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_max = P.ReduceMax(keep_dims=False).shard(strategy2)
            self.mul2 = P.Mul().shard(strategy3)

        def construct(self, x, y, b):
            out = self.mul1(x, y)
            out = self.reduce_max(out, -1)
            out = self.mul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 2),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 32]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_min_mul():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_min = P.ReduceMin(keep_dims=False).shard(strategy2)
            self.mul2 = P.Mul().shard(strategy3)

        def construct(self, x, y, b):
            out = self.mul1(x, y)
            out = self.reduce_min(out, 0)
            out = self.mul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 2),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([32, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def test_reduce_mean_mul_float32():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_mean = P.ReduceMean(keep_dims=False).shard(strategy2)
            self.mul2 = P.Mul().shard(strategy3)

        def construct(self, x, y, b):
            out = self.mul1(x, y)
            out = self.reduce_mean(out, 0)
            out = self.mul2(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 2),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([32, 64]), dtype=ms.float32)

    compile_net(net, x, y, b)


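# ArgMaxWithValue/ArgMinWithValue return an (index, value) pair; the nets below
# discard the index and propagate only the value.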
class ArgMaxWithValueNet(nn.Cell):
    def __init__(self, strategy1, strategy2, strategy3):
        super().__init__()
        self.mul1 = P.Mul().shard(strategy1)
        self.arg_max_with_value = P.ArgMaxWithValue(keep_dims=False, axis=-1).shard(strategy2)
        self.mul2 = P.Mul().shard(strategy3)

    def construct(self, x, y, b):
        out = self.mul1(x, y)
        _, out = self.arg_max_with_value(out)
        out = self.mul2(out, b)
        return out


class ArgMinWithValueNet(nn.Cell):
    def __init__(self, strategy1, strategy2, strategy3):
        super().__init__()
        self.mul1 = P.Mul().shard(strategy1)
        self.arg_min_with_value = P.ArgMinWithValue(keep_dims=False, axis=-1).shard(strategy2)
        self.mul2 = P.Mul().shard(strategy3)

    def construct(self, x, y, b):
        out = self.mul1(x, y)
        _, out = self.arg_min_with_value(out)
        out = self.mul2(out, b)
        return out


def gen_inputs_and_compile_net(net):
    x = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 64]), dtype=ms.float32)
    compile_net(net, x, y, b)


def gen_inputs_and_compile_net_no_bias(net):
    x = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 64, 64]), dtype=ms.float32)
    compile_net_no_bias(net, x, y)


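# The "tobefixed_" prefix keeps this case out of pytest's default collection
# (only "test_*" functions are collected), presumably until the case it
# exercises is fixed.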
def tobefixed_test_arg_max_with_value_mul_semi_axis_parallel():
    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 2),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(ArgMaxWithValueNet(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    gen_inputs_and_compile_net(net)


def test_arg_max_with_value_mul_semi():
    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 1),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(ArgMaxWithValueNet(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    gen_inputs_and_compile_net(net)


def test_arg_max_with_value_mul_auto():
    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = None
    strategy2 = None
    strategy3 = None
    net = GradWrap(NetWithLoss(ArgMaxWithValueNet(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    gen_inputs_and_compile_net(net)


def test_arg_min_with_value_mul_semi_axis_parallel():
    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 2),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(ArgMinWithValueNet(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    gen_inputs_and_compile_net(net)


def test_arg_min_with_value_mul_semi():
    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 1),)
    strategy3 = ((2, 4), (2, 4))
    net = GradWrap(NetWithLoss(ArgMinWithValueNet(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    gen_inputs_and_compile_net(net)


def test_arg_min_with_value_mul_auto():
    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = None
    strategy2 = None
    strategy3 = None
    net = GradWrap(NetWithLoss(ArgMinWithValueNet(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    gen_inputs_and_compile_net(net)


class ArgMinWithValueNet2(nn.Cell):
    def __init__(self, strategy1, strategy2, strategy3):
        super().__init__()
        self.mul1 = P.Mul().shard(strategy1)
        self.arg_min_with_value = P.ArgMinWithValue(keep_dims=True, axis=-1).shard(strategy2)
        self.relu = P.ReLU().shard(strategy3)

    def construct(self, x, y):
        out = self.mul1(x, y)
        _, out = self.arg_min_with_value(out)
        out = self.relu(out)
        return out


def tobefixed_test_arg_min_with_value_mul_semi_axis_parallel2():
    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 2),)
    strategy3 = ((2, 4, 1),)
    net = GradWrapNoBias(NetWithLossNoBias(ArgMinWithValueNet2(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    gen_inputs_and_compile_net_no_bias(net)


def test_arg_min_with_value_mul_semi2():
    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 1),)
    strategy3 = ((2, 4, 1),)
    net = GradWrapNoBias(NetWithLossNoBias(ArgMinWithValueNet2(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    gen_inputs_and_compile_net_no_bias(net)


def test_arg_min_with_value_mul_auto2():
    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = None
    strategy2 = None
    strategy3 = None
    net = GradWrapNoBias(NetWithLossNoBias(ArgMinWithValueNet2(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")
    gen_inputs_and_compile_net_no_bias(net)


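# The cross_batch tests only check that a reduction carrying the "cross_batch"
# primitive attribute still compiles under semi-auto and auto parallel.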
def test_cross_batch():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_sum = P.ReduceSum(keep_dims=False).shard(strategy2)
            self.reduce_mean = P.ReduceMean(keep_dims=False).shard(strategy3).add_prim_attr("cross_batch", True)

        def construct(self, x, y):
            out = self.mul1(x, y)
            out = self.reduce_sum(out, -1)
            out = self.reduce_mean(out, 0)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((4, 2), (4, 2))
    strategy2 = ((2, 1),)
    strategy3 = ((8,),)
    net = GradWrapNoBias(NetWithLossNoBias(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    compile_net_no_bias(net, x, y)


def test_cross_batch2():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul1 = P.Mul().shard(strategy1)
            self.reduce_mean = P.ReduceMean(keep_dims=False).shard(strategy2)
            self.reduce_sum = P.ReduceSum(keep_dims=False).shard(strategy3).add_prim_attr("cross_batch", True)

        def construct(self, x, y):
            out = self.mul1(x, y)
            out = self.reduce_mean(out, -1)
            out = self.reduce_sum(out, 0)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((4, 2), (4, 2))
    strategy2 = ((2, 1),)
    strategy3 = ((8,),)
    net = GradWrapNoBias(NetWithLossNoBias(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    compile_net_no_bias(net, x, y)


def test_cross_batch_auto():
    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.mul1 = P.Mul()
            self.reduce_mean = P.ReduceMean(keep_dims=False)
            self.reduce_sum = P.ReduceSum(keep_dims=False).add_prim_attr("cross_batch", True)

        def construct(self, x, y):
            out = self.mul1(x, y)
            out = self.reduce_mean(out, -1)
            out = self.reduce_sum(out, 0)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    net = GradWrapNoBias(NetWithLossNoBias(Net()))
    context.set_auto_parallel_context(parallel_mode="auto_parallel")

    x = Tensor(np.ones([32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([32, 64]), dtype=ms.float32)
    compile_net_no_bias(net, x, y)


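# Called without an axis, ReduceMax reduces every dimension; the matching
# strategy for TensorAdd's first (scalar) input is the empty tuple ().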
def test_max_empty_tuple():
    class Net(nn.Cell):
        def __init__(self, strategy1, strategy2, strategy3):
            super().__init__()
            self.mul = P.Mul().shard(strategy1)
            self.reduce_max = P.ReduceMax(keep_dims=False).shard(strategy2)
            self.add = P.TensorAdd().shard(strategy3)

        def construct(self, x, y, b):
            out = self.mul(x, y)
            out = self.reduce_max(out)
            out = self.add(out, b)
            return out

    context.set_auto_parallel_context(device_num=8, global_rank=0)
    strategy1 = ((1, 4, 2), (1, 4, 2))
    strategy2 = ((4, 1, 2),)
    strategy3 = ((), (1, 1))
    net = GradWrap(NetWithLoss(Net(strategy1, strategy2, strategy3)))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

    x = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    y = Tensor(np.ones([128, 32, 64]), dtype=ms.float32)
    b = Tensor(np.ones([128, 32]), dtype=ms.float32)

    compile_net(net, x, y, b)