- # Copyright 2019 Huawei Technologies Co., Ltd
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import numpy as np
-
- import mindspore as ms
- import mindspore.nn as nn
- from mindspore import Parameter, Tensor, context
- from mindspore.common.api import _cell_graph_executor
- from mindspore.ops import composite as C
- from mindspore.ops import operations as P
- from tests.ut.python.ops.test_math_ops import VirtualLoss
-
-
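- # GradOperation(get_all=True) builds a gradient function that returns the
- # gradients with respect to all network inputs.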
- grad_all = C.GradOperation(get_all=True)
-
-
- class NetWithLoss(nn.Cell):
- def __init__(self, network):
- super(NetWithLoss, self).__init__()
- self.loss = VirtualLoss()
- self.network = network
-
- def construct(self, x, y, b):
- predict = self.network(x, y, b)
- return self.loss(predict)
-
-
- class GradWrap(nn.Cell):
- def __init__(self, network):
- super(GradWrap, self).__init__()
- self.network = network
-
- def construct(self, x, y, b):
- return grad_all(self.network)(x, y, b)
-
-
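- # Compile the wrapped cell only; parallel strategies are validated at graph
- # compile time, so no device kernels are launched in these unit tests.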
- def compile_net(net, x, y, b):
- net.set_auto_parallel()
- net.set_train()
- _cell_graph_executor.compile(net, x, y, b)
-
-
- def test_matmul_sub():
- """
- Feature: distribute operator sub in auto parallel.
- Description: matmul-sub net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.sub = P.Sub().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.sub(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
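- # MatMul strategy ((2, 2), (2, 2)): x and y are each split 2x2; the shared inner
- # dimension must match, so 2 * 2 * 2 = 8 devices are used. Sub's ((4, 2), (4, 2))
- # shards both elementwise inputs identically across the same 8 devices.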
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_add():
- """
- Feature: distribute operator add in auto parallel.
- Description: matmul-add net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.add = P.Add().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.add(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_mul():
- """
- Feature: distribute operator mul in auto parallel.
- Description: matmul-mul net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.mul = P.Mul().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.mul(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_mod():
- """
- Feature: distribute operator mod in auto parallel.
- Description: matmul-mod net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.mod = P.Mod().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.mod(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_floormod():
- """
- Feature: distribute operator floormod in auto parallel.
- Description: matmul-floormod net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.floormod = P.FloorMod().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.floormod(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_atan2():
- """
- Feature: distribute operator atan2 in auto parallel.
- Description: matmul-atan2 net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.atan2 = P.Atan2().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.atan2(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_div_no_nan():
- """
- Feature: distribute operator div_no_nan in auto parallel.
- Description: matmul-div_no_nan net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.div_no_nan = P.DivNoNan().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.div_no_nan(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_logicaland():
- """
- Feature: distribute operator logical_and in auto parallel.
- Description: matmul-logical_and net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.equal = P.Equal().shard(strategy2)
- self.notequal = P.NotEqual().shard(strategy2)
- self.logical = P.LogicalAnd().shard(strategy2)
-
- def construct(self, x, y, b):
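- # Equal and NotEqual produce boolean tensors; LogicalAnd combines them element-wise.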
- out = self.matmul(x, y)
- out1 = self.equal(out, b)
- out2 = self.notequal(out, b)
- out = self.logical(out1, out2)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_logicalor():
- """
- Feature: distribute operator logical_or in auto parallel.
- Description: matmul-logical_or net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.equal = P.Equal().shard(strategy2)
- self.notequal = P.NotEqual().shard(strategy2)
- self.logical = P.LogicalOr().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out1 = self.equal(out, b)
- out2 = self.notequal(out, b)
- out = self.logical(out1, out2)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_div():
- """
- Feature: distribute operator div in auto parallel.
- Description: matmul-div net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.div = P.Div().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.div(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_add_broadcast():
- """
- Feature: distribute operator add in auto parallel.
- Description: matmul-add broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.add = P.Add().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.add(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
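- # b is 1-D with shape [64]; its strategy (2,) matches the split of the matmul
- # output's last dimension.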
- strategy2 = ((4, 2), (2,))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_add_broadcast2():
- """
- Feature: distribute operator add in auto parallel.
- Description: matmul-add broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.add = P.Add().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.add(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
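- # Broadcast case: y is [32, 1] and b is [1, 64]; size-1 dimensions still get a
- # strategy entry (split factor 1) in the tuples below.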
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_sub_broadcast():
- """
- Feature: distribute operator sub in auto parallel.
- Description: matmul-sub broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.sub = P.Sub().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.sub(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (2,))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_sub_broadcast2():
- """
- Feature: distribute operator sub in auto parallel.
- Description: matmul-sub broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.sub = P.Sub().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.sub(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_mul_broadcast():
- """
- Feature: distribute operator mul in auto parallel.
- Description: matmul-mul broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.mul = P.Mul().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.mul(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (2,))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_mul_broadcast2():
- """
- Feature: distribute operator mul in auto parallel.
- Description: matmul-mul broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.mul = P.Mul().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.mul(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_div_broadcast():
- """
- Feature: distribute operator div in auto parallel.
- Description: matmul-div broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.div = P.Div().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.div(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (2,))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_div_broadcast2():
- """
- Feature: distribute operator div in auto parallel.
- Description: matmul-div broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.div = P.Div().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.div(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_greater_broadcast():
- """
- Feature: distribute operator greater in auto parallel.
- Description: matmul-greater broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.greater = P.Greater().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.greater(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (2,))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_greater_broadcast2():
- """
- Feature: distribute operator greater in auto parallel.
- Description: matmul-greater broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.greater = P.Greater().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.greater(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_floordiv():
- """
- Feature: distribute operator floordiv in auto parallel.
- Description: matmul-floordiv net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.floordiv = P.FloorDiv().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.floordiv(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (4, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_floordiv_broadcast():
- """
- Feature: distribute operator floordiv in auto parallel.
- Description: matmul-floordiv broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.floordiv = P.FloorDiv().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.floordiv(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 2), (2, 2))
- strategy2 = ((4, 2), (2,))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 64]), dtype=ms.float32)
- b = Tensor(np.ones([64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_floordiv_broadcast2():
- """
- Feature: distribute operator floordiv in auto parallel.
- Description: matmul-floordiv broadcast net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.floordiv = P.FloorDiv().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.floordiv(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_assign_sub():
- """
- Feature: distribute operator assign_sub in auto parallel.
- Description: mul-assign_sub net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self):
- super().__init__()
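- # AssignSub subtracts `out` from assignsub_weight in place and returns the updated parameter.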
- self.assign_sub = P.AssignSub()
- self.mul = P.Mul()
- self.mul_weight = Parameter(Tensor(np.full([128, 32],
- 0.5, dtype=np.float32)),
- name="mul_weight")
- self.assignsub_weight = Parameter(Tensor(np.full([128, 32],
- 1.1, dtype=np.float32)),
- name="assignsub_weight")
-
- def construct(self, x):
- out = self.mul(x, self.mul_weight)
- out = self.assign_sub(self.assignsub_weight, out)
- return out
-
- class SubNetWithLoss(nn.Cell):
- def __init__(self, network):
- super(SubNetWithLoss, self).__init__()
- self.loss = VirtualLoss()
- self.network = network
-
- def construct(self, x):
- predict = self.network(x)
- return self.loss(predict)
-
- class SubGradWrap(nn.Cell):
- def __init__(self, network):
- super(SubGradWrap, self).__init__()
- self.network = network
-
- def construct(self, x):
- return grad_all(self.network)(x)
-
- def compile_sub_net(net, x):
- net.set_auto_parallel()
- net.set_train()
- _cell_graph_executor.compile(net, x)
-
- context.set_auto_parallel_context(device_num=64, global_rank=15)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- net = SubGradWrap(SubNetWithLoss(Net()))
- x = Tensor(np.ones([128, 32]), dtype=ms.float32)
- compile_sub_net(net, x)
-
-
- def test_assign_add():
- """
- Feature: distribute operator assign_add in auto parallel.
- Description: mul-assign_add net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self):
- super().__init__()
- self.assign_add = P.AssignAdd()
- self.mul = P.Mul()
- self.mul_weight = Parameter(Tensor(np.full([128, 32],
- 0.5, dtype=np.float32)),
- name="mul_weight")
- self.assignadd_weight = Parameter(Tensor(np.full([128, 32],
- 1.1, dtype=np.float32)),
- name="assignadd_weight")
-
- def construct(self, x):
- out = self.mul(x, self.mul_weight)
- out = self.assign_add(self.assignadd_weight, out)
- return out
-
- class SubNetWithLoss(nn.Cell):
- def __init__(self, network):
- super(SubNetWithLoss, self).__init__()
- self.loss = VirtualLoss()
- self.network = network
-
- def construct(self, x):
- predict = self.network(x)
- return self.loss(predict)
-
- class SubGradWrap(nn.Cell):
- def __init__(self, network):
- super(SubGradWrap, self).__init__()
- self.network = network
-
- def construct(self, x):
- return grad_all(self.network)(x)
-
- def compile_sub_net(net, x):
- net.set_auto_parallel()
- net.set_train()
- _cell_graph_executor.compile(net, x)
-
- context.set_auto_parallel_context(device_num=64, global_rank=15)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- net = SubGradWrap(SubNetWithLoss(Net()))
- x = Tensor(np.ones([128, 32]), dtype=ms.float32)
- compile_sub_net(net, x)
-
-
- def test_assign():
- """
- Feature: distribute operator assign in auto parallel.
- Description: mul-assign net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self):
- super().__init__()
- self.assign = P.Assign()
- self.mul = P.Mul()
- self.mul_weight = Parameter(Tensor(np.full([128, 32],
- 0.5, dtype=np.float32)),
- name="mul_weight")
- self.assign_weight = Parameter(Tensor(np.full([128, 32],
- 1.1, dtype=np.float32)),
- name="assign_weight")
-
- def construct(self, x):
- out = self.mul(x, self.mul_weight)
- out = self.assign(self.assign_weight, out)
- return out
-
- class SubNetWithLoss(nn.Cell):
- def __init__(self, network):
- super(SubNetWithLoss, self).__init__()
- self.loss = VirtualLoss()
- self.network = network
-
- def construct(self, x):
- predict = self.network(x)
- return self.loss(predict)
-
- class SubGradWrap(nn.Cell):
- def __init__(self, network):
- super(SubGradWrap, self).__init__()
- self.network = network
-
- def construct(self, x):
- return grad_all(self.network)(x)
-
- def compile_sub_net(net, x):
- net.set_auto_parallel()
- net.set_train()
- _cell_graph_executor.compile(net, x)
-
- context.set_auto_parallel_context(device_num=64, global_rank=15)
- context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
- net = SubGradWrap(SubNetWithLoss(Net()))
- x = Tensor(np.ones([128, 32]), dtype=ms.float32)
- compile_sub_net(net, x)
-
-
- def test_matmul_bitwise_and_broadcast():
- """
- Feature: distribute operator BitwiseAnd in auto parallel.
- Description: bitwise_and-matmul net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.bitwise_and = P.BitwiseAnd().shard(strategy1)
- self.matmul = P.MatMul().shard(strategy2)
-
- def construct(self, x, y, z):
- out = self.bitwise_and(x, y)
- out = self.matmul(out, z)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
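- # x is [64, 1] and y is [1, 64]; BitwiseAnd broadcasts them to [64, 64] before the matmul.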
- strategy1 = ((2, 1), (1, 4))
- strategy2 = ((1, 4), (4, 2))
- net = Net(strategy1, strategy2)
-
- x = Tensor(np.ones([64, 1]), dtype=ms.int32)
- y = Tensor(np.ones([1, 64]), dtype=ms.int32)
- z = Tensor(np.ones([64, 32]), dtype=ms.int32)
- compile_net(net, x, y, z)
-
-
- def test_matmul_bitwise_or_broadcast():
- """
- Feature: distribute operator BitwiseOr in auto parallel.
- Description: bitwise_or-matmul net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.bitwise_or = P.BitwiseOr().shard(strategy1)
- self.matmul = P.MatMul().shard(strategy2)
-
- def construct(self, x, y, z):
- out = self.bitwise_or(x, y)
- out = self.matmul(out, z)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 1), (1, 4))
- strategy2 = ((1, 4), (4, 2))
- net = Net(strategy1, strategy2)
-
- x = Tensor(np.ones([64, 1]), dtype=ms.int32)
- y = Tensor(np.ones([1, 64]), dtype=ms.int32)
- z = Tensor(np.ones([64, 32]), dtype=ms.int32)
- compile_net(net, x, y, z)
-
-
- def test_matmul_bitwise_xor_broadcast():
- """
- Feature: distribute operator BitwiseXor in auto parallel.
- Description: bitwise_xor-matmul net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.bitwise_xor = P.BitwiseXor().shard(strategy1)
- self.matmul = P.MatMul().shard(strategy2)
-
- def construct(self, x, y, z):
- out = self.bitwise_xor(x, y)
- out = self.matmul(out, z)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 1), (1, 4))
- strategy2 = ((1, 4), (4, 2))
- net = Net(strategy1, strategy2)
-
- x = Tensor(np.ones([64, 1]), dtype=ms.int32)
- y = Tensor(np.ones([1, 64]), dtype=ms.int32)
- z = Tensor(np.ones([64, 32]), dtype=ms.int32)
- compile_net(net, x, y, z)
-
-
- def test_matmul_mul_no_nan_broadcast():
- """
- Feature: distribute operator MulNoNan in auto parallel.
- Description: matmul-mul_no_nan net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.mul_no_nan = P.MulNoNan().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.mul_no_nan(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
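- # MulNoNan computes out * b but returns 0 wherever b is 0, even if out is NaN or Inf.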
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_truncate_div_broadcast():
- """
- Feature: distribute operator TruncateDiv in auto parallel.
- Description: matmul-truncate_div net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.truncate_div = P.TruncateDiv().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.truncate_div(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
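- # TruncateDiv divides element-wise and rounds the quotient toward zero.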
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_truncate_mod_broadcast():
- """
- Feature: distribute operator TruncateMod in auto parallel.
- Description: matmul-truncate_mod net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.truncate_mod = P.TruncateMod().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.truncate_mod(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_xdivy_broadcast():
- """
- Feature: distribute operator Xdivy in auto parallel.
- Description: matmul-xdivy net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.xdivy = P.Xdivy().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.xdivy(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_xlogy_broadcast():
- """
- Feature: distribute operator Xlogy in auto parallel.
- Description: matmul-xlogy net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.xlogy = P.Xlogy().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.xlogy(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_squared_difference_broadcast():
- """
- Feature: distribute operator SquaredDifference in auto parallel.
- Description: matmul-squared_difference net with strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.squared_difference = P.SquaredDifference().shard(strategy2)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.squared_difference(out, b)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = GradWrap(NetWithLoss(Net(strategy1, strategy2)))
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.float32)
- compile_net(net, x, y, b)
-
-
- def test_matmul_masked_fill_broadcast_with_value_float():
- """
- Feature: distribute operator MaskedFill in auto parallel.
- Description: matmul-masked_fill net with float value and strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.masked_fill = P.MaskedFill().shard(strategy2)
- self.value = 1.0
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.masked_fill(out, b, self.value)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
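- # `value` is a Python float here, so strategy2 only needs entries for the two tensor inputs.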
- strategy1 = ((2, 4), (4, 1))
- strategy2 = ((4, 1), (1, 2))
- net = Net(strategy1, strategy2)
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.bool_)
- compile_net(net, x, y, b)
-
-
- def test_matmul_masked_fill_broadcast_with_value_tensor():
- """
- Feature: distribute operator MaskedFill in auto parallel.
- Description: matmul-masked_fill net with tensor value and strategy in semi auto parallel.
- Expectation: compile done without error.
- """
- class Net(nn.Cell):
- def __init__(self, strategy1, strategy2):
- super().__init__()
- self.matmul = P.MatMul().shard(strategy1)
- self.masked_fill = P.MaskedFill().shard(strategy2)
- self.value = Tensor(1.0, ms.float32)
-
- def construct(self, x, y, b):
- out = self.matmul(x, y)
- out = self.masked_fill(out, b, self.value)
- return out
-
- context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
- strategy1 = ((2, 4), (4, 1))
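- # The 0-D `value` tensor gets the empty tuple () as its strategy: a scalar has no axis to split.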
- strategy2 = ((4, 1), (1, 2), ())
- net = Net(strategy1, strategy2)
-
- x = Tensor(np.ones([64, 32]), dtype=ms.float32)
- y = Tensor(np.ones([32, 1]), dtype=ms.float32)
- b = Tensor(np.ones([1, 64]), dtype=ms.bool_)
- compile_net(net, x, y, b)