@@ -17,9 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
@@ -47,4 +44,4 @@ def test_net():
     expect = 3.0
     add = Net()
     output = add(x, y)
-    assert (output == expect)
+    assert output == expect
@@ -17,7 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.ops import operations as P

 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
@@ -37,7 +36,7 @@ def test_net_bool():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_int8():
@@ -45,7 +44,7 @@ def test_net_int8():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_uint8():
@@ -53,7 +52,7 @@ def test_net_uint8():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_int16():
@@ -61,7 +60,7 @@ def test_net_int16():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_uint16():
@@ -69,7 +68,7 @@ def test_net_uint16():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_int32():
@@ -77,7 +76,7 @@ def test_net_int32():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_uint32():
@@ -85,7 +84,7 @@ def test_net_uint32():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_int64():
@@ -93,7 +92,7 @@ def test_net_int64():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_uint64():
@@ -101,7 +100,7 @@ def test_net_uint64():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_float16():
@@ -109,7 +108,7 @@ def test_net_float16():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_float32():
@@ -117,7 +116,7 @@ def test_net_float32():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))


 def test_net_float64():
@@ -125,4 +124,4 @@ def test_net_float64():
     net = Net()
     output = net(Tensor(x), -1)
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.expand_dims(x, -1)))
+    assert np.all(output.asnumpy() == np.expand_dims(x, -1))
@@ -36,7 +36,7 @@ def test_net_int8():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())


 def test_net_uint8():
@@ -44,7 +44,7 @@ def test_net_uint8():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())


 def test_net_int16():
@@ -52,7 +52,7 @@ def test_net_int16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())


 def test_net_uint16():
@@ -60,7 +60,7 @@ def test_net_uint16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())


 def test_net_int32():
@@ -68,7 +68,7 @@ def test_net_int32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())


 def test_net_uint32():
@@ -76,7 +76,7 @@ def test_net_uint32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())


 def test_net_int64():
@@ -84,7 +84,7 @@ def test_net_int64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())


 def test_net_uint64():
@@ -92,7 +92,7 @@ def test_net_uint64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())


 def test_net_float16():
@@ -100,7 +100,7 @@ def test_net_float16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())


 def test_net_float32():
@@ -108,4 +108,4 @@ def test_net_float32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.flatten()))
+    assert np.all(output.asnumpy() == x.flatten())
@@ -17,7 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.ops import operations as P

 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
@@ -37,7 +36,7 @@ def test_net_bool():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_int8():
@@ -45,7 +44,7 @@ def test_net_int8():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_uint8():
@@ -53,7 +52,7 @@ def test_net_uint8():
     net = Net()
    output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_int16():
@@ -61,7 +60,7 @@ def test_net_int16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_uint16():
@@ -69,7 +68,7 @@ def test_net_uint16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_int32():
@@ -77,7 +76,7 @@ def test_net_int32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_uint32():
@@ -85,7 +84,7 @@ def test_net_uint32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_int64():
@@ -93,7 +92,7 @@ def test_net_int64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_uint64():
@@ -101,7 +100,7 @@ def test_net_uint64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_float16():
@@ -109,7 +108,7 @@ def test_net_float16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_float32():
@@ -117,7 +116,7 @@ def test_net_float32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))


 def test_net_float64():
@@ -125,4 +124,4 @@ def test_net_float64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.isfinite(x)))
+    assert np.all(output.asnumpy() == np.isfinite(x))
@@ -17,7 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.ops import operations as P

 context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
@@ -37,7 +36,7 @@ def test_net_bool():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_int8():
@@ -45,7 +44,7 @@ def test_net_int8():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_uint8():
@@ -53,7 +52,7 @@ def test_net_uint8():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_int16():
@@ -61,7 +60,7 @@ def test_net_int16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_uint16():
@@ -69,7 +68,7 @@ def test_net_uint16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_int32():
@@ -77,7 +76,7 @@ def test_net_int32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_uint32():
@@ -85,7 +84,7 @@ def test_net_uint32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_int64():
@@ -93,7 +92,7 @@ def test_net_int64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_uint64():
@@ -101,7 +100,7 @@ def test_net_uint64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_float16():
@@ -109,7 +108,7 @@ def test_net_float16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_float32():
@@ -117,7 +116,7 @@ def test_net_float32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))


 def test_net_float64():
@@ -125,4 +124,4 @@ def test_net_float64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == np.reshape(x, (4, 4))))
+    assert np.all(output.asnumpy() == np.reshape(x, (4, 4)))
@@ -36,7 +36,7 @@ def test_net_bool():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_int8():
@@ -44,7 +44,7 @@ def test_net_int8():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_uint8():
@@ -52,7 +52,7 @@ def test_net_uint8():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_int16():
@@ -60,7 +60,7 @@ def test_net_int16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_uint16():
@@ -68,7 +68,7 @@ def test_net_uint16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_int32():
@@ -76,7 +76,7 @@ def test_net_int32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_uint32():
@@ -84,7 +84,7 @@ def test_net_uint32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_int64():
@@ -92,7 +92,7 @@ def test_net_int64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_uint64():
@@ -100,7 +100,7 @@ def test_net_uint64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_float16():
@@ -108,7 +108,7 @@ def test_net_float16():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_float32():
@@ -116,7 +116,7 @@ def test_net_float32():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())


 def test_net_float64():
@@ -124,4 +124,4 @@ def test_net_float64():
     net = Net()
     output = net(Tensor(x))
     print(output.asnumpy())
-    assert (np.all(output.asnumpy() == x.squeeze()))
+    assert np.all(output.asnumpy() == x.squeeze())
@@ -12,12 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
-from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
@@ -34,11 +31,11 @@ class Net(nn.Cell):
         self.accumulation = Parameter(initializer(
             'normal', [2, 3, 3, 4]), name='accumulation')
         self.learning_rate = Parameter(initializer(
-            'normal', [1, ]), name='learning_rate')
+            'normal', [1,]), name='learning_rate')
         self.gradient = Parameter(initializer(
             'normal', [2, 3, 3, 4]), name='gradient')
         self.momentum = Parameter(initializer(
-            'normal', [1, ]), name='momentum')
+            'normal', [1,]), name='momentum')

     def construct(self):
         return self.apply_momentum(self.variable, self.accumulation, self.learning_rate, self.gradient, self.momentum)
@@ -18,9 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
-from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G

 context.set_context(device_target="Ascend")
@@ -16,11 +16,7 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
-from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
-from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G

 context.set_context(device_target="Ascend")
@@ -18,7 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G
@@ -33,8 +33,8 @@ class Grad(nn.Cell):
         self.network = network

     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad):
+        return self.grad(self.network)(input_, output_grad)


 class Net(nn.Cell):
@@ -18,9 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
-from mindspore.ops import operations as P

 context.set_context(device_target="Ascend")
@@ -18,9 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
-from mindspore.ops import operations as P
 from mindspore.ops.composite import GradOperation

 context.set_context(device_target="Ascend")
@@ -33,8 +30,8 @@ class Grad(nn.Cell):
         self.network = network

     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad):
+        return self.grad(self.network)(input_, output_grad)


 class Net(nn.Cell):
@@ -12,11 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
-from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.ops import operations as P
@@ -17,7 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
@@ -34,8 +34,8 @@ class Grad(nn.Cell):
         self.network = network

     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad):
+        return self.grad(self.network)(input_, output_grad)


 class Net(nn.Cell):
@@ -39,8 +39,8 @@ def test_image_gradients():
     expected_dx = np.array([[[[1, 0], [1, 0]]]]).astype(np.int32)
     net = Net()
     dy, dx = net(image)
-    assert np.any(dx.asnumpy() - expected_dx) == False
-    assert np.any(dy.asnumpy() - expected_dy) == False
+    assert not np.any(dx.asnumpy() - expected_dx)
+    assert not np.any(dy.asnumpy() - expected_dy)


 def test_image_gradients_multi_channel_depth():
@@ -61,5 +61,5 @@ def test_image_gradients_multi_channel_depth():
     net = Net()
     dy, dx = net(image)
-    assert np.any(dx.asnumpy() - expected_dx.asnumpy()) == False
-    assert np.any(dy.asnumpy() - expected_dy.asnumpy()) == False
+    assert not np.any(dx.asnumpy() - expected_dx.asnumpy())
+    assert not np.any(dy.asnumpy() - expected_dy.asnumpy())
@@ -18,8 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P

 context.set_context(device_target="Ascend")
@@ -31,8 +31,8 @@ class Grad(nn.Cell):
         self.network = network

     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad):
+        return self.grad(self.network)(input_, output_grad)


 class Net(nn.Cell):
@@ -16,7 +16,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
-from mindspore import Tensor
 from mindspore.common.api import ms_function
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
@@ -43,7 +42,6 @@ class Net(nn.Cell):
 def test_net():
-    x = np.random.randn(1, 64, 112, 112).astype(np.float32)
     maxpool = Net()
     output = maxpool()
     print("***********output output*********")
@@ -18,8 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
 from mindspore.ops.composite import GradOperation
@@ -33,8 +31,8 @@ class Grad(nn.Cell):
         self.network = network

     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad):
+        return self.grad(self.network)(input_, output_grad)


 class Net(nn.Cell):
@@ -18,8 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P

 context.set_context(device_target="Ascend")
@@ -18,8 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
 from mindspore.ops.composite import GradOperation
@@ -33,8 +31,8 @@ class Grad(nn.Cell):
         self.network = network

     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad):
+        return self.grad(self.network)(input_, output_grad)


 class Net(nn.Cell):
@@ -18,8 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P

 context.set_context(device_target="Ascend")
@@ -18,8 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
 from mindspore.ops.composite import GradOperation
@@ -33,8 +31,8 @@ class Grad(nn.Cell):
         self.network = network

     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad):
+        return self.grad(self.network)(input_, output_grad)


 class Net(nn.Cell):
@@ -59,7 +59,7 @@ def test_net():
     '''Compare Numpy with MS type is float32'''
     labels_shape = (32,)
     logits_shape = [32, 1001]
-    labels, logits, loss_np, bp_np = np_sparse_softmax_cross_entropy_with_logits(labels_shape, logits_shape, np.float32)
+    labels, logits, loss_np, _ = np_sparse_softmax_cross_entropy_with_logits(labels_shape, logits_shape, np.float32)
     expect = loss_np
     SparseSoftmaxCrossEntropyWithLogits = Net()
     loss_me = SparseSoftmaxCrossEntropyWithLogits(Tensor(logits), Tensor(labels))
@@ -17,7 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
@@ -17,7 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
@@ -18,8 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P

 context.set_context(device_target="Ascend")
@@ -17,9 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
@@ -17,9 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
@@ -34,7 +34,7 @@ class Adam:
         self.epsilon = epsilon

     def train_mindspore_impl(self):
-        input = Tensor(np.random.randn(self.batch_num, self.input_channels).astype(np.float32))
+        input_ = Tensor(np.random.randn(self.batch_num, self.input_channels).astype(np.float32))
         weight_np = Tensor(np.random.randn(self.output_channels, self.input_channels).astype(np.float32))
         bias = Tensor(np.random.randn(self.output_channels).astype(np.float32))
@@ -60,9 +60,9 @@ class Adam:
         train_network.set_train()
         print('MS Initialized!')
-        for i in range(self.epoch):
-            train_network(input, label)
-        output = ms_dense(input)
+        for _ in range(self.epoch):
+            train_network(input_, label)
+        output = ms_dense(input_)
         print("===============output=================", output)
         return output.asnumpy()
@@ -12,12 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
-from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
@@ -32,11 +29,11 @@ class Net(nn.Cell):
         self.accumulation = Parameter(initializer(
             'normal', [2, 3, 3, 4]), name='accumulation')
         self.learning_rate = Parameter(initializer(
-            'normal', [1, ]), name='learning_rate')
+            'normal', [1,]), name='learning_rate')
         self.gradient = Parameter(initializer(
             'normal', [2, 3, 3, 4]), name='gradient')
         self.momentum = Parameter(initializer(
-            'normal', [1, ]), name='momentum')
+            'normal', [1,]), name='momentum')

     def construct(self):
         return self.apply_momentum(self.variable, self.accumulation, self.learning_rate, self.gradient, self.momentum)
@@ -13,7 +13,6 @@
 # limitations under the License.
 # ============================================================================
 import numpy as np
-import pytest
 from mindspore import context
 from mindspore.common.tensor import Tensor
@@ -38,7 +37,7 @@ def tf_me_batchmatmul(inputa, inputb):
     net = Net()
     net.set_train()
     model = Model(net)
-    out_me = model.predict(Tensor(inputa), Tensor(inputb))
+    model.predict(Tensor(inputa), Tensor(inputb))


 def test_batchmatmul_normal_shape1():
@@ -17,7 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
@@ -34,8 +34,8 @@ class Grad(nn.Cell):
         self.network = network

     @ms_function
-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad):
+        return self.grad(self.network)(input_, output_grad)


 class Net(nn.Cell):
@@ -55,4 +55,4 @@ def test_net():
     x = np.random.randn(1, 64, 112, 112).astype(np.float32)
     sens = np.random.randn(1, 64, 112, 112).astype(np.float32)
     net = Grad(Net())
-    output = net(Tensor(x), Tensor(sens))
+    net(Tensor(x), Tensor(sens))
@@ -17,7 +17,6 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
 from mindspore.common.initializer import initializer
 from mindspore.common.parameter import Parameter
 from mindspore.ops import operations as P
@@ -18,9 +18,6 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
-from mindspore.ops import operations as P
 from mindspore.ops.operations import _grad_ops as G

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
@@ -17,13 +17,10 @@ import numpy as np
 import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
-from mindspore.common.api import ms_function
-from mindspore.common.initializer import initializer
-from mindspore.common.parameter import Parameter
+from mindspore import log as logger
 from mindspore.ops import operations as P

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
-from mindspore import log as logger


 class Net(nn.Cell):
@@ -12,32 +12,30 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import math
 import numpy as np
 import pytest
 from mindspore import context
 from mindspore import log as logger
 from mindspore.common.tensor import Tensor
-from mindspore.nn import GELU, Cell
-from mindspore.ops import operations as P
+from mindspore.nn import GELU
 from mindspore.train.model import Model

 context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")


-def gelu_forward_me_impl(input):
+def gelu_forward_me_impl(input_):
     n = GELU()
     n.set_train()
     m = Model(n)
-    out = m.predict(input)
+    out = m.predict(input_)
     return out.asnumpy()


 def gelu_forward_cmp(input_shape, data_type=np.float32):
     input_np = np.random.randn(*input_shape).astype(data_type)
     input_me = Tensor(input_np)
-    out_me = gelu_forward_me_impl(input_me)
+    gelu_forward_me_impl(input_me)


 @pytest.mark.skip(reason="scalar")
@@ -12,11 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-import math
 import numpy as np
-import pytest
-import mindspore as ms
 from mindspore import context
 from mindspore import log as logger
 from mindspore.common.tensor import Tensor
@@ -33,15 +30,15 @@ class Grad(Cell):
         self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
         self.network = network

-    def construct(self, input, output_grad):
-        return self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad):
+        return self.grad(self.network)(input_, output_grad)


-def gelu_backward_me_impl(input, output_grad):
+def gelu_backward_me_impl(input_, output_grad):
     n = GELU()
     grad_with_sense = Grad(n)
     grad_with_sense.set_train()
-    input_grad = grad_with_sense(input, output_grad)
+    input_grad = grad_with_sense(input_, output_grad)
     return input_grad.asnumpy()
@@ -86,7 +83,7 @@ def gelu_backward_me_large_in_impl(x1, x2, output_grad):
     grad_with_sense = GradLargeIn(n)
     grad_with_sense.set_train()
     input_grad = grad_with_sense(x1, x2, output_grad)
-    return input_grad[0].asnumpy(), input_grad[1].asnumpy(),
+    return input_grad[0].asnumpy(), input_grad[1].asnumpy()


 def test_grad_gelu_input_10240_1024():
@@ -30,8 +30,8 @@ class Net(Cell):
         super(Net, self).__init__()
         self.layernorm = LayerNorm(input_shape, begin_norm_axis, begin_params_axis, gamma, beta)

-    def construct(self, input):
-        x = self.layernorm(input)
+    def construct(self, input_):
+        x = self.layernorm(input_)
         return x
@@ -30,8 +30,8 @@ class Grad(Cell):
         self.grad = GradOperation(name="get_all", get_all=True, sens_param=True)
         self.network = network

-    def construct(self, input, output_grad, ):
-        gout = self.grad(self.network)(input, output_grad)
+    def construct(self, input_, output_grad,):
+        gout = self.grad(self.network)(input_, output_grad)
         return gout
@@ -40,8 +40,8 @@ class Net(Cell):
         super(Net, self).__init__()
         self.layernorm = LayerNorm(input_shape, begin_norm_axis, begin_params_axis, gamma, beta)

-    def construct(self, input):
-        x = self.layernorm(input)
+    def construct(self, input_):
+        x = self.layernorm(input_)
         return x
| @@ -12,7 +12,6 @@ | |||||
| # See the License for the specific language governing permissions and | # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| @@ -12,7 +12,6 @@ | |||||
| # See the License for the specific language governing permissions and | # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| @@ -12,7 +12,6 @@ | |||||
| # See the License for the specific language governing permissions and | # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| @@ -18,8 +18,6 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| @@ -18,8 +18,6 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(device_target="Ascend") | context.set_context(device_target="Ascend") | ||||
| @@ -39,9 +39,9 @@ def me_max(inputa, inputb, dtype=ms.float32): | |||||
| net.set_train() | net.set_train() | ||||
| model = Model(net) | model = Model(net) | ||||
| print(type(inputa)) | print(type(inputa)) | ||||
| if isinstance(inputa, np.ndarray) == True: | |||||
| if isinstance(inputa, np.ndarray): | |||||
| inputa = Tensor(inputa) | inputa = Tensor(inputa) | ||||
| if isinstance(inputb, np.ndarray) == True: | |||||
| if isinstance(inputb, np.ndarray): | |||||
| inputb = Tensor(inputb) | inputb = Tensor(inputb) | ||||
| out = model.predict(inputa, inputb) | out = model.predict(inputa, inputb) | ||||
| print(out) | print(out) | ||||
| @@ -46,12 +46,12 @@ class GradWrap(Cell): | |||||
| def gen_data(inputA_np, inputB_np, grad=None): | def gen_data(inputA_np, inputB_np, grad=None): | ||||
| inputA_me = inputA_np | inputA_me = inputA_np | ||||
| if isinstance(inputA_np, np.ndarray) == True: | |||||
| if isinstance(inputA_np, np.ndarray): | |||||
| inputA_me = Tensor(inputA_me) | inputA_me = Tensor(inputA_me) | ||||
| inputB_me = inputB_np | inputB_me = inputB_np | ||||
| if isinstance(inputB_np, np.ndarray) == True: | |||||
| if isinstance(inputB_np, np.ndarray): | |||||
| inputB_me = Tensor(inputB_np) | inputB_me = Tensor(inputB_np) | ||||
| if grad == None: | |||||
| if grad is None: | |||||
| grad = np.random.randn(2).astype(np.float32) | grad = np.random.randn(2).astype(np.float32) | ||||
| print("----inputA---") | print("----inputA---") | ||||
| print(inputA_np) | print(inputA_np) | ||||
| @@ -31,8 +31,8 @@ class Grad(nn.Cell): | |||||
| self.network = network | self.network = network | ||||
| @ms_function | @ms_function | ||||
| def construct(self, input, output_grad): | |||||
| return self.grad(self.network)(input, output_grad) | |||||
| def construct(self, input_, output_grad): | |||||
| return self.grad(self.network)(input_, output_grad) | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| @@ -18,9 +18,6 @@ import mindspore as ms | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.train.model import Model | from mindspore.train.model import Model | ||||
| @@ -42,9 +39,9 @@ def me_min(inputa, inputb, dtype=ms.float32): | |||||
| net.set_train() | net.set_train() | ||||
| model = Model(net) | model = Model(net) | ||||
| print(type(inputa)) | print(type(inputa)) | ||||
| if isinstance(inputa, np.ndarray) == True: | |||||
| if isinstance(inputa, np.ndarray): | |||||
| inputa = Tensor(inputa) | inputa = Tensor(inputa) | ||||
| if isinstance(inputb, np.ndarray) == True: | |||||
| if isinstance(inputb, np.ndarray): | |||||
| inputb = Tensor(inputb) | inputb = Tensor(inputb) | ||||
| out = model.predict(inputa, inputb) | out = model.predict(inputa, inputb) | ||||
| print(out) | print(out) | ||||
| @@ -15,7 +15,6 @@ | |||||
| import numpy as np | import numpy as np | ||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.nn import Cell | from mindspore.nn import Cell | ||||
| from mindspore.ops import composite as C | from mindspore.ops import composite as C | ||||
| @@ -47,11 +46,11 @@ class GradWrap(Cell): | |||||
| def gen_data(inputA_np, inputB_np, grad=None): | def gen_data(inputA_np, inputB_np, grad=None): | ||||
| inputA_me = inputA_np | inputA_me = inputA_np | ||||
| if isinstance(inputA_np, np.ndarray) == True: | |||||
| if isinstance(inputA_np, np.ndarray): | |||||
| inputA_me = Tensor(inputA_me) | inputA_me = Tensor(inputA_me) | ||||
| inputB_me = inputB_np | inputB_me = inputB_np | ||||
| if isinstance(inputB_np, np.ndarray) == True: | |||||
| if isinstance(inputB_np, np.ndarray): | |||||
| inputB_me = Tensor(inputB_np) | inputB_me = Tensor(inputB_np) | ||||
| if grad is None: | if grad is None: | ||||
| @@ -12,11 +12,9 @@ | |||||
| # See the License for the specific language governing permissions and | # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | |||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| @@ -16,11 +16,7 @@ import numpy as np | |||||
| import mindspore as ms | import mindspore as ms | ||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.nn import Cell | from mindspore.nn import Cell | ||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.train.model import Model | from mindspore.train.model import Model | ||||
| @@ -33,31 +29,29 @@ class PowMe(Cell): | |||||
| super(PowMe, self).__init__() | super(PowMe, self).__init__() | ||||
| self.pow = P.Pow() | self.pow = P.Pow() | ||||
| def construct(self, input, exp): | |||||
| return self.pow(input, exp) | |||||
| def construct(self, input_, exp): | |||||
| return self.pow(input_, exp) | |||||
| def pow_forward_me_impl(input, exp): | |||||
| def pow_forward_me_impl(input_, exp): | |||||
| n = PowMe() | n = PowMe() | ||||
| n.set_train() | n.set_train() | ||||
| m = Model(n) | m = Model(n) | ||||
| out = m.predict(input, exp) | |||||
| out = m.predict(input_, exp) | |||||
| return out.asnumpy() | return out.asnumpy() | ||||
| def pow_forward_cmp(input_shape, exp_shape): | def pow_forward_cmp(input_shape, exp_shape): | ||||
| if len(input_shape) == 0: | |||||
| if not input_shape: | |||||
| input_np = np.absolute(np.random.randn()) | input_np = np.absolute(np.random.randn()) | ||||
| else: | else: | ||||
| input_np = np.absolute(np.random.randn(*input_shape).astype(np.float32)) | input_np = np.absolute(np.random.randn(*input_shape).astype(np.float32)) | ||||
| input_tf = input_np | |||||
| input_me = Tensor(input_np, dtype=ms.float32) | input_me = Tensor(input_np, dtype=ms.float32) | ||||
| if len(exp_shape) == 0: | |||||
| if not exp_shape: | |||||
| exp_np = np.absolute(np.random.randn()) | exp_np = np.absolute(np.random.randn()) | ||||
| else: | else: | ||||
| exp_np = np.absolute(np.random.randn(*exp_shape).astype(np.float32)) | exp_np = np.absolute(np.random.randn(*exp_shape).astype(np.float32)) | ||||
| exp_tf = exp_np | |||||
| exp_me = Tensor(exp_np, dtype=ms.float32) | exp_me = Tensor(exp_np, dtype=ms.float32) | ||||
| out_me = pow_forward_me_impl(input_me, exp_me) | out_me = pow_forward_me_impl(input_me, exp_me) | ||||
| @@ -18,8 +18,6 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(device_target="Ascend") | context.set_context(device_target="Ascend") | ||||
| @@ -18,8 +18,6 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.ops.composite import GradOperation | from mindspore.ops.composite import GradOperation | ||||
| @@ -18,8 +18,6 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.ops.composite import GradOperation | from mindspore.ops.composite import GradOperation | ||||
| @@ -33,8 +31,8 @@ class Grad(nn.Cell): | |||||
| self.network = network | self.network = network | ||||
| @ms_function | @ms_function | ||||
| def construct(self, input): | |||||
| return self.grad(self.network)(input) | |||||
| def construct(self, input_): | |||||
| return self.grad(self.network)(input_) | |||||
| class Net(nn.Cell): | class Net(nn.Cell): | ||||
| @@ -17,9 +17,6 @@ import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | ||||
| @@ -16,11 +16,7 @@ import numpy as np | |||||
| import mindspore as ms | import mindspore as ms | ||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.nn import Cell | from mindspore.nn import Cell | ||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.train.model import Model | from mindspore.train.model import Model | ||||
| @@ -41,11 +37,11 @@ def me_select(cond, inputa, inputb, dtype=ms.float32): | |||||
| net = Select(dtype) | net = Select(dtype) | ||||
| net.set_train() | net.set_train() | ||||
| model = Model(net) | model = Model(net) | ||||
| if isinstance(inputa, np.ndarray) == True: | |||||
| if isinstance(inputa, np.ndarray): | |||||
| inputa = Tensor(inputa) | inputa = Tensor(inputa) | ||||
| if isinstance(inputb, np.ndarray) == True: | |||||
| if isinstance(inputb, np.ndarray): | |||||
| inputb = Tensor(inputb) | inputb = Tensor(inputb) | ||||
| if isinstance(cond, np.bool_) == True: | |||||
| if isinstance(cond, np.bool_): | |||||
| cond = np.array(cond) | cond = np.array(cond) | ||||
| out = model.predict(Tensor(cond), inputa, inputb) | out = model.predict(Tensor(cond), inputa, inputb) | ||||
| @@ -18,7 +18,6 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| from mindspore.ops import operations as P | |||||
| context.set_context(device_target="Ascend") | context.set_context(device_target="Ascend") | ||||
| @@ -17,9 +17,6 @@ import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | ||||
| @@ -37,5 +37,5 @@ def test_net(): | |||||
| features = np.random.randn(32, 1001).astype(np.float16) | features = np.random.randn(32, 1001).astype(np.float16) | ||||
| labels = np.random.randn(32, 1001).astype(np.float16) | labels = np.random.randn(32, 1001).astype(np.float16) | ||||
| SoftmaxCrossEntropyWithLogits = Net() | SoftmaxCrossEntropyWithLogits = Net() | ||||
| output = SoftmaxCrossEntropyWithLogits(Tensor(features), Tensor(labels)) | |||||
| SoftmaxCrossEntropyWithLogits(Tensor(features), Tensor(labels)) | |||||
| # print(output.asnumpy()) | # print(output.asnumpy()) | ||||
| @@ -17,9 +17,6 @@ import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | ||||
| @@ -17,9 +17,6 @@ import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | ||||
| @@ -17,9 +17,6 @@ import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | ||||
| @@ -13,7 +13,6 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.ops.operations as P | import mindspore.ops.operations as P | ||||
| @@ -32,8 +31,8 @@ class Net(Cell): | |||||
| self.end = end | self.end = end | ||||
| self.stride = stride | self.stride = stride | ||||
| def construct(self, input): | |||||
| x = self.stridedslice(input, self.begin, self.end, self.stride) | |||||
| def construct(self, input_): | |||||
| x = self.stridedslice(input_, self.begin, self.end, self.stride) | |||||
| return x | return x | ||||
| @@ -47,17 +46,17 @@ def me_stridedslice(input1, begin, end, stride): | |||||
| def test_stridedslice_input_2d(): | def test_stridedslice_input_2d(): | ||||
| input = np.random.randn(5, 5).astype(np.int32) | |||||
| input_ = np.random.randn(5, 5).astype(np.int32) | |||||
| begin = (0, 0) | begin = (0, 0) | ||||
| end = (2, 2) | end = (2, 2) | ||||
| stride = (1, 1) | stride = (1, 1) | ||||
| me_stridedslice(input, begin, end, stride) | |||||
| me_stridedslice(input_, begin, end, stride) | |||||
| def test_stridedslice_input_3d(): | def test_stridedslice_input_3d(): | ||||
| input = np.random.randn(5, 5, 5).astype(np.float32) | |||||
| input_ = np.random.randn(5, 5, 5).astype(np.float32) | |||||
| begin = (0, 0, 0) | begin = (0, 0, 0) | ||||
| end = (3, 3, 3) | end = (3, 3, 3) | ||||
| stride = (1, 1, 1) | stride = (1, 1, 1) | ||||
| me_stridedslice(input, begin, end, stride) | |||||
| me_stridedslice(input_, begin, end, stride) | |||||
| @@ -13,7 +13,6 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| import numpy as np | import numpy as np | ||||
| import pytest | |||||
| from mindspore import context | from mindspore import context | ||||
| from mindspore.common.tensor import Tensor | from mindspore.common.tensor import Tensor | ||||
| @@ -30,8 +29,8 @@ class Grad(Cell): | |||||
| self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) | self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) | ||||
| self.network = network | self.network = network | ||||
| def construct(self, input, output_grad): | |||||
| gout = self.grad(self.network)(input, output_grad) | |||||
| def construct(self, input_, output_grad): | |||||
| gout = self.grad(self.network)(input_, output_grad) | |||||
| return gout | return gout | ||||
| @@ -43,13 +42,13 @@ class Net(Cell): | |||||
| self.end = end | self.end = end | ||||
| self.stride = stride | self.stride = stride | ||||
| def construct(self, input): | |||||
| x = self.stridedslice(input, self.begin, self.end, self.stride) | |||||
| def construct(self, input_): | |||||
| x = self.stridedslice(input_, self.begin, self.end, self.stride) | |||||
| return x | return x | ||||
| def me_stridedslice(input, begin, end, stride, gradients): | |||||
| input_me = Tensor(input) | |||||
| def me_stridedslice(input_, begin, end, stride, gradients): | |||||
| input_me = Tensor(input_) | |||||
| out_grad_me = Tensor(gradients) | out_grad_me = Tensor(gradients) | ||||
| net_me = Grad(Net(begin, end, stride)) | net_me = Grad(Net(begin, end, stride)) | ||||
| net_me.set_train() | net_me.set_train() | ||||
| @@ -58,9 +57,9 @@ def me_stridedslice(input, begin, end, stride, gradients): | |||||
| def test_grad_stridedslice_1d(): | def test_grad_stridedslice_1d(): | ||||
| input = np.random.randn(2).astype(np.float32) | |||||
| input_ = np.random.randn(2).astype(np.float32) | |||||
| begin = (0,) | begin = (0,) | ||||
| end = (2,) | end = (2,) | ||||
| stride = (1,) | stride = (1,) | ||||
| gradients = np.random.randn(2).astype(np.float32) | gradients = np.random.randn(2).astype(np.float32) | ||||
| me_stridedslice(input, begin, end, stride, gradients) | |||||
| me_stridedslice(input_, begin, end, stride, gradients) | |||||
| @@ -17,9 +17,6 @@ import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | ||||
| @@ -17,7 +17,6 @@ import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.ops.operations import _grad_ops as G | from mindspore.ops.operations import _grad_ops as G | ||||
| from mindspore.train.model import Model | from mindspore.train.model import Model | ||||
| @@ -17,9 +17,6 @@ import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | ||||
| @@ -17,9 +17,6 @@ import numpy as np | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | |||||
| from mindspore.common.initializer import initializer | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") | ||||
| @@ -29,14 +29,14 @@ class Net(nn.Cell): | |||||
| self.transpose = P.Transpose() | self.transpose = P.Transpose() | ||||
| self.perm = perm_in | self.perm = perm_in | ||||
| def construct(self, input): | |||||
| x = self.transpose(input, self.perm) | |||||
| def construct(self, input_): | |||||
| x = self.transpose(input_, self.perm) | |||||
| return x | return x | ||||
| def ms_transpose(input, perm_in): | |||||
| def ms_transpose(input_, perm_in): | |||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| input_me = Tensor(input) | |||||
| input_me = Tensor(input_) | |||||
| net = Net(perm_in) | net = Net(perm_in) | ||||
| net.set_train() | net.set_train() | ||||
| model = Model(net) | model = Model(net) | ||||
| @@ -47,6 +47,6 @@ def ms_transpose(input, perm_in): | |||||
| def test_net(): | def test_net(): | ||||
| input = np.random.randn(8, 24, 1, 1).astype(np.float16) | |||||
| input_ = np.random.randn(8, 24, 1, 1).astype(np.float16) | |||||
| perm = (0, 2, 3, 1) | perm = (0, 2, 3, 1) | ||||
| ms_transpose(input, perm) | |||||
| ms_transpose(input_, perm) | |||||
| @@ -33,19 +33,19 @@ class Net(nn.Cell): | |||||
| return self.seg_sum(x, segment_ids, self.num_segments) | return self.seg_sum(x, segment_ids, self.num_segments) | ||||
| def me_un_seg_sum(input, indices, num_segments): | |||||
| def me_un_seg_sum(input_, indices, num_segments): | |||||
| context.set_context(mode=context.GRAPH_MODE) | context.set_context(mode=context.GRAPH_MODE) | ||||
| net = Net(num_segments) | net = Net(num_segments) | ||||
| net.set_train() | net.set_train() | ||||
| model = Model(net) | model = Model(net) | ||||
| out = model.predict(Tensor(input), Tensor(indices)) | |||||
| out = model.predict(Tensor(input_), Tensor(indices)) | |||||
| return out.asnumpy() | return out.asnumpy() | ||||
| def comapre_un_seg_sum(shape, indices, num_segments, dtype): | def comapre_un_seg_sum(shape, indices, num_segments, dtype): | ||||
| input = np.random.randn(*shape).astype(dtype) | |||||
| input_ = np.random.randn(*shape).astype(dtype) | |||||
| indices_me = np.array(indices).astype(np.int32) | indices_me = np.array(indices).astype(np.int32) | ||||
| out_me = me_un_seg_sum(input, indices_me, num_segments) | |||||
| out_me = me_un_seg_sum(input_, indices_me, num_segments) | |||||
| print("-------------ms------------------") | print("-------------ms------------------") | ||||
| print(out_me) | print(out_me) | ||||
| @@ -87,8 +87,8 @@ if __name__ == '__main__': | |||||
| super(dataiter, self).__init__() | super(dataiter, self).__init__() | ||||
| def construct(self): | def construct(self): | ||||
| input, label = get_next() | |||||
| return tadd(input) | |||||
| input_, _ = get_next() | |||||
| return tadd(input_) | |||||
| net = dataiter() | net = dataiter() | ||||
| @@ -18,7 +18,6 @@ import pytest | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.ops.operations import _grad_ops as G | from mindspore.ops.operations import _grad_ops as G | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target='CPU') | context.set_context(mode=context.GRAPH_MODE, device_target='CPU') | ||||
| @@ -21,7 +21,6 @@ import mindspore.nn as nn | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.initializer import initializer | from mindspore.common.initializer import initializer | ||||
| from mindspore.common.parameter import Parameter | from mindspore.common.parameter import Parameter | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.ops.operations import _grad_ops as G | from mindspore.ops.operations import _grad_ops as G | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target='CPU') | context.set_context(mode=context.GRAPH_MODE, device_target='CPU') | ||||
| @@ -57,7 +57,7 @@ def test_momentum(): | |||||
| train_network = TrainOneStepCell(net_with_criterion, optimizer) # optimizer | train_network = TrainOneStepCell(net_with_criterion, optimizer) # optimizer | ||||
| train_network.set_train() | train_network.set_train() | ||||
| losses = [] | losses = [] | ||||
| for i in range(epoch): | |||||
| for _ in range(epoch): | |||||
| data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01) | data = Tensor(np.arange(0, 16).reshape(1, 1, 4, 4).astype(np.float32) * 0.01) | ||||
| label = Tensor(np.array([0]).astype(np.int32)) | label = Tensor(np.array([0]).astype(np.int32)) | ||||
| loss = train_network(data, label) | loss = train_network(data, label) | ||||
| @@ -70,6 +70,5 @@ def test_momentum(): | |||||
| [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167 | [[0.04132498 0.00874167 0.00874167 0.00874167 0.00874167 | ||||
| 0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]] | 0.00874167 0.00874167 0.00874167 0.00874167 0.00874167]] | ||||
| """ | """ | ||||
| error = np.ones(shape=[1, 10]) * 1.0e-6 | |||||
| return losses | return losses | ||||
| @@ -20,7 +20,6 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.api import ms_function | from mindspore.common.api import ms_function | ||||
| from mindspore.ops import operations as P | |||||
| context.set_context(device_target='CPU') | context.set_context(device_target='CPU') | ||||
| @@ -21,7 +21,6 @@ import mindspore.nn as nn | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common.initializer import initializer | from mindspore.common.initializer import initializer | ||||
| from mindspore.common.parameter import Parameter | from mindspore.common.parameter import Parameter | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.ops.operations import _grad_ops as G | from mindspore.ops.operations import _grad_ops as G | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target='CPU') | context.set_context(mode=context.GRAPH_MODE, device_target='CPU') | ||||
| @@ -48,7 +47,7 @@ class NetReluGrad(nn.Cell): | |||||
| def test_relu_grad(): | def test_relu_grad(): | ||||
| relu_grad = NetReluGrad() | relu_grad = NetReluGrad() | ||||
| output = relu_grad() | output = relu_grad() | ||||
| expect = np.array([[[[0, 0, 1, ], [0, 0, 0, ], [1, 1, 0.]]]]).astype(np.float32) | |||||
| expect = np.array([[[[0, 0, 1,], [0, 0, 0,], [1, 1, 0.]]]]).astype(np.float32) | |||||
| error = np.ones(shape=[3, 3]) * 1.0e-6 | error = np.ones(shape=[3, 3]) * 1.0e-6 | ||||
| diff = output.asnumpy() - expect | diff = output.asnumpy() - expect | ||||
| assert np.all(diff < error) | assert np.all(diff < error) | ||||
| @@ -44,8 +44,8 @@ class NetRelu(nn.Cell): | |||||
| def test_relu(): | def test_relu(): | ||||
| relu = NetRelu() | relu = NetRelu() | ||||
| output = relu() | output = relu() | ||||
| expect = np.array([[[[0, 1, 10, ], | |||||
| [1, 0, 1, ], | |||||
| expect = np.array([[[[0, 1, 10,], | |||||
| [1, 0, 1,], | |||||
| [10, 1, 0.]]]]).astype(np.float32) | [10, 1, 0.]]]]).astype(np.float32) | ||||
| print(output) | print(output) | ||||
| assert (output.asnumpy() == expect).all() | assert (output.asnumpy() == expect).all() | ||||
| @@ -0,0 +1,519 @@ | |||||
| # Copyright 2020 Huawei Technologies Co., Ltd | |||||
| # | |||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| # you may not use this file except in compliance with the License. | |||||
| # You may obtain a copy of the License at | |||||
| # | |||||
| # http://www.apache.org/licenses/LICENSE-2.0 | |||||
| # | |||||
| # Unless required by applicable law or agreed to in writing, software | |||||
| # distributed under the License is distributed on an "AS IS" BASIS, | |||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| # See the License for the specific language governing permissions and | |||||
| # limitations under the License. | |||||
| # ============================================================================ | |||||
| import te.lang.cce | |||||
| from te import tvm | |||||
| from te.platform import CUBE_MKN | |||||
| from topi import generic | |||||
| from topi.cce import util | |||||
| from topi.cce.util import is_v200_version | |||||
| # pylint: disable=R0912,R0913,R0914,R0915,E1101 | |||||
| # the dim of shape in conv must be 4 | |||||
| PAD_SHAPE_DIM = 2 | |||||
| NONETYPE = type(None) | |||||
| @util.check_input_type((list, tuple), (list, tuple), str, str, str, (list, int), (list, int), | |||||
| int, int, (list, tuple), (list, tuple), | |||||
| str, str, str, | |||||
| str, str, str, | |||||
| str, bool, str) | |||||
| def conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, | |||||
| strideh, stridew, quantize_config, scale_sqrt, | |||||
| scale_q_dtype, offset_q_dtype, scale_dq_dtype, | |||||
| scale_rq_dtype, offset_rq_dtype, offset_w_dtype, | |||||
| offset_pad_dtype, bias, kernel_name): | |||||
| # kernel name check | |||||
| util.check_kernel_name(kernel_name) | |||||
| # conv data type check | |||||
| util.check_dtype_rule(in_dtype, ['float16', 'int8', 'uint8']) | |||||
| util.check_dtype_rule(w_dtype, ['float16', 'int8', 'uint8']) | |||||
| res_dtype_list = ['float16', 'int8', 'uint8'] | |||||
| if is_v200_version(): | |||||
| res_dtype_list.append('int32') | |||||
| util.check_dtype_rule(res_dtype, res_dtype_list) | |||||
| util.check_dtype_rule(scale_q_dtype, ['float16']) | |||||
| util.check_dtype_rule(offset_q_dtype, ['float16']) | |||||
| util.check_dtype_rule(scale_dq_dtype, ['float16']) | |||||
| util.check_dtype_rule(scale_rq_dtype, ['float16']) | |||||
| util.check_dtype_rule(offset_rq_dtype, ['float16']) | |||||
| util.check_dtype_rule(offset_w_dtype, ['int32']) | |||||
| util.check_dtype_rule(offset_pad_dtype, ['uint8']) | |||||
| if not isinstance(bias, bool): | |||||
| raise RuntimeError("bias dtype should be bool.") | |||||
| if quantize_config[0] == 0: | |||||
| if is_v200_version(): | |||||
| util.check_dtype_rule(in_dtype, ('int8',)) | |||||
| util.check_dtype_rule(w_dtype, ('int8',)) | |||||
| util.check_dtype_rule(res_dtype, ('int32',)) | |||||
| else: | |||||
| util.check_dtype_rule(in_dtype, ['float16']) | |||||
| util.check_dtype_rule(w_dtype, ['float16']) | |||||
| util.check_dtype_rule(res_dtype, ['float16']) | |||||
| if quantize_config[0] == 1: | |||||
| util.check_dtype_rule(w_dtype, ['int8']) | |||||
| if quantize_config[1] == 0: | |||||
| util.check_dtype_rule(in_dtype, ['int8', 'float16']) | |||||
| util.check_dtype_rule(res_dtype, ['int8', 'float16']) | |||||
| elif quantize_config[1] == 1: | |||||
| util.check_dtype_rule(in_dtype, ['uint8', 'float16']) | |||||
| util.check_dtype_rule(res_dtype, ['uint8', 'float16']) | |||||
| elif quantize_config[1] == 2: | |||||
| raise RuntimeError("All Offset mode quantize not support.") | |||||
| else: | |||||
| raise RuntimeError("Invalid quantize algorithm.") | |||||
| # quantize switch on | |||||
| if quantize_config[0] == 1: | |||||
| # quantize -> DeQuantize dataflow | |||||
| if in_dtype == 'float16' and w_dtype == 'int8' and res_dtype == 'float16': | |||||
| pass | |||||
| # DeQuantize dataflow | |||||
| elif (in_dtype in ['int8', 'uint8'] and w_dtype == 'int8' and | |||||
| res_dtype == 'float16'): | |||||
| pass | |||||
| # quantize -> ReQuantize dataflow | |||||
| elif (in_dtype == 'float16' and w_dtype == 'int8' and res_dtype in | |||||
| ['int8', 'uint8']): | |||||
| pass | |||||
| # ReQuantize dataflow | |||||
| elif (in_dtype in ['int8', 'uint8'] and w_dtype == 'int8' and res_dtype in | |||||
| ['int8', 'uint8']): | |||||
| pass | |||||
| else: | |||||
| raise RuntimeError("Not support in/out data type for quantize.") | |||||
| if quantize_config not in ([1, 0, 0], [1, 1, 0], [1, 0, 1], [1, 1, 1]): | |||||
| raise RuntimeError("Invalid Quantize Config.") | |||||
| if scale_sqrt not in ([0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], | |||||
| [1, 0, 1], [0, 1, 1], [1, 1, 1]): | |||||
| raise RuntimeError("Invalid Quantize Config.") | |||||
| # quantize switch off | |||||
| elif quantize_config[0] == 0: | |||||
| if quantize_config != [0, 0, 0]: | |||||
| raise RuntimeError("Invalid Quantize Config.") | |||||
| if scale_sqrt != [0, 0, 0]: | |||||
| raise RuntimeError("Invalid Quantize Config.") | |||||
| else: | |||||
| raise RuntimeError("Invalid Quantize Config.") | |||||
| if isinstance(padh, list): | |||||
| if len(padh) != PAD_SHAPE_DIM: | |||||
| raise RuntimeError("Dimension must be %d when padh is a list." % PAD_SHAPE_DIM) | |||||
| pad_top = padh[0] | |||||
| pad_bottom = padh[1] | |||||
| else: | |||||
| pad_top = padh | |||||
| pad_bottom = padh | |||||
| if isinstance(padw, list): | |||||
| if len(padw) != PAD_SHAPE_DIM: | |||||
| raise RuntimeError("Dimension must be %d when padw is a list." % PAD_SHAPE_DIM) | |||||
| pad_left = padw[0] | |||||
| pad_right = padw[1] | |||||
| else: | |||||
| pad_left = padw | |||||
| pad_right = padw | |||||
| shape_in, shape_w = te.lang.cce.check_conv_shape(shape_in, shape_w, pad_top, pad_bottom, \ | |||||
| pad_left, pad_right, strideh, \ | |||||
| stridew, in_dtype, w_dtype, res_dtype) | |||||
| return shape_in, shape_w | |||||
| @util.check_input_type((list, tuple), (list, tuple), str, str, str, \ | |||||
| (list, int), (list, int), int, int, | |||||
| (list, NONETYPE), (list, NONETYPE), | |||||
| str, str, str, | |||||
| str, str, str, str, | |||||
| bool, str, bool, bool) | |||||
| def conv_layer_cce(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, strideh, stridew, | |||||
| quantize_config=None, scale_sqrt=None, | |||||
| scale_q_dtype='float16', offset_q_dtype='float16', scale_dq_dtype='float16', | |||||
| scale_rq_dtype='float16', offset_rq_dtype='float16', offset_w_dtype='int32', | |||||
| offset_pad_dtype='uint8', bias=False, kernel_name="cce_conv", need_build=False, | |||||
| need_print=False): | |||||
| """ | |||||
| Parameters | |||||
| ---------- | |||||
| shape_in : shape of data_in | |||||
| shape_w : shape of filter | |||||
| in_dtype : the feature map data type | |||||
| w_dtype : the weight data type | |||||
| res_dtype : the result data type | |||||
| padh: the padding shape in H | |||||
| padw: the padding shape in W | |||||
| strideh: the stride value in H | |||||
| stridew: the stride value in W | |||||
| quantize_config: quantize config table, default [0, 0, 0] | |||||
| quantize_config[0] - quantize function switch | |||||
| 0: quantize off | |||||
| 1: quantize on | |||||
| quantize_config[1] - quantize_algorithm | |||||
| 0: non offset | |||||
| 1: half offset | |||||
| 2: all offset (not supported now) | |||||
| quantize_config[2] - QuantizeScaleType (for Dequantize/Requantize, quantize always scalar) | |||||
| 0: scalar | |||||
| 1: vector | |||||
| scale_sqrt: scale mode | |||||
| scale_sqrt[0] - Quantize scale mode | |||||
| 0: non sqrt | |||||
| 1: sqrt | |||||
| scale_sqrt[1] - DeQuantize scale mode | |||||
| 0: non sqrt | |||||
| 1: sqrt | |||||
| scale_sqrt[2] - ReQuantize scale mode | |||||
| 0: non sqrt | |||||
| 1: sqrt | |||||
| scale_q_dtype: Quantize scale data type, default 'float16' | |||||
| offset_q_dtype: Quantize offset data type, default 'float16' | |||||
| scale_dq_dtype: DeQuantize scale data type, default 'float16' | |||||
| scale_rq_dtype: ReQuantize scale data type, default 'float16' | |||||
| offset_rq_dtype: ReQuantize offset data type, default 'float16' | |||||
| offset_w_dtype: weight offset data type, default 'int32' | |||||
| offset_pad_dtype: Quantize Cube offset data type, default 'uint8' | |||||
| bias: whether a bias tensor is added | |||||
| kernel_name: cce kernel name, default value is "cce_conv" | |||||
| need_build: whether to build the CCEC kernel, default value is False | |||||
| need_print: whether to print the IR, default value is False | |||||
| Returns | |||||
| ------- | |||||
| wrapped_tensor | |||||
| """ | |||||
| # for pylint, otherwise "Dangerous default value [] as argument" | |||||
| if quantize_config is None: | |||||
| quantize_config = [0, 0, 0] | |||||
| if scale_sqrt is None: | |||||
| scale_sqrt = [0, 0, 0] | |||||
| in_dtype = in_dtype.lower() | |||||
| w_dtype = w_dtype.lower() | |||||
| res_dtype = res_dtype.lower() | |||||
| scale_q_dtype = scale_q_dtype.lower() | |||||
| offset_q_dtype = offset_q_dtype.lower() | |||||
| scale_dq_dtype = scale_dq_dtype.lower() | |||||
| scale_rq_dtype = scale_rq_dtype.lower() | |||||
| offset_rq_dtype = offset_rq_dtype.lower() | |||||
| offset_w_dtype = offset_w_dtype.lower() | |||||
| offset_pad_dtype = offset_pad_dtype.lower() | |||||
| mad_dtype = 'float32' | |||||
| if w_dtype == 'int8': | |||||
| mad_dtype = 'int32' | |||||
| shape_in = list(shape_in) | |||||
| shape_w = list(shape_w) | |||||
| shape_in, shape_w = conv_layer_cce_para_check(shape_in, shape_w, in_dtype, w_dtype, res_dtype, padh, padw, strideh, | |||||
| stridew, | |||||
| quantize_config, scale_sqrt, scale_q_dtype, offset_q_dtype, | |||||
| scale_dq_dtype, | |||||
| scale_rq_dtype, offset_rq_dtype, offset_w_dtype, offset_pad_dtype, | |||||
| bias, kernel_name) | |||||
| # quantize switch on | |||||
| if quantize_config[0] == 1: | |||||
| quantize_turn_on = True | |||||
| # quantize -> DeQuantize dataflow | |||||
| if in_dtype == 'float16' and w_dtype == 'int8' and res_dtype == 'float16': | |||||
| is_quantize = True | |||||
| is_dequantize = True | |||||
| is_requantize = False | |||||
| # DeQuantize dataflow | |||||
| elif (in_dtype in ['int8', 'uint8'] and w_dtype == 'int8' and | |||||
| res_dtype == 'float16'): | |||||
| is_quantize = False | |||||
| is_dequantize = True | |||||
| is_requantize = False | |||||
| # quantize -> ReQuantize dataflow | |||||
| elif (in_dtype == 'float16' and w_dtype == 'int8' and res_dtype in | |||||
| ['int8', 'uint8']): | |||||
| is_quantize = True | |||||
| is_dequantize = False | |||||
| is_requantize = True | |||||
| # ReQuantize dataflow | |||||
| elif (in_dtype in ['int8', 'uint8'] and w_dtype == 'int8' and res_dtype in | |||||
| ['int8', 'uint8']): | |||||
| is_quantize = False | |||||
| is_dequantize = False | |||||
| is_requantize = True | |||||
| else: | |||||
| raise RuntimeError("Not support in/out data type for quantize.") | |||||
| # quantize switch off | |||||
| elif quantize_config[0] == 0: | |||||
| quantize_turn_on = False | |||||
| is_quantize = False | |||||
| is_dequantize = False | |||||
| is_requantize = False | |||||
| if quantize_config != [0, 0, 0]: | |||||
| raise RuntimeError("Invalid Quantize Config.") | |||||
| if scale_sqrt != [0, 0, 0]: | |||||
| raise RuntimeError("Invalid Quantize Config.") | |||||
| else: | |||||
| raise RuntimeError("Invalid Quantize Config.") | |||||
| batch_size = shape_in[0] | |||||
| in_channel = shape_in[1] | |||||
| feature_map_h = shape_in[2] | |||||
| feature_map_w = shape_in[3] | |||||
| block_size_k = CUBE_MKN[in_dtype]['mac'][1] | |||||
| fmap_shape_nc1hwc0 = (batch_size, (in_channel + block_size_k - 1) // block_size_k, | |||||
| feature_map_h, feature_map_w, block_size_k) | |||||
| out_channel = shape_w[0] | |||||
| in_channel_weight = shape_w[1] | |||||
| filter_h = shape_w[2] | |||||
| filter_w = shape_w[3] | |||||
| block_size_k = CUBE_MKN[w_dtype]['mac'][1] | |||||
| block_size_n = CUBE_MKN[w_dtype]['mac'][2] | |||||
| filter_shape_frac_z = (in_channel_weight * filter_h * filter_w // block_size_k, | |||||
| out_channel // block_size_n, block_size_n, block_size_k) | |||||
| with tvm.target.cce(): | |||||
| data = tvm.placeholder( | |||||
| fmap_shape_nc1hwc0, name='Fmap', dtype=in_dtype) | |||||
| weight = tvm.placeholder( | |||||
| filter_shape_frac_z, name='Filter', dtype=w_dtype) | |||||
| bias_tensor = None | |||||
| scale_q = None | |||||
| scale_dq = None | |||||
| scale_rq = None | |||||
| offset_pad = None | |||||
| offset_rq = None | |||||
| offset_q = None | |||||
| scale_drq = None | |||||
| # bias or fusion_bias(half offset) | |||||
| if bias or (quantize_config[1] == 1 and quantize_turn_on): | |||||
| bias_tensor = tvm.placeholder( | |||||
| (out_channel,), name='bias_tensor', \ | |||||
| dtype="int32" if quantize_turn_on else res_dtype) | |||||
| # quantize on | |||||
| if quantize_turn_on: | |||||
| quantize_algorithm = quantize_config[1] | |||||
| if is_quantize: | |||||
| scale_q = tvm.placeholder( | |||||
| (CUBE_MKN[scale_q_dtype]['mac'][1],), name='scaleQ', dtype=scale_q_dtype) | |||||
| if quantize_algorithm == 1: | |||||
| offset_q = tvm.placeholder( | |||||
| (CUBE_MKN[offset_q_dtype]['mac'][1],), name='offsetQ', dtype=offset_q_dtype) | |||||
| if is_dequantize: | |||||
| scale_dq_shape = (CUBE_MKN[scale_dq_dtype]['mac'][1],) if quantize_config[2] == 0 \ | |||||
| else (out_channel,) | |||||
| scale_dq = tvm.placeholder( | |||||
| scale_dq_shape, name='scaleDq', dtype=scale_dq_dtype) | |||||
| if is_requantize: | |||||
| scale_rq_shape = (CUBE_MKN[scale_rq_dtype]['mac'][1],) if quantize_config[2] == 0 \ | |||||
| else (out_channel,) | |||||
| scale_rq = tvm.placeholder( | |||||
| scale_rq_shape, name='scaleRq', dtype=scale_rq_dtype) | |||||
| if quantize_algorithm == 1: | |||||
| offset_rq_shape = (CUBE_MKN[offset_rq_dtype]['mac'][1],) | |||||
| offset_rq = tvm.placeholder( | |||||
| offset_rq_shape, name='offsetRq', dtype=offset_rq_dtype) | |||||
| # offset_pad is needed for half offset | |||||
| if quantize_algorithm == 1: | |||||
| offset_pad = tvm.placeholder( | |||||
| (CUBE_MKN[offset_pad_dtype]['mac'][1],), name='offset_pad', | |||||
| dtype=offset_pad_dtype) | |||||
| if quantize_algorithm == 0: | |||||
| if is_quantize: | |||||
| if is_dequantize: | |||||
| scale_drq = scale_dq | |||||
| else: | |||||
| scale_drq = scale_rq | |||||
| conv_res = te.lang.cce.conv( | |||||
| data, weight, {"bias_tensor": bias_tensor, | |||||
| "scale_q": scale_q, | |||||
| "offset_q": offset_q, | |||||
| "scale_drq": scale_drq, | |||||
| "offset_pad": offset_pad, | |||||
| "offset_rq": offset_rq, | |||||
| "quantize_config": quantize_config, | |||||
| "is_quantize": is_quantize, | |||||
| "is_dequantize": is_dequantize, | |||||
| "is_requantize": is_requantize, | |||||
| "scale_sqrt": scale_sqrt, | |||||
| "pad_h": padh, "pad_w": padw, | |||||
| "stride_h": strideh, "stride_w": stridew, | |||||
| "filter_h": filter_h, "filter_w": filter_w, | |||||
| "res_dtype": res_dtype, "mad_dtype": mad_dtype}, | |||||
| dsl_flag=False) | |||||
| if bias: | |||||
| tensor_list = [data, weight, bias_tensor, scale_q, | |||||
| scale_drq, conv_res] | |||||
| else: | |||||
| tensor_list = [data, weight, scale_q, | |||||
| scale_drq, conv_res] | |||||
| else: | |||||
| if is_dequantize: | |||||
| scale_drq = scale_dq | |||||
| else: | |||||
| scale_drq = scale_rq | |||||
| conv_res = te.lang.cce.conv( | |||||
| data, weight, {"bias_tensor": bias_tensor, | |||||
| "scale_q": scale_q, | |||||
| "offset_q": offset_q, | |||||
| "scale_drq": scale_drq, | |||||
| "offset_pad": offset_pad, | |||||
| "offset_rq": offset_rq, | |||||
| "quantize_config": quantize_config, | |||||
| "is_quantize": is_quantize, | |||||
| "is_dequantize": is_dequantize, | |||||
| "is_requantize": is_requantize, | |||||
| "scale_sqrt": scale_sqrt, | |||||
| "pad_h": padh, "pad_w": padw, | |||||
| "stride_h": strideh, "stride_w": stridew, | |||||
| "filter_h": filter_h, "filter_w": filter_w, | |||||
| "res_dtype": res_dtype, "mad_dtype": mad_dtype}, | |||||
| dsl_flag=False) | |||||
| if bias: | |||||
| tensor_list = [data, weight, bias_tensor, | |||||
| scale_drq, conv_res] | |||||
| else: | |||||
| tensor_list = [data, weight, | |||||
| scale_drq, conv_res] | |||||
| # half offset | |||||
| else: | |||||
| if is_quantize: | |||||
| if is_dequantize: | |||||
| scale_drq = scale_dq | |||||
| else: | |||||
| scale_drq = scale_rq | |||||
| conv_res = te.lang.cce.conv( | |||||
| data, weight, {"bias_tensor": bias_tensor, | |||||
| "scale_q": scale_q, | |||||
| "offset_q": offset_q, | |||||
| "scale_drq": scale_drq, | |||||
| "offset_pad": offset_pad, | |||||
| "offset_rq": offset_rq, | |||||
| "quantize_config": quantize_config, | |||||
| "is_quantize": is_quantize, | |||||
| "is_dequantize": is_dequantize, | |||||
| "is_requantize": is_requantize, | |||||
| "scale_sqrt": scale_sqrt, | |||||
| "pad_h": padh, "pad_w": padw, | |||||
| "stride_h": strideh, "stride_w": stridew, | |||||
| "filter_h": filter_h, "filter_w": filter_w, | |||||
| "res_dtype": res_dtype, "mad_dtype": mad_dtype}, | |||||
| dsl_flag=False) | |||||
| if is_dequantize: | |||||
| tensor_list = [data, weight, bias_tensor, scale_q, offset_q, | |||||
| scale_drq, offset_pad, conv_res] | |||||
| else: | |||||
| tensor_list = [data, weight, bias_tensor, scale_q, offset_q, | |||||
| scale_drq, offset_rq, offset_pad, conv_res] | |||||
| else: | |||||
| if is_dequantize: | |||||
| scale_drq = scale_dq | |||||
| else: | |||||
| scale_drq = scale_rq | |||||
| conv_res = te.lang.cce.conv( | |||||
| data, weight, {"bias_tensor": bias_tensor, | |||||
| "scale_q": scale_q, | |||||
| "offset_q": offset_q, | |||||
| "scale_drq": scale_drq, | |||||
| "offset_pad": offset_pad, | |||||
| "offset_rq": offset_rq, | |||||
| "quantize_config": quantize_config, | |||||
| "is_quantize": is_quantize, | |||||
| "is_dequantize": is_dequantize, | |||||
| "is_requantize": is_requantize, | |||||
| "scale_sqrt": scale_sqrt, | |||||
| "pad_h": padh, "pad_w": padw, | |||||
| "stride_h": strideh, "stride_w": stridew, | |||||
| "filter_h": filter_h, "filter_w": filter_w, | |||||
| "res_dtype": res_dtype, "mad_dtype": mad_dtype}, | |||||
| dsl_flag=False) | |||||
| if is_dequantize: | |||||
| tensor_list = [data, weight, bias_tensor, | |||||
| scale_drq, offset_pad, conv_res] | |||||
| else: | |||||
| tensor_list = [data, weight, bias_tensor, | |||||
| scale_drq, offset_rq, offset_pad, conv_res] | |||||
| else: | |||||
| conv_res = te.lang.cce.conv( | |||||
| data, weight, {"bias_tensor": bias_tensor, | |||||
| "scale_q": scale_q, | |||||
| "offset_q": offset_q, | |||||
| "scale_drq": scale_drq, | |||||
| "offset_pad": offset_pad, | |||||
| "offset_rq": offset_rq, | |||||
| "quantize_config": quantize_config, | |||||
| "is_quantize": is_quantize, | |||||
| "is_dequantize": is_dequantize, | |||||
| "is_requantize": is_requantize, | |||||
| "scale_sqrt": scale_sqrt, | |||||
| "pad_h": padh, "pad_w": padw, | |||||
| "stride_h": strideh, "stride_w": stridew, | |||||
| "filter_h": filter_h, "filter_w": filter_w, | |||||
| "res_dtype": res_dtype, "mad_dtype": mad_dtype}, | |||||
| dsl_flag=False) | |||||
| if bias: | |||||
| tensor_list = [data, weight, bias_tensor, conv_res] | |||||
| else: | |||||
| tensor_list = [data, weight, conv_res] | |||||
| sch = generic.auto_schedule(conv_res) | |||||
| config = { | |||||
| "print_ir": need_print, | |||||
| "need_build": need_build, | |||||
| "name": kernel_name, | |||||
| "tensor_list": tensor_list | |||||
| } | |||||
| te.lang.cce.cce_build_code(sch, config) | |||||
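The new file above only defines the build entry point; a minimal, non-quantized float16 invocation might look like the sketch below. The shapes, strides and kernel name are illustrative assumptions rather than values taken from this patch; with quantize_config left at its default [0, 0, 0], the plain float16 dataflow is selected and tensor_list reduces to [data, weight, conv_res].

    # Hypothetical call: a 3x3 float16 convolution, no quantization, no bias.
    conv_layer_cce(shape_in=(1, 16, 28, 28),      # NCHW feature map
                   shape_w=(32, 16, 3, 3),        # Cout, Cin, Kh, Kw
                   in_dtype="float16", w_dtype="float16", res_dtype="float16",
                   padh=1, padw=1, strideh=1, stridew=1,
                   bias=False, kernel_name="conv2d_fp16_example",
                   need_build=True, need_print=False)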
| @@ -20,12 +20,12 @@ from mindspore.ops import operations as P | |||||
| # y = x^2 | # y = x^2 | ||||
| class CusSquare(PrimitiveWithInfer): | class CusSquare(PrimitiveWithInfer): | ||||
| """CusSquare definition""" | """CusSquare definition""" | ||||
| from square_impl import CusSquareImpl | |||||
| @prim_attr_register | @prim_attr_register | ||||
| def __init__(self): | def __init__(self): | ||||
| """init CusSquare""" | """init CusSquare""" | ||||
| self.init_prim_io_names(inputs=['x'], outputs=['y']) | self.init_prim_io_names(inputs=['x'], outputs=['y']) | ||||
| from square_impl import CusSquareImpl | |||||
| def vm_impl(self, x): | def vm_impl(self, x): | ||||
| x = x.asnumpy() | x = x.asnumpy() | ||||
| @@ -119,7 +119,7 @@ def test_4d_transpose_ab(): | |||||
| [[5612, 5810, 6008, 6206]]]] | [[5612, 5810, 6008, 6206]]]] | ||||
| assert (output.asnumpy() == expect).all() | assert (output.asnumpy() == expect).all() | ||||
| def test_4d_fp16(): | |||||
| def test_4D_fp16(): | |||||
| input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float16) | input_x = Tensor(np.arange(2 * 4 * 1 * 3).reshape(2, 4, 1, 3), mstype.float16) | ||||
| input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16) | input_y = Tensor(np.arange(2 * 4 * 3 * 4).reshape(2, 4, 3, 4), mstype.float16) | ||||
| @@ -68,10 +68,11 @@ def test_batchnrom_fold2(): | |||||
| current_step = np.array([0]).astype('int32') | current_step = np.array([0]).astype('int32') | ||||
| output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean), | output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean), | ||||
| Tensor(running_std), Tensor(running_mean), Tensor(current_step)) | Tensor(running_std), Tensor(running_mean), Tensor(current_step)) | ||||
| expect = ((x + beta.reshape(-1, 1, 1) - | |||||
| (gamma * running_mean / running_std).reshape(-1, 1, 1) if current_step >= freeze_bn else | |||||
| x * (running_std / batch_std).reshape(-1, 1, 1) + | |||||
| (beta - gamma * batch_mean / batch_std).reshape(-1, 1, 1))) | |||||
| expect = (x + beta.reshape(-1, 1, | |||||
| 1) - (gamma * running_mean / running_std).reshape(-1, 1, | |||||
| 1) if current_step >= freeze_bn else | |||||
| x * (running_std / batch_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1, | |||||
| 1)) | |||||
| error = np.ones(shape=expect.shape) * 1.0e-6 | error = np.ones(shape=expect.shape) * 1.0e-6 | ||||
| diff = output.asnumpy() - expect | diff = output.asnumpy() - expect | ||||
| assert np.all(diff < error) | assert np.all(diff < error) | ||||
| @@ -80,9 +81,11 @@ def test_batchnrom_fold2(): | |||||
| current_step = np.array([100000]).astype('int32') | current_step = np.array([100000]).astype('int32') | ||||
| output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean), Tensor(running_std), | output = net(Tensor(x), Tensor(beta), Tensor(gamma), Tensor(batch_std), Tensor(batch_mean), Tensor(running_std), | ||||
| Tensor(running_mean), Tensor(current_step)) | Tensor(running_mean), Tensor(current_step)) | ||||
| expect = ((x + beta.reshape(-1, 1, 1) - (gamma * running_mean / running_std).reshape(-1, 1, 1) | |||||
| if current_step >= freeze_bn else x * (batch_std / running_std).reshape(-1, 1, 1) + | |||||
| (beta - gamma * batch_mean / batch_std).reshape(-1, 1, 1))) | |||||
| expect = (x + beta.reshape(-1, 1, | |||||
| 1) - (gamma * running_mean / running_std).reshape(-1, 1, | |||||
| 1) if current_step >= freeze_bn else | |||||
| x * (batch_std / running_std).reshape(-1, 1, 1) + (beta - gamma * batch_mean / batch_std).reshape(-1, 1, | |||||
| 1)) | |||||
| error = np.ones(shape=expect.shape) * 1.0e-6 | error = np.ones(shape=expect.shape) * 1.0e-6 | ||||
| diff = output.asnumpy() - expect | diff = output.asnumpy() - expect | ||||
| assert np.all(diff < error) | assert np.all(diff < error) | ||||
| @@ -38,8 +38,8 @@ class Net(nn.Cell): | |||||
| def np_result(d_batch_mean, d_batch_std, x, batch_mean, batch_std): | def np_result(d_batch_mean, d_batch_std, x, batch_mean, batch_std): | ||||
| n = x.shape[0] * x.shape[2] * x.shape[3] | n = x.shape[0] * x.shape[2] * x.shape[3] | ||||
| dx = (d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) * | |||||
| (x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n) | |||||
| dx = d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) * ( | |||||
| x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n | |||||
| return dx | return dx | ||||
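Annotation (not part of the diff): a quick, illustrative check that the rewrapped np_result expression above computes exactly the same dx as before; the shapes follow the NCHW layout used by the test.

    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.randn(2, 3, 4, 4).astype(np.float32)
    batch_mean = x.mean(axis=(0, 2, 3))
    batch_std = x.std(axis=(0, 2, 3)) + 1e-3
    d_batch_mean = rng.randn(3).astype(np.float32)
    d_batch_std = rng.randn(3).astype(np.float32)
    n = x.shape[0] * x.shape[2] * x.shape[3]

    before = (d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) *
              (x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n)
    after = d_batch_mean.reshape(1, -1, 1, 1) / n + d_batch_std.reshape(1, -1, 1, 1) * (
        x - batch_mean.reshape(1, -1, 1, 1)) / batch_std.reshape(1, -1, 1, 1) / n
    assert np.allclose(before, after)   # only the parenthesization changed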
| @@ -172,8 +172,8 @@ class Grad(nn.Cell): | |||||
| self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) | self.grad = GradOperation(name="get_all", get_all=True, sens_param=True) | ||||
| self.network = network | self.network = network | ||||
| def construct(self, inputs, bias, dy): | |||||
| return self.grad(self.network)(inputs, bias, dy) | |||||
| def construct(self, input_, bias, dy): | |||||
| return self.grad(self.network)(input_, bias, dy) | |||||
| @pytest.mark.level0 | @pytest.mark.level0 | ||||
| @@ -783,10 +783,6 @@ def test_grad(): | |||||
| bidirectional = True | bidirectional = True | ||||
| dropout = 0.0 | dropout = 0.0 | ||||
| num_directions = 1 | |||||
| if bidirectional: | |||||
| num_directions = 2 | |||||
| net = Grad(Net(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)) | net = Grad(Net(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout)) | ||||
| dy = np.array([[[-3.5471e-01, 7.0540e-01, -7.5945e-01, -1.2322e+00], | dy = np.array([[[-3.5471e-01, 7.0540e-01, -7.5945e-01, -1.2322e+00], | ||||
| @@ -804,7 +800,7 @@ def test_grad(): | |||||
| [[-1.6032e+00, -1.8818e-01, 7.0441e-01, -2.8765e+00], | [[-1.6032e+00, -1.8818e-01, 7.0441e-01, -2.8765e+00], | ||||
| [1.0065e-01, 9.2045e-01, 2.7426e-01, 2.6196e-01]]]).astype(np.float32) | [1.0065e-01, 9.2045e-01, 2.7426e-01, 2.6196e-01]]]).astype(np.float32) | ||||
| dx, dh, dc, dw = net(Tensor(dy)) | |||||
| dx, dh, dc, _ = net(Tensor(dy)) | |||||
| expect_dx = np.array([[[0.01697153, -0.0096909, 0.01306139, 0.00863109, -0.00122794, -0.00746152, -0.00879683, | expect_dx = np.array([[[0.01697153, -0.0096909, 0.01306139, 0.00863109, -0.00122794, -0.00746152, -0.00879683, | ||||
| 0.00643571, 0.0015958, 0.01480642], | 0.00643571, 0.0015958, 0.01480642], | ||||
| [0.05794962, -0.02326604, 0.01862703, 0.02053947, 0.02607713, -0.01278067, 0.04250786, | [0.05794962, -0.02326604, 0.01862703, 0.02053947, 0.02607713, -0.01278067, 0.04250786, | ||||
| @@ -964,12 +960,8 @@ def test_lstm_dropout(): | |||||
| bidirectional = False | bidirectional = False | ||||
| dropout = 1.0 | dropout = 1.0 | ||||
| num_directions = 1 | |||||
| if bidirectional: | |||||
| num_directions = 2 | |||||
| net = LstmNetWithDropout(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout) | net = LstmNetWithDropout(seq_len, batch_size, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout) | ||||
| y, h, c, _, _ = net() | |||||
| y, _, _, _, _ = net() | |||||
| expect_y = np.array([[[-0.45210335, -0.0844336], | expect_y = np.array([[[-0.45210335, -0.0844336], | ||||
| [-0.14677924, 0.07140275]], | [-0.14677924, 0.07140275]], | ||||
| @@ -178,7 +178,8 @@ def test_broadcast(): | |||||
| expect_dx2 = np.array([[[[0., 4.250458, 2.5030296, 3.623167, 6.4171505, 7.2115746]], | expect_dx2 = np.array([[[[0., 4.250458, 2.5030296, 3.623167, 6.4171505, 7.2115746]], | ||||
| [[0., 4.367449, 2.803152, 2.5352, 0., 0.]], | [[0., 4.367449, 2.803152, 2.5352, 0., 0.]], | ||||
| [[0.7087075, 0., 2.040332, 2.1372325, 0., 2.9222295]], | [[0.7087075, 0., 2.040332, 2.1372325, 0., 2.9222295]], | ||||
| [[1.0278877, 5.247942, 2.6855955, 5.494814, 3.5657988, 0.66265094]]]]).astype(np.float32) | |||||
| [[1.0278877, 5.247942, 2.6855955, 5.494814, 3.5657988, | |||||
| 0.66265094]]]]).astype(np.float32) | |||||
| net = Grad(MinimumNet()) | net = Grad(MinimumNet()) | ||||
| output_ms = net(Tensor(x1_np), Tensor(x2_np), Tensor(dy_np)) | output_ms = net(Tensor(x1_np), Tensor(x2_np), Tensor(dy_np)) | ||||
| @@ -182,88 +182,88 @@ def test_ReduceSum(): | |||||
| diff0 = abs(output[0].asnumpy() - expect0) | diff0 = abs(output[0].asnumpy() - expect0) | ||||
| error0 = np.ones(shape=expect0.shape) * 1.0e-5 | error0 = np.ones(shape=expect0.shape) * 1.0e-5 | ||||
| assert np.all(diff0 < error0) | assert np.all(diff0 < error0) | ||||
| assert (output[0].shape() == expect0.shape) | |||||
| assert output[0].shape() == expect0.shape | |||||
| expect1 = np.sum(x1, axis=axis1, keepdims=keep_dims1) | expect1 = np.sum(x1, axis=axis1, keepdims=keep_dims1) | ||||
| diff1 = abs(output[1].asnumpy() - expect1) | diff1 = abs(output[1].asnumpy() - expect1) | ||||
| error1 = np.ones(shape=expect1.shape) * 1.0e-5 | error1 = np.ones(shape=expect1.shape) * 1.0e-5 | ||||
| assert np.all(diff1 < error1) | assert np.all(diff1 < error1) | ||||
| assert (output[1].shape() == expect1.shape) | |||||
| assert output[1].shape() == expect1.shape | |||||
| expect2 = np.sum(x2, axis=axis2, keepdims=keep_dims2) | expect2 = np.sum(x2, axis=axis2, keepdims=keep_dims2) | ||||
| diff2 = abs(output[2].asnumpy() - expect2) | diff2 = abs(output[2].asnumpy() - expect2) | ||||
| error2 = np.ones(shape=expect2.shape) * 1.0e-5 | error2 = np.ones(shape=expect2.shape) * 1.0e-5 | ||||
| assert np.all(diff2 < error2) | assert np.all(diff2 < error2) | ||||
| assert (output[2].shape() == expect2.shape) | |||||
| assert output[2].shape() == expect2.shape | |||||
| expect3 = np.sum(x3, axis=axis3, keepdims=keep_dims3) | expect3 = np.sum(x3, axis=axis3, keepdims=keep_dims3) | ||||
| diff3 = abs(output[3].asnumpy() - expect3) | diff3 = abs(output[3].asnumpy() - expect3) | ||||
| error3 = np.ones(shape=expect3.shape) * 1.0e-5 | error3 = np.ones(shape=expect3.shape) * 1.0e-5 | ||||
| assert np.all(diff3 < error3) | assert np.all(diff3 < error3) | ||||
| assert (output[3].shape() == expect3.shape) | |||||
| assert output[3].shape() == expect3.shape | |||||
| expect4 = np.sum(x4, axis=np_axis4, keepdims=keep_dims4) | expect4 = np.sum(x4, axis=np_axis4, keepdims=keep_dims4) | ||||
| diff4 = abs(output[4].asnumpy() - expect4) | diff4 = abs(output[4].asnumpy() - expect4) | ||||
| error4 = np.ones(shape=expect4.shape) * 1.0e-5 | error4 = np.ones(shape=expect4.shape) * 1.0e-5 | ||||
| assert np.all(diff4 < error4) | assert np.all(diff4 < error4) | ||||
| assert (output[4].shape() == expect4.shape) | |||||
| assert output[4].shape() == expect4.shape | |||||
| expect5 = np.sum(x5, axis=np_axis5, keepdims=keep_dims5) | expect5 = np.sum(x5, axis=np_axis5, keepdims=keep_dims5) | ||||
| diff5 = abs(output[5].asnumpy() - expect5) | diff5 = abs(output[5].asnumpy() - expect5) | ||||
| error5 = np.ones(shape=expect5.shape) * 1.0e-5 | error5 = np.ones(shape=expect5.shape) * 1.0e-5 | ||||
| assert np.all(diff5 < error5) | assert np.all(diff5 < error5) | ||||
| assert (output[5].shape() == expect5.shape) | |||||
| assert output[5].shape() == expect5.shape | |||||
| expect6 = np.sum(x6, axis=axis6, keepdims=keep_dims6) | expect6 = np.sum(x6, axis=axis6, keepdims=keep_dims6) | ||||
| diff6 = abs(output[6].asnumpy() - expect6) | diff6 = abs(output[6].asnumpy() - expect6) | ||||
| error6 = np.ones(shape=expect6.shape) * 1.0e-5 | error6 = np.ones(shape=expect6.shape) * 1.0e-5 | ||||
| assert np.all(diff6 < error6) | assert np.all(diff6 < error6) | ||||
| assert (output[6].shape() == expect6.shape) | |||||
| assert output[6].shape() == expect6.shape | |||||
| expect7 = np.sum(x7, axis=axis7, keepdims=keep_dims7) | expect7 = np.sum(x7, axis=axis7, keepdims=keep_dims7) | ||||
| diff7 = abs(output[7].asnumpy() - expect7) | diff7 = abs(output[7].asnumpy() - expect7) | ||||
| error7 = np.ones(shape=expect7.shape) * 1.0e-5 | error7 = np.ones(shape=expect7.shape) * 1.0e-5 | ||||
| assert np.all(diff7 < error7) | assert np.all(diff7 < error7) | ||||
| assert (output[7].shape() == expect7.shape) | |||||
| assert output[7].shape() == expect7.shape | |||||
| expect8 = np.sum(x8, axis=axis8, keepdims=keep_dims8) | expect8 = np.sum(x8, axis=axis8, keepdims=keep_dims8) | ||||
| diff8 = abs(output[8].asnumpy() - expect8) | diff8 = abs(output[8].asnumpy() - expect8) | ||||
| error8 = np.ones(shape=expect8.shape) * 1.0e-5 | error8 = np.ones(shape=expect8.shape) * 1.0e-5 | ||||
| assert np.all(diff8 < error8) | assert np.all(diff8 < error8) | ||||
| assert (output[8].shape() == expect8.shape) | |||||
| assert output[8].shape() == expect8.shape | |||||
| expect9 = np.sum(x9, axis=axis9, keepdims=keep_dims9) | expect9 = np.sum(x9, axis=axis9, keepdims=keep_dims9) | ||||
| diff9 = abs(output[9].asnumpy() - expect9) | diff9 = abs(output[9].asnumpy() - expect9) | ||||
| error9 = np.ones(shape=expect9.shape) * 1.0e-5 | error9 = np.ones(shape=expect9.shape) * 1.0e-5 | ||||
| assert np.all(diff9 < error9) | assert np.all(diff9 < error9) | ||||
| assert (output[9].shape() == expect9.shape) | |||||
| assert output[9].shape() == expect9.shape | |||||
| expect10 = np.sum(x10, axis=axis10, keepdims=keep_dims10) | expect10 = np.sum(x10, axis=axis10, keepdims=keep_dims10) | ||||
| diff10 = abs(output[10].asnumpy() - expect10) | diff10 = abs(output[10].asnumpy() - expect10) | ||||
| error10 = np.ones(shape=expect10.shape) * 1.0e-5 | error10 = np.ones(shape=expect10.shape) * 1.0e-5 | ||||
| assert np.all(diff10 < error10) | assert np.all(diff10 < error10) | ||||
| assert (output[10].shape() == expect10.shape) | |||||
| assert output[10].shape() == expect10.shape | |||||
| expect11 = np.sum(x11, axis=axis11, keepdims=keep_dims11) | expect11 = np.sum(x11, axis=axis11, keepdims=keep_dims11) | ||||
| diff11 = abs(output[11].asnumpy() - expect11) | diff11 = abs(output[11].asnumpy() - expect11) | ||||
| error11 = np.ones(shape=expect11.shape) * 1.0e-5 | error11 = np.ones(shape=expect11.shape) * 1.0e-5 | ||||
| assert np.all(diff11 < error11) | assert np.all(diff11 < error11) | ||||
| assert (output[11].shape() == expect11.shape) | |||||
| assert output[11].shape() == expect11.shape | |||||
| expect12 = np.sum(x12, axis=axis12, keepdims=keep_dims12) | expect12 = np.sum(x12, axis=axis12, keepdims=keep_dims12) | ||||
| diff12 = abs(output[12].asnumpy() - expect12) | diff12 = abs(output[12].asnumpy() - expect12) | ||||
| error12 = np.ones(shape=expect12.shape) * 1.0e-5 | error12 = np.ones(shape=expect12.shape) * 1.0e-5 | ||||
| assert np.all(diff12 < error12) | assert np.all(diff12 < error12) | ||||
| assert (output[12].shape() == expect12.shape) | |||||
| assert output[12].shape() == expect12.shape | |||||
| expect13 = np.sum(x13, axis=axis13, keepdims=keep_dims13) | expect13 = np.sum(x13, axis=axis13, keepdims=keep_dims13) | ||||
| diff13 = abs(output[13].asnumpy() - expect13) | diff13 = abs(output[13].asnumpy() - expect13) | ||||
| error13 = np.ones(shape=expect13.shape) * 1.0e-5 | error13 = np.ones(shape=expect13.shape) * 1.0e-5 | ||||
| assert np.all(diff13 < error13) | assert np.all(diff13 < error13) | ||||
| assert (output[13].shape() == expect13.shape) | |||||
| assert output[13].shape() == expect13.shape | |||||
| expect14 = np.sum(x14, axis=np_axis14, keepdims=keep_dims14) | expect14 = np.sum(x14, axis=np_axis14, keepdims=keep_dims14) | ||||
| diff14 = abs(output[14].asnumpy() - expect14) | diff14 = abs(output[14].asnumpy() - expect14) | ||||
| error14 = np.ones(shape=expect14.shape) * 1.0e-5 | error14 = np.ones(shape=expect14.shape) * 1.0e-5 | ||||
| assert np.all(diff14 < error14) | assert np.all(diff14 < error14) | ||||
| assert (output[14].shape() == expect14.shape) | |||||
| assert output[14].shape() == expect14.shape | |||||
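Annotation (not part of the diff): the fifteen near-identical assert groups above differ only in the operand, axis, and keep_dims. A hedged sketch of a helper that could replace them; check_reduce_sum is hypothetical and assumes the test's output tensors, which expose shape() as a method in this MindSpore version.

    def check_reduce_sum(out, x, axis, keep_dims, atol=1.0e-5):
        expect = np.sum(x, axis=axis, keepdims=keep_dims)
        assert np.all(np.abs(out.asnumpy() - expect) < atol)
        assert out.shape() == expect.shape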
| @@ -19,7 +19,6 @@ import pytest | |||||
| import mindspore.context as context | import mindspore.context as context | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.ops import operations as P | |||||
| from mindspore.ops.operations import _grad_ops as G | from mindspore.ops.operations import _grad_ops as G | ||||
| @@ -42,7 +41,7 @@ def test_relu_grad(): | |||||
| dy = Tensor(np.array([[[[1, 0, 1], | dy = Tensor(np.array([[[[1, 0, 1], | ||||
| [0, 1, 0], | [0, 1, 0], | ||||
| [1, 1, 1]]]]).astype(np.float32)) | [1, 1, 1]]]]).astype(np.float32)) | ||||
| expect = np.array([[[[0, 0, 1, ], [0, 0, 0, ], [1, 1, 0.]]]]).astype(np.float32) | |||||
| expect = np.array([[[[0, 0, 1,], [0, 0, 0,], [1, 1, 0.]]]]).astype(np.float32) | |||||
| error = np.ones(shape=[3, 3]) * 1.0e-6 | error = np.ones(shape=[3, 3]) * 1.0e-6 | ||||
| context.set_context(mode=context.GRAPH_MODE, device_target="GPU") | context.set_context(mode=context.GRAPH_MODE, device_target="GPU") | ||||
| @@ -20,6 +20,7 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| class NetSoftmaxCrossEntropyWithLogits(nn.Cell): | class NetSoftmaxCrossEntropyWithLogits(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(NetSoftmaxCrossEntropyWithLogits, self).__init__() | super(NetSoftmaxCrossEntropyWithLogits, self).__init__() | ||||
| @@ -20,6 +20,7 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| class NetSparseSoftmaxCrossEntropyWithLogits(nn.Cell): | class NetSparseSoftmaxCrossEntropyWithLogits(nn.Cell): | ||||
| def __init__(self): | def __init__(self): | ||||
| super(NetSparseSoftmaxCrossEntropyWithLogits, self).__init__() | super(NetSparseSoftmaxCrossEntropyWithLogits, self).__init__() | ||||
| @@ -36,8 +36,7 @@ class NetRMSProp(nn.Cell): | |||||
| def construct(self, var, g, mg, rms, mom, lr, decay, momentum, epsilon): | def construct(self, var, g, mg, rms, mom, lr, decay, momentum, epsilon): | ||||
| if self.use_centered: | if self.use_centered: | ||||
| return self.rms_opt(var, mg, rms, mom, g, lr, decay, momentum, epsilon) | return self.rms_opt(var, mg, rms, mom, g, lr, decay, momentum, epsilon) | ||||
| else: | |||||
| return self.rms_opt(var, rms, mom, lr, g, decay, momentum, epsilon) | |||||
| return self.rms_opt(var, rms, mom, lr, g, decay, momentum, epsilon) | |||||
| def rmsprop_numpy(variable, gradients, mean_square, moment, | def rmsprop_numpy(variable, gradients, mean_square, moment, | ||||
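Annotation (not part of the diff): the RMSProp change above is the usual pylint no-else-return cleanup; once the if-branch returns, the else wrapper is redundant. A tiny generic sketch of the pattern, unrelated to the optimizer itself.

    def clip_to_limit(value, limit):
        if value > limit:
            return limit
        return value   # reached only when the condition is false, so no "else:" is needed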
| @@ -26,7 +26,8 @@ from mindspore.common import dtype as mstype | |||||
| from mindspore.nn import Cell | from mindspore.nn import Cell | ||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.ops import prim_attr_register | from mindspore.ops import prim_attr_register | ||||
| from mindspore.ops.primitive import Primitive, PrimitiveWithInfer | |||||
| from mindspore.ops.primitive import PrimitiveWithInfer | |||||
| import mindspore.context as context | |||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| from ....mindspore_test_framework.mindspore_test import mindspore_test | from ....mindspore_test_framework.mindspore_test import mindspore_test | ||||
| from ....mindspore_test_framework.pipeline.forward.compile_forward \ | from ....mindspore_test_framework.pipeline.forward.compile_forward \ | ||||
| @@ -305,8 +306,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) | |||||
| # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | ||||
| import mindspore.context as context | |||||
| @non_graph_engine | @non_graph_engine | ||||
| @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | ||||
| @@ -13,26 +13,15 @@ | |||||
| # limitations under the License. | # limitations under the License. | ||||
| # ============================================================================ | # ============================================================================ | ||||
| """ test ops """ | """ test ops """ | ||||
| import functools | |||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| import mindspore.ops.composite as C | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore import ops | |||||
| from mindspore.common import dtype as mstype | from mindspore.common import dtype as mstype | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore.common.parameter import Parameter | |||||
| from mindspore.ops import functional as F | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.ops.operations import _grad_ops as G | |||||
| from ..ut_filter import non_graph_engine | |||||
| from ....mindspore_test_framework.mindspore_test import mindspore_test | from ....mindspore_test_framework.mindspore_test import mindspore_test | ||||
| from ....mindspore_test_framework.pipeline.forward.compile_forward \ | from ....mindspore_test_framework.pipeline.forward.compile_forward \ | ||||
| import (pipeline_for_compile_forward_ge_graph_for_case_by_case_config, | |||||
| pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception) | |||||
| from ....mindspore_test_framework.pipeline.gradient.compile_gradient \ | |||||
| import pipeline_for_compile_grad_ge_graph_for_case_by_case_config | |||||
| import pipeline_for_compile_forward_ge_graph_for_case_by_case_config_exception | |||||
| class ExpandDimsNet(nn.Cell): | class ExpandDimsNet(nn.Cell): | ||||
| @@ -17,6 +17,7 @@ import functools | |||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| import mindspore.context as context | |||||
| from mindspore import Tensor, Parameter | from mindspore import Tensor, Parameter | ||||
| from mindspore.common.parameter import ParameterTuple | from mindspore.common.parameter import ParameterTuple | ||||
| from mindspore.ops import composite as C | from mindspore.ops import composite as C | ||||
| @@ -89,8 +90,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) | |||||
| # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | ||||
| import mindspore.context as context | |||||
| @non_graph_engine | @non_graph_engine | ||||
| @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | ||||
| @@ -42,10 +42,10 @@ def cond_data_test(x_init, y_init): | |||||
| def construct(self, x, y): | def construct(self, x, y): | ||||
| cond = self.less(x, y) | cond = self.less(x, y) | ||||
| st1, sf1 = self.switch(x, cond) | |||||
| st2, sf2 = self.switch(y, cond) | |||||
| st1, _ = self.switch(x, cond) | |||||
| st2, _ = self.switch(y, cond) | |||||
| add_ret = self.add(st1, st2) | add_ret = self.add(st1, st2) | ||||
| st3, sf3 = self.switch(self.value, cond) | |||||
| _, sf3 = self.switch(self.value, cond) | |||||
| sq_ret = self.square(sf3) | sq_ret = self.square(sf3) | ||||
| ret = self.merge((add_ret, sq_ret)) | ret = self.merge((add_ret, sq_ret)) | ||||
| return ret[0] | return ret[0] | ||||
| @@ -125,7 +125,7 @@ def test_if_str_is_not_none_right(): | |||||
| self.z = z | self.z = z | ||||
| def construct(self, x, y): | def construct(self, x, y): | ||||
| if self.z == None: | |||||
| if self.z is None: | |||||
| ret = x | ret = x | ||||
| else: | else: | ||||
| ret = y | ret = y | ||||
| @@ -146,7 +146,7 @@ def test_if_str_is_not_none_left(): | |||||
| self.z = z | self.z = z | ||||
| def construct(self, x, y): | def construct(self, x, y): | ||||
| if None == self.z: | |||||
| if self.z is None: | |||||
| ret = x | ret = x | ||||
| else: | else: | ||||
| ret = y | ret = y | ||||
| @@ -167,7 +167,7 @@ def test_if_none_equal_none(): | |||||
| self.z = z | self.z = z | ||||
| def construct(self, x, y): | def construct(self, x, y): | ||||
| if self.z == None: | |||||
| if self.z is None: | |||||
| ret = x | ret = x | ||||
| else: | else: | ||||
| ret = y | ret = y | ||||
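Annotation (not part of the diff): the `== None` fixes in these hunks follow the standard Python guidance that None checks should use identity. A small hedged illustration of why equality can mislead; AlwaysEqual is a contrived example, not from the tests.

    class AlwaysEqual:
        def __eq__(self, other):
            return True   # an overridden __eq__ can make "== None" report True

    obj = AlwaysEqual()
    print(obj == None)   # True, although obj is not None
    print(obj is None)   # False, identity is unambiguous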
| @@ -16,6 +16,7 @@ import functools | |||||
| import numpy as np | import numpy as np | ||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| import mindspore.context as context | |||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| @@ -132,7 +133,7 @@ def test_list_append_2(): | |||||
| class ListOperate(nn.Cell): | class ListOperate(nn.Cell): | ||||
| def __init__(self, ): | |||||
| def __init__(self,): | |||||
| super(ListOperate, self).__init__() | super(ListOperate, self).__init__() | ||||
| def construct(self, t, l): | def construct(self, t, l): | ||||
| @@ -211,9 +212,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) | |||||
| # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | ||||
| import mindspore.context as context | |||||
| @non_graph_engine | @non_graph_engine | ||||
| @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | ||||
| def test_exec(): | def test_exec(): | ||||
| @@ -22,9 +22,7 @@ import mindspore.context as context | |||||
| import mindspore.nn as nn | import mindspore.nn as nn | ||||
| from mindspore import Tensor | from mindspore import Tensor | ||||
| from mindspore.common import dtype as mstype | from mindspore.common import dtype as mstype | ||||
| from mindspore.common.api import _executor | |||||
| from mindspore.ops import composite as C | from mindspore.ops import composite as C | ||||
| from mindspore.ops import functional as F | |||||
| from mindspore.ops import operations as P | from mindspore.ops import operations as P | ||||
| from mindspore.ops import prim_attr_register, PrimitiveWithInfer | from mindspore.ops import prim_attr_register, PrimitiveWithInfer | ||||
| from ..ut_filter import non_graph_engine | from ..ut_filter import non_graph_engine | ||||
| @@ -306,8 +304,8 @@ class NetWithLossCumSum(nn.Cell): | |||||
| self.loss = VirtualLoss() | self.loss = VirtualLoss() | ||||
| self.network = network | self.network = network | ||||
| def construct(self, input): | |||||
| predict = self.network(input) | |||||
| def construct(self, input_): | |||||
| predict = self.network(input_) | |||||
| return self.loss(predict) | return self.loss(predict) | ||||
| @@ -318,8 +316,8 @@ class GradWrapCumSum(nn.Cell): | |||||
| super(GradWrapCumSum, self).__init__() | super(GradWrapCumSum, self).__init__() | ||||
| self.network = network | self.network = network | ||||
| def construct(self, input): | |||||
| return C.grad(self.network)(input) | |||||
| def construct(self, input_): | |||||
| return C.grad(self.network)(input_) | |||||
| class NetCumSum(nn.Cell): | class NetCumSum(nn.Cell): | ||||
| @@ -330,8 +328,8 @@ class NetCumSum(nn.Cell): | |||||
| self.cumsum = P.CumSum() | self.cumsum = P.CumSum() | ||||
| self.axis = 1 | self.axis = 1 | ||||
| def construct(self, input): | |||||
| return self.cumsum(input, self.axis) | |||||
| def construct(self, input_): | |||||
| return self.cumsum(input_, self.axis) | |||||
| class SignNet(nn.Cell): | class SignNet(nn.Cell): | ||||
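Annotation (not part of the diff): for reference, a NumPy sketch of what NetCumSum above computes with its axis of 1; the values are illustrative only.

    import numpy as np

    x = np.array([[1., 2., 3.],
                  [4., 5., 6.]], dtype=np.float32)
    print(np.cumsum(x, axis=1))   # rows become running sums: [1, 3, 6] and [4, 9, 15]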
| @@ -444,9 +442,6 @@ test_exec_case = functools.reduce(lambda x, y: x + y, test_case_lists) | |||||
| # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | # pytest tests/python/ops/test_ops.py::test_backward -k LayerNorm | ||||
| import mindspore.context as context | |||||
| @non_graph_engine | @non_graph_engine | ||||
| @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | @mindspore_test(pipeline_for_compile_forward_ge_graph_for_case_by_case_config) | ||||
| def test_exec(): | def test_exec(): | ||||