@@ -20,15 +20,13 @@ import mindspore.context as context
 import mindspore.nn as nn
 from mindspore import Tensor
 
 
 class NetSparseSoftmaxCrossEntropyWithLogits(nn.Cell):
     def __init__(self):
         super(NetSparseSoftmaxCrossEntropyWithLogits, self).__init__()
-        self.loss = nn.SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
-        self.dlogits = nn.SoftmaxCrossEntropyWithLogits(is_grad=True, sparse=True)
+        self.loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
 
     def construct(self, logits, labels):
-        return (self.loss(logits, labels), self.dlogits(logits, labels))
+        return self.loss(logits, labels)
 
 
 @pytest.mark.level0
@@ -39,29 +37,18 @@ def test_sparse_softmax_cross_entropy_with_logits():
                               [1, 10, 1],
                               [10, 1, 1]]).astype(np.float32))
     labels = Tensor(np.array([2, 1, 0]).astype(np.int32))
-    expect_loss = 0.0002467
-    expect_dlogits = np.array([[4.1126452e-05, 4.1126452e-05, -8.2234539e-05],
-                               [4.1126452e-05, -8.2234539e-05, 4.1126452e-05],
-                               [-8.2234539e-05, 4.1126452e-05, 4.1126452e-05]]).astype(np.float32)
+    expect_loss = [0.00024673, 0.00024673, 0.00024673]
 
     context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
     sparse_softmax_cross_entropy_with_logits = NetSparseSoftmaxCrossEntropyWithLogits()
     output = sparse_softmax_cross_entropy_with_logits(logits, labels)
     error0 = 1.0e-6
-    diff0 = output[0].asnumpy() - expect_loss
+    diff0 = output.asnumpy() - expect_loss
     assert np.all(abs(diff0) < error0)
-
-    error1 = np.ones(shape=[3, 3]) * 1.0e-6
-    diff1 = output[1].asnumpy() - expect_dlogits
-    assert np.all(abs(diff1) < error1)
 
     context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
     sparse_softmax_cross_entropy_with_logits = NetSparseSoftmaxCrossEntropyWithLogits()
     output = sparse_softmax_cross_entropy_with_logits(logits, labels)
     error0 = 1.0e-6
-    diff0 = output[0].asnumpy() - expect_loss
+    diff0 = output.asnumpy() - expect_loss
     assert np.all(abs(diff0) < error0)
-
-    error1 = np.ones(shape=[3, 3]) * 1.0e-6
-    diff1 = output[1].asnumpy() - expect_dlogits
-    assert np.all(abs(diff1) < error1)
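
For reference, a minimal standalone sketch (not part of the patch) of what the updated test exercises. It assumes a MindSpore build in which nn.SoftmaxCrossEntropyWithLogits no longer accepts is_grad and, with its default settings, returns one loss value per sample; that assumption is why expect_loss above becomes a 3-element vector instead of a scalar.

import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor

context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
# Sparse variant: labels are class indices, not one-hot vectors.
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True)  # is_grad removed in this API
logits = Tensor(np.array([[1, 1, 10],
                          [1, 10, 1],
                          [10, 1, 1]]).astype(np.float32))
labels = Tensor(np.array([2, 1, 0]).astype(np.int32))
loss = loss_fn(logits, labels)  # assumed shape (3,): per-sample losses, ~0.00024673 each
print(loss)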