Merge pull request !840 from jinyaohui/conv2dtranspose (tag: v0.3.0-alpha)
| @@ -37,7 +37,8 @@ class _Conv(Cell): | |||
| group, | |||
| has_bias, | |||
| weight_init, | |||
| bias_init): | |||
| bias_init, | |||
| transposed=False): | |||
| super(_Conv, self).__init__() | |||
| self.in_channels = check_int_positive(in_channels) | |||
| self.out_channels = check_int_positive(out_channels) | |||
| @@ -65,9 +66,11 @@ class _Conv(Cell): | |||
| if out_channels % group != 0: | |||
| raise ValueError("Attr 'out_channels' of 'Conv2D' Op must be divisible by " | |||
| "attr 'group' of 'Conv2D' Op.") | |||
| self.weight = Parameter(initializer(weight_init, [out_channels, in_channels // group, *kernel_size]), | |||
| name='weight') | |||
| if transposed: | |||
| shape = [in_channels, out_channels // group, *kernel_size] | |||
| else: | |||
| shape = [out_channels, in_channels // group, *kernel_size] | |||
| self.weight = Parameter(initializer(weight_init, shape), name='weight') | |||
| if check_bool(has_bias): | |||
| self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias') | |||
| @@ -312,8 +315,8 @@ class Conv2dTranspose(_Conv): | |||
| # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel, | |||
| # then Conv2dTranspose's out_channel refers to Conv2DBackpropInput's in_channel. | |||
| super(Conv2dTranspose, self).__init__( | |||
| out_channels, | |||
| in_channels, | |||
| out_channels, | |||
| kernel_size, | |||
| stride, | |||
| pad_mode, | |||
| @@ -322,10 +325,11 @@ class Conv2dTranspose(_Conv): | |||
| group, | |||
| has_bias, | |||
| weight_init, | |||
| bias_init) | |||
| bias_init, | |||
| transposed=True) | |||
| self.out_channels = out_channels | |||
| self.in_channels = in_channels | |||
| self.out_channels = out_channels | |||
| self.shape = P.Shape() | |||
| if pad_mode not in ('valid', 'same', 'pad'): | |||
| raise ValueError('Attr \'pad_mode\' of \'Conv2dTranspose\' Op passed ' | |||
| @@ -20,7 +20,6 @@ import mindspore.nn as nn | |||
| from mindspore import Tensor | |||
| from ..ut_filter import non_graph_engine | |||
| weight = Tensor(np.ones([2, 2])) | |||
| in_channels = 3 | |||
| out_channels = 64 | |||
| @@ -28,6 +27,7 @@ out_channels = 64 | |||
| class Net(nn.Cell): | |||
| """ Net definition """ | |||
| def __init__(self, | |||
| cin, | |||
| cout, | |||
| @@ -93,12 +93,14 @@ def test_compile_pad_pad(): | |||
| input_data = Tensor(np.ones([1, 3, 16, 50], dtype=np.float32)) | |||
| net(input_data) | |||
| def test_conv_group_error(): | |||
| with pytest.raises(ValueError): | |||
| nn.Conv2d(6, 8, 3, group=3) | |||
| with pytest.raises(ValueError): | |||
| nn.Conv2d(6, 9, 3, group=2) | |||
| def test_conv_check(): | |||
| """ test_conv_check """ | |||
| with pytest.raises(ValueError): | |||
| @@ -139,15 +141,15 @@ class NetConv2dTranspose(nn.Cell): | |||
| super(NetConv2dTranspose, self).__init__() | |||
| self.conv = nn.Conv2dTranspose(cin, | |||
| cout, | |||
| kernel_size, | |||
| stride, | |||
| pad_mode, | |||
| padding, | |||
| dilation, | |||
| group, | |||
| has_bias, | |||
| weight_init, | |||
| bias_init) | |||
| kernel_size, | |||
| stride, | |||
| pad_mode, | |||
| padding, | |||
| dilation, | |||
| group, | |||
| has_bias, | |||
| weight_init, | |||
| bias_init) | |||
| def construct(self, input_x): | |||
| return self.conv(input_x) | |||
| @@ -165,6 +167,13 @@ def test_compile_transpose_bias(): | |||
| net(input_data) | |||
| def test_compile_transpose_bias_init(): | |||
| bias = Tensor(np.random.randn(64).astype(np.float32)) | |||
| net = NetConv2dTranspose(3, 64, 4, has_bias=True, weight_init='normal', bias_init=bias) | |||
| input_data = Tensor(np.ones([1, 3, 16, 50], dtype=np.float32)) | |||
| net(input_data) | |||
| def test_compile_transpose_valid(): | |||
| net = NetConv2dTranspose(3, 64, 4, pad_mode='valid', weight_init='normal') | |||
| input_data = Tensor(np.ones([1, 3, 16, 50], dtype=np.float32)) | |||