You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

conv.py 42 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """conv"""
  16. import numpy as np
  17. from mindspore import log as logger
  18. from mindspore import context
  19. from mindspore.ops import operations as P
  20. from mindspore.ops.primitive import constexpr
  21. from mindspore.common.parameter import Parameter
  22. from mindspore.common.initializer import initializer, Initializer
  23. from mindspore.common.tensor import Tensor
  24. from mindspore._checkparam import Validator, Rel, twice
  25. from mindspore._extends import cell_attr_register
  26. from ..cell import Cell
  27. __all__ = ['Conv2d', 'Conv2dTranspose', 'Conv1d', 'Conv1dTranspose']
  28. class _Conv(Cell):
  29. """
  30. Applies a N-D convolution over an input signal composed of several input planes.
  31. """
  32. def __init__(self,
  33. in_channels,
  34. out_channels,
  35. kernel_size,
  36. stride,
  37. pad_mode,
  38. padding,
  39. dilation,
  40. group,
  41. has_bias,
  42. weight_init,
  43. bias_init,
  44. transposed=False):
  45. super(_Conv, self).__init__()
  46. self.in_channels = Validator.check_positive_int(in_channels)
  47. self.out_channels = Validator.check_positive_int(out_channels)
  48. self.kernel_size = kernel_size
  49. self.stride = stride
  50. self.pad_mode = pad_mode
  51. self.weight_init = weight_init
  52. self.bias_init = bias_init
  53. if isinstance(padding, int):
  54. Validator.check_non_negative_int(padding, 'padding', self.cls_name)
  55. self.padding = padding
  56. elif isinstance(padding, tuple):
  57. for pad in padding:
  58. Validator.check_non_negative_int(pad, 'padding item', self.cls_name)
  59. self.padding = padding
  60. else:
  61. raise TypeError("padding type must be int/tuple(int) cannot be {}!".format(type(padding)))
  62. self.dilation = dilation
  63. self.group = Validator.check_positive_int(group)
  64. self.has_bias = has_bias
  65. if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
  66. isinstance(kernel_size[0], bool) or isinstance(kernel_size[1], bool) or \
  67. kernel_size[0] < 1 or kernel_size[1] < 1:
  68. raise ValueError("Attr 'kernel_size' of 'Conv2D' Op passed "
  69. + str(self.kernel_size) + ", should be a int or tuple and equal to or greater than 1.")
  70. if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or \
  71. isinstance(stride[0], bool) or isinstance(stride[1], bool) or stride[0] < 1 or stride[1] < 1:
  72. raise ValueError("Attr 'stride' of 'Conv2D' Op passed "
  73. + str(self.stride) + ", should be a int or tuple and equal to or greater than 1.")
  74. if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \
  75. isinstance(dilation[0], bool) or isinstance(dilation[1], bool) or dilation[0] < 1 or dilation[1] < 1:
  76. raise ValueError("Attr 'dilation' of 'Conv2D' Op passed "
  77. + str(self.dilation) + ", should be a int or tuple and equal to or greater than 1.")
  78. if in_channels % group != 0:
  79. raise ValueError("Attr 'in_channels' of 'Conv2D' Op must be divisible by "
  80. "attr 'group' of 'Conv2D' Op.")
  81. if out_channels % group != 0:
  82. raise ValueError("Attr 'out_channels' of 'Conv2D' Op must be divisible by "
  83. "attr 'group' of 'Conv2D' Op.")
  84. if transposed:
  85. shape = [in_channels, out_channels // group, *kernel_size]
  86. else:
  87. shape = [out_channels, in_channels // group, *kernel_size]
  88. self.weight = Parameter(initializer(self.weight_init, shape), name='weight')
  89. if Validator.check_bool(has_bias):
  90. self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
  91. else:
  92. if self.bias_init != 'zeros':
  93. logger.warning("Value of 'has_bias' is False, value of 'bias_init' will be ignored.")
  94. self.bias = None
  95. def construct(self, *inputs):
  96. """Must be overridden by all subclasses."""
  97. raise NotImplementedError
  98. class Conv2d(_Conv):
  99. r"""
  100. 2D convolution layer.
  101. Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
  102. where :math:`N` is batch size, :math:`C_{in}` is channel number, and :math:`H_{in}, W_{in})` are height and width.
  103. For each batch of shape :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
  104. .. math::
  105. out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
  106. where :math:`ccor` is the cross-correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
  107. from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
  108. filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
  109. of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
  110. :math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
  111. :math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
  112. to split the input in the channel dimension.
  113. If the 'pad_mode' is set to be "valid", the output height and width will be
  114. :math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
  115. (\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
  116. :math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
  117. (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
  118. The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
  119. <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
  120. Args:
  121. in_channels (int): The number of input channel :math:`C_{in}`.
  122. out_channels (int): The number of output channel :math:`C_{out}`.
  123. kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the height
  124. and width of the 2D convolution window. Single int means the value is for both the height and the width of
  125. the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
  126. width of the kernel.
  127. stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
  128. the height and width of movement are both strides, or a tuple of two int numbers that
  129. represent height and width of movement respectively. Default: 1.
  130. pad_mode (str): Specifies padding mode. The optional values are
  131. "same", "valid", "pad". Default: "same".
  132. - same: Adopts the way of completion. The height and width of the output will be the same as
  133. the input. The total number of padding will be calculated in horizontal and vertical
  134. directions and evenly distributed to top and bottom, left and right if possible. Otherwise, the
  135. last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
  136. must be 0.
  137. - valid: Adopts the way of discarding. The possible largest height and width of output will be returned
  138. without padding. Extra pixels will be discarded. If this mode is set, `padding`
  139. must be 0.
  140. - pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
  141. Tensor borders. `padding` must be greater than or equal to 0.
  142. padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
  143. the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
  144. with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
  145. padding[1], padding[2], and padding[3] accordingly. Default: 0.
  146. dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
  147. to use for dilated convolution. If set to be :math:`k > 1`, there will
  148. be :math:`k - 1` pixels skipped for each sampling location. Its value must
  149. be greater or equal to 1 and bounded by the height and width of the
  150. input. Default: 1.
  151. group (int): Splits filter into groups, `in_ channels` and `out_channels` must be
  152. divisible by the number of groups. If the group is equal to `in_channels` and `out_channels`,
  153. this 2D convolution layer also can be called 2D depthwise convolution layer. Default: 1.
  154. has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
  155. weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
  156. It can be a Tensor, a string, an Initializer or a number. When a string is specified,
  157. values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
  158. as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
  159. and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
  160. Initializer for more details. Default: 'normal'.
  161. bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
  162. Initializer and string are the same as 'weight_init'. Refer to the values of
  163. Initializer for more details. Default: 'zeros'.
  164. Inputs:
  165. - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
  166. Outputs:
  167. Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
  168. Examples:
  169. >>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
  170. >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
  171. >>> net(input).shape
  172. (1, 240, 1024, 640)
  173. """
  174. @cell_attr_register
  175. def __init__(self,
  176. in_channels,
  177. out_channels,
  178. kernel_size,
  179. stride=1,
  180. pad_mode='same',
  181. padding=0,
  182. dilation=1,
  183. group=1,
  184. has_bias=False,
  185. weight_init='normal',
  186. bias_init='zeros'):
  187. kernel_size = twice(kernel_size)
  188. stride = twice(stride)
  189. self._dilation = dilation
  190. dilation = twice(dilation)
  191. super(Conv2d, self).__init__(
  192. in_channels,
  193. out_channels,
  194. kernel_size,
  195. stride,
  196. pad_mode,
  197. padding,
  198. dilation,
  199. group,
  200. has_bias,
  201. weight_init,
  202. bias_init)
  203. self.conv2d = P.Conv2D(out_channel=self.out_channels,
  204. kernel_size=self.kernel_size,
  205. mode=1,
  206. pad_mode=self.pad_mode,
  207. pad=self.padding,
  208. stride=self.stride,
  209. dilation=self.dilation,
  210. group=self.group)
  211. self._init_depthwise_conv2d()
  212. self.bias_add = P.BiasAdd()
  213. def _init_depthwise_conv2d(self):
  214. """Initialize depthwise conv2d op"""
  215. if context.get_context("device_target") == "Ascend" and self.group > 1:
  216. self.dilation = self._dilation
  217. Validator.check_equal_int(self.group, self.in_channels, 'group')
  218. Validator.check_equal_int(self.group, self.out_channels, 'group')
  219. self.conv2d = P.DepthwiseConv2dNative(channel_multiplier=1,
  220. kernel_size=self.kernel_size,
  221. pad_mode=self.pad_mode,
  222. pad=self.padding,
  223. stride=self.stride,
  224. dilation=self.dilation)
  225. weight_shape = [1, self.in_channels, *self.kernel_size]
  226. if isinstance(self.weight_init, Tensor):
  227. self.weight_init = Tensor(self.weight_init.asnumpy().swapaxes(0, 1), self.weight_init.dtype)
  228. if isinstance(self.weight_init, Initializer):
  229. self.weight_init.shape = weight_shape
  230. self.weight = Parameter(initializer(self.weight_init, weight_shape), name='weight')
  231. def construct(self, x):
  232. output = self.conv2d(x, self.weight)
  233. if self.has_bias:
  234. output = self.bias_add(output, self.bias)
  235. return output
  236. def extend_repr(self):
  237. s = 'input_channels={}, output_channels={}, kernel_size={},' \
  238. 'stride={}, pad_mode={}, padding={}, dilation={}, ' \
  239. 'group={}, has_bias={},' \
  240. 'weight_init={}, bias_init={}'.format(
  241. self.in_channels,
  242. self.out_channels,
  243. self.kernel_size,
  244. self.stride,
  245. self.pad_mode,
  246. self.padding,
  247. self.dilation,
  248. self.group,
  249. self.has_bias,
  250. self.weight_init,
  251. self.bias_init)
  252. return s
  253. @constexpr
  254. def _check_input_3d(input_shape):
  255. if len(input_shape) != 3:
  256. raise ValueError(f"Input should be 3d, but got shape {input_shape}")
  257. class Conv1d(_Conv):
  258. r"""
  259. 1D convolution layer.
  260. Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, W_{in})`,
  261. where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape
  262. :math:`(C_{in}, W_{in})`, the formula is defined as:
  263. .. math::
  264. out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
  265. where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
  266. from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
  267. filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
  268. of kernel and it has shape :math:`(\text{ks_w})`, where :math:`\text{ks_w}` is the width of the convolution kernel.
  269. The full kernel has shape :math:`(C_{out}, C_{in} // \text{group}, \text{ks_w})`, where group is the group number
  270. to split the input in the channel dimension.
  271. If the 'pad_mode' is set to be "valid", the output width will be
  272. :math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
  273. (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
  274. The first introduction of convolution layer can be found in paper `Gradient Based Learning Applied to Document
  275. Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
  276. Args:
  277. in_channels (int): The number of input channel :math:`C_{in}`.
  278. out_channels (int): The number of output channel :math:`C_{out}`.
  279. kernel_size (int): The data type is int. Specifies the
  280. width of the 1D convolution window.
  281. stride (int): The distance of kernel moving, an int number that represents
  282. the width of movement. Default: 1.
  283. pad_mode (str): Specifies padding mode. The optional values are
  284. "same", "valid", "pad". Default: "same".
  285. - same: Adopts the way of completion. The output width will be the same as the input.
  286. The total number of padding will be calculated in the horizontal
  287. direction and evenly distributed to left and right if possible. Otherwise, the
  288. last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
  289. must be 0.
  290. - valid: Adopts the way of discarding. The possible largest width of the output will be returned
  291. without padding. Extra pixels will be discarded. If this mode is set, `padding`
  292. must be 0.
  293. - pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
  294. Tensor borders. `padding` must be greater than or equal to 0.
  295. padding (int): Implicit paddings on both sides of the input. Default: 0.
  296. dilation (int): The data type is int. Specifies the dilation rate
  297. to use for dilated convolution. If set to be :math:`k > 1`, there will
  298. be :math:`k - 1` pixels skipped for each sampling location. Its value must
  299. be greater or equal to 1 and bounded by the height and width of the
  300. input. Default: 1.
  301. group (int): Splits filter into groups, `in_ channels` and `out_channels` must be
  302. divisible by the number of groups. Default: 1.
  303. has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
  304. weight_init (Union[Tensor, str, Initializer, numbers.Number]): An initializer for the convolution kernel.
  305. It can be a Tensor, a string, an Initializer or a number. When a string is specified,
  306. values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
  307. as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
  308. and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
  309. Initializer for more details. Default: 'normal'.
  310. bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
  311. Initializer and string are the same as 'weight_init'. Refer to the values of
  312. Initializer for more details. Default: 'zeros'.
  313. Inputs:
  314. - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.
  315. Outputs:
  316. Tensor of shape :math:`(N, C_{out}, W_{out})`.
  317. Examples:
  318. >>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
  319. >>> input = Tensor(np.ones([1, 120, 640]), mindspore.float32)
  320. >>> net(input).shape
  321. (1, 240, 640)
  322. """
  323. @cell_attr_register
  324. def __init__(self,
  325. in_channels,
  326. out_channels,
  327. kernel_size,
  328. stride=1,
  329. pad_mode='same',
  330. padding=0,
  331. dilation=1,
  332. group=1,
  333. has_bias=False,
  334. weight_init='normal',
  335. bias_init='zeros'):
  336. Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
  337. Validator.check_value_type("stride", stride, [int], self.cls_name)
  338. Validator.check_value_type("padding", padding, [int], self.cls_name)
  339. Validator.check_value_type("dilation", dilation, [int], self.cls_name)
  340. Validator.check_int(kernel_size, 1, Rel.GE, 'kernel_size', self.cls_name)
  341. Validator.check_int(stride, 1, Rel.GE, 'stride', self.cls_name)
  342. Validator.check_non_negative_int(padding, 'padding', self.cls_name)
  343. Validator.check_int(dilation, 1, Rel.GE, 'dilation', self.cls_name)
  344. kernel_size = (1, kernel_size)
  345. stride = (1, stride)
  346. dilation = (1, dilation)
  347. get_shape = P.Shape()
  348. get_dtype = P.DType()
  349. if isinstance(weight_init, Tensor):
  350. weight_init_shape = get_shape(weight_init)
  351. Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)
  352. weight_init_dtype = get_dtype(weight_init)
  353. weight_init_value = weight_init.asnumpy()
  354. weight_init_value = np.expand_dims(weight_init_value, 2)
  355. weight_init = Tensor(weight_init_value, weight_init_dtype)
  356. super(Conv1d, self).__init__(
  357. in_channels,
  358. out_channels,
  359. kernel_size,
  360. stride,
  361. pad_mode,
  362. padding,
  363. dilation,
  364. group,
  365. has_bias,
  366. weight_init,
  367. bias_init)
  368. self.padding = (0, 0, padding, padding)
  369. self.conv2d = P.Conv2D(out_channel=self.out_channels,
  370. kernel_size=self.kernel_size,
  371. mode=1,
  372. pad_mode=self.pad_mode,
  373. pad=self.padding,
  374. stride=self.stride,
  375. dilation=self.dilation,
  376. group=self.group)
  377. self.bias_add = P.BiasAdd()
  378. if pad_mode not in ('valid', 'same', 'pad'):
  379. raise ValueError('Attr \'pad_mode\' of \'Conv1d\' Op passed '
  380. + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
  381. self.expand_dims = P.ExpandDims()
  382. self.squeeze = P.Squeeze(2)
  383. self.shape = P.Shape()
  384. def construct(self, x):
  385. x_shape = self.shape(x)
  386. _check_input_3d(x_shape)
  387. x = self.expand_dims(x, 2)
  388. output = self.conv2d(x, self.weight)
  389. if self.has_bias:
  390. output = self.bias_add(output, self.bias)
  391. output = self.squeeze(output)
  392. return output
  393. def extend_repr(self):
  394. s = 'input_channels={}, output_channels={}, kernel_size={},' \
  395. 'stride={}, pad_mode={}, padding={}, dilation={}, ' \
  396. 'group={}, has_bias={},' \
  397. 'weight_init={}, bias_init={}'.format(
  398. self.in_channels,
  399. self.out_channels,
  400. self.kernel_size,
  401. self.stride,
  402. self.pad_mode,
  403. self.padding,
  404. self.dilation,
  405. self.group,
  406. self.has_bias,
  407. self.weight_init,
  408. self.bias_init)
  409. return s
  410. class Conv2dTranspose(_Conv):
  411. r"""
  412. 2D transposed convolution layer.
  413. Compute a 2D transposed convolution, which is also known as a deconvolution
  414. (although it is not an actual deconvolution).
  415. Input is typically of shape :math:`(N, C, H, W)`, where :math:`N` is batch size and :math:`C` is channel number.
  416. Args:
  417. in_channels (int): The number of channels in the input space.
  418. out_channels (int): The number of channels in the output space.
  419. kernel_size (Union[int, tuple]): int or a tuple of 2 integers, which specifies the height
  420. and width of the 2D convolution window. Single int means the value is for both the height and the width of
  421. the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
  422. width of the kernel.
  423. stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
  424. the height and width of movement are both strides, or a tuple of two int numbers that
  425. represent height and width of movement respectively. Its value must be equal to or greater than 1.
  426. Default: 1.
  427. pad_mode (str): Select the mode of the pad. The optional values are
  428. "pad", "same", "valid". Default: "same".
  429. - pad: Implicit paddings on both sides of the input.
  430. - same: Adopted the way of completion.
  431. - valid: Adopted the way of discarding.
  432. padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
  433. the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
  434. with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
  435. padding[1], padding[2], and padding[3] accordingly. Default: 0.
  436. dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
  437. to use for dilated convolution. If set to be :math:`k > 1`, there will
  438. be :math:`k - 1` pixels skipped for each sampling location. Its value must
  439. be greater than or equal to 1 and bounded by the height and width of the
  440. input. Default: 1.
  441. group (int): Splits filter into groups, `in_channels` and `out_channels` must be
  442. divisible by the number of groups. This does not support for Davinci devices when group > 1. Default: 1.
  443. has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
  444. weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
  445. It can be a Tensor, a string, an Initializer or a number. When a string is specified,
  446. values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
  447. as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
  448. and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
  449. Initializer for more details. Default: 'normal'.
  450. bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
  451. Initializer and string are the same as 'weight_init'. Refer to the values of
  452. Initializer for more details. Default: 'zeros'.
  453. Inputs:
  454. - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
  455. Outputs:
  456. Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
  457. Examples:
  458. >>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
  459. >>> input = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
  460. >>> net(input)
  461. """
  462. def __init__(self,
  463. in_channels,
  464. out_channels,
  465. kernel_size,
  466. stride=1,
  467. pad_mode='same',
  468. padding=0,
  469. dilation=1,
  470. group=1,
  471. has_bias=False,
  472. weight_init='normal',
  473. bias_init='zeros'):
  474. kernel_size = twice(kernel_size)
  475. stride = twice(stride)
  476. dilation = twice(dilation)
  477. Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
  478. if isinstance(padding, tuple):
  479. Validator.check_equal_int(len(padding), 4, 'padding size', self.cls_name)
  480. # out_channels and in_channels swap.
  481. # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
  482. # then Conv2dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
  483. super(Conv2dTranspose, self).__init__(
  484. in_channels,
  485. out_channels,
  486. kernel_size,
  487. stride,
  488. pad_mode,
  489. padding,
  490. dilation,
  491. group,
  492. has_bias,
  493. weight_init,
  494. bias_init,
  495. transposed=True)
  496. self.in_channels = in_channels
  497. self.out_channels = out_channels
  498. self.shape = P.Shape()
  499. if pad_mode not in ('valid', 'same', 'pad'):
  500. raise ValueError('Attr \'pad_mode\' of \'Conv2dTranspose\' Op passed '
  501. + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
  502. self.is_valid = self.pad_mode == 'valid'
  503. self.is_same = self.pad_mode == 'same'
  504. self.is_pad = self.pad_mode == 'pad'
  505. if Validator.check_bool(has_bias):
  506. self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
  507. # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
  508. self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
  509. kernel_size=kernel_size,
  510. mode=1,
  511. pad_mode=pad_mode,
  512. pad=padding,
  513. stride=stride,
  514. dilation=dilation,
  515. group=group)
  516. self.bias_add = P.BiasAdd()
  517. if isinstance(self.padding, int):
  518. self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = (self.padding,) * 4
  519. else:
  520. self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = self.padding
  521. def shard(self, strategy):
  522. self.conv2d_transpose.shard(strategy)
  523. return self
  524. def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
  525. """Calculate the width and height of output."""
  526. length = 0
  527. filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
  528. if self.is_valid:
  529. if filter_size - stride_size > 0:
  530. length = input_length * stride_size + filter_size - stride_size
  531. else:
  532. length = input_length * stride_size
  533. elif self.is_same:
  534. length = input_length * stride_size
  535. elif self.is_pad:
  536. length = input_length * stride_size - padding + filter_size - stride_size
  537. return length
  538. def construct(self, x):
  539. n, _, h, w = self.shape(x)
  540. h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
  541. self.padding_top + self.padding_bottom)
  542. w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
  543. self.padding_left + self.padding_right)
  544. if self.has_bias:
  545. return self.bias_add(self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out)),
  546. self.bias)
  547. return self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
  548. def extend_repr(self):
  549. s = 'input_channels={}, output_channels={}, kernel_size={},' \
  550. 'stride={}, pad_mode={}, padding={}, dilation={}, ' \
  551. 'group={}, has_bias={},' \
  552. 'weight_init={}, bias_init={}'.format(self.in_channels,
  553. self.out_channels,
  554. self.kernel_size,
  555. self.stride,
  556. self.pad_mode,
  557. self.padding,
  558. self.dilation,
  559. self.group,
  560. self.has_bias,
  561. self.weight_init,
  562. self.bias_init)
  563. return s
class Conv1dTranspose(_Conv):
    r"""
    1D transposed convolution layer.

    Compute a 1D transposed convolution, which is also known as a deconvolution
    (although it is not an actual deconvolution).

    Input is typically of shape :math:`(N, C, W)`, where :math:`N` is batch size and :math:`C` is channel number.

    Args:
        in_channels (int): The number of channels in the input space.
        out_channels (int): The number of channels in the output space.
        kernel_size (int): int, which specifies the width of the 1D convolution window.
        stride (int): The distance of kernel moving, an int number that represents
            the width of movement. Default: 1.
        pad_mode (str): Select the mode of the pad. The optional values are
            "pad", "same", "valid". Default: "same".

            - pad: Implicit paddings on both sides of the input.

            - same: Adopted the way of completion.

            - valid: Adopted the way of discarding.
        padding (int): Implicit paddings on both sides of the input. Default: 0.
        dilation (int): The data type is int. Specifies the dilation rate
            to use for dilated convolution. If set to be :math:`k > 1`, there will
            be :math:`k - 1` pixels skipped for each sampling location. Its value must
            be greater or equal to 1 and bounded by the width of the
            input. Default: 1.
        group (int): Splits filter into groups, `in_channels` and `out_channels` must be
            divisible by the number of groups. This is not support for Davinci devices when group > 1. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
            It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
            values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
            as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
            and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
            Initializer for more details. Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
            Initializer and string are the same as 'weight_init'. Refer to the values of
            Initializer for more details. Default: 'zeros'.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.

    Outputs:
        Tensor of shape :math:`(N, C_{out}, W_{out})`.

    Examples:
        >>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
        >>> input = Tensor(np.ones([1, 3, 50]), mindspore.float32)
        >>> net(input)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros'):
        # Unlike the 2D layers, all geometry arguments here must be plain ints;
        # validate before super().__init__ so error messages name this class.
        Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
        Validator.check_value_type("stride", stride, [int], self.cls_name)
        Validator.check_value_type("padding", padding, [int], self.cls_name)
        Validator.check_value_type("dilation", dilation, [int], self.cls_name)
        Validator.check_int(kernel_size, 1, Rel.GE, 'kernel_size', self.cls_name)
        Validator.check_int(stride, 1, Rel.GE, 'stride', self.cls_name)
        Validator.check_non_negative_int(padding, 'padding', self.cls_name)
        Validator.check_int(dilation, 1, Rel.GE, 'dilation', self.cls_name)
        # The 1D op is implemented on top of the 2D transposed convolution by
        # inserting a dummy height dimension of size 1.
        kernel_size = (1, kernel_size)
        stride = (1, stride)
        dilation = (1, dilation)
        get_shape = P.Shape()
        get_dtype = P.DType()
        if isinstance(weight_init, Tensor):
            # A user-supplied 3D weight (C_out, C_in, W) is expanded to 4D
            # (C_out, C_in, 1, W) to match the underlying 2D operator.
            weight_init_shape = get_shape(weight_init)
            Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)
            weight_init_dtype = get_dtype(weight_init)
            weight_init_value = weight_init.asnumpy()
            weight_init_value = np.expand_dims(weight_init_value, 2)
            weight_init = Tensor(weight_init_value, weight_init_dtype)
        # out_channels and in_channels swap.
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # then Conv1dTranspose's out_channel refers to Conv2DBackpropInput's in_channel.
        super(Conv1dTranspose, self).__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            pad_mode,
            padding,
            dilation,
            group,
            has_bias,
            weight_init,
            bias_init,
            transposed=True)
        # (top, bottom, left, right) padding; the dummy height axis gets none.
        self.padding = (0, 0, padding, padding)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.shape = P.Shape()
        if pad_mode not in ('valid', 'same', 'pad'):
            raise ValueError('Attr \'pad_mode\' of \'Conv1dTranspose\' Op passed '
                             + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
        # Pre-computed pad-mode flags consumed by _deconv_output_length.
        self.is_valid = self.pad_mode == 'valid'
        self.is_same = self.pad_mode == 'same'
        self.is_pad = self.pad_mode == 'pad'
        if Validator.check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
        self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
                                                      kernel_size=kernel_size,
                                                      mode=1,
                                                      pad_mode=pad_mode,
                                                      pad=self.padding,
                                                      stride=stride,
                                                      dilation=dilation,
                                                      group=group)
        self.bias_add = P.BiasAdd()
        # expand_dims/squeeze convert between the public 3D layout and the
        # internal 4D layout around the 2D primitive.
        self.expand_dims = P.ExpandDims()
        self.squeeze = P.Squeeze(2)

    def shard(self, strategy):
        """Set the parallel sharding strategy on the underlying primitive; returns self."""
        self.conv2d_transpose.shard(strategy)
        return self

    def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
        """Calculate the width and height of output."""
        length = 0
        # Effective receptive size of the dilated kernel.
        filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
        if self.is_valid:
            # 'valid' adds the kernel overhang only when it exceeds the stride.
            if filter_size - stride_size > 0:
                length = input_length * stride_size + filter_size - stride_size
            else:
                length = input_length * stride_size
        elif self.is_same:
            length = input_length * stride_size
        elif self.is_pad:
            length = input_length * stride_size - padding + filter_size - stride_size
        return length

    def construct(self, x):
        """Apply the transposed 1D convolution to a 3D input (N, C, W)."""
        x_shape = self.shape(x)
        _check_input_3d(x_shape)
        # Insert the dummy height axis: (N, C, W) -> (N, C, 1, W).
        x = self.expand_dims(x, 2)
        n, _, h, w = self.shape(x)
        # Conv2DBackpropInput needs the target output shape explicitly.
        h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
                                           self.padding[0] + self.padding[1])
        w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
                                           self.padding[2] + self.padding[3])
        output = self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        # Drop the dummy height axis to restore (N, C, W).
        output = self.squeeze(output)
        return output

    def extend_repr(self):
        """Return the extra-representation string listing the layer configuration."""
        s = 'input_channels={}, output_channels={}, kernel_size={},' \
            'stride={}, pad_mode={}, padding={}, dilation={}, ' \
            'group={}, has_bias={},' \
            'weight_init={}, bias_init={}'.format(self.in_channels,
                                                  self.out_channels,
                                                  self.kernel_size,
                                                  self.stride,
                                                  self.pad_mode,
                                                  self.padding,
                                                  self.dilation,
                                                  self.group,
                                                  self.has_bias,
                                                  self.weight_init,
                                                  self.bias_init)
        return s