
resnet_quant.py 8.3 kB

# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ResNet."""
import mindspore.nn as nn
from mindspore.ops import operations as P


class ConvBNReLU(nn.Cell):
    """
    Convolution/Depthwise fused with Batchnorm and ReLU block definition.

    Args:
        in_planes (int): Input channel.
        out_planes (int): Output channel.
        kernel_size (int): Input kernel size.
        stride (int): Stride size for the first convolutional layer. Default: 1.
        groups (int): Channel group. 1 for a standard convolution, the input channel count for a depthwise
            convolution. Default: 1.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ConvBNReLU(16, 256, kernel_size=1, stride=1, groups=1)
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        super(ConvBNReLU, self).__init__()
        padding = (kernel_size - 1) // 2
        conv = nn.Conv2dBnAct(in_planes, out_planes, kernel_size, stride, pad_mode='pad', padding=padding,
                              group=groups, has_bn=True, activation='relu')
        self.features = conv

    def construct(self, x):
        output = self.features(x)
        return output


class ResidualBlock(nn.Cell):
    """
    ResNet V1 residual block definition.

    Args:
        in_channel (int): Input channel.
        out_channel (int): Output channel.
        stride (int): Stride size for the first convolutional layer. Default: 1.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResidualBlock(3, 256, stride=2)
    """
    expansion = 4

    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1):
        super(ResidualBlock, self).__init__()
        channel = out_channel // self.expansion
        self.conv1 = ConvBNReLU(in_channel, channel, kernel_size=1, stride=1)
        self.conv2 = ConvBNReLU(channel, channel, kernel_size=3, stride=stride)
        self.conv3 = nn.Conv2dBnAct(channel, out_channel, kernel_size=1, stride=1, pad_mode='same', padding=0,
                                    has_bn=True, activation='relu')
        self.down_sample = False
        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        self.down_sample_layer = None
        if self.down_sample:
            self.down_sample_layer = nn.Conv2dBnAct(in_channel, out_channel,
                                                    kernel_size=1, stride=stride,
                                                    pad_mode='same', padding=0, has_bn=True, activation='relu')
        self.add = P.TensorAdd()
        self.relu = P.ReLU()

    def construct(self, x):
        identity = x
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        if self.down_sample:
            identity = self.down_sample_layer(identity)
        out = self.add(out, identity)
        out = self.relu(out)
        return out
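
# Bottleneck arithmetic, derived from the class above: with expansion = 4, a block
# such as ResidualBlock(256, 512, stride=2) computes internally at 512 // 4 = 128
# channels (1x1 reduce -> 3x3 stride-2 -> 1x1 expand back to 512). Because
# stride != 1 and in_channel != out_channel, the identity path is projected by a
# 1x1, stride-2 Conv2dBnAct so shapes match at the element-wise add.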


class ResNet(nn.Cell):
    """
    ResNet architecture.

    Args:
        block (Cell): Block for network.
        layer_nums (list): Numbers of blocks in different layers.
        in_channels (list): Input channel in each layer.
        out_channels (list): Output channel in each layer.
        strides (list): Stride size in each layer.
        num_classes (int): The number of classes that the training images belong to.

    Returns:
        Tensor, output tensor.

    Examples:
        >>> ResNet(ResidualBlock,
        >>>        [3, 4, 6, 3],
        >>>        [64, 256, 512, 1024],
        >>>        [256, 512, 1024, 2048],
        >>>        [1, 2, 2, 2],
        >>>        10)
    """

    def __init__(self,
                 block,
                 layer_nums,
                 in_channels,
                 out_channels,
                 strides,
                 num_classes):
        super(ResNet, self).__init__()
        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError("the length of layer_nums, in_channels, out_channels list must be 4!")
        self.conv1 = ConvBNReLU(3, 64, kernel_size=7, stride=2)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")
        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])
        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.end_point = nn.DenseBnAct(out_channels[3], num_classes, has_bias=True, has_bn=False)

    def _make_layer(self, block, layer_num, in_channel, out_channel, stride):
        """
        Make stage network of ResNet.

        Args:
            block (Cell): ResNet block.
            layer_num (int): Layer number.
            in_channel (int): Input channel.
            out_channel (int): Output channel.
            stride (int): Stride size for the first convolutional layer.

        Returns:
            SequentialCell, the output layer.

        Examples:
            >>> _make_layer(ResidualBlock, 3, 128, 256, 2)
        """
        layers = []
        resnet_block = block(in_channel, out_channel, stride=stride)
        layers.append(resnet_block)
        for _ in range(1, layer_num):
            resnet_block = block(out_channel, out_channel, stride=1)
            layers.append(resnet_block)
        return nn.SequentialCell(layers)

    def construct(self, x):
        x = self.conv1(x)
        c1 = self.maxpool(x)
        c2 = self.layer1(c1)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)
        out = self.mean(c5, (2, 3))
        out = self.flatten(out)
        out = self.end_point(out)
        return out


def resnet50_quant(class_num=10001):
    """
    Get ResNet50 neural network.

    Args:
        class_num (int): Class number.

    Returns:
        Cell, cell instance of ResNet50 neural network.

    Examples:
        >>> net = resnet50_quant(10)
    """
    return ResNet(ResidualBlock,
                  [3, 4, 6, 3],
                  [64, 256, 512, 1024],
                  [256, 512, 1024, 2048],
                  [1, 2, 2, 2],
                  class_num)


def resnet101_quant(class_num=1001):
    """
    Get ResNet101 neural network.

    Args:
        class_num (int): Class number.

    Returns:
        Cell, cell instance of ResNet101 neural network.

    Examples:
        >>> net = resnet101_quant(1001)
    """
    return ResNet(ResidualBlock,
                  [3, 4, 23, 3],
                  [64, 256, 512, 1024],
                  [256, 512, 1024, 2048],
                  [1, 2, 2, 2],
                  class_num)
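

# Minimal usage sketch, not a definitive test: it assumes MindSpore's NumPy/Tensor
# interop and a 224x224 RGB input, the ImageNet-style shape these channel settings
# imply; class_num=10 is an arbitrary illustration value.
if __name__ == "__main__":
    import numpy as np
    from mindspore import Tensor

    net = resnet50_quant(class_num=10)
    # Single dummy image in NCHW layout.
    dummy = Tensor(np.random.randn(1, 3, 224, 224).astype(np.float32))
    logits = net(dummy)
    # Expect a (1, 10) output: one row per image, one column per class.
    print(logits.shape)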