# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Quantization aware training."""
from functools import partial
from collections import namedtuple
import numpy as np
import mindspore.common.dtype as mstype
from mindspore.ops.primitive import Primitive
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor
from mindspore._checkparam import Validator, Rel, twice
from mindspore.compression.common import QuantDtype
import mindspore.context as context
from .normalization import BatchNorm2d
from .activation import get_activation, ReLU
from ..cell import Cell
from ...ops.operations import _quant_ops as Q

__all__ = [
    'FakeQuantWithMinMaxObserver',
    'Conv2dBnFoldQuantOneConv',
    'Conv2dBnFoldQuant',
    'Conv2dBnWithoutFoldQuant',
    'Conv2dQuant',
    'DenseQuant',
    'ActQuant',
    'TensorAddQuant',
    'MulQuant',
]


class BatchNormFoldCell(Cell):
    """
    Batch normalization folded.

    Args:
        momentum (float): Momentum value, must be in [0, 1]. Default: 0.9.
        epsilon (float): A small float number to avoid dividing by 0. 1e-5 if dtype in
            float32 else 1e-3. Default: 1e-5.
        freeze_bn (int): Delay in steps at which computation switches from regular batch
            norm to frozen mean and std. Default: 0.

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C, H, W)`.
        - **mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **variance** (Tensor) - Tensor of shape :math:`(C,)`.
        - **global_step** (Tensor) - Tensor to record current global step.

    Outputs:
        Tuple of 4 Tensors, the normalized input and the updated parameters.

        - **batch_mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **batch_std** (Tensor) - Tensor of shape :math:`(C,)`.
        - **running_mean** (Tensor) - Tensor of shape :math:`(C,)`.
        - **running_std** (Tensor) - Tensor of shape :math:`(C,)`.
    """

    def __init__(self, momentum=0.9, epsilon=1e-5, freeze_bn=0):
        """Initialize batch norm fold layer"""
        super(BatchNormFoldCell, self).__init__()
        self.epsilon = epsilon
        self.is_gpu = context.get_context('device_target') == "GPU"
        if self.is_gpu:
            self.bn_train = Q.BatchNormFold(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)
            self.bn_infer = Q.BatchNormFold(momentum, epsilon, is_training=False, freeze_bn=freeze_bn)
        else:
            self.bn_reduce = P.BNTrainingReduce()
            self.bn_update = Q.BatchNormFoldD(momentum, epsilon, is_training=True, freeze_bn=freeze_bn)

    def construct(self, x, mean, variance, global_step):
        if self.is_gpu:
            if self.training:
                batch_mean, batch_std, running_mean, running_std = self.bn_train(x, mean, variance, global_step)
            else:
                batch_mean, batch_std, running_mean, running_std = self.bn_infer(x, mean, variance, global_step)
        else:
            if self.training:
                x_sum, x_square_sum = self.bn_reduce(x)
                _, batch_mean, batch_std, running_mean, running_std, mean_updated, variance_updated = \
                    self.bn_update(x, x_sum, x_square_sum, mean, variance)
                P.Assign()(mean, mean_updated)
                P.Assign()(variance, variance_updated)
            else:
                batch_mean = P.ZerosLike()(variance)
                batch_std = P.OnesLike()(variance)
                running_mean = P.Add()(mean, 0.)
                running_std = P.Sqrt()(P.Add()(variance, self.epsilon))
        return batch_mean, batch_std, running_mean, running_std
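

# A minimal NumPy sketch (illustrative only, not used by the layers in this file) of
# the statistics BatchNormFoldCell produces in training mode: per-channel batch
# mean/std over an NCHW input, plus EMA-updated running statistics. The device
# kernels (Q.BatchNormFold / Q.BatchNormFoldD) are the authoritative implementation
# and may differ in detail.
def _demo_batchnorm_fold_stats(x, mean, variance, momentum=0.9, epsilon=1e-5):
    batch_var = x.var(axis=(0, 2, 3))
    batch_mean = x.mean(axis=(0, 2, 3))
    batch_std = np.sqrt(batch_var + epsilon)
    # EMA update of the running statistics kept for the frozen/inference path.
    running_mean = momentum * mean + (1 - momentum) * batch_mean
    running_var = momentum * variance + (1 - momentum) * batch_var
    running_std = np.sqrt(running_var + epsilon)
    return batch_mean, batch_std, running_mean, running_std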


def _partial_init(cls_or_self, **kwargs):
    """
    Wrapper that allows creation of class factories.

    This can be useful when there is a need to create classes with the same
    constructor arguments, but different instances.

    Examples:
        >>> Foo.partial_init = classmethod(_partial_init)
        >>> foo_builder = Foo.partial_init(a=3, b=4).partial_init(answer=42)
        >>> foo_instance1 = foo_builder()
        >>> foo_instance2 = foo_builder()
        >>> result = (id(foo_instance1) == id(foo_instance2))
        >>> print(result)
        False
    """

    class _PartialWrapper:
        r"""
        Class of wrapper that allows creation of class factories.
        """

        def __init__(self, p):
            self.p = p

        def __call__(self, *args, **keywords):
            return self.p(*args, **keywords)

        def __repr__(self):
            return self.p.__repr__()

        partial_init = _partial_init

    r = _PartialWrapper(partial(cls_or_self, **kwargs))
    return r


class _Observer(Cell):
    """
    Base class of Observer. An Observer is used to calculate the statistics of a specific layer.

    Notes:
        This class is an abstract class.

    Args:
        quant_dtype (QuantDtype): The type of FakeQuant data.
    """

    def __init__(self, quant_dtype):
        super(_Observer, self).__init__()
        self.quant_dtype = quant_dtype

    def extend_repr(self):
        s = f"quant_dtype={self.quant_dtype}"
        return s

    def construct(self):
        pass

    partial_init = classmethod(_partial_init)


class UniformQuantObserver(_Observer):
    """
    The base class of Uniform Quantization Observer.

    Args:
        quant_dtype (QuantDtype): The type of FakeQuant data. Default: QuantDtype.INT8.
        per_channel (bool): Quantization granularity based on layer or on channel. Default: False.
        symmetric (bool): Whether the quantization algorithm is symmetric or not. Default: False.
        narrow_range (bool): Whether the quantization algorithm uses narrow range or not. Default: False.
        num_channels (int): Declares the min and max channel size. Default: 1.

    Returns:
        Tensor.
    """

    min_max_map = {
        QuantDtype.INT2: (-2, 1),
        QuantDtype.INT3: (-4, 3),
        QuantDtype.INT4: (-8, 7),
        QuantDtype.INT5: (-16, 15),
        QuantDtype.INT6: (-32, 31),
        QuantDtype.INT7: (-64, 63),
        QuantDtype.INT8: (-128, 127),

        QuantDtype.UINT2: (0, 3),
        QuantDtype.UINT3: (0, 7),
        QuantDtype.UINT4: (0, 15),
        QuantDtype.UINT5: (0, 31),
        QuantDtype.UINT6: (0, 63),
        QuantDtype.UINT7: (0, 127),
        QuantDtype.UINT8: (0, 255)
    }

    def __init__(self, quant_dtype=QuantDtype.INT8, per_channel=False, symmetric=False, narrow_range=False,
                 num_channels=1):
        super(UniformQuantObserver, self).__init__(quant_dtype)
        self.per_channel = per_channel
        self.symmetric = symmetric
        self.narrow_range = narrow_range
        self.num_channels = num_channels


class FakeQuantWithMinMaxObserver(UniformQuantObserver):
    r"""
    Quantization aware operation which provides the fake quantization observer function on data with min and max.

    The running min/max :math:`x_{min}` and :math:`x_{max}` are computed as:

    .. math::

        \begin{array}{ll} \\
            x_{min} =
            \begin{cases}
                \min(\min(X), 0)
                  & \text{ if } ema = \text{False} \\
                \min((1 - c) \min(X) + \text{c } x_{min}, 0)
                  & \text{ if } \text{otherwise}
            \end{cases}\\
            x_{max} =
            \begin{cases}
                \max(\max(X), 0)
                  & \text{ if } ema = \text{False} \\
                \max((1 - c) \max(X) + \text{c } x_{max}, 0)
                  & \text{ if } \text{otherwise}
            \end{cases}
        \end{array}

    where X is the input tensor, and :math:`c` is the `ema_decay`.

    The scale and zero point zp is computed as:

    .. math::

        \begin{array}{ll} \\
            scale =
            \begin{cases}
                \frac{x_{max} - x_{min}}{Q_{max} - Q_{min}}
                  & \text{ if } symmetric = \text{False} \\
                \frac{2\max(x_{max}, \left | x_{min} \right |) }{Q_{max} - Q_{min}}
                  & \text{ if } \text{otherwise}
            \end{cases}\\
            zp\_min = Q_{min} - \frac{x_{min}}{scale} \\
            zp = \left \lfloor \min(Q_{max}, \max(Q_{min}, zp\_min)) + 0.5 \right \rfloor
        \end{array}

    where :math:`Q_{max}` and :math:`Q_{min}` are decided by quant_dtype, for example, if quant_dtype=INT8,
    then :math:`Q_{max} = 127` and :math:`Q_{min} = -128`.

    The fake quant output is computed as:

    .. math::

        \begin{array}{ll} \\
            u_{min} = (Q_{min} - zp) * scale \\
            u_{max} = (Q_{max} - zp) * scale \\
            u_X = \left \lfloor \frac{\min(u_{max}, \max(u_{min}, X)) - u_{min}}{scale}
            + 0.5 \right \rfloor \\
            output = u_X * scale + u_{min}
        \end{array}

    Args:
        min_init (int, float): The initialized min value. Default: -6.
        max_init (int, float): The initialized max value. Default: 6.
        ema (bool): Whether the exponential Moving Average algorithm updates min and max. Default: False.
        ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.
        per_channel (bool): Quantization granularity based on layer or on channel. Default: False.
        channel_axis (int): Quantization by channel axis. Default: 1.
        num_channels (int): Declares the min and max channel size. Default: 1.
        quant_dtype (QuantDtype): The datatype of quantization, supporting 4 and 8 bits. Default: QuantDtype.INT8.
        symmetric (bool): Whether the quantization algorithm is symmetric or not. Default: False.
        narrow_range (bool): Whether the quantization algorithm uses narrow range or not. Default: False.
        quant_delay (int): Quantization delay parameters according to the global step. Default: 0.

    Inputs:
        - **input** (Tensor) - The input of FakeQuantWithMinMaxObserver.

    Outputs:
        Tensor, with the same type and shape as the `input`.

    Examples:
        >>> fake_quant = nn.FakeQuantWithMinMaxObserver()
        >>> input = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
        >>> output = fake_quant(input)
        >>> print(output)
        [[ 0.9882355  1.9764705  0.9882355]
         [-1.9764705  0.        -0.9882355]]
    """

    def __init__(self,
                 min_init=-6,
                 max_init=6,
                 ema=False,
                 ema_decay=0.999,
                 per_channel=False,
                 channel_axis=1,
                 num_channels=1,
                 quant_dtype=QuantDtype.INT8,
                 symmetric=False,
                 narrow_range=False,
                 quant_delay=0):
        """Initialize FakeQuantWithMinMaxObserver"""
        super(FakeQuantWithMinMaxObserver, self).__init__(quant_dtype=quant_dtype, per_channel=per_channel,
                                                          symmetric=symmetric, narrow_range=narrow_range,
                                                          num_channels=num_channels)
        Validator.check_value_type("min_init", min_init, [int, float], type(self).__name__)
        Validator.check_value_type("max_init", max_init, [int, float], type(self).__name__)
        Validator.check("min_init", min_init, "max_init", max_init, rel=Rel.LT)
        Validator.check_non_negative_int(quant_delay, 'quant_delay')
        self.min_init = min_init
        self.max_init = max_init
        self.quant_dtype = quant_dtype
        self.ema = ema
        self.ema_decay = ema_decay
        self.per_channel = per_channel
        self.num_channels = num_channels
        self.channel_axis = channel_axis
        self.quant_delay = quant_delay
        self.symmetric = symmetric
        self.narrow_range = narrow_range
        self.is_ascend = context.get_context('device_target') == "Ascend"

        # init tensor min and max for fake quantized operation
        if self.per_channel:
            min_array = np.array([self.min_init] * self.num_channels).astype(np.float32)
            max_array = np.array([self.max_init] * self.num_channels).astype(np.float32)
        else:
            min_array = np.array([self.min_init]).astype(np.float32)
            max_array = np.array([self.max_init]).astype(np.float32)
        self.minq = Parameter(Tensor(min_array), name='quant_min', requires_grad=False)
        self.maxq = Parameter(Tensor(max_array), name='quant_max', requires_grad=False)

        # init fake quant relative op
        if self.per_channel:
            quant_fun = partial(Q.FakeQuantPerChannel, channel_axis=self.channel_axis)
            ema_fun = partial(Q.MinMaxUpdatePerChannel, channel_axis=self.channel_axis)
        else:
            quant_fun = Q.FakeQuantPerLayer
            ema_fun = Q.MinMaxUpdatePerLayer
        self.ema_update = ema_fun(ema=self.ema, ema_decay=self.ema_decay)
        if self.is_ascend:
            self.fake_quant_train = quant_fun(num_bits=self.quant_dtype.num_bits,
                                              symmetric=self.symmetric,
                                              narrow_range=self.narrow_range,
                                              quant_delay=self.quant_delay)
            self.fake_quant_infer = self.fake_quant_train
        else:
            quant_fun = partial(quant_fun,
                                ema=self.ema,
                                ema_decay=ema_decay,
                                num_bits=self.quant_dtype.num_bits,
                                symmetric=self.symmetric,
                                narrow_range=self.narrow_range,
                                quant_delay=self.quant_delay)
            self.fake_quant_train = quant_fun(training=True)
            self.fake_quant_infer = quant_fun(training=False)

    def extend_repr(self):
        s = 'quant_dtype={}, symmetric={}, narrow_range={}, ema={}({}), per_channel={}({}, {}), ' \
            'quant_delay={}, min_init={}, max_init={}'.format(self.quant_dtype, self.symmetric, self.narrow_range,
                                                              self.ema, self.ema_decay, self.per_channel,
                                                              self.channel_axis, self.num_channels, self.quant_delay,
                                                              self.min_init, self.max_init)
        return s

    def construct(self, x):
        if self.training:
            min_up, max_up = self.ema_update(x, self.minq, self.maxq)
            self.minq = min_up
            self.maxq = max_up
            out = self.fake_quant_train(x, self.minq, self.maxq)
        else:
            out = self.fake_quant_infer(x, self.minq, self.maxq)
        return out
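

# A minimal NumPy sketch of the asymmetric fake-quantization formulas from the
# FakeQuantWithMinMaxObserver docstring above (illustrative only; the real work is
# done by the Q.FakeQuantPerLayer / Q.FakeQuantPerChannel kernels). np.round is used
# for the rounding step; its round-half-to-even behavior reproduces the docstring
# example: with the defaults, np.array([[1, 2, 1], [-2, 0, -1]]) maps to
# [[0.9882355, 1.9764705, 0.9882355], [-1.9764705, 0., -0.9882355]].
def _demo_fake_quant(x, x_min=-6.0, x_max=6.0, q_min=-128, q_max=127):
    scale = (x_max - x_min) / (q_max - q_min)
    zp = np.floor(min(q_max, max(q_min, q_min - x_min / scale)) + 0.5)
    u_min = (q_min - zp) * scale
    u_max = (q_max - zp) * scale
    u_x = np.round((np.clip(x, u_min, u_max) - u_min) / scale)
    return u_x * scale + u_min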


QuantConfig = namedtuple("QuantConfig", ['weight', 'activation'])

quant_config_default = QuantConfig(weight=FakeQuantWithMinMaxObserver, activation=FakeQuantWithMinMaxObserver)
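

# The layers below build their observers through this QuantConfig, calling
# quant_config.weight(...) and quant_config.activation(...). A sketch of a custom
# configuration assembled with the partial_init factory defined above (the keyword
# values here are arbitrary, for illustration only):
_example_quant_config = QuantConfig(
    weight=FakeQuantWithMinMaxObserver.partial_init(per_channel=True, symmetric=True),
    activation=FakeQuantWithMinMaxObserver.partial_init(ema=True, ema_decay=0.99))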


class Conv2dBnFoldQuantOneConv(Cell):
    r"""
    2D convolution which uses the convolution layer statistics once to calculate the BatchNorm folded construct.

    This part is a more detailed overview of the Conv2d operation. For more details about quantization,
    please refer to :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.

    Args:
        in_channels (int): The number of input channel :math:`C_{in}`.
        out_channels (int): The number of output channel :math:`C_{out}`.
        kernel_size (Union[int, tuple]): Specifies the height and width of the 2D convolution window.
        stride (int): Specifies stride for all spatial dimensions with the same value. Default: 1.
        pad_mode (str): Specifies padding mode. The optional values are "same", "valid", "pad". Default: "same".
        padding (int): Implicit paddings on both sides of the input. Default: 0.
        eps (float): Parameters for BatchNorm. Default: 1e-5.
        momentum (float): Parameters for BatchNorm op. Default: 0.997.
        dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1.
        group (int): Splits filter into groups, `in_channels` and `out_channels` must be
            divisible by the number of groups. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            convolution kernel. Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            bias vector. Default: 'zeros'.
        beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            beta vector. Default: 'zeros'.
        gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            gamma vector. Default: 'ones'.
        mean_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            mean vector. Default: 'zeros'.
        var_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            variance vector. Default: 'ones'.
        fake (bool): Whether the Conv2dBnFoldQuant Cell adds a FakeQuantWithMinMaxObserver. Default: True.
        quant_config (QuantConfig): Configures the observer types and quant settings of weight and activation. Can be
            generated by the compression.quant.create_quant_config method.
            Default: both set to default FakeQuantWithMinMaxObserver.
        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Examples:
        >>> qconfig = compression.quant.create_quant_config()
        >>> conv2d_bnfold = nn.Conv2dBnFoldQuantOneConv(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
        ...                                             quant_config=qconfig)
        >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
        >>> result = conv2d_bnfold(input)
        >>> output = result.shape
        >>> print(output)
        (2, 6, 2, 2)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 eps=1e-5,
                 momentum=0.997,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros',
                 beta_init='zeros',
                 gamma_init='ones',
                 mean_init='zeros',
                 var_init='ones',
                 fake=True,
                 quant_config=quant_config_default,
                 quant_dtype=QuantDtype.INT8):
        """Initialize Conv2dBnFoldQuant layer"""
        super(Conv2dBnFoldQuantOneConv, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.kernel_size = twice(kernel_size)
        self.stride = twice(stride)
        self.pad_mode = pad_mode
        self.padding = padding
        self.dilation = twice(dilation)
        self.group = group
        self.eps = eps
        self.momentum = momentum
        self.has_bias = has_bias
        self.fake = fake
        self.quant_config = quant_config
        self.quant_dtype = quant_dtype
        data_format = 'NCHW'
        self.format = Validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.cls_name)
        self._target = context.get_context("device_target")
        self.is_graph_mode = context.get_context("mode") == context.GRAPH_MODE
        if context.get_context("enable_ge"):
            self.is_ge_backend = True
        else:
            self.is_ge_backend = False
        self.enable_default_train = self.is_graph_mode and \
                                    (self.is_ge_backend or self._target == "Ascend")

        # initialize convolution op and Parameter
        self.conv = P.Conv2D(out_channel=out_channels,
                             kernel_size=self.kernel_size,
                             pad_mode=pad_mode,
                             pad=padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=group)
        weight_shape = [out_channels, in_channels // group, *self.kernel_size]
        channel_axis = 0
        self.channel_axis = channel_axis
        self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
        self.bias_add = P.BiasAdd()
        if Validator.check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        else:
            self.bias = None

        # initialize BatchNorm Parameter
        self.gamma = Parameter(initializer(gamma_init, [out_channels]), name='gamma')
        self.beta = Parameter(initializer(beta_init, [out_channels]), name='beta')
        self.moving_mean = Parameter(initializer(mean_init, [out_channels]), name='moving_mean', requires_grad=False)
        self.moving_variance = Parameter(initializer(var_init, [out_channels]), name='moving_variance',
                                         requires_grad=False)

        # initialize fake ops
        self.fake_quant_weight = quant_config.weight(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     channel_axis=channel_axis,
                                                     num_channels=out_channels,
                                                     quant_dtype=quant_dtype)
        if self._target == "Ascend":
            self.bn_train = P.BatchNorm(is_training=True,
                                        epsilon=self.eps,
                                        momentum=self.momentum)
        if self._target == "GPU":
            self.bn_train = P.FusedBatchNormEx(mode=1,
                                               epsilon=self.eps,
                                               momentum=self.momentum,
                                               data_format=self.format)
        if self._target == "CPU":
            self.bn_train = P.FusedBatchNorm(mode=1,
                                             epsilon=self.eps,
                                             momentum=self.momentum)
        self.bn_infer = P.BatchNorm(is_training=False, epsilon=self.eps, data_format=self.format)

        data_parallel_strategy = ((1,), (1,))
        data_parallel_strategy_one = ((1,), ())
        self.sub_mean = P.Sub().shard(data_parallel_strategy)
        self.sub_var = P.Sub().shard(data_parallel_strategy)
        self.mul_mean = P.Mul().shard(data_parallel_strategy_one)
        self.mul_var = P.Mul().shard(data_parallel_strategy_one)
        self.assign_sub_mean = P.AssignSub().shard(data_parallel_strategy)
        self.assign_sub_var = P.AssignSub().shard(data_parallel_strategy)
        self.reshape = P.Reshape()

    def extend_repr(self):
        s = 'in_channels={}, out_channels={}, kernel_size={}, stride={}, ' \
            'pad_mode={}, padding={}, dilation={}, group={}, ' \
            'fake={}, momentum={}, quant_delay={}'.format(self.in_channels, self.out_channels,
                                                          self.kernel_size, self.stride,
                                                          self.pad_mode, self.padding, self.dilation,
                                                          self.group,
                                                          self.fake, self.momentum,
                                                          self.fake_quant_weight.quant_delay)
        return s

    def construct(self, x):
        running_std = P.Sqrt()(P.Add()(self.moving_variance, self.eps))
        scale_factor = self.gamma / running_std
        if self.channel_axis:
            scale_factor = self.reshape(scale_factor, (1, -1, 1, 1))
        else:
            scale_factor = self.reshape(scale_factor, (-1, 1, 1, 1))
        weight = self.weight * scale_factor
        if self.fake:
            weight = self.fake_quant_weight(weight)
        conv = self.conv(x, weight)
        scale_factor = self.reshape(scale_factor, (1, -1, 1, 1))
        if self.enable_default_train:
            scale_factor = P.Reciprocal()(scale_factor)
            conv_orig = conv * scale_factor
        else:
            conv_orig = conv / scale_factor
        if self.training:
            return self.bn_train(conv_orig,
                                 self.gamma,
                                 self.beta,
                                 self.moving_mean,
                                 self.moving_variance)[0]

        return self.bn_infer(conv_orig,
                             self.gamma,
                             self.beta,
                             self.moving_mean,
                             self.moving_variance)[0]
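

# A small NumPy sketch (illustrative only) of the scale-factor trick used in
# Conv2dBnFoldQuantOneConv.construct above: the weight is folded with
# gamma / running_std before fake-quantization, and because convolution is linear
# in the weight, dividing the convolution output by the same per-channel factor
# recovers the un-folded result that the trailing BatchNorm expects.
def _demo_fold_scale(weight, gamma, moving_variance, eps=1e-5):
    running_std = np.sqrt(moving_variance + eps)
    scale_factor = gamma / running_std                   # shape: (out_channels,)
    folded_weight = weight * scale_factor.reshape(-1, 1, 1, 1)
    # conv(x, folded_weight) / scale_factor (broadcast over NCHW) == conv(x, weight)
    return folded_weight, scale_factor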


class Conv2dBnFoldQuant(Cell):
    r"""
    2D convolution with the BatchNorm operation folded construct.

    This part is a more detailed overview of the Conv2d operation. For more details about quantization,
    please refer to :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.

    Args:
        in_channels (int): The number of input channel :math:`C_{in}`.
        out_channels (int): The number of output channel :math:`C_{out}`.
        kernel_size (Union[int, tuple]): Specifies the height and width of the 2D convolution window.
        stride (int): Specifies stride for all spatial dimensions with the same value. Default: 1.
        pad_mode (str): Specifies padding mode. The optional values are "same", "valid", "pad". Default: "same".
        padding (int): Implicit paddings on both sides of the input. Default: 0.
        eps (float): Parameters for BatchNorm. Default: 1e-5.
        momentum (float): Parameters for BatchNorm op. Default: 0.997.
        dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1.
        group (int): Splits filter into groups, `in_channels` and `out_channels` must be
            divisible by the number of groups. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            convolution kernel. Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            bias vector. Default: 'zeros'.
        beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            beta vector. Default: 'zeros'.
        gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            gamma vector. Default: 'ones'.
        mean_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            mean vector. Default: 'zeros'.
        var_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the
            variance vector. Default: 'ones'.
        fake (bool): Whether the Conv2dBnFoldQuant Cell adds a FakeQuantWithMinMaxObserver. Default: True.
        quant_config (QuantConfig): Configures the observer types and quant settings of weight and activation. Can be
            generated by the compression.quant.create_quant_config method.
            Default: both set to default FakeQuantWithMinMaxObserver.
        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.
        freeze_bn (int): The quantization freezes the BatchNorm op according to the global step. Default: 100000.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> qconfig = compression.quant.create_quant_config()
        >>> conv2d_bnfold = nn.Conv2dBnFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
        ...                                      quant_config=qconfig)
        >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
        >>> output = conv2d_bnfold(input)
        >>> print(output.shape)
        (2, 6, 2, 2)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 eps=1e-5,
                 momentum=0.997,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros',
                 beta_init='zeros',
                 gamma_init='ones',
                 mean_init='zeros',
                 var_init='ones',
                 fake=True,
                 quant_config=quant_config_default,
                 quant_dtype=QuantDtype.INT8,
                 freeze_bn=100000):
        """Initialize Conv2dBnFoldQuant layer"""
        super(Conv2dBnFoldQuant, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.kernel_size = twice(kernel_size)
        self.stride = twice(stride)
        self.pad_mode = pad_mode
        self.padding = padding
        self.dilation = twice(dilation)
        self.group = group
        self.eps = eps
        self.momentum = momentum
        self.has_bias = has_bias
        self.freeze_bn = freeze_bn
        self.fake = fake
        self.quant_config = quant_config
        self.quant_dtype = quant_dtype
        self.is_gpu = context.get_context('device_target') == "GPU"

        # initialize convolution op and Parameter
        self.conv = P.Conv2D(out_channel=out_channels,
                             kernel_size=self.kernel_size,
                             pad_mode=pad_mode,
                             pad=padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=group)
        weight_shape = [out_channels, in_channels // group, *self.kernel_size]
        channel_axis = 0
        self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
        self.bias_add = P.BiasAdd()
        if Validator.check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        else:
            self.bias = None

        # initialize BatchNorm Parameter
        self.gamma = Parameter(initializer(gamma_init, [out_channels]), name='gamma')
        self.beta = Parameter(initializer(beta_init, [out_channels]), name='beta')
        self.moving_mean = Parameter(initializer(mean_init, [out_channels]), name='moving_mean', requires_grad=False)
        self.moving_variance = Parameter(initializer(var_init, [out_channels]), name='moving_variance',
                                         requires_grad=False)

        # initialize fake ops
        self.fake_quant_weight = quant_config.weight(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     channel_axis=channel_axis,
                                                     num_channels=out_channels,
                                                     quant_dtype=quant_dtype)
        self.batchnorm_fold = BatchNormFoldCell(epsilon=eps, momentum=momentum, freeze_bn=freeze_bn)
        self.correct_mul = Q.CorrectionMul(channel_axis)
        if context.get_context('device_target') == "Ascend":
            self.batchnorm_fold2_train = Q.BatchNormFold2_D(freeze_bn=freeze_bn)
            self.batchnorm_fold2_infer = Q.BatchNormFold2_D(freeze_bn=0)
        elif context.get_context('device_target') == "GPU":
            self.batchnorm_fold2_train = Q.BatchNormFold2(freeze_bn=freeze_bn)
            self.batchnorm_fold2_infer = Q.BatchNormFold2(freeze_bn=0)
        else:
            raise ValueError("Unsupported platform: {}".format(context.get_context('device_target')))
        self.step = Parameter(initializer('normal', [1], dtype=mstype.int32), name='step', requires_grad=False)
        self.one = Tensor(1, mstype.int32)
        self.assignadd = P.AssignAdd()

    def extend_repr(self):
        s = 'in_channels={}, out_channels={}, kernel_size={}, stride={}, ' \
            'pad_mode={}, padding={}, dilation={}, group={}, ' \
            'fake={}, freeze_bn={}, momentum={}, quant_delay={}'.format(self.in_channels, self.out_channels,
                                                                        self.kernel_size, self.stride,
                                                                        self.pad_mode, self.padding, self.dilation,
                                                                        self.group,
                                                                        self.fake, self.freeze_bn, self.momentum,
                                                                        self.fake_quant_weight.quant_delay)
        return s

    def construct(self, x):
        out_conv = self.conv(x, self.weight)
        if self.has_bias:
            out_conv = self.bias_add(out_conv, self.bias)
        # BN fold1
        batch_mean, batch_std, running_mean, running_std = self.batchnorm_fold(out_conv,
                                                                               self.moving_mean,
                                                                               self.moving_variance,
                                                                               self.step)
        # fake weight
        weight = self.correct_mul(self.weight, self.gamma, running_std)
        if self.fake:
            weight = self.fake_quant_weight(weight)
        out = self.conv(x, weight)
        if self.has_bias:
            out = self.bias_add(out, self.bias)
        # BN fold2
        if self.is_gpu:
            if self.training:
                out = self.batchnorm_fold2_train(out, self.beta, self.gamma,
                                                 batch_std, batch_mean, running_std, running_mean, self.step)
                self.assignadd(self.step, self.one)
            else:
                out = self.batchnorm_fold2_infer(out, self.beta, self.gamma,
                                                 batch_std, batch_mean, running_std, running_mean, self.step)
        else:
            if self.training:
                out = self.batchnorm_fold2_train(out, self.beta, self.gamma, batch_std, batch_mean, running_std)
                self.assignadd(self.step, self.one)
            else:
                out = self.batchnorm_fold2_infer(out, self.beta, self.gamma, batch_std, batch_mean, running_std)
        return out


class Conv2dBnWithoutFoldQuant(Cell):
    r"""
    2D convolution and batchnorm without fold, with fake quantized construct.

    This part is a more detailed overview of the Conv2d operation. For more details about quantization,
    please refer to :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.

    Args:
        in_channels (int): The number of input channel :math:`C_{in}`.
        out_channels (int): The number of output channel :math:`C_{out}`.
        kernel_size (Union[int, tuple]): Specifies the height and width of the 2D convolution window.
        stride (int): Specifies stride for all spatial dimensions with the same value. Default: 1.
        pad_mode (str): Specifies padding mode. The optional values are "same", "valid", "pad". Default: "same".
        padding (int): Implicit paddings on both sides of the input. Default: 0.
        dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1.
        group (int): Splits filter into groups, `in_channels` and `out_channels` must be
            divisible by the number of groups. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        eps (float): Parameters for BatchNorm. Default: 1e-5.
        momentum (float): Parameters for BatchNorm op. Default: 0.997.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
            Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Default: 'zeros'.
        quant_config (QuantConfig): Configures the observer types and quant settings of weight and activation. Can be
            generated by the compression.quant.create_quant_config method.
            Default: both set to default FakeQuantWithMinMaxObserver.
        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> qconfig = compression.quant.create_quant_config()
        >>> conv2d_no_bnfold = nn.Conv2dBnWithoutFoldQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
        ...                                                quant_config=qconfig)
        >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
        >>> output = conv2d_no_bnfold(input)
        >>> print(output.shape)
        (2, 6, 2, 2)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 eps=1e-5,
                 momentum=0.997,
                 weight_init='normal',
                 bias_init='zeros',
                 quant_config=quant_config_default,
                 quant_dtype=QuantDtype.INT8):
        super(Conv2dBnWithoutFoldQuant, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = has_bias
        self.kernel_size = twice(kernel_size)
        self.stride = twice(stride)
        self.dilation = twice(dilation)
        self.pad_mode = pad_mode
        self.padding = padding
        self.group = group

        self.bias_add = P.BiasAdd()
        if Validator.check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        else:
            self.bias = None

        # initialize convolution op and Parameter
        self.conv = P.Conv2D(out_channel=self.out_channels,
                             kernel_size=self.kernel_size,
                             mode=1,
                             pad_mode=self.pad_mode,
                             pad=self.padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=self.group)
        weight_shape = [out_channels, in_channels // group, *self.kernel_size]
        channel_axis = 0
        self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
        self.fake_quant_weight = quant_config.weight(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     channel_axis=channel_axis,
                                                     num_channels=out_channels,
                                                     quant_dtype=quant_dtype)
        self.batchnorm = BatchNorm2d(out_channels, eps=eps, momentum=momentum)

    def construct(self, x):
        weight = self.fake_quant_weight(self.weight)
        out = self.conv(x, weight)
        if self.has_bias:
            out = self.bias_add(out, self.bias)
        out = self.batchnorm(out)
        return out

    def extend_repr(self):
        s = 'in_channels={}, out_channels={}, kernel_size={}, stride={}, ' \
            'pad_mode={}, padding={}, dilation={}, group={}, ' \
            'has_bias={}, quant_delay={}'.format(self.in_channels, self.out_channels, self.kernel_size, self.stride,
                                                 self.pad_mode, self.padding, self.dilation, self.group,
                                                 self.has_bias, self.fake_quant_weight.quant_delay)
        return s


class Conv2dQuant(Cell):
    r"""
    2D convolution with fake quantized operation layer.

    This part is a more detailed overview of the Conv2d operation. For more details about quantization,
    please refer to :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.

    Args:
        in_channels (int): The number of input channel :math:`C_{in}`.
        out_channels (int): The number of output channel :math:`C_{out}`.
        kernel_size (Union[int, tuple]): Specifies the height and width of the 2D convolution window.
        stride (int): Specifies stride for all spatial dimensions with the same value. Default: 1.
        pad_mode (str): Specifies padding mode. The optional values are "same", "valid", "pad". Default: "same".
        padding (int): Implicit paddings on both sides of the input. Default: 0.
        dilation (int): Specifies the dilation rate to use for dilated convolution. Default: 1.
        group (int): Splits filter into groups, `in_channels` and `out_channels` must be
            divisible by the number of groups. Default: 1.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
            Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Default: 'zeros'.
        quant_config (QuantConfig): Configures the observer types and quant settings of weight and activation. Can be
            generated by the compression.quant.create_quant_config method.
            Default: both set to default FakeQuantWithMinMaxObserver.
        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.

    Outputs:
        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> qconfig = compression.quant.create_quant_config()
        >>> conv2d_quant = nn.Conv2dQuant(1, 6, kernel_size=(2, 2), stride=(1, 1), pad_mode="valid",
        ...                               quant_config=qconfig)
        >>> input = Tensor(np.random.randint(-2, 2, (2, 1, 3, 3)), mindspore.float32)
        >>> output = conv2d_quant(input)
        >>> print(output.shape)
        (2, 6, 2, 2)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 pad_mode='same',
                 padding=0,
                 dilation=1,
                 group=1,
                 has_bias=False,
                 weight_init='normal',
                 bias_init='zeros',
                 quant_config=quant_config_default,
                 quant_dtype=QuantDtype.INT8):
        super(Conv2dQuant, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = has_bias
        self.kernel_size = twice(kernel_size)
        self.stride = twice(stride)
        self.dilation = twice(dilation)
        self.pad_mode = pad_mode
        self.padding = padding
        self.group = group

        weight_shape = [out_channels, in_channels // group, *self.kernel_size]
        self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')

        self.bias_add = P.BiasAdd()
        if Validator.check_bool(has_bias):
            self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        else:
            self.bias = None

        self.conv = P.Conv2D(out_channel=self.out_channels,
                             kernel_size=self.kernel_size,
                             mode=1,
                             pad_mode=self.pad_mode,
                             pad=self.padding,
                             stride=self.stride,
                             dilation=self.dilation,
                             group=self.group)
        channel_axis = 0
        self.fake_quant_weight = quant_config.weight(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     channel_axis=channel_axis,
                                                     num_channels=out_channels,
                                                     quant_dtype=quant_dtype)

    def construct(self, x):
        weight = self.fake_quant_weight(self.weight)
        out = self.conv(x, weight)
        if self.has_bias:
            return self.bias_add(out, self.bias)
        return out

    def extend_repr(self):
        s = 'in_channels={}, out_channels={}, kernel_size={}, stride={}, ' \
            'pad_mode={}, padding={}, dilation={}, group={}, ' \
            'has_bias={}, quant_delay={}'.format(self.in_channels, self.out_channels, self.kernel_size, self.stride,
                                                 self.pad_mode, self.padding, self.dilation, self.group,
                                                 self.has_bias, self.fake_quant_weight.quant_delay)
        return s


class DenseQuant(Cell):
    r"""
    The fully connected layer with fake quantized operation.

    This part is a more detailed overview of the Dense operation. For more details about quantization,
    please refer to :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.

    Args:
        in_channels (int): The dimension of the input space.
        out_channels (int): The dimension of the output space.
        weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype
            is same as input. The values of str refer to the function `initializer`. Default: 'normal'.
        bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is
            same as input. The values of str refer to the function `initializer`. Default: 'zeros'.
        has_bias (bool): Specifies whether the layer uses a bias vector. Default: True.
        activation (Union[str, Cell, Primitive]): The activation function applied to the output of the layer,
            eg. 'relu'. Default: None.
        quant_config (QuantConfig): Configures the observer types and quant settings of weight and activation. Can be
            generated by the compression.quant.create_quant_config method.
            Default: both set to default FakeQuantWithMinMaxObserver.
        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, in\_channels)`.

    Outputs:
        Tensor of shape :math:`(N, out\_channels)`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> qconfig = compression.quant.create_quant_config()
        >>> dense_quant = nn.DenseQuant(3, 6, quant_config=qconfig)
        >>> input = Tensor(np.random.randint(-2, 2, (2, 3)), mindspore.float32)
        >>> result = dense_quant(input)
        >>> output = result.shape
        >>> print(output)
        (2, 6)
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 has_bias=True,
                 activation=None,
                 quant_config=quant_config_default,
                 quant_dtype=QuantDtype.INT8):
        super(DenseQuant, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)

        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")
        self.weight = Parameter(initializer(
            weight_init, [out_channels, in_channels]), name="weight")

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")
            self.bias = Parameter(initializer(
                bias_init, [out_channels]), name="bias")

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation) if isinstance(activation, str) else activation
        if activation is not None and not isinstance(self.activation, (Cell, Primitive)):
            raise TypeError("The activation must be str or Cell or Primitive, but got {}.".format(activation))
        self.activation_flag = self.activation is not None
        self.fake_quant_weight = quant_config.weight(min_init=-6,
                                                     max_init=6,
                                                     ema=False,
                                                     channel_axis=0,
                                                     num_channels=out_channels,
                                                     quant_dtype=quant_dtype)

    def construct(self, x):
        """Use operators to construct the Dense layer."""
        output = self.fake_quant_weight(self.weight)
        output = self.matmul(x, output)
        if self.has_bias:
            output = self.bias_add(output, self.bias)
        if self.activation_flag:
            return self.activation(output)
        return output

    def extend_repr(self):
        """A pretty print for Dense layer."""
        s = 'in_channels={}, out_channels={}, weight={}, has_bias={}'.format(
            self.in_channels, self.out_channels, self.weight, self.has_bias)
        if self.has_bias:
            s += ', bias={}'.format(self.bias)
        if self.activation_flag:
            s += ', activation={}'.format(self.activation)
        return s


class _QuantActivation(Cell):
    r"""
    Base class for quantization aware training activation functions. Adds a fake quantized operation
    after the activation operation.
    """

    def get_origin(self):
        raise NotImplementedError


class ActQuant(_QuantActivation):
    r"""
    Quantization aware training activation function.

    Adds the fake quantized operation to the end of the activation operation, by which the output of the
    activation operation will be truncated. For more details about quantization,
    please refer to :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.

    Args:
        activation (Cell): Activation cell.
        ema (bool): Whether the exponential Moving Average algorithm updates min and max. Default: False.
        ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.
        fake_before (bool): Whether to add the fake quantized operation before the activation. Default: False.
        quant_config (QuantConfig): Configures the observer types and quant settings of weight and activation. Can be
            generated by the compression.quant.create_quant_config method.
            Default: both set to default FakeQuantWithMinMaxObserver.
        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.

    Inputs:
        - **input** (Tensor) - The input of ActQuant.

    Outputs:
        Tensor, with the same type and shape as the `input`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> qconfig = compression.quant.create_quant_config()
        >>> act_quant = nn.ActQuant(nn.ReLU(), quant_config=qconfig)
        >>> input = Tensor(np.array([[1, 2, -1], [-2, 0, -1]]), mindspore.float32)
        >>> output = act_quant(input)
        >>> print(output)
        [[0.9882355 1.9764705 0.       ]
         [0.        0.        0.       ]]
    """

    def __init__(self,
                 activation,
                 ema=False,
                 ema_decay=0.999,
                 fake_before=False,
                 quant_config=quant_config_default,
                 quant_dtype=QuantDtype.INT8):
        super(ActQuant, self).__init__()
        self.act = Validator.check_isinstance("activation", activation, Cell)
        self.fake_before = Validator.check_bool(fake_before, "fake_before")
        if self.fake_before:
            self.fake_quant_act_before = quant_config.activation(min_init=-6,
                                                                 max_init=6,
                                                                 ema=ema,
                                                                 ema_decay=ema_decay,
                                                                 quant_dtype=quant_dtype)
        self.fake_quant_act = quant_config.activation(min_init=-6,
                                                      max_init=6,
                                                      ema=ema,
                                                      ema_decay=ema_decay,
                                                      quant_dtype=quant_dtype)

    def construct(self, x):
        if self.fake_before:
            x = self.fake_quant_act_before(x)
        x = self.act(x)
        x = self.fake_quant_act(x)
        return x

    def get_origin(self):
        return self.act


class TensorAddQuant(Cell):
    r"""
    Adds a fake quantized operation after the TensorAdd operation.

    This part is a more detailed overview of the TensorAdd operation. For more details about quantization,
    please refer to :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.

    Args:
        ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.
        quant_config (QuantConfig): Configures the observer types and quant settings of weight and activation. Can be
            generated by the compression.quant.create_quant_config method.
            Default: both set to default FakeQuantWithMinMaxObserver.
        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.

    Inputs:
        - **input_x1** (Tensor) - The first tensor of TensorAddQuant.
        - **input_x2** (Tensor) - The second tensor of TensorAddQuant.

    Outputs:
        Tensor, with the same type and shape as the `input_x1`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> qconfig = compression.quant.create_quant_config()
        >>> add_quant = nn.TensorAddQuant(quant_config=qconfig)
        >>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
        >>> input_x2 = Tensor(np.ones((2, 3)), mindspore.float32)
        >>> output = add_quant(input_x1, input_x2)
        >>> print(output)
        [[ 1.9764705  3.011765   1.9764705]
         [-0.9882355  0.9882355  0.       ]]
    """

    def __init__(self,
                 ema_decay=0.999,
                 quant_config=quant_config_default,
                 quant_dtype=QuantDtype.INT8):
        super(TensorAddQuant, self).__init__()
        self.fake_quant_act = quant_config.activation(min_init=-6,
                                                      max_init=6,
                                                      ema=True,
                                                      ema_decay=ema_decay,
                                                      quant_dtype=quant_dtype)
        self.add = P.Add()

    def construct(self, x1, x2):
        x = self.add(x1, x2)
        x = self.fake_quant_act(x)
        return x


class MulQuant(Cell):
    r"""
    Adds a fake quantized operation after the `Mul` operation.

    This part is a more detailed overview of the `Mul` operation. For more details about quantization,
    please refer to :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.

    Args:
        ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.
        quant_config (QuantConfig): Configures the observer types and quant settings of weight and activation. Can be
            generated by the compression.quant.create_quant_config method.
            Default: both set to default FakeQuantWithMinMaxObserver.
        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.

    Inputs:
        - **input_x1** (Tensor) - The first tensor of MulQuant.
        - **input_x2** (Tensor) - The second tensor of MulQuant.

    Outputs:
        Tensor, with the same type and shape as the `input_x1`.

    Supported Platforms:
        ``Ascend`` ``GPU``

    Examples:
        >>> qconfig = compression.quant.create_quant_config()
        >>> mul_quant = nn.MulQuant(quant_config=qconfig)
        >>> input_x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)
        >>> input_x2 = Tensor(np.ones((2, 3)) * 2, mindspore.float32)
        >>> output = mul_quant(input_x1, input_x2)
        >>> print(output)
        [[ 1.9764705  4.0000005  1.9764705]
         [-4.         0.        -1.9764705]]
    """

    def __init__(self,
                 ema_decay=0.999,
                 quant_config=quant_config_default,
                 quant_dtype=QuantDtype.INT8):
        super(MulQuant, self).__init__()
        self.fake_quant_act = quant_config.activation(min_init=-6,
                                                      max_init=6,
                                                      ema=True,
                                                      ema_decay=ema_decay,
                                                      quant_dtype=quant_dtype)
        self.mul = P.Mul()

    def construct(self, x1, x2):
        x = self.mul(x1, x2)
        x = self.fake_quant_act(x)
        return x


class QuantBlock(Cell):
    r"""
    A quant block of Conv/Dense, activation layer for Ascend deploy.

    Calculate Conv or Dense in Int8, with Quant and DeQuant.

    Notes:
        This block is only for deploy, and not trainable.

    Args:
        core_op (Primitive): The Conv or Dense operation to be wrapped.
        weight (Tensor): The weight of the wrapped operation.
        quant_op (Primitive): The quantization operation applied to the input.
        dequant_op (Primitive): The dequantization operation applied to the output.
        dequant_scale (Tensor): The scale used by `dequant_op`.
        bias (Tensor): The bias of the wrapped operation. Default: None.
        activation (Cell): The activation applied to the output of the layer, eg. ReLU. Default: None.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, in\_channels)`.

    Outputs:
        Tensor of shape :math:`(N, out\_channels)`.
    """

    def __init__(self,
                 core_op,
                 weight,
                 quant_op,
                 dequant_op,
                 dequant_scale,
                 bias=None,
                 activation=None):
        super(QuantBlock, self).__init__()
        self.core_op = core_op
        self.weight = weight
        self.quant = quant_op
        self.dequant = dequant_op
        self.dequant_scale = dequant_scale
        self.bias = bias
        self.has_bias = bias is not None
        self.activation = activation
        self.has_act = activation is not None
        self.bias_add = P.BiasAdd()

    def construct(self, x):
        x = self.quant(x)
        if self.has_bias:
            x = self.core_op(x, self.weight)
            x = self.bias_add(x, self.bias)
        else:
            x = self.core_op(x, self.weight)
        x = self.dequant(x, self.dequant_scale)
        x = F.cast(x, mstype.float32)
        if self.has_act:
            x = self.activation(x)
        return x

    def extend_repr(self):
        s = f'quant={self.quant}, core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
        if self.has_bias:
            s += f', bias=shape[{self.bias.shape}]'
        if self.has_act:
            s += f', activation={self.activation}'
        s += f', dequant={self.dequant}'
        return s
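

# A minimal NumPy sketch (assumed semantics, not MindSpore's actual Quant/DeQuant
# kernels) of the deploy-time dataflow QuantBlock wires together: quantize the
# activation to int8, run the core op in the integer domain, then map the int32
# accumulator back to float with the dequant scale. A Dense-like matmul stands in
# for self.core_op here.
def _demo_quant_block(x, w_int8, act_scale, dequant_scale):
    x_int8 = np.clip(np.round(x / act_scale), -128, 127).astype(np.int32)  # Quant
    acc_int32 = x_int8 @ w_int8.astype(np.int32).T                         # integer core op
    return acc_int32 * dequant_scale                                       # DeQuant to float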


class QuantMindirBlock(Cell):
    """A quant binary block of Conv/Dense, activation layer for export MINDIR model.

    Args:
        core_op (Cell): The operation cell.
        weight (Tensor): The weight of the cell.
        bias (Tensor): The bias of the cell. Default: None.
        activation (Cell): The activation applied to the output of the layer, eg. ReLU. Default: None.
        param_dict (dict): The information of the cell.
    """

    def __init__(self,
                 core_op,
                 weight,
                 bias=None,
                 activation=None,
                 param_dict=None):
        super(QuantMindirBlock, self).__init__()
        self.core_op = core_op
        if activation is not None:
            self.core_op.add_prim_attr("activation_name", activation.__class__.__name__)
        self.core_op.add_prim_attr("filter_maxq", Tensor(param_dict["filter_maxq"]))
        self.core_op.add_prim_attr("filter_minq", Tensor(param_dict["filter_minq"]))
        if param_dict["output_maxq"] is not None:
            self.core_op.add_prim_attr("output_maxq", Tensor(param_dict["output_maxq"]))
            self.core_op.add_prim_attr("output_minq", Tensor(param_dict["output_minq"]))
        self.core_op.add_prim_attr("symmetric", Tensor(param_dict["symmetric"]))
        if hasattr(core_op, 'pad_mode'):
            self.core_op.add_prim_attr("pad_mode", core_op.pad_mode)
        self.core_op.add_prim_attr("num_bits", Tensor(8))
        self.core_op.add_prim_attr("narrow_range", Tensor(False))
        if param_dict["input_maxq"] == 'None':
            self.core_op.add_prim_attr("mean", Tensor(param_dict["mean"]))
            self.core_op.add_prim_attr("std_dev", Tensor(param_dict["std_dev"]))
        elif param_dict["input_maxq"] is not None:
            self.core_op.add_prim_attr("input_maxq", Tensor(param_dict["input_maxq"]))
            self.core_op.add_prim_attr("input_minq", Tensor(param_dict["input_minq"]))
        self.weight = weight
        self.bias = bias
        self.has_bias = bias is not None
        self.activation = activation
        self.has_act = activation is not None
        self.bias_add = P.BiasAdd()
        if isinstance(activation, ReLU):
            self.activation = None
            self.has_act = False

    def construct(self, x):
        if self.has_bias:
            x = self.core_op(x, self.weight)
            x = self.bias_add(x, self.bias)
        else:
            x = self.core_op(x, self.weight)
        return x

    def extend_repr(self):
        s = f'core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
        if self.has_bias:
            s += f', bias=shape[{self.bias.shape}]'
        if self.has_act:
            s += f', activation={self.activation}'
        return s