You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

quant_export.py 23 kB

4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501
  1. # Copyright 2020 Huawei Technologies Co., Ltd
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. # ============================================================================
  15. """Export for quantization."""
  16. import copy
  17. import numpy as np
  18. from ... import nn, ops
  19. from ..._checkparam import Validator
  20. from ...common import Tensor
  21. from ...common import dtype as mstype
  22. from ...common.api import _cell_graph_executor as _executor
  23. from ...common.parameter import Parameter
  24. from ...nn import Cell
  25. from ...nn.layer import quant
  26. from ...ops import operations as P
  27. from ...ops import functional as F
  28. from ...ops.operations import _inner_ops as inner
  29. from ..quant import quant_utils
  30. from ..quant.qat import _AddFakeQuantInput, _AddFakeQuantAfterSubCell
  31. __all__ = ["ExportToQuantInferNetwork"]
class QuantBlock(Cell):
    r"""
    A quant block of Conv/Dense, activation layer for Ascend deploy.

    Calculate Conv or Dense in Int8, with Quant and DeQuant.

    Notes:
        This block is only for deploy, and not trainable.

    Args:
        core_op (Primitive): The int8 compute primitive (e.g. `P.MatMul` or a conv op)
            applied between the quantized input and `weight`.
        weight (Tensor): The already-quantized int8 weight tensor.
        quant_op (Primitive): `Quant` op converting the float input to int8.
        dequant_op (Primitive): `Dequant` op converting the int32 accumulation back to float.
        dequant_scale (Tensor): Fused per-channel dequant parameter (uint64) consumed
            by `dequant_op`.
        bias (Tensor): Optional int32 bias added after the core op. Default: None.
        activation (Cell): Optional activation cell applied to the float output.
            Default: None.

    Inputs:
        - **input** (Tensor) - Tensor of shape :math:`(N, in\_channels)`.

    Outputs:
        Tensor of shape :math:`(N, out\_channels)`.
    """

    def __init__(self,
                 core_op,
                 weight,
                 quant_op,
                 dequant_op,
                 dequant_scale,
                 bias=None,
                 activation=None):
        super(QuantBlock, self).__init__()
        self.core_op = core_op
        self.weight = weight
        self.quant = quant_op
        self.dequant = dequant_op
        self.dequant_scale = dequant_scale
        self.bias = bias
        self.has_bias = bias is not None
        self.activation = activation
        self.has_act = activation is not None
        self.bias_add = P.BiasAdd()
        self.sub = P.Sub()
        # Zero int8 offset subtracted from the weight in the bias path; kept as a
        # Parameter so it is serialized with the exported model.
        self.weight_offset = Parameter(np.zeros(1, dtype=np.int8), name='weight_offset')

    def construct(self, x):
        """Quant -> int8 core op (+bias) -> dequant -> cast -> optional activation."""
        x = self.quant(x)
        if self.has_bias:
            # NOTE(review): weight_offset is initialized to zeros and never updated
            # here, so this subtraction is a no-op unless a checkpoint overrides it
            # — confirm against the Ascend deploy pipeline.
            weight = self.sub(self.weight, self.weight_offset)
            x = self.core_op(x, weight)
            x = self.bias_add(x, self.bias)
        else:
            x = self.core_op(x, self.weight)
        # Scale the int32 accumulation back to float using the fused dequant parameter.
        x = self.dequant(x, self.dequant_scale)
        x = F.cast(x, mstype.float32)
        if self.has_act:
            x = self.activation(x)
        return x

    def extend_repr(self):
        """Summarize ops and parameter shapes for `repr`."""
        s = f'quant={self.quant}, core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
        if self.has_bias:
            s += f', bias=shape[{self.bias.shape}]'
        if self.has_act:
            s += f', activation={self.activation}'
        s += f', dequant={self.dequant}'
        return s
class QuantMindirBlock(Cell):
    """A quant binary block of Conv/Dense, activation layer for export MINDIR model.

    Instead of computing in int8 like `QuantBlock`, this cell keeps the float
    computation and stamps all quantization parameters onto the core primitive
    as attributes, so the MINDIR consumer can re-quantize.

    Args:
        core_op (Primitive): The operation primitive (conv or matmul).
        weight (Tensor): The weight of the cell.
        bias (Tensor): The bias of the cell. Default: None.
        activation (Cell): The activation applied to the output of the layer,
            eg. `nn.ReLU`. Default: None.
        param_dict (dict): Quantization info (min/max, narrow-range, num-bits,
            mean/std) gathered by `ExportToQuantInferNetwork`.
    """

    def __init__(self,
                 core_op,
                 weight,
                 bias=None,
                 activation=None,
                 param_dict=None):
        super(QuantMindirBlock, self).__init__()
        self.core_op = core_op
        if activation is not None:
            self.core_op.add_prim_attr("activation_name", activation.__class__.__name__)
        # Weight (filter) fake-quant range is always present.
        self.core_op.add_prim_attr("filter_maxq", Tensor(param_dict["filter_maxq"]))
        self.core_op.add_prim_attr("filter_minq", Tensor(param_dict["filter_minq"]))
        # Output range exists only when a fake-quant cell followed this layer.
        if param_dict["output_maxq"] is not None:
            self.core_op.add_prim_attr("output_maxq", Tensor(param_dict["output_maxq"]))
            self.core_op.add_prim_attr("output_minq", Tensor(param_dict["output_minq"]))
        self.core_op.add_prim_attr("symmetric", Tensor(param_dict["symmetric"]))
        if hasattr(core_op, 'pad_mode'):
            self.core_op.add_prim_attr("pad_mode", core_op.pad_mode)
        # Activations are always quantized to 8 bits; weights use the configured width.
        self.core_op.add_prim_attr("act_num_bits", Tensor(8))
        self.core_op.add_prim_attr("weight_num_bits", Tensor(param_dict["weight_num_bits"]))
        self.core_op.add_prim_attr("weight_narrow_range", Tensor(param_dict["weight_narrow_range"]))
        if param_dict["input_narrow_range"] is not None:
            self.core_op.add_prim_attr("input_narrow_range", Tensor(param_dict["input_narrow_range"]))
        if param_dict["output_narrow_range"] is not None:
            self.core_op.add_prim_attr("output_narrow_range", Tensor(param_dict["output_narrow_range"]))
        # The string 'None' (not the None object) is the sentinel meaning "first
        # layer quantized from raw input": export mean/std instead of a range.
        if param_dict["input_maxq"] == 'None':
            self.core_op.add_prim_attr("mean", Tensor(param_dict["mean"]))
            self.core_op.add_prim_attr("std_dev", Tensor(param_dict["std_dev"]))
        elif param_dict["input_maxq"] is not None:
            self.core_op.add_prim_attr("input_maxq", Tensor(param_dict["input_maxq"]))
            self.core_op.add_prim_attr("input_minq", Tensor(param_dict["input_minq"]))
        self.weight = weight
        self.bias = bias
        self.has_bias = bias is not None
        self.activation = activation
        self.has_act = activation is not None
        self.bias_add = P.BiasAdd()

    def construct(self, x):
        """Run the core op (+bias) and the optional activation in float."""
        if self.has_bias:
            x = self.core_op(x, self.weight)
            x = self.bias_add(x, self.bias)
        else:
            x = self.core_op(x, self.weight)
        if self.has_act:
            x = self.activation(x)
        return x

    def extend_repr(self):
        """Summarize the core op and parameter shapes for `repr`."""
        s = f'core_op={type(self.core_op)}, weight=shape[{self.weight.shape}]'
        if self.has_bias:
            s += f', bias=shape[{self.bias.shape}]'
        if self.has_act:
            s += f', activation={self.activation}'
        return s
class ExportToQuantInferNetwork:
    """
    Convert quantization aware network to infer network.

    Walks the (deep-copied) QAT network, replaces each quantized Conv/Dense
    subcell with a deploy block (`QuantBlock` for Ascend, `QuantMindirBlock`
    for MINDIR export) and strips/folds the fake-quant cells.

    Args:
        network (Cell): MindSpore quantization aware training network.
        inputs (Tensor): Input tensors of the `quantization aware training network`.
        mean (int, float): The mean of input data after preprocessing, used for quantizing the first layer of network.
            Default: 127.5.
        std_dev (int, float): The variance of input data after preprocessing, used for quantizing the first layer
            of network. Default: 127.5.
        is_mindir (bool): Whether export MINDIR format. Default: False.

    Returns:
        Cell, Infer network.
    """

    def __init__(self, network, mean, std_dev, *inputs, is_mindir=False):
        network = Validator.check_isinstance('network', network, (nn.Cell,))
        self.data_type = mstype.int8
        # Work on a deep copy so the caller's training network is untouched;
        # keep a second pristine copy for name-based fake-quant lookups.
        self.network = copy.deepcopy(network)
        self.network_bk = copy.deepcopy(network)
        self.get_inputs_table(inputs)
        self.mean = mean
        self.std_dev = std_dev
        self.is_mindir = is_mindir
        # Most recently converted Conv/Dense block; a following activation or
        # observer cell attaches its output min/max onto this block's core op.
        self.upcell = None

    def get_inputs_table(self, inputs):
        """Get the input quantization parameters of quantization cell for quant export."""
        phase_name = 'export_quant'
        # Compile (without conversion) so the executor can trace which
        # fake-quant cell feeds each weight observer.
        graph_id, _ = _executor.compile(self.network, *inputs, phase=phase_name, do_convert=False)
        self.quant_info_table = _executor.fetch_info_for_quant_export(graph_id)

    def run(self):
        """Start to convert."""
        self.network.update_cell_prefix()
        network = self.network
        # The wrapper that fake-quantizes the raw input is dropped: the deploy
        # blocks carry their own input quant parameters.
        if isinstance(network, _AddFakeQuantInput):
            network = network.network
        network = self._convert_quant2deploy(network)
        return network

    def _get_quant_block(self, cell_core, activation, fake_quant_a_out):
        """convert network's quant subcell to deploy subcell"""
        scale_a_in, zp_a_in, scale_w, zp_w, param_dict = self.__get_quant_param(cell_core, fake_quant_a_out)

        # Build the `Quant` `Dequant` op.
        # Quant only support perlayer version. Need check here.
        quant_op = inner.Quant(1 / float(scale_a_in), float(zp_a_in))
        scale_deq = self.__get_dequant_scale(scale_a_in, scale_w)
        dequant_op = inner.Dequant()

        # Unwrap the activation to its original (non-fake-quant) cell.
        if isinstance(activation, _AddFakeQuantAfterSubCell):
            activation = activation.subcell
        elif hasattr(activation, "get_origin"):
            activation = activation.get_origin()

        # get op
        if isinstance(cell_core, quant.DenseQuant):
            op_core = P.MatMul()
        else:
            op_core = cell_core.conv

        # get the `weight` and `bias`
        weight, bias, weight_b, bias_b = self.__get_weight_bias(cell_core, scale_a_in, scale_w, zp_w)

        if self.is_mindir:
            block = QuantMindirBlock(op_core, weight_b, bias_b, activation, param_dict)
        else:
            block = QuantBlock(op_core, weight, quant_op, dequant_op, scale_deq, bias, activation)
        return block

    def _get_input_quant_param(self, minq_name, np_type, param_dict):
        """get input quant parameter for quant block"""
        # Strip the trailing '.minq' to get the fake-quant cell's name prefix.
        fake_quant_a_in_prefix = minq_name[:-5]
        cells = self.network_bk.cells_and_names()
        # NOTE(review): if no cell name ends with the prefix, fake_quant_a_in is
        # never bound and the line below raises NameError — confirm the info
        # table can only reference cells present in the backup network.
        for cell in cells:
            if cell[0].endswith(fake_quant_a_in_prefix):
                fake_quant_a_in = cell[1]
                break
        scale_a_in, zp_a_in, param_dict["input_maxq"], param_dict["input_minq"] = \
            quant_utils.scale_zp_max_min_from_fake_quant_cell(fake_quant_a_in, np_type)
        param_dict["input_narrow_range"] = fake_quant_a_in.narrow_range
        return scale_a_in, zp_a_in

    def __get_quant_param(self, cell_core, fake_quant_a_out):
        """get parameter for quant block"""
        w_minq_name = cell_core.fake_quant_weight.minq.name
        w_maxq_name = cell_core.fake_quant_weight.maxq.name
        np_type = mstype.dtype_to_nptype(self.data_type)
        param_dict = dict()
        param_dict["filter_maxq"] = None
        param_dict["filter_minq"] = None
        param_dict["output_maxq"] = None
        param_dict["output_minq"] = None
        param_dict["input_maxq"] = None
        param_dict["input_minq"] = None
        param_dict["input_narrow_range"] = None
        param_dict["output_narrow_range"] = None
        param_dict["weight_narrow_range"] = cell_core.fake_quant_weight.narrow_range
        param_dict["mean"] = self.mean
        param_dict["std_dev"] = self.std_dev
        param_dict["symmetric"] = cell_core.fake_quant_weight.symmetric
        param_dict["weight_num_bits"] = cell_core.fake_quant_weight.num_bits

        scale_w, zp_w, param_dict["filter_maxq"], param_dict["filter_minq"] = \
            quant_utils.scale_zp_max_min_from_fake_quant_cell(cell_core.fake_quant_weight, np_type)
        if fake_quant_a_out is not None:
            _, _, param_dict["output_maxq"], param_dict["output_minq"] = \
                quant_utils.scale_zp_max_min_from_fake_quant_cell(fake_quant_a_out, np_type)
            param_dict["output_narrow_range"] = fake_quant_a_out.narrow_range

        # The compiled-graph table maps a weight observer name to the
        # fake-quant cell that produced this layer's input.
        info = self.quant_info_table.get(w_minq_name, None)
        if not info:
            info = self.quant_info_table.get(w_maxq_name, None)
        if info:
            _, minq_name = info
            if minq_name == 'input':
                # First layer: derive quant params from preprocessing mean/std.
                # The string 'None' is a deliberate sentinel (see QuantMindirBlock).
                scale_a_in, zp_a_in, param_dict["input_maxq"], param_dict["input_minq"] = \
                    (1 / self.std_dev), round(self.mean), 'None', 'None'
            else:
                scale_a_in, zp_a_in = self._get_input_quant_param(minq_name, np_type, param_dict)
        else:
            # skip quant layer
            scale_a_in, zp_a_in = 1.0, 0.0
        return scale_a_in, zp_a_in, scale_w, zp_w, param_dict

    @staticmethod
    def __get_dequant_scale(scale_a_in, scale_w):
        """Get dequant scale"""
        scale_deq = scale_a_in * scale_w

        # fuse parameter
        # |--------|47:40|--------|39:32|--------|31:0|
        #  offset_w [8]   shift_N [8]    deq_scale [32]
        # Only the low 32 bits (the float32 bit pattern of the scale) are
        # filled here; offset_w and shift_N stay zero.
        float32_deq_scale = scale_deq.astype(np.float32)
        uint32_deq_scale = np.frombuffer(float32_deq_scale, np.uint32)
        scale_length = scale_deq.size  # channel
        dequant_param = np.zeros(scale_length, dtype=np.uint64)
        for index in range(scale_length):
            dequant_param[index] += uint32_deq_scale[index]
        scale_deq = Tensor(dequant_param, mstype.uint64)
        return scale_deq

    def __get_weight_bias(self, cell_core, scale_a_in, scale_w, zp_w):
        """Get weight and bias for quantizaiton"""
        np_type = mstype.dtype_to_nptype(self.data_type)
        weight = cell_core.weight.data.asnumpy()
        bias = None
        # Fold batchnorm statistics into weight/bias where the cell fuses them.
        if isinstance(cell_core, (quant.DenseQuant, quant.Conv2dQuant)):
            if cell_core.has_bias:
                bias = cell_core.bias.data.asnumpy()
        elif isinstance(cell_core, (quant.Conv2dBnFoldQuant, quant.Conv2dBnFoldQuantOneConv)):
            weight, bias = quant_utils.fold_batchnorm(weight, cell_core)
        elif isinstance(cell_core, quant.Conv2dBnWithoutFoldQuant):
            weight, bias = quant_utils.without_fold_batchnorm(weight, cell_core)
        # Keep the float (pre-quantization) copies for MINDIR export.
        weight_b = weight
        bias_b = bias

        # apply the quant
        quant_min, quant_max = quant_utils.get_quant_min_max(np_type,
                                                             cell_core.fake_quant_weight.num_bits,
                                                             cell_core.fake_quant_weight.narrow_range)
        weight = quant_utils.weight2int(weight, scale_w, zp_w, quant_min, quant_max)
        if bias is not None:
            # Bias is rescaled into the int32 accumulation domain.
            bias = Tensor(bias / scale_a_in / scale_w, mstype.int32)

        if isinstance(cell_core, quant.DenseQuant):
            # MatMul expects the transposed layout compared to DenseQuant's weight.
            weight = np.transpose(weight)
            weight_b = np.transpose(weight_b)

        weight = Tensor(weight, self.data_type)
        weight_b = Tensor(weight_b)
        if bias_b is not None:
            bias_b = Tensor(bias_b, mstype.float32)
        return weight, bias, weight_b, bias_b

    def _add_output_min_max_for_op(self, origin_op, fake_quant_cell):
        """add output quant info for quant op for export mindir."""
        if self.is_mindir:
            # Only stamp once; skip ops that already carry output quant info.
            if isinstance(origin_op, ops.Primitive) and not hasattr(origin_op, 'output_minq'):
                np_type = mstype.dtype_to_nptype(self.data_type)
                _, _, maxq, minq = quant_utils.scale_zp_max_min_from_fake_quant_cell(fake_quant_cell, np_type)
                origin_op.add_prim_attr('output_maxq', Tensor(maxq))
                origin_op.add_prim_attr('output_minq', Tensor(minq))

    def _convert_subcell(self, network, change, name, subcell):
        """Convert subcell to ant subcell."""
        if subcell is not None and hasattr(subcell, "fake_quant_weight"):
            new_subcell = self._get_quant_block(subcell, None, None)
            prefix = subcell.param_prefix
            new_subcell.update_parameters_name(prefix + '.')
            self.upcell = new_subcell
            network.insert_child_to_cell(name, new_subcell)
            change = True
        return network, change

    def _convert_conv(self, network, change, name, subcell):
        """Convert subcell to ant subcell for conv."""
        cell_core = subcell.conv
        activation = subcell.activation
        fake_quant_act = None
        if hasattr(activation, 'fake_quant_act_before'):
            fake_quant_act = activation.fake_quant_act_before
        elif hasattr(activation, 'fake_quant_act'):
            fake_quant_act = activation.fake_quant_act
        if cell_core is not None and hasattr(cell_core, "fake_quant_weight"):
            new_subcell = self._get_quant_block(cell_core, activation, fake_quant_act)
            # Activation is fused into the block, so no later cell should
            # attach output ranges to it.
            self.upcell = None
            prefix = subcell.param_prefix
            new_subcell.update_parameters_name(prefix + '.')
            network.insert_child_to_cell(name, new_subcell)
            change = True
        return network, change

    def _convert_dense(self, network, change, name, subcell):
        """Convert subcell to ant subcell for dense."""
        cell_core = subcell.dense
        activation = subcell.activation
        fake_quant_act = None
        if hasattr(activation, 'fake_quant_act_before'):
            fake_quant_act = activation.fake_quant_act_before
        elif hasattr(activation, 'fake_quant_act'):
            fake_quant_act = activation.fake_quant_act
        if cell_core is not None and hasattr(cell_core, "fake_quant_weight"):
            new_subcell = self._get_quant_block(cell_core, activation, fake_quant_act)
            prefix = subcell.param_prefix
            new_subcell.update_parameters_name(prefix + '.')
            network.insert_child_to_cell(name, new_subcell)
            self.upcell = None
            change = True
        return network, change

    def _convert_act(self, subcell):
        """Convert subcell to ant subcell for activation."""
        activation = subcell.get_origin()
        if isinstance(activation, nn.ReLU):
            self._add_output_min_max_for_op(activation.relu, subcell.fake_quant_act)
        elif isinstance(activation, nn.ReLU6):
            self._add_output_min_max_for_op(activation.relu6, subcell.fake_quant_act)
        # The preceding quant block's core op inherits this activation's range.
        if self.upcell:
            self._add_output_min_max_for_op(self.upcell.core_op, subcell.fake_quant_act)
        return activation

    def _convert_add(self, subcell):
        """Convert subcell to ant subcell for add."""
        if isinstance(subcell.add, _AddFakeQuantAfterSubCell):
            add_op = subcell.add.subcell
            subcell.__delattr__("add")
            subcell.__setattr__("add", add_op)
        add_op = subcell.add
        self._add_output_min_max_for_op(add_op, subcell.fake_quant_act)
        # Replace the fake-quant with identity to keep the construct graph valid.
        subcell.__delattr__("fake_quant_act")
        subcell.__setattr__("fake_quant_act", P.identity())

    def _convert_observer(self, network, name, subcell):
        """Convert subcell to ant subcell for FakeQuantWithMinMaxObserver."""
        if self.upcell:
            self._add_output_min_max_for_op(self.upcell.core_op, subcell)
        # The observer itself becomes identity in the deploy network.
        network.__delattr__(name)
        network.__setattr__(name, P.identity())

    def _convert_fake_quant_after_cell(self, network, name, subcell):
        """Convert subcell to ant subcell for _AddFakeQuantAfterSubCell."""
        op = subcell.subcell
        self._add_output_min_max_for_op(op, subcell.fake_quant_act)
        # Unwrap: keep only the original op, dropping the fake-quant wrapper.
        network.__delattr__(name)
        network.__setattr__(name, op)

    def _convert_core_quant_subcell(self, network, change, name, subcell):
        """Convert subcell to ant subcell for conv and dense."""
        is_core_subcell = True
        if isinstance(subcell, nn.Conv2dBnAct):
            network, change = self._convert_conv(network, change, name, subcell)
        elif isinstance(subcell, nn.DenseBnAct):
            network, change = self._convert_dense(network, change, name, subcell)
        elif isinstance(subcell, (quant.Conv2dBnFoldQuant, quant.Conv2dBnFoldQuantOneConv,
                                  quant.Conv2dBnWithoutFoldQuant, quant.Conv2dQuant, quant.DenseQuant)):
            network, change = self._convert_subcell(network, change, name, subcell)
        else:
            is_core_subcell = False
        return is_core_subcell, network, change

    def _convert_other_quant_subcell(self, network, change, name, subcell):
        """Convert subcell to ant subcell for cell except conv and dense."""
        is_other_subcell = True
        if isinstance(subcell, nn.ActQuant) and hasattr(subcell, "get_origin"):
            activation = self._convert_act(subcell)
            network.insert_child_to_cell(name, activation)
            change = True
        elif isinstance(subcell, nn.TensorAddQuant):
            self._convert_add(subcell)
        elif isinstance(subcell, quant.FakeQuantWithMinMaxObserver):
            self._convert_observer(network, name, subcell)
        elif isinstance(subcell, _AddFakeQuantAfterSubCell):
            self._convert_fake_quant_after_cell(network, name, subcell)
            change = True
        else:
            is_other_subcell = False
        return is_other_subcell, network, change

    def _convert_quant2deploy(self, network):
        """Convert network's all quant subcell to deploy subcell."""
        cells = network.name_cells()
        change = False
        for name in cells:
            subcell = cells[name]
            if subcell == network:
                continue
            is_core_quant_subcell, network, change = self._convert_core_quant_subcell(network, change, name, subcell)
            is_other_quant_subcell, network, change = self._convert_other_quant_subcell(network, change, name, subcell)
            if not is_core_quant_subcell and not is_other_quant_subcell:
                # Unrecognized container: reset upcell and recurse into it.
                self.upcell = None
                self._convert_quant2deploy(subcell)
        # Rebuild SequentialCell's internal list so replaced children take effect.
        if isinstance(network, nn.SequentialCell) and change:
            network.cell_list = list(network.cells())
        return network