@@ -164,7 +164,7 @@ def train_on_gpu():
     # convert fusion network to quantization aware network
     quantizer = QuantizationAwareTraining(bn_fold=True,
                                           per_channel=[True, False],
-                                          symmetric=[True, False],
+                                          symmetric=[False, False],
                                           freeze_bn=1000000,
                                           quant_delay=step_size * 2)
     network = quantizer.quantize(network)
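
Note: in MindSpore's QuantizationAwareTraining the two-element symmetric list configures weights and activations respectively, so this change switches the weight quantizer from symmetric to asymmetric mode. The snippet below is only a sketch of how the quantized network is typically trained after quantizer.quantize(network); it assumes the surrounding train_on_gpu() script already defines ds_train, epoch_size and step_size, and it uses MindSpore's standard Model API rather than code taken from this patch.

    # Sketch (not part of the patch): train the quantization-aware network as usual.
    from mindspore import Model, nn
    from mindspore.train.callback import LossMonitor

    # Assumed classification loss and illustrative optimizer hyperparameters.
    loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    optimizer = nn.Momentum(network.trainable_params(), learning_rate=0.01, momentum=0.9)

    model = Model(network, loss_fn=loss, optimizer=optimizer, metrics={"acc": nn.Accuracy()})
    # ds_train and epoch_size are assumed to come from the surrounding script.
    model.train(epoch_size, ds_train, callbacks=[LossMonitor(per_print_times=step_size)])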