Browse Source

Fix Fp16 bug

tags/v1.6.0
nizzan 4 years ago
parent
commit
60195d9dc9
5 changed files with 9 additions and 13 deletions
  1. +1
    -1
      mindspore/lite/examples/export_models/prepare.sh
  2. +6
    -6
      mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc
  3. +0
    -4
      mindspore/lite/test/st/scripts/run_net_train.sh
  4. +0
    -1
      mindspore/lite/test/ut/src/runtime/kernel/arm/cxx_api/model_test.cc
  5. +2
    -1
      mindspore/lite/tools/benchmark_train/net_train.cc

+ 1
- 1
mindspore/lite/examples/export_models/prepare.sh View File

@@ -59,7 +59,7 @@ export_result_file=export_result.txt
echo ' ' > ${export_result_file}


-CLOUD_MODEL_ZOO=../../../../model_zoo/
+CLOUD_MODEL_ZOO=../../../../../models/

checkopts "$@"



+ 6
- 6
mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc View File

@@ -96,6 +96,12 @@ int ConvolutionFP16CPUKernel::Prepare() {
CHECK_LESS_RETURN(in_tensors_.size(), 2);
CHECK_LESS_RETURN(out_tensors_.size(), 1);
UpdateOriginWeightAndBias();
+#ifdef ENABLE_ARM64
+row_tile_ = C16NUM;
+#else
+row_tile_ = C12NUM;
+#endif
+col_tile_ = C8NUM;
if (op_parameter_->is_train_session_) {
auto filter_tensor = in_tensors_.at(kWeightIndex);
CHECK_NULL_RETURN(filter_tensor);
@@ -106,12 +112,6 @@ int ConvolutionFP16CPUKernel::Prepare() {
int pack_weight_size = oc8 * in_channel * kernel_plane;
set_workspace_size(pack_weight_size * sizeof(float16_t));
}
-#ifdef ENABLE_ARM64
-row_tile_ = C16NUM;
-#else
-row_tile_ = C12NUM;
-#endif
-col_tile_ = C8NUM;
auto ret = InitConvWeightBias();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Init weight bias failed.";


+ 0
- 4
mindspore/lite/test/st/scripts/run_net_train.sh View File

@@ -360,10 +360,6 @@ function Run_arm() {
echo "rm -f ${inference_file}*" >> ${run_arm_log_file}
echo "rm -f ${inference_file}*" >> ${adb_cmd_run_file}
fi
-if [ ${enable_fp16} = "true" ]; then
-export_file=""
-inference_file=""
-fi
adb -s ${device_id} shell < ${adb_cmd_run_file} >> ${run_arm_log_file}
adb_cmd=$(cat <<-ENDM
export LD_LIBRARY_PATH=./:/data/local/tmp/:${tmp_dir};./benchmark_train \


+ 0
- 1
mindspore/lite/test/ut/src/runtime/kernel/arm/cxx_api/model_test.cc View File

@@ -220,7 +220,6 @@ TEST_F(TestCxxApiLiteModel, set_weights_FAILURE) {
cpu_context->SetEnableFP16(true);
context->MutableDeviceInfo().push_back(cpu_context);
auto train_cfg = std::make_shared<TrainCfg>();
-train_cfg->mix_precision_cfg_.is_raw_mix_precision_ = true;

ASSERT_TRUE(Serialization::Load("./nets/mix_lenet_tod.ms", ModelType::kMindIR, &graph) == kSuccess);
ASSERT_TRUE(model.Build(GraphCell(graph), context, train_cfg) == kSuccess);


+ 2
- 1
mindspore/lite/tools/benchmark_train/net_train.cc View File

@@ -348,6 +348,7 @@ std::unique_ptr<session::LiteSession> NetTrain::CreateAndRunNetworkForTrain(cons
}
} else {
MS_LOG(INFO) << "CreateTrainSession from model file" << filename.c_str();
+std::cout << "CreateTrainSession from model file" << filename.c_str() << std::endl;
session = std::unique_ptr<session::LiteSession>(
session::TrainSession::CreateTrainSession(filename, &context, true, &train_cfg));
if (session == nullptr) {
@@ -424,7 +425,7 @@ int NetTrain::CreateAndRunNetwork(const std::string &filename, const std::string
if (train_session) {
session = CreateAndRunNetworkForTrain(filename, bb_filename, context, train_cfg, epochs);
if (session == nullptr) {
-MS_LOG(ERROR) << "CreateAndRunNetworkForInference failed.";
+MS_LOG(ERROR) << "CreateAndRunNetworkForTrain failed.";
return RET_ERROR;
}
} else {


Loading…
Cancel
Save