From: @yonibaehr_admin
Tag: tags/v1.2.0-rc1
@@ -81,6 +81,7 @@ constexpr auto kFreezeBn = "freeze_bn";
 constexpr auto kGateOrder = "gate_order";
 constexpr auto kGlobal = "global";
 constexpr auto kGrad = "grad";
+constexpr auto kIsGrad = "is_grad";
 constexpr auto kGradientScale = "gradient_scale";
 constexpr auto kGradX = "grad_x";
 constexpr auto kGradY = "grad_y";
@@ -25,12 +25,14 @@
 namespace mindspore {
 namespace ops {
-void SparseSoftmaxCrossEntropyWithLogits::Init(const bool grad) { this->set_grad(grad); }
+void SparseSoftmaxCrossEntropyWithLogits::Init(const bool is_grad) { this->set_is_grad(is_grad); }
-void SparseSoftmaxCrossEntropyWithLogits::set_grad(const bool grad) { this->AddAttr(kGrad, MakeValue(grad)); }
+void SparseSoftmaxCrossEntropyWithLogits::set_is_grad(const bool is_grad) {
+  this->AddAttr(kIsGrad, MakeValue(is_grad));
+}
-bool SparseSoftmaxCrossEntropyWithLogits::get_grad() const {
-  auto value_ptr = GetAttr(kGrad);
+bool SparseSoftmaxCrossEntropyWithLogits::get_is_grad() const {
+  auto value_ptr = GetAttr(kIsGrad);
   return GetValue<bool>(value_ptr);
 }
@@ -49,7 +51,7 @@ AbstractBasePtr SparseSoftmaxCrossEntropyWithLogitsInfer(const abstract::AnalysisEnginePtr &,
   auto input_shape =
     CheckAndConvertUtils::ConvertShapePtrToShape("input_shape", input_args[0]->BuildShape(), prim_name);
   std::vector<int64_t> output_shape;
-  if (sparse_softmax_cross_entropy_prim->get_grad() != 0) {
+  if (sparse_softmax_cross_entropy_prim->get_is_grad() != 0) {
     output_shape = input_shape;
   } else {
     output_shape.push_back(1);
@@ -31,8 +31,8 @@ class SparseSoftmaxCrossEntropyWithLogits : public PrimitiveC {
   ~SparseSoftmaxCrossEntropyWithLogits() = default;
   MS_DECLARE_PARENT(SparseSoftmaxCrossEntropyWithLogits, PrimitiveC);
   void Init(const bool is_grad = false);
-  void set_grad(const bool is_grad);
-  bool get_grad() const;
+  void set_is_grad(const bool is_grad);
+  bool get_is_grad() const;
 };
 AbstractBasePtr SparseSoftmaxCrossEntropyWithLogitsInfer(const abstract::AnalysisEnginePtr &,
                                                          const PrimitivePtr &primitive,
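
Note: the grad attribute is renamed to is_grad end to end, from the attribute key through the accessors. For reference, a minimal caller sketch of the renamed accessors (hypothetical usage, not part of this diff):

  // Hypothetical caller exercising the renamed accessors.
  mindspore::ops::SparseSoftmaxCrossEntropyWithLogits op;
  op.Init(true);                    // forwards to set_is_grad(true)
  bool is_grad = op.get_is_grad();  // reads back the kIsGrad attribute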
@@ -23,7 +23,7 @@ ifeq ($(TARGET),arm64)
 CXX := ${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64/bin/clang++
 CFLAGS += --target=aarch64-none-linux-android21 --gcc-toolchain=${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64 --sysroot=${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64/sysroot -fdata-sections -ffunction-sections
 LDFLAGS := --target=aarch64-none-linux-android21 --gcc-toolchain=${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64 --sysroot=${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64/sysroot -Wl,--gc-sections
-LDFLAGS += -L$(MSDIR) -l$(MSLIB) $(LMDLIB) -pthread -llog -latomic -lm $(LHIAILIB)
+LDFLAGS += -L$(MSDIR) -l$(MSLIB) $(LMDLIB) -pthread -llog -latomic -lm $(LHIAILIB) -Wl,-rpath,$(MSDIR)
 else
 CFLAGS += -g
 LDFLAGS := -L$(MSDIR) -l$(MSLIB) $(LMDLIB) -lpthread -Wl,-rpath,$(MSDIR)
@@ -42,7 +42,7 @@ clean:
 mrproper:
-	rm -rf package* msl src/*.o bin/ model/*.mindir model/*.ms model/*.so model/converter_lite
+	rm -rf package* msl src/*.o bin/ model/*.mindir model/*.ms model/*.so* model/converter_lite
 %.o:%.cc
 	$(CXX) $(CFLAGS) -c $< -o $@
@@ -17,6 +17,7 @@
 #include "src/net_runner.h"
 #include <math.h>
 #include <getopt.h>
+#include <stdio.h>
 #include <cstring>
 #include <iostream>
 #include <fstream>
@@ -35,7 +36,9 @@
 using mindspore::dataset::Dataset;
 using mindspore::dataset::Mnist;
 using mindspore::dataset::TensorOperation;
+using mindspore::dataset::transforms::TypeCast;
+using mindspore::dataset::vision::Normalize;
+using mindspore::dataset::vision::Resize;
 using mindspore::lite::AccuracyMetrics;
 using mindspore::session::TrainLoopCallBack;
 using mindspore::session::TrainLoopCallBackData;
@@ -96,10 +99,9 @@ void NetRunner::InitAndFigureInputs() {
   context.device_list_[0].device_type_ = mindspore::lite::DT_CPU;
   context.thread_num_ = 2;
-  auto session = mindspore::session::TrainSession::CreateSession(ms_file_, &context);
-  loop_ = mindspore::session::TrainLoop::CreateTrainLoop(session, &context);
-  session_ = loop_->train_session();
+  session_ = mindspore::session::TrainSession::CreateSession(ms_file_, &context);
+  MS_ASSERT(nullptr != session_);
+  loop_ = mindspore::session::TrainLoop::CreateTrainLoop(session_, &context);
   acc_metrics_ = std::shared_ptr<AccuracyMetrics>(new AccuracyMetrics);
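
The ordering fix matters here: the session is now created and validated before it is handed to CreateTrainLoop, rather than being fished back out of the loop afterwards. A hedged sketch of the intended pattern (the explicit null check is illustrative only; the sample itself uses MS_ASSERT):

  // Sketch: validate the session before the loop takes ownership of it.
  session_ = mindspore::session::TrainSession::CreateSession(ms_file_, &context);
  if (session_ == nullptr) {
    std::cerr << "CreateSession failed for " << ms_file_ << std::endl;
    return;  // hypothetical early-out
  }
  loop_ = mindspore::session::TrainLoop::CreateTrainLoop(session_, &context);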
@@ -111,12 +113,12 @@
 float NetRunner::CalculateAccuracy(int max_tests) {
   test_ds_ = Mnist(data_dir_ + "/test", "all");
-  std::shared_ptr<TensorOperation> typecast_f = mindspore::dataset::transforms::TypeCast("float32");
-  std::shared_ptr<TensorOperation> resize = mindspore::dataset::vision::Resize({32, 32});
-  test_ds_ = test_ds_->Map({resize, typecast_f}, {"image"});
+  TypeCast typecast_f("float32");
+  Resize resize({32, 32});
+  test_ds_ = test_ds_->Map({&resize, &typecast_f}, {"image"});
-  std::shared_ptr<TensorOperation> typecast = mindspore::dataset::transforms::TypeCast("int32");
-  test_ds_ = test_ds_->Map({typecast}, {"label"});
+  TypeCast typecast("int32");
+  test_ds_ = test_ds_->Map({&typecast}, {"label"});
   test_ds_ = test_ds_->Batch(32, true);
   Rescaler rescale(255.0);
@@ -130,14 +132,14 @@ float NetRunner::CalculateAccuracy(int max_tests) {
 int NetRunner::InitDB() {
   train_ds_ = Mnist(data_dir_ + "/train", "all");
-  std::shared_ptr<TensorOperation> typecast_f = mindspore::dataset::transforms::TypeCast("float32");
-  std::shared_ptr<TensorOperation> resize = mindspore::dataset::vision::Resize({32, 32});
-  // std::shared_ptr<TensorOperation> rescale_op = Normalize({0.0, 0.0, 0.0}, {255.0, 255.0, 255.0});
-  // std::shared_ptr<TensorOperation> rescale_op = mindspore::dataset::vision::Rescale(255.0, 0.0);
-  train_ds_ = train_ds_->Map({resize, typecast_f}, {"image"});
+  TypeCast typecast_f("float32");
+  Resize resize({32, 32});
+  // Normalize rescale_op({0.0, 0.0, 0.0}, {255.0, 255.0, 255.0}); pending on Minddata operator
+  // Rescale rescale_op(255.0, 0.0);
+  train_ds_ = train_ds_->Map({&resize, &typecast_f}, {"image"});
-  std::shared_ptr<TensorOperation> typecast = mindspore::dataset::transforms::TypeCast("int32");
-  train_ds_ = train_ds_->Map({typecast}, {"label"});
+  TypeCast typecast("int32");
+  train_ds_ = train_ds_->Map({&typecast}, {"label"});
   train_ds_ = train_ds_->Shuffle(2);
   train_ds_ = train_ds_->Batch(32, true);
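
Both CalculateAccuracy and InitDB switch from heap-allocated std::shared_ptr<TensorOperation> transforms to stack objects passed by address. A minimal sketch of the new pattern (ds is a placeholder dataset handle); note the stack objects must outlive any pipeline execution that references them:

  // Sketch: stack-allocated transforms, passed to Map() by pointer.
  TypeCast typecast_f("float32");
  Resize resize({32, 32});
  ds = ds->Map({&resize, &typecast_f}, {"image"});  // order: resize first, then cast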
@@ -15,6 +15,7 @@
  */
 #include "nnacl/infer/sparse_softmax_cross_entropy_with_logits_infer.h"
+#include "nnacl/fp32_grad/softmax_grad.h"
 int SparseSoftmaxCrossEntropyWithLogitsInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
                                                   size_t outputs_size, OpParameter *parameter) {
@@ -13,11 +13,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#ifndef MINDSPORE_LITE_NNACL_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H
-#define MINDSPORE_LITE_NNACL_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H
+#ifndef MINDSPORE_LITE_NNACL_INFER_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H_
+#define MINDSPORE_LITE_NNACL_INFER_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H_
 #include "nnacl/infer/common_infer.h"
-#include "nnacl/fp32_grad/softmax_grad.h"
 #ifdef __cplusplus
 extern "C" {
@@ -29,4 +28,4 @@ int SparseSoftmaxCrossEntropyWithLogitsInferShape(const TensorC *const *inputs,
 #ifdef __cplusplus
 }
 #endif
-#endif  // MINDSPORE_LITE_NNACL_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H
+#endif  // MINDSPORE_LITE_NNACL_INFER_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_INFER_H_
@@ -910,7 +910,7 @@ table SpaceToDepth {
 }
 table SparseSoftmaxCrossEntropyWithLogits {
-    grad: bool;
+    is_grad: bool;
 }
 table SparseToDense {
@@ -909,7 +909,7 @@ OP_ATTR_ENUM(format, Format)
 OP_SCHEMA_DEF_END(SpaceToDepth)
 OP_SCHEMA_DEF(SparseSoftmaxCrossEntropyWithLogits)
-OP_ATTR(grad, bool)
+OP_ATTR(is_grad, bool)
 OP_SCHEMA_DEF_END(SparseSoftmaxCrossEntropyWithLogits)
 OP_SCHEMA_DEF(SparseToDense)
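
Since FlatBuffers identifies fields by position rather than by name, renaming grad to is_grad in the schema changes only the generated accessor and leaves serialized models readable; the populate code further down reflects this. A sketch of the regenerated read path:

  // Sketch: the regenerated FlatBuffers accessor follows the new field name.
  auto value = primitive->value_as_SparseSoftmaxCrossEntropyWithLogits();
  bool is_grad = value->is_grad();  // previously value->grad()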
@@ -399,7 +399,7 @@ static RegistryInferShape g_PartialInferShape(mindspore::schema::PrimitiveType_P
 static RegistryInferShape g_MergeInferShape(mindspore::schema::PrimitiveType_Merge, MergeInferShape);
 static RegistryInferShape g_SwitchInferShape(mindspore::schema::PrimitiveType_Switch, SwitchInferShape);
 static RegistryInferShape g_AssertOpInferShape(mindspore::schema::PrimitiveType_Assert, AssertOpInferShape);
-static RegistryInferShape g_SparseSoftmaxCrossEntropyInferShape(
+static RegistryInferShape g_SparseSoftmaxCrossEntropyWithLogitsInferShape(
   mindspore::schema::PrimitiveType_SparseSoftmaxCrossEntropyWithLogits, SparseSoftmaxCrossEntropyWithLogitsInferShape);
 static RegistryInferShape g_DropoutInferShape(mindspore::schema::PrimitiveType_Dropout, DropoutInferShape);
 static RegistryInferShape g_PriorBoxInferShape(mindspore::schema::PrimitiveType_PriorBox, PriorBoxInferShape);
@@ -145,29 +145,6 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Init() {
   return RET_OK;
 }
-kernel::LiteKernel *CpuSparseSoftmaxCrossEntropyWithLogitsFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
-                                                                            const std::vector<lite::Tensor *> &outputs,
-                                                                            OpParameter *opParameter,
-                                                                            const lite::InnerContext *ctx,
-                                                                            const kernel::KernelKey &desc) {
-  MS_ASSERT(opParameter != nullptr);
-  MS_ASSERT(desc.type == schema::PrimitiveType_SparseSoftmaxCrossEntropyWithLogits);
-  auto *kernel = new (std::nothrow) SparseSoftmaxCrossEntropyWithLogitsCPUKernel(opParameter, inputs, outputs, ctx);
-  if (kernel == nullptr) {
-    MS_LOG(ERROR) << "new SparseSoftmaxCrossEntropyWithLogitsCPUKernel failed!";
-    free(opParameter);
-    return nullptr;
-  }
-  auto ret = kernel->Init();
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
-                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
-    delete kernel;
-    return nullptr;
-  }
-  return kernel;
-}
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SparseSoftmaxCrossEntropyWithLogits,
-           CpuSparseSoftmaxCrossEntropyWithLogitsFp32KernelCreator)
+           LiteKernelCreator<SparseSoftmaxCrossEntropyWithLogitsCPUKernel>)
 } // namespace mindspore::kernel
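
The hand-rolled creator is replaced by the shared LiteKernelCreator<T> template. For readers unfamiliar with it, a hedged sketch of what such a factory does, reconstructed from the deleted creator rather than from the template's actual definition (the name below is hypothetical):

// Sketch only; the real LiteKernelCreator lives in the lite kernel headers.
template <class T>
kernel::LiteKernel *LiteKernelCreatorSketch(const std::vector<lite::Tensor *> &inputs,
                                            const std::vector<lite::Tensor *> &outputs,
                                            OpParameter *parameter, const lite::InnerContext *ctx,
                                            const kernel::KernelKey &desc) {
  auto *kernel = new (std::nothrow) T(parameter, inputs, outputs, ctx);
  if (kernel == nullptr) {
    free(parameter);  // mirrors the deleted creator's ownership handling
    return nullptr;
  }
  if (kernel->Init() != RET_OK) {
    delete kernel;
    return nullptr;
  }
  return kernel;
}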
@@ -135,7 +135,7 @@ OpParameter *PopulateSparseSoftmaxCrossEntropyWithLogitsParameter(const void *prim) {
   auto primitive = static_cast<const schema::Primitive *>(prim);
   auto value = primitive->value_as_SparseSoftmaxCrossEntropyWithLogits();
   sce_param->op_parameter_.type_ = primitive->value_type();
-  sce_param->is_grad_ = value->grad();
+  sce_param->is_grad_ = value->is_grad();
   return reinterpret_cast<OpParameter *>(sce_param);
 }
@@ -430,6 +430,27 @@ OpParameter *PopulateResizeGradParameter(const void *prim) {
   return reinterpret_cast<OpParameter *>(resize_grad_param);
 }
+OpParameter *PopulateStridedSliceGradParameter(const void *prim) {
+  StridedSliceParameter *strided_slice_param =
+    reinterpret_cast<StridedSliceParameter *>(malloc(sizeof(StridedSliceParameter)));
+  if (strided_slice_param == nullptr) {
+    MS_LOG(ERROR) << "malloc StridedSliceParameter failed.";
+    return nullptr;
+  }
+  memset(strided_slice_param, 0, sizeof(StridedSliceParameter));
+  auto primitive = static_cast<const schema::Primitive *>(prim);
+  auto value = primitive->value_as_StridedSliceGrad();
+  strided_slice_param->op_parameter_.type_ = primitive->value_type();
+  strided_slice_param->begins_mask_ = value->begin_mask();
+  strided_slice_param->ends_mask_ = value->end_mask();
+  strided_slice_param->ellipsisMask_ = value->ellipsis_mask();
+  strided_slice_param->newAxisMask_ = value->new_axis_mask();
+  strided_slice_param->shrinkAxisMask_ = value->shrink_axis_mask();
+  return reinterpret_cast<OpParameter *>(strided_slice_param);
+}
 void PopulateTrainParameters() {
   lite::Registry ApplyMomentumParameterRegistry(schema::PrimitiveType_ApplyMomentum, PopulateApplyMomentumParameter,
                                                 lite::SCHEMA_CUR);
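
This dedicated populater exists because, as the registry change below shows, StridedSliceGrad was previously routed through the generic lite::PopulateStridedSliceParameter, which does not read the StridedSliceGrad table's mask attributes. A hedged sketch of how a caller would consume the result (the free() ownership convention is an assumption):

  // Hypothetical caller: prim points at a schema::Primitive holding StridedSliceGrad.
  OpParameter *param = PopulateStridedSliceGradParameter(prim);
  if (param != nullptr) {
    auto *ssp = reinterpret_cast<StridedSliceParameter *>(param);
    // ssp->begins_mask_ ... ssp->shrinkAxisMask_ are now populated from the schema.
    free(param);  // assumed ownership convention for malloc'd parameters
  }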
@@ -488,7 +509,7 @@ void PopulateTrainParameters() {
   lite::Registry FlattenGradParameterRegistry(schema::PrimitiveType_FlattenGrad, lite::DefaultPopulateParameter,
                                               lite::SCHEMA_CUR);
   lite::Registry StridedSliceGradParameterRegistry(schema::PrimitiveType_StridedSliceGrad,
-                                                   lite::PopulateStridedSliceParameter, lite::SCHEMA_CUR);
+                                                   PopulateStridedSliceGradParameter, lite::SCHEMA_CUR);
   lite::Registry SqrtGradParameterRegistry(schema::PrimitiveType_SqrtGrad, lite::DefaultPopulateParameter,
                                            lite::SCHEMA_CUR);
   lite::Registry RsqrtGradParameterRegistry(schema::PrimitiveType_RsqrtGrad, lite::DefaultPopulateParameter,
@@ -47,7 +47,7 @@ logs_path=${basepath}/logs_train
 rm -rf ${logs_path}
 mkdir -p ${logs_path}
-docker_image=mindspore_build:210301
+docker_image=mindspore_build:210311
 #docker_image=mindspore/mindspore-gpu:1.1.1
 # Export models
 echo "Start Exporting models ..."
@@ -71,6 +71,7 @@
 #include "ops/sigmoid.h"
 #include "ops/stack.h"
 #include "ops/tanh.h"
+#include "ops/sparse_softmax_cross_entropy_with_logits.h"
 using mindspore::ops::kNameAdd;
 using mindspore::ops::kNameAdder;
@@ -111,6 +112,7 @@ using mindspore::ops::kNameResizeBilinear;
 using mindspore::ops::kNameResizeNearestNeighbor;
 using mindspore::ops::kNameScale;
 using mindspore::ops::kNameSigmoid;
+using mindspore::ops::kNameSparseSoftmaxCrossEntropyWithLogits;
 using mindspore::ops::kNameSub;
 using mindspore::ops::kNameTanh;
 using mindspore::ops::kNameTile;
@@ -585,5 +587,7 @@ REGIST_PRIMITIVE_ADJUST(kNameSub, MoveAttrMapCommon<ops::SubFusion>)
 REGIST_PRIMITIVE_ADJUST(kNameTanh, MoveAttrMapActivation)
 REGIST_PRIMITIVE_ADJUST(kNameTile, MoveAttrMapCommon<ops::TileFusion>)
 REGIST_PRIMITIVE_ADJUST(kNameTopK, MoveAttrMapCommon<ops::TopKFusion>)
+REGIST_PRIMITIVE_ADJUST(kNameSparseSoftmaxCrossEntropyWithLogits,
+                        MoveAttrMapCommon<ops::SparseSoftmaxCrossEntropyWithLogits>)
 } // namespace opt
 } // namespace mindspore