Merge pull request !6574 from liubuyu/code_clean
Tag: v1.0.0
@@ -16,12 +16,13 @@
 @title mindspore_build
 SET BASEPATH=%CD%
-IF NOT EXIST "%BASEPATH%/build" (
+SET BUILD_PATH=%BASEPATH%/build
+IF NOT EXIST "%BUILD_PATH%" (
     md "build"
 )
-cd %BASEPATH%/build
-set BUILD_PATH=%CD%
+cd %BUILD_PATH%
 IF NOT EXIST "%BUILD_PATH%/mindspore" (
     md "mindspore"
@@ -237,7 +237,7 @@ checkopts()
      ;;
    z)
      eval ARG=\$\{$OPTIND\}
-      if [[ -n $ARG && $ARG != -* ]]; then
+      if [[ -n "$ARG" && "$ARG" != -* ]]; then
        OPTARG="$ARG"
        check_on_off $OPTARG z
        OPTIND=$((OPTIND + 1))
@@ -81,7 +81,6 @@ class MS_API InferSession {
   }
   static std::shared_ptr<InferSession> CreateSession(const std::string &device, uint32_t device_id);
 };
-
 }  // namespace inference
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_MS_SESSION_H
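The hunk above only tidies whitespace around the `InferSession` declaration, but the factory it shows is the entry point of the C++ inference API. A minimal usage sketch follows; the include path, the `"Ascend"` device string, the device id, and the `main` wrapper are assumptions for illustration, and only `CreateSession`'s signature comes from the hunk itself.

```cpp
#include <memory>
#include <string>
// Assumption: this include path is a guess; use whatever header declares
// mindspore::inference::InferSession in your tree.
#include "include/ms_session.h"

int main() {
  // CreateSession takes a device name and a device id and returns a shared_ptr;
  // "Ascend" and 0 below are illustrative values, not the only valid ones.
  std::shared_ptr<mindspore::inference::InferSession> session =
      mindspore::inference::InferSession::CreateSession("Ascend", 0);
  if (session == nullptr) {
    return -1;  // session creation failed
  }
  return 0;
}
```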
@@ -66,7 +66,6 @@ class MirrorPadGpuFwdKernel : public GpuKernel {
     }
-
     string mode = GetValue<string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("mode"));
     if (mode == "REFLECT") {
       mode_ = 0;  // reflected mirroring
     } else {
@@ -66,7 +66,6 @@ class MirrorPadGpuBackKernel : public GpuKernel {
     }
-
     string mode = GetValue<string>(AnfAlgo::GetCNodePrimitive(kernel_node)->GetAttr("mode"));
     if (mode == "REFLECT") {
       mode_ = 0;  // reflected mirroring
     } else {
@@ -27,6 +27,5 @@ MS_REG_GPU_KERNEL_ONE(
   ROIAlign,
   KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
   ROIAlignGpuFwdKernel, half)
-
 }  // namespace kernel
 }  // namespace mindspore
@@ -27,6 +27,5 @@ MS_REG_GPU_KERNEL_ONE(
   ROIAlignGrad,
   KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumberTypeFloat16),
   ROIAlignGradGpuFwdKernel, half)
-
 }  // namespace kernel
 }  // namespace mindspore
@@ -14,12 +14,12 @@ endif ()
 if (ENABLE_CPU)
     file(GLOB_RECURSE CPU_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} "cpu/*.cc")
-    list(REMOVE_ITEM CPU_SRC_LIST "cpu/mpi/mpi_adapter.cc", "cpu/mpi/mpi_export.cc")
+    list(REMOVE_ITEM CPU_SRC_LIST "cpu/mpi/mpi_adapter.cc" "cpu/mpi/mpi_export.cc")
 endif ()
 if (ENABLE_MPI)
     if (ENABLE_CPU)
-        file(GLOB_RECURSE MPI_SRC_LIST "cpu/mpi/mpi_adapter.cc", "cpu/mpi/mpi_export.cc")
+        file(GLOB_RECURSE MPI_SRC_LIST "cpu/mpi/mpi_adapter.cc" "cpu/mpi/mpi_export.cc")
         set_property(SOURCE ${MPI_SRC_LIST}
                      PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_DEVICE)
         add_library(mpi_adapter SHARED ${MPI_SRC_LIST})
@@ -57,7 +57,6 @@ constexpr const char *kOpTypeOpDebug = "Opdebug";
 namespace mindspore {
 namespace device {
 namespace ascend {
-
 DataDumper::~DataDumper() {
   ReleaseDevMem(&dev_load_mem_);
   ReleaseDevMem(&dev_unload_mem_);
@@ -141,7 +141,6 @@ inline bool CheckNullInput(std::vector<size_t> input_shape) {
      MS_LOG(EXCEPTION) << "CUAD curand Error: " << message << " | curandStatus: " << status; \
    } \
  }
-
 }  // namespace gpu
 }  // namespace device
 }  // namespace mindspore
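The context lines above are the tail of a curand status-checking macro that wraps a call and logs an exception on any non-success status. A self-contained sketch of the same wrap-every-call pattern is below; the names (`Status`, `CHECK_STATUS`) are illustrative stand-ins and the error path throws directly rather than going through `MS_LOG(EXCEPTION)`.

```cpp
#include <sstream>
#include <stdexcept>

enum class Status { kSuccess, kFailure };

// Evaluate an expression once; if it does not report success, build a
// descriptive message and abort via an exception.
#define CHECK_STATUS(expr, message)                              \
  do {                                                           \
    Status status_ = (expr);                                     \
    if (status_ != Status::kSuccess) {                           \
      std::ostringstream oss_;                                   \
      oss_ << "Error: " << (message) << " | status: "            \
           << static_cast<int>(status_);                         \
      throw std::runtime_error(oss_.str());                      \
    }                                                            \
  } while (0)
```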
@@ -25,7 +25,6 @@
 namespace mindspore {
 namespace device {
 namespace gpu {
-
 MPIInitializer &MPIInitializer::GetInstance() {
   static MPIInitializer instance;
   return instance;
@@ -63,10 +63,8 @@ namespace pse_adaptor {
     vector<int> row(label_mat.cols);
     for (int y = 0; y < label_mat.cols; ++y) {
       int label = label_mat.at<int>(x, y);
-
       if (label == 0) continue;
       if (area[label] < min_area) continue;
-
       Point point(x, y);
       queue.push(point);
       row[y] = label;
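For readers unfamiliar with the loop above: it walks one row of the connected-component label matrix, skips background pixels and components below the minimum area, and seeds a queue for the later breadth-first expansion. A standalone sketch of that filtering step follows; it swaps the OpenCV `cv::Mat` and `Point` types for plain standard-library containers, so it is an illustration rather than the project's code.

```cpp
#include <queue>
#include <utility>
#include <vector>

using Seed = std::pair<int, int>;  // (x, y) pixel coordinate

// Collect queue seeds from one labelled row: keep only pixels whose component
// label is non-zero and whose component area passes the minimum-area filter.
void CollectRowSeeds(const std::vector<int> &label_row, int x,
                     const std::vector<int> &area, int min_area,
                     std::queue<Seed> *queue, std::vector<int> *row) {
  for (int y = 0; y < static_cast<int>(label_row.size()); ++y) {
    int label = label_row[y];
    if (label == 0) continue;              // background pixel
    if (area[label] < min_area) continue;  // component too small, drop it
    queue->push({x, y});
    (*row)[y] = label;                     // row must be pre-sized to label_row.size()
  }
}
```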
@@ -26,8 +26,7 @@ MSNetWork::MSNetWork(void) : session_(nullptr) {}
 MSNetWork::~MSNetWork(void) {}
-void
-MSNetWork::CreateSessionMS(char *modelBuffer, size_t bufferLen, mindspore::lite::Context *ctx) {
+void MSNetWork::CreateSessionMS(char *modelBuffer, size_t bufferLen, mindspore::lite::Context *ctx) {
   session_ = mindspore::session::LiteSession::CreateSession(ctx);
   if (session_ == nullptr) {
     MS_PRINT("Create Session failed.");
@@ -52,4 +51,3 @@ int MSNetWork::ReleaseNets(void) {
   delete session_;
   return 0;
 }
-
@@ -52,7 +52,7 @@ class MSNetWork {
   int ReleaseNets(void);
-  mindspore::session::LiteSession * session() const { return session_; }
+  mindspore::session::LiteSession *session() const { return session_; }
 private:
   mindspore::session::LiteSession *session_;
 };
@@ -145,9 +145,8 @@ char *CreateLocalModelBuffer(JNIEnv *env, jobject modelBuffer) {
  * @param msOutputs
  * @return
  */
-std::string
-ProcessRunnetResult(const int RET_CATEGORY_SUM, const char *const labels_name_map[],
-                    std::unordered_map<std::string, mindspore::tensor::MSTensor *> msOutputs) {
+std::string ProcessRunnetResult(const int RET_CATEGORY_SUM, const char *const labels_name_map[],
+                                std::unordered_map<std::string, mindspore::tensor::MSTensor *> msOutputs) {
   // Get the branch of the model output.
   // Use iterators to get map elements.
   std::unordered_map<std::string, mindspore::tensor::MSTensor *>::iterator iter;
@@ -160,7 +159,7 @@ ProcessRunnetResult(const int RET_CATEGORY_SUM, const char *const labels_name_ma
   MS_PRINT("Number of tensor elements:%d", tensorNum);
   // Get a pointer to the first score.
-  float *temp_scores = static_cast<float * >(outputTensor->MutableData());
+  float *temp_scores = static_cast<float *>(outputTensor->MutableData());
   float scores[RET_CATEGORY_SUM];
   for (int i = 0; i < RET_CATEGORY_SUM; ++i) {
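The lines above cast the output tensor's raw buffer to `float *` and copy `RET_CATEGORY_SUM` scores out of it; classification then typically reduces to picking the best-scoring index. A small sketch of that reduction, using only the standard library (the tensor access itself is omitted), is:

```cpp
#include <algorithm>
#include <cstddef>

// Return the index of the largest score in a contiguous float buffer.
// The caller guarantees count > 0 and that scores points at count valid floats,
// e.g. the buffer obtained from MutableData() in the hunk above.
int ArgMaxScore(const float *scores, std::size_t count) {
  return static_cast<int>(std::max_element(scores, scores + count) - scores);
}
```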
@@ -202,12 +201,12 @@ bool BitmapToLiteMat(JNIEnv *env, const jobject &srcBitmap, LiteMat *lite_mat) {
       MS_PRINT("Init From RGBA error");
     }
   } else {
-    unsigned char *pixels_ptr = new unsigned char[info.width*info.height*4];
+    unsigned char *pixels_ptr = new unsigned char[info.width * info.height * 4];
     unsigned char *ptr = pixels_ptr;
     unsigned char *data = reinterpret_cast<unsigned char *>(pixels);
     for (int i = 0; i < info.height; i++) {
-      memcpy(ptr, data, info.width*4);
-      ptr += info.width*4;
+      memcpy(ptr, data, info.width * 4);
+      ptr += info.width * 4;
       data += info.stride;
     }
     ret = InitFromPixel(reinterpret_cast<const unsigned char *>(pixels_ptr),
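The `else` branch above handles bitmaps whose `stride` is larger than `width * 4`: each source row carries padding bytes, so the pixels are repacked row by row into a tight buffer before `InitFromPixel` is called. A self-contained version of that repacking step (a sketch, not the app's code, using `std::vector` instead of a raw `new[]` so nothing leaks) looks like this:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Copy an RGBA image whose rows are 'stride' bytes apart into a tightly packed
// buffer of exactly width * 4 bytes per row, dropping any per-row padding.
std::vector<uint8_t> PackRgbaRows(const uint8_t *pixels, int width, int height, int stride) {
  std::vector<uint8_t> packed(static_cast<std::size_t>(width) * height * 4);
  uint8_t *dst = packed.data();
  const uint8_t *src = pixels;
  for (int row = 0; row < height; ++row) {
    std::memcpy(dst, src, static_cast<std::size_t>(width) * 4);  // visible pixels only
    dst += width * 4;  // packed rows are exactly width * 4 bytes
    src += stride;     // source rows may include padding bytes
  }
  return packed;
}
```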
@@ -18,6 +18,4 @@
 #define MINDSPORE_JNI_HMS_DEBUG_MINDSPORENETNATIVE_H
-
-
 #endif  // MINDSPORE_JNI_HMS_DEBUG_MINDSPORENETNATIVE_H
@@ -26,8 +26,7 @@ MSNetWork::MSNetWork(void) : session_(nullptr) {}
 MSNetWork::~MSNetWork(void) {}
-void
-MSNetWork::CreateSessionMS(char *modelBuffer, size_t bufferLen, mindspore::lite::Context *ctx) {
+void MSNetWork::CreateSessionMS(char *modelBuffer, size_t bufferLen, mindspore::lite::Context *ctx) {
   session_ = mindspore::session::LiteSession::CreateSession(ctx);
   if (session_ == nullptr) {
     MS_PRINT("Create Session failed.");
@@ -52,7 +52,7 @@ class MSNetWork {
   int ReleaseNets(void);
-  mindspore::session::LiteSession * session() const { return session_; }
+  mindspore::session::LiteSession *session() const { return session_; }
 private:
   mindspore::session::LiteSession *session_;
 };
@@ -45,7 +45,7 @@ bool BitmapToLiteMat(JNIEnv *env, const jobject &srcBitmap, LiteMat *lite_mat) {
     return false;
   }
   AndroidBitmap_lockPixels(env, srcBitmap, &pixels);
-  if (info.stride == info.width*4) {
+  if (info.stride == info.width * 4) {
     ret = InitFromPixel(reinterpret_cast<const unsigned char *>(pixels),
                         LPixelType::RGBA2RGB, LDataType::UINT8,
                         info.width, info.height, lite_mat_bgr);
@@ -53,12 +53,12 @@ bool BitmapToLiteMat(JNIEnv *env, const jobject &srcBitmap, LiteMat *lite_mat) {
       MS_PRINT("Init From RGBA error");
     }
   } else {
-    unsigned char *pixels_ptr = new unsigned char[info.width*info.height*4];
+    unsigned char *pixels_ptr = new unsigned char[info.width * info.height * 4];
     unsigned char *ptr = pixels_ptr;
     unsigned char *data = reinterpret_cast<unsigned char *>(pixels);
     for (int i = 0; i < info.height; i++) {
-      memcpy(ptr, data, info.width*4);
-      ptr += info.width*4;
+      memcpy(ptr, data, info.width * 4);
+      ptr += info.width * 4;
       data += info.stride;
     }
     ret = InitFromPixel(reinterpret_cast<const unsigned char *>(pixels_ptr),
@@ -110,8 +110,7 @@ char *CreateLocalModelBuffer(JNIEnv *env, jobject modelBuffer) {
  * @param srcImageHeight The height of the original input image.
  * @return
  */
-std::string ProcessRunnetResult(std::unordered_map<std::string,
-                                mindspore::tensor::MSTensor *> msOutputs,
+std::string ProcessRunnetResult(std::unordered_map<std::string, mindspore::tensor::MSTensor *> msOutputs,
                                 int srcImageWidth, int srcImageHeight) {
   std::unordered_map<std::string, mindspore::tensor::MSTensor *>::iterator iter;
   iter = msOutputs.begin();
@@ -124,8 +123,8 @@ std::string ProcessRunnetResult(std::unordered_map<std::string,
   MS_PRINT("%s %s", branch1_string.c_str(), branch2_string.c_str());
   // ----------- Interface test (接口测试) --------------------------
-  float *tmpscores2 = reinterpret_cast<float * >(branch1_tensor->MutableData());
-  float *tmpdata = reinterpret_cast<float * >(branch2_tensor->MutableData());
+  float *tmpscores2 = reinterpret_cast<float *>(branch1_tensor->MutableData());
+  float *tmpdata = reinterpret_cast<float *>(branch2_tensor->MutableData());
   // Using ssd model util to process model branch outputs.
   SSDModelUtil ssdUtil(srcImageWidth, srcImageHeight);
@@ -177,7 +177,6 @@ void SSDModelUtil::getDefaultBoxes() {
         tempWHBox.boxw = h;
         tempWHBox.boxh = w;
         all_sizes.push_back(tempWHBox);
-
       } else {
         // len(all_sizes) = 6.
         tempWHBox.boxw = sk1;