| @@ -30,11 +30,14 @@ Status TreeAdapterLite::BuildExecutionTreeRecur(std::shared_ptr<DatasetNode> ir, | |||
| (*op) = ops.front(); // return the first op to be added as child by the caller of this function | |||
| RETURN_IF_NOT_OK(tree_->AssociateNode(*op)); | |||
| for (size_t i = 1; i < ops.size(); i++) { | |||
| if (op == NULL) { | |||
| return StatusCode::kLiteNullptr; | |||
| } | |||
| for (size_t i = 0; i < ops.size(); i++) { | |||
| RETURN_IF_NOT_OK(tree_->AssociateNode(ops[i])); | |||
| RETURN_IF_NOT_OK(ops[i - 1]->AddChild(ops[i])); | |||
| if (i > 0) { | |||
| RETURN_IF_NOT_OK(ops[i - 1]->AddChild(ops[i])); | |||
| } | |||
| } | |||
| // Build the children of IR, once they return, add the return value to *op | |||
| @@ -104,17 +104,21 @@ static Status JpegReadScanlines(jpeg_decompress_struct *const cinfo, int max_sca | |||
| const int k = scanline_ptr[cmyk_pixel + 3]; | |||
| int r, g, b; | |||
| if (cinfo->saw_Adobe_marker) { | |||
| r = (k * c) / 255; | |||
| g = (k * m) / 255; | |||
| b = (k * y) / 255; | |||
| r = (k * c) / MAX_PIXEL_VALUE; | |||
| g = (k * m) / MAX_PIXEL_VALUE; | |||
| b = (k * y) / MAX_PIXEL_VALUE; | |||
| } else { | |||
| r = (255 - c) * (255 - k) / 255; | |||
| g = (255 - m) * (255 - k) / 255; | |||
| b = (255 - y) * (255 - k) / 255; | |||
| r = (MAX_PIXEL_VALUE - c) * (MAX_PIXEL_VALUE - k) / MAX_PIXEL_VALUE; | |||
| g = (MAX_PIXEL_VALUE - m) * (MAX_PIXEL_VALUE - k) / MAX_PIXEL_VALUE; | |||
| b = (MAX_PIXEL_VALUE - y) * (MAX_PIXEL_VALUE - k) / MAX_PIXEL_VALUE; | |||
| } | |||
| buffer[3 * i + 0] = r; | |||
| buffer[3 * i + 1] = g; | |||
| buffer[3 * i + 2] = b; | |||
| constexpr int buffer_rgb_val_size = 3; | |||
| constexpr int channel_red = 0; | |||
| constexpr int channel_green = 1; | |||
| constexpr int channel_blue = 2; | |||
| buffer[buffer_rgb_val_size * i + channel_red] = r; | |||
| buffer[buffer_rgb_val_size * i + channel_green] = g; | |||
| buffer[buffer_rgb_val_size * i + channel_blue] = b; | |||
| } | |||
| } else if (num_lines_read > 0) { | |||
| auto copy_status = memcpy_s(buffer, buffer_size, scanline_ptr + offset, stride); | |||
| @@ -361,8 +365,9 @@ Status Resize(const std::shared_ptr<Tensor> &input, std::shared_ptr<Tensor> *out | |||
| RETURN_STATUS_UNEXPECTED("Resize: image datatype is not uint8."); | |||
| } | |||
| // resize image too large or too small | |||
| if (output_height == 0 || output_height > input->shape()[0] * 1000 || output_width == 0 || | |||
| output_width > input->shape()[1] * 1000) { | |||
| const int height_width_scale_limit = 1000; | |||
| if (output_height == 0 || output_height > input->shape()[0] * height_width_scale_limit || output_width == 0 || | |||
| output_width > input->shape()[1] * height_width_scale_limit) { | |||
| std::string err_msg = | |||
| "Resize: the resizing width or height 1) is too big, it's up to " | |||
| "1000 times the original image; 2) can not be 0."; | |||
| @@ -33,6 +33,8 @@ | |||
| #include "minddata/dataset/kernels/image/lite_cv/image_process.h" | |||
| #include "minddata/dataset/util/status.h" | |||
| #define MAX_PIXEL_VALUE 255 | |||
| namespace mindspore { | |||
| namespace dataset { | |||
| void JpegErrorExitCustom(j_common_ptr cinfo); | |||
| @@ -30,8 +30,8 @@ Status RandomCropAndResizeWithBBoxOp::Compute(const TensorRow &input, TensorRow | |||
| RETURN_IF_NOT_OK(BoundingBox::ValidateBoundingBoxes(input)); | |||
| CHECK_FAIL_RETURN_UNEXPECTED(input[0]->shape().Size() >= 2, | |||
| "RandomCropAndResizeWithBBox: image shape is not <H,W,C> or <H,W>."); | |||
| output->resize(2); | |||
| const int output_count = 2; | |||
| output->resize(output_count); | |||
| (*output)[1] = std::move(input[1]); // move boxes over to output | |||
| size_t bboxCount = input[1]->shape()[0]; // number of rows in bbox tensor | |||
| @@ -34,8 +34,8 @@ Status RandomCropWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) | |||
| int32_t padded_image_h; | |||
| int32_t padded_image_w; | |||
| output->resize(2); | |||
| const int output_count = 2; | |||
| output->resize(output_count); | |||
| (*output)[1] = std::move(input[1]); // since some boxes may be removed | |||
| bool crop_further = true; // Whether further cropping will be required or not, true unless required size matches | |||
| @@ -43,8 +43,8 @@ Status RandomVerticalFlipWithBBoxOp::Compute(const TensorRow &input, TensorRow * | |||
| bbox->SetY(newBoxCorner_y); | |||
| RETURN_IF_NOT_OK(bbox->WriteToTensor(input[1], i)); | |||
| } | |||
| output->resize(2); | |||
| const int output_count = 2; | |||
| output->resize(output_count); | |||
| (*output)[1] = std::move(input[1]); | |||
| return VerticalFlip(input[0], &(*output)[0]); | |||
| @@ -76,11 +76,11 @@ class MDToDApi { | |||
| std::vector<std::string> MDToDBuffToVector(MDToDBuff_t StrBuff) { | |||
| std::vector<std::string> strVector; | |||
| if (StrBuff.DataSize > 0) { | |||
| const char *p = reinterpret_cast<char *>(StrBuff.Buff); | |||
| const char *p = static_cast<char *>(StrBuff.Buff); | |||
| do { | |||
| strVector.push_back(std::string(p)); | |||
| p += strVector.back().size() + 1; | |||
| } while (p < reinterpret_cast<char *>(StrBuff.Buff) + StrBuff.DataSize); | |||
| } while (p < static_cast<char *>(StrBuff.Buff) + StrBuff.DataSize); | |||
| } | |||
| return strVector; | |||
| } | |||
| @@ -94,11 +94,12 @@ int MDToDApi_pathTest(const char* path) { | |||
| auto dir_it = Path::DirIterator::OpenDirectory(&f); | |||
| MS_LOG(INFO) << dir_it.get(); | |||
| int i = 0; | |||
| const int dir_entry_limit = 5; | |||
| while (dir_it->hasNext()) { | |||
| Path v = dir_it->next(); | |||
| MS_LOG(INFO) << v.toString() << "\n"; | |||
| i++; | |||
| if (i > dir_entry_limit) break; | |||
| } | |||
| } | |||
| return 0; | |||
| } | |||
| @@ -182,7 +183,7 @@ void MDBuffToVector(const MDToDBuff_t &MDBuff, std::vector<T> *vec) { | |||
| template <typename T> | |||
| void GetValue(std::unordered_map<std::string, std::shared_ptr<Tensor>> row, std::string columnName, T *o) { | |||
| auto column = row[columnName]; | |||
| if (NULL != column) { | |||
| if (column != NULL) { | |||
| MS_LOG(INFO) << "Tensor " << columnName << " shape: " << column->shape() << " type: " << column->type() | |||
| << " bytes: " << column->SizeInBytes(); | |||
| column->GetItemAt<T>(o, {}); | |||
| @@ -200,7 +201,7 @@ void GetTensorToBuff(std::unordered_map<std::string, std::shared_ptr<Tensor>> ro | |||
| resBuff->TensorSize[0] = resBuff->TensorSize[1] = resBuff->TensorSize[2] = resBuff->TensorSize[3] = | |||
| 0; // Mark all dims do not exist in tensor | |||
| int firstDim = (hasBatch) ? 1 : 0; | |||
| if (NULL != column) { | |||
| if (column != NULL) { | |||
| MS_LOG(INFO) << "Tensor " << columnName << " shape: " << column->shape() << " type: " << column->type() | |||
| << " bytes: " << column->SizeInBytes() << "nof elements: " << column->shape()[firstDim]; | |||
| auto tesoreShape = column->shape().AsVector(); | |||
| @@ -242,7 +243,7 @@ void GetTensorToBuff(std::unordered_map<std::string, std::shared_ptr<Tensor>> ro | |||
| } | |||
| MS_LOG(INFO) << columnName << " " << resBuff->DataSize | |||
| << " bytesCopyed to buff (MaxBuffSize: " << resBuff->MaxBuffSize << ") "; | |||
| if (0 == resBuff->DataSize) { | |||
| if (resBuff->DataSize == 0) { | |||
| MS_LOG(ERROR) << "COPY FAIL!!!! " << columnName << " Too large" | |||
| << "."; // memcpy failed | |||
| } | |||
| @@ -373,7 +374,7 @@ extern "C" int MDToDApi_UpdateEmbeding(MDToDApi *pMDToDApi, const char *column, | |||
| MS_LOG(INFO) << "Saved file " << embedding_file_path; | |||
| std::string file_path; | |||
| if (0 != GetJsonFullFileName(pMDToDApi, &file_path)) { | |||
| if (GetJsonFullFileName(pMDToDApi, &file_path) != 0) { | |||
| MS_LOG(ERROR) << "Failed to update " << columnName; | |||
| return -1; | |||
| } | |||
| @@ -390,7 +391,7 @@ extern "C" int MDToDApi_UpdateEmbeding(MDToDApi *pMDToDApi, const char *column, | |||
| extern "C" int MDToDApi_UpdateStringArray(MDToDApi *pMDToDApi, const char *column, MDToDBuff_t MDbuff) { | |||
| auto columnName = std::string(column); | |||
| std::string file_path; | |||
| if (0 != GetJsonFullFileName(pMDToDApi, &file_path)) { | |||
| if (GetJsonFullFileName(pMDToDApi, &file_path) != 0) { | |||
| MS_LOG(ERROR) << "Failed to update " << columnName; | |||
| return -1; | |||
| } | |||
| @@ -415,7 +416,7 @@ extern "C" int MDToDApi_UpdateStringArray(MDToDApi *pMDToDApi, const char *colum | |||
| extern "C" int MDToDApi_UpdateFloatArray(MDToDApi *pMDToDApi, const char *column, MDToDBuff_t MDBuff) { | |||
| auto columnName = std::string(column); | |||
| std::string file_path; | |||
| if (0 != GetJsonFullFileName(pMDToDApi, &file_path)) { | |||
| if (GetJsonFullFileName(pMDToDApi, &file_path) != 0) { | |||
| MS_LOG(ERROR) << "Failed to update " << columnName; | |||
| return -1; | |||
| } | |||
| @@ -58,17 +58,17 @@ typedef struct MDToDResult { | |||
| MDToDBuff_t faceEmbeddingsBuff; | |||
| } MDToDResult_t; | |||
| typedef int (*MDToDApi_pathTest_t)(const char *path); | |||
| typedef int (*MDToDApi_testAlbum_t)(); | |||
| typedef MDToDApi *(*MDToDApi_createPipeLine_t)(MDToDConf_t MDConf); | |||
| typedef int (*MDToDApi_GetNext_t)(MDToDApi *pMDToDApi, MDToDResult_t *results); | |||
| typedef int (*MDToDApi_UpdateEmbeding_t)(MDToDApi *pMDToDApi, const char *column, float *emmbeddings, | |||
| size_t emmbeddingsSize); | |||
| typedef int (*MDToDApi_UpdateStringArray_t)(MDToDApi *pMDToDApi, const char *column, MDToDBuff_t MDbuff); | |||
| typedef int (*MDToDApi_UpdateFloatArray_t)(MDToDApi *pMDToDApi, const char *column, MDToDBuff_t MDbuff); | |||
| typedef int (*MDToDApi_UpdateIsForTrain_t)(MDToDApi *pMDToDApi, uint8_t isForTrain); | |||
| typedef int (*MDToDApi_UpdateNoOfFaces_t)(MDToDApi *pMDToDApi, int32_t noOfFaces); | |||
| typedef int (*MDToDApi_Stop_t)(MDToDApi *pMDToDApi); | |||
| typedef int (*MDToDApi_Destroy_t)(MDToDApi *pMDToDApi); | |||
| using MDToDApi_pathTest_t = int (*)(const char *path); | |||
| using MDToDApi_testAlbum_t = int (*)(); | |||
| using MDToDApi_createPipeLine_t = MDToDApi *(*)(MDToDConf_t MDConf); | |||
| using MDToDApi_GetNext_t = int (*)(MDToDApi *pMDToDApi, MDToDResult_t *results); | |||
| using MDToDApi_UpdateEmbeding_t = int (*)(MDToDApi *pMDToDApi, const char *column, float *emmbeddings, | |||
|                                           size_t emmbeddingsSize); | |||
| using MDToDApi_UpdateStringArray_t = int (*)(MDToDApi *pMDToDApi, const char *column, MDToDBuff_t MDbuff); | |||
| using MDToDApi_UpdateFloatArray_t = int (*)(MDToDApi *pMDToDApi, const char *column, MDToDBuff_t MDbuff); | |||
| using MDToDApi_UpdateIsForTrain_t = int (*)(MDToDApi *pMDToDApi, uint8_t isForTrain); | |||
| using MDToDApi_UpdateNoOfFaces_t = int (*)(MDToDApi *pMDToDApi, int32_t noOfFaces); | |||
| using MDToDApi_Stop_t = int (*)(MDToDApi *pMDToDApi); | |||
| using MDToDApi_Destroy_t = int (*)(MDToDApi *pMDToDApi); | |||
| #endif | |||
| @@ -28,8 +28,9 @@ int main() { | |||
| MS_LOG(INFO) << "Doing MindDataTestPipeline-TestCifar10Dataset."; | |||
| // Create a Cifar10 Dataset | |||
| const int64_t num_samples = 10; | |||
| std::string folder_path = "./testCifar10Data/"; | |||
| std::shared_ptr<Dataset> ds = Cifar10(folder_path, std::string(), RandomSampler(false, 10)); | |||
| std::shared_ptr<Dataset> ds = Cifar10(folder_path, std::string(), RandomSampler(false, num_samples)); | |||
| // Create an iterator over the result of the above dataset | |||
| // This will trigger the creation of the Execution Tree and launch it. | |||