Browse Source

add customized vector

tags/v1.0.0
jianghui58 5 years ago
parent
commit
fef12388e8
7 changed files with 384 additions and 13 deletions
  1. +1
    -0
      mindspore/lite/internal/CMakeLists.txt
  2. +8
    -8
      mindspore/lite/internal/include/lite_utils.h
  3. +109
    -0
      mindspore/lite/internal/include/vector.h
  4. +259
    -0
      mindspore/lite/internal/src/common/vector.cc
  5. +3
    -3
      mindspore/lite/internal/src/kernel/fp32/matmul.cc
  6. +2
    -0
      mindspore/lite/internal/src/lite_log.h
  7. +2
    -2
      mindspore/lite/internal/src/ms_tensor.cc

+ 1
- 0
mindspore/lite/internal/CMakeLists.txt View File

@@ -27,6 +27,7 @@ set(CCSRC
     ${CMAKE_CURRENT_SOURCE_DIR}/src/lite_session.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/src/ms_tensor.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/src/common/string.cc
+    ${CMAKE_CURRENT_SOURCE_DIR}/src/common/vector.cc
     ${TOP_DIR}/src/common/log_adapter.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/../../core/gvar/logging_level.cc
     ${TOP_DIR}/src/runtime/allocator.cc


+ 8
- 8
mindspore/lite/internal/include/lite_utils.h View File

@@ -16,17 +16,17 @@

 #ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
 #define MINDSPORE_LITE_INTERNAL_INCLUDE_LITE_UTILS_H_
-#include <vector>
 #include "internal/include/string.h"
+#include "internal/include/vector.h"

 struct MSTensor;
 struct Node;
 using TensorPtr = MSTensor *;
-using TensorPtrVector = std::vector<MSTensor *>;
-using Uint32Vector = std::vector<uint32_t>;
-using StringVector = std::vector<String>;
-using ShapeVector = std::vector<int>;
-using NodePtrVector = std::vector<struct Node *>;
-using Int32Vector = std::vector<int>;
-using Int32VectorVector = std::vector<Int32Vector>;
+using TensorPtrVector = Vector<MSTensor *>;
+using Uint32Vector = Vector<uint32_t>;
+using StringVector = Vector<String>;
+using ShapeVector = Vector<int>;
+using NodePtrVector = Vector<struct Node *>;
+using Int32Vector = Vector<int>;
+using Int32VectorVector = Vector<Int32Vector>;
 #endif  // MINDSPORE_LITE_INCLUDE_LITE_UTILS_H_

+ 109
- 0
mindspore/lite/internal/include/vector.h View File

@@ -0,0 +1,109 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_INTERNAL_INCLUDE_VECTOR_H
#define MINDSPORE_LITE_INTERNAL_INCLUDE_VECTOR_H

#include <stdint.h>
#include "internal/include/string.h"
#define DEFAULT_CAPACITY 1

struct MSTensor;
struct Node;

// Minimal std::vector replacement for the lite "internal" build, which must
// not depend on the C++ standard library containers.
//
// NOTE(review): the implementation moves elements with memcpy/memmove and
// never runs element constructors/destructors, so T is assumed to be
// trivially copyable/destructible - TODO confirm for every instantiated T
// (String and Vector<int> instantiations look questionable).
// NOTE(review): only a copy constructor is declared; copy *assignment* falls
// back to the compiler-generated shallow copy, which would double-free
// data_ - verify callers never assign one Vector to another.
template <typename T>
class Vector {
 private:
  size_t size_;       // number of elements currently stored
  size_t elem_size_;  // sizeof(T), cached by the constructors
  size_t capacity_;   // number of elements the allocation can hold
  T *data_;           // heap buffer owning the elements (may be nullptr)

 public:
  // Constructs an empty vector; no allocation is performed yet.
  Vector();

  // Constructs a zero-initialized vector of `size` elements.
  explicit Vector(size_t size);

  // Deep-copies the buffer of `vector` (element-wise memcpy).
  Vector(const Vector<T> &vector);

  // Frees the underlying buffer (element destructors are not run).
  ~Vector();

  // Drops all elements and releases the buffer.
  void clear();

  // Appends a copy of `elem`, growing the buffer when full.
  void push_back(const T &elem);

  // Removes the last element; errors when the vector is empty.
  void pop_back();

  // Inserts `elem` before position `index` (0 <= index <= size()).
  void insert(const T &elem, size_t index);

  // Iterator access: raw pointers into the contiguous buffer, usable with
  // range-based for loops.
  T *begin();

  const T *begin() const;

  T *end();

  const T *end() const;

  // First/last element access; errors when the vector is empty.
  T &front();

  const T &front() const;

  T &back();

  const T &back() const;

  // Bounds-checked element access; errors when index >= size().
  T &at(size_t index);

  const T &at(size_t index) const;

  // NOTE(review): operator[] is also bounds-checked in the implementation,
  // unlike std::vector::operator[].
  T &operator[](size_t index);

  const T &operator[](size_t index) const;

  // Direct access to the underlying buffer (nullptr when unallocated).
  T *data();

  const T *data() const;

  size_t size() const;

  size_t capacity() const;

  bool empty() const;

  // Removes the element at `index`, shifting the tail one slot left.
  void erase(size_t index);

  // Changes size() to `size`, reallocating when it exceeds capacity().
  void resize(size_t size);

  // Requests capacity for at least `capacity` elements.
  void reserve(size_t capacity);
};

// Element-wise equality: two vectors compare equal iff they have the same
// size and no corresponding pair of elements differs (requires T to provide
// operator!=).
template <typename T>
bool operator==(const Vector<T> &lhs, const Vector<T> &rhs) {
  if (lhs.size() != rhs.size()) {
    return false;
  }
  // Fix: the index must be size_t - `int i < lhs.size()` mixed signed and
  // unsigned and would overflow for pathologically large vectors.
  for (size_t i = 0; i < lhs.size(); ++i) {
    if (lhs[i] != rhs[i]) {
      return false;
    }
  }
  return true;
}

// Inequality is defined as the logical negation of operator==, so the two
// comparisons can never disagree.
template <typename T>
bool operator!=(const Vector<T> &lhs, const Vector<T> &rhs) {
  const bool equal = (lhs == rhs);
  return !equal;
}
#endif // MINDSPORE_LITE_INTERNAL_INCLUDE_VECTOR_H

+ 259
- 0
mindspore/lite/internal/src/common/vector.cc View File

@@ -0,0 +1,259 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "internal/include/vector.h"
#include <stdlib.h>
#include <string.h>
#include "internal/src/lite_log.h"

#define min(x, y) ((x < y) ? (x) : (y))

// Default constructor: defers allocation - data_ stays null and the first
// push_back() allocates DEFAULT_CAPACITY elements.
template<typename T>
Vector<T>::Vector() {
  data_ = nullptr;
  elem_size_ = sizeof(T);
  capacity_ = DEFAULT_CAPACITY;
  size_ = 0;
}

// Constructs a zero-initialized vector holding `size` elements.
// Fix: a request of size 0 used to set capacity_ = 0 and call malloc(0),
// whose result is implementation-defined - a nullptr return aborted via
// MS_C_EXCEPTION even though no memory was needed, and the zero capacity
// broke later growth. Fall back to DEFAULT_CAPACITY instead.
template<typename T>
Vector<T>::Vector(size_t size) {
  size_ = size;
  elem_size_ = sizeof(T);
  capacity_ = (size == 0) ? DEFAULT_CAPACITY : size;
  data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
  if (data_ == nullptr) {
    MS_C_EXCEPTION("malloc data failed");
  }
  memset(data_, 0, capacity_ * elem_size_);
}

// Copy constructor: deep-copies the source buffer with memcpy (element copy
// constructors are not run - T must be trivially copyable).
// Fix: copying a cleared/empty vector used to memcpy from a null source
// pointer (undefined behavior even for zero bytes) and could malloc(0).
template<typename T>
Vector<T>::Vector(const Vector<T> &vec) {
  size_ = vec.size_;
  elem_size_ = sizeof(T);
  capacity_ = (vec.capacity_ == 0) ? DEFAULT_CAPACITY : vec.capacity_;
  data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
  if (data_ == nullptr) {
    MS_C_EXCEPTION("malloc data failed");
  }
  if (vec.data_ != nullptr && size_ > 0) {
    memcpy(data_, vec.data_, size_ * elem_size_);
  }
}

// Destructor: releases the heap buffer. Element destructors are intentionally
// not invoked; this container only supports trivially destructible payloads.
template<typename T>
Vector<T>::~Vector() {
  // free(nullptr) is a well-defined no-op, so no guard is required.
  free(data_);
  data_ = nullptr;
}

// Drops all elements and frees the buffer. capacity_ keeps its old value, so
// the next push_back() reallocates at that capacity.
template<typename T>
void Vector<T>::clear() {
  size_ = 0;
  if (data_ == nullptr) {
    return;
  }
  free(data_);
  data_ = nullptr;
}

// Appends a copy of `elem` at the end, growing the buffer when it is full.
// Fix: guard against capacity_ == 0 (possible after Vector(0) in the original
// code) - the old code would malloc(0) and then memcpy one element past the
// end of that empty allocation, corrupting the heap.
template<typename T>
void Vector<T>::push_back(const T &elem) {
  if (capacity_ == 0) {
    capacity_ = DEFAULT_CAPACITY;
  }
  if (data_ == nullptr) {
    // Lazy first allocation (default-constructed or cleared vector).
    data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
    if (data_ == nullptr) {
      MS_C_EXCEPTION("malloc data failed");
    }
  } else if (size_ == capacity_) {
    resize(size_ + 1);  // grows the allocation and sets size_ = size_ + 1
    --size_;            // undo: the append below does the real increment
  }
  memcpy(data_ + size_, &elem, elem_size_);
  ++size_;
}

// Removes the last element by shrinking the logical size; the slot itself is
// neither destroyed nor cleared. Errors when the vector is empty.
template<typename T>
void Vector<T>::pop_back() {
  if (size_ > 0) {
    size_ -= 1;
    return;
  }
  MS_C_EXCEPTION("Index is out of range!");
}

// Inserts a copy of `elem` before position `index`; `index` may equal size()
// to append at the end.
// Fix: the original pre-incremented size_ and then delegated end-insertion to
// push_back(), which incremented size_ a second time and copied the element
// one slot past its target - appending via insert() grew the vector by two
// and left an uninitialized gap.
template<typename T>
void Vector<T>::insert(const T &elem, size_t index) {
  if (index <= size_) {
    if (capacity_ == 0) {
      capacity_ = DEFAULT_CAPACITY;
    }
    if (data_ == nullptr) {
      data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
      if (data_ == nullptr) {
        MS_C_EXCEPTION("malloc data failed");
      }
    } else if (size_ == capacity_) {
      resize(size_ + 1);  // grows the buffer; resize() sets size_ = size_ + 1
      --size_;            // undo: size_ is incremented exactly once below
    }
    // Shift [index, size_) one slot right (zero-length move when appending),
    // then place the new element.
    memmove(data_ + index + 1, data_ + index, (size_ - index) * elem_size_);
    memcpy(data_ + index, &elem, elem_size_);
    ++size_;
  } else {
    MS_C_EXCEPTION("Input index is out of range!");
  }
}

// begin()/end() expose raw pointers into the contiguous buffer so the vector
// supports range-based for loops and pointer iteration. begin() is nullptr
// for an unallocated vector; end() equals begin() when empty.
template<typename T>
T *Vector<T>::begin() {
  return data_;
}

template<typename T>
const T *Vector<T>::begin() const {
  return data_;
}

// end() points one past the last element.
template<typename T>
T *Vector<T>::end() {
  return data_ + size_;
}

template<typename T>
const T *Vector<T>::end() const {
  return data_ + size_;
}

// Returns a reference to the first element; raises when the vector is empty.
template<typename T>
T &Vector<T>::front() {
  if (!empty()) {
    return data_[0];
  }
  MS_C_EXCEPTION("Index is out of range!");
}

// Const overload of front().
template<typename T>
const T &Vector<T>::front() const {
  if (!empty()) {
    return data_[0];
  }
  MS_C_EXCEPTION("Index is out of range!");
}
// Returns a reference to the last element; raises when the vector is empty.
template<typename T>
T &Vector<T>::back() {
  if (!empty()) {
    return data_[size_ - 1];
  }
  MS_C_EXCEPTION("Index is out of range!");
}

// Const overload of back().
template<typename T>
const T &Vector<T>::back() const {
  if (!empty()) {
    return data_[size_ - 1];
  }
  MS_C_EXCEPTION("Index is out of range!");
}

// Bounds-checked element access; raises when index >= size().
template<typename T>
T &Vector<T>::at(size_t index) {
  if (index < size_) {
    return data_[index];
  }
  MS_C_EXCEPTION("Input index is out of range!");
}

// Const overload of at().
template<typename T>
const T &Vector<T>::at(size_t index) const {
  if (index < size_) {
    return data_[index];
  }
  MS_C_EXCEPTION("Input index is out of range!");
}

// Subscript access. Unlike std::vector::operator[], this is bounds-checked
// and behaves exactly like at().
template<typename T>
T &Vector<T>::operator[](size_t index) {
  if (index < size_) {
    return data_[index];
  }
  MS_C_EXCEPTION("Input index is out of range!");
}

// Const overload of operator[].
template<typename T>
const T &Vector<T>::operator[](size_t index) const {
  if (index < size_) {
    return data_[index];
  }
  MS_C_EXCEPTION("Input index is out of range!");
}

// Direct access to the underlying buffer (nullptr when unallocated).
template<typename T>
T *Vector<T>::data() {
  return data_;
}

template<typename T>
const T *Vector<T>::data() const {
  return data_;
}

// Number of elements currently stored.
template<typename T>
size_t Vector<T>::size() const {
  return size_;
}

// Number of elements the current allocation can hold.
template<typename T>
size_t Vector<T>::capacity() const {
  return capacity_;
}

// True when the vector holds no elements.
template<typename T>
bool Vector<T>::empty() const {
  return size_ == 0;
}

// Removes the element at `index` by sliding the tail one slot to the left.
// Fix: the original fast path compared `index == size_ - 1`, which underflows
// to SIZE_MAX when size_ == 0, so erase(SIZE_MAX) on an empty vector would
// silently corrupt size_. The two valid branches are also merged: a
// zero-length memmove when erasing the last element is a well-defined no-op.
template<typename T>
void Vector<T>::erase(size_t index) {
  if (index < size_) {
    memmove(data_ + index, data_ + index + 1, (size_ - index - 1) * elem_size_);
    --size_;
  } else {
    MS_C_EXCEPTION("Input index is out of range!");
  }
}

// Changes the logical size to `size`, reallocating when the current capacity
// is insufficient; the first min(size, size_) elements are preserved.
// Fixes:
//  - capacity_ was doubled at most ONCE, so resize(n) with n > 2 * capacity_
//    left size_ > capacity_ and later element writes ran past the allocation;
//  - a capacity_ of 0 doubled to 0 (zero-byte buffer);
//  - resizing a never-allocated vector passed nullptr to memcpy (UB).
template<typename T>
void Vector<T>::resize(size_t size) {
  if (capacity_ == 0) {
    capacity_ = DEFAULT_CAPACITY;
  }
  while (capacity_ < size) {
    capacity_ *= 2;  // geometric growth keeps push_back amortized O(1)
  }
  T *tmp = data_;
  data_ = reinterpret_cast<T *>(malloc(capacity_ * elem_size_));
  if (data_ == nullptr) {
    MS_C_EXCEPTION("malloc data failed");
  }
  if (tmp != nullptr) {
    memcpy(data_, tmp, min(size, size_) * elem_size_);
    free(tmp);
  }
  size_ = size;
}

// Ensures the buffer can hold at least `capacity` elements; never shrinks.
// Fix: the original only bumped the capacity_ bookkeeping WITHOUT
// reallocating, so a following push_back() trusted the inflated capacity and
// wrote past the end of the real allocation (heap corruption). Reserve now
// actually grows the buffer and preserves the existing elements.
template<typename T>
void Vector<T>::reserve(size_t capacity) {
  if (capacity <= capacity_) {
    return;
  }
  T *tmp = data_;
  data_ = reinterpret_cast<T *>(malloc(capacity * elem_size_));
  if (data_ == nullptr) {
    MS_C_EXCEPTION("malloc data failed");
  }
  if (tmp != nullptr) {
    memcpy(data_, tmp, size_ * elem_size_);
    free(tmp);
  }
  capacity_ = capacity;
}

// Explicit instantiations: only the element types used by the lite internal
// runtime are compiled here, keeping the template definitions out of the
// header. NOTE(review): Vector<String> and Vector<Vector<int>> hold
// non-trivially-copyable elements, but the implementation moves them with
// memcpy and never runs their destructors - confirm this is intended.
template class Vector<int>;
template class Vector<Vector<int>>;
template class Vector<uint32_t>;
template class Vector<String>;
template class Vector<MSTensor *>;
template class Vector<Node *>;

+ 3
- 3
mindspore/lite/internal/src/kernel/fp32/matmul.cc View File

@@ -125,10 +125,10 @@ int DoMatMul(const TensorPtrVector &in_tensors, const TensorPtrVector &out_tenso
     return RET_PARAM_INVALID;
   }
   int batch = 1;
-  std::vector<int> a_shape = in_tensors[0]->shape_;
-  std::vector<int> c_shape = out_tensors[0]->shape_;
+  ShapeVector a_shape = in_tensors[0]->shape_;
+  ShapeVector c_shape = out_tensors[0]->shape_;
   if (in_tensors.size() == 3) {
-    std::vector<int> bias_shape = in_tensors[2]->shape_;
+    ShapeVector bias_shape = in_tensors[2]->shape_;
     if (bias_shape[bias_shape.size() - 1] != c_shape[c_shape.size() - 1]) {
       LITE_ERROR_LOG("The bias' dimension %d is not equal with column %d", bias_shape[bias_shape.size() - 1],
                      c_shape[c_shape.size() - 1]);


+ 2
- 0
mindspore/lite/internal/src/lite_log.h View File

@@ -16,6 +16,8 @@

 #ifndef MINDSPORE_LITE_INTERNAL_SRC_LITE_LOG_H_
 #define MINDSPORE_LITE_INTERNAL_SRC_LITE_LOG_H_

+#include <stdlib.h>
 #ifdef DEBUG
 #include <assert.h>
 #endif


+ 2
- 2
mindspore/lite/internal/src/ms_tensor.cc View File

@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 #include <iostream>
-#include <vector>
-#include <string>
+#include "internal/include/string.h"
+#include "internal/include/vector.h"
 #include "internal/include/ms_tensor.h"
 MSTensor *CreateTensor(TypeId data_type, const ShapeVector &shape) {
   MSTensor *tensor = new (std::nothrow) MSTensor();


Loading…
Cancel
Save