
gather should be renamed to gatherv2 in gpu implementation files

tags/v1.1.0
zhouyuanshen 5 years ago
commit ff7ecebdcd
4 changed files with 22 additions and 22 deletions
  1. mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/gatherv2_gpu_kernel.cc (+4, -4)
  2. mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/gatherv2_gpu_kernel.h (+8, -8)
  3. mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/gatherv2.cu (+7, -7)
  4. mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/gatherv2.cuh (+3, -3)

mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/gather_gpu_kernel.cc → mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/gatherv2_gpu_kernel.cc

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -14,17 +14,17 @@
 * limitations under the License.
 */

-#include "backend/kernel_compiler/gpu/arrays/gather_gpu_kernel.h"
+#include "backend/kernel_compiler/gpu/arrays/gatherv2_gpu_kernel.h"

 namespace mindspore {
 namespace kernel {
 MS_REG_GPU_KERNEL_TWO(
   GatherV2,
   KernelAttr().AddInputAttr(kNumberTypeFloat32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat32),
-  GatherGpuFwdKernel, float, int)
+  GatherV2GpuFwdKernel, float, int)
 MS_REG_GPU_KERNEL_TWO(
   GatherV2,
   KernelAttr().AddInputAttr(kNumberTypeFloat16).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeFloat16),
-  GatherGpuFwdKernel, half, int)
+  GatherV2GpuFwdKernel, half, int)
 }  // namespace kernel
 }  // namespace mindspore

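The registrations above bind the front-end GatherV2 operator to the renamed GatherV2GpuFwdKernel class for each supported type pair. As an illustration only (not part of this commit, and assuming int32 params are accepted by the op), a registration for int32 values would follow the same pattern:

// Hypothetical int32 registration, mirroring the float/half entries above.
MS_REG_GPU_KERNEL_TWO(
  GatherV2,
  KernelAttr().AddInputAttr(kNumberTypeInt32).AddInputAttr(kNumberTypeInt32).AddOutputAttr(kNumberTypeInt32),
  GatherV2GpuFwdKernel, int, int)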
mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/gather_gpu_kernel.h → mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/gatherv2_gpu_kernel.h

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -20,15 +20,15 @@
 #include <vector>
 #include "backend/kernel_compiler/gpu/gpu_kernel.h"
 #include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
-#include "backend/kernel_compiler/gpu/cuda_impl/gather.cuh"
+#include "backend/kernel_compiler/gpu/cuda_impl/gatherv2.cuh"

 namespace mindspore {
 namespace kernel {
 template <typename T, typename S>
-class GatherGpuFwdKernel : public GpuKernel {
+class GatherV2GpuFwdKernel : public GpuKernel {
  public:
-  GatherGpuFwdKernel() : axis_(0), handle_(nullptr) {}
-  ~GatherGpuFwdKernel() = default;
+  GatherV2GpuFwdKernel() : axis_(0), handle_(nullptr) {}
+  ~GatherV2GpuFwdKernel() = default;

   const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
   const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
@@ -42,15 +42,15 @@ class GatherGpuFwdKernel : public GpuKernel {
     T *output_addr = GetDeviceAddress<T>(outputs, 0);

     auto input_dim1 = input_shapes_[IntToSize(axis_)];
-    Gather(input_addr, indices_addr, output_addr, dims_[0], dims_[1], dims_[2], input_dim1,
-           reinterpret_cast<cudaStream_t>(stream_ptr));
+    GatherV2(input_addr, indices_addr, output_addr, dims_[0], dims_[1], dims_[2], input_dim1,
+             reinterpret_cast<cudaStream_t>(stream_ptr));
     return true;
   }
   bool Init(const CNodePtr &kernel_node) override {
     InitResource();
     size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node);
     if (input_num != 2) {
-      MS_LOG(EXCEPTION) << "Argument number is " << input_num << ", but GatherGpuFwdKernel needs 2.";
+      MS_LOG(EXCEPTION) << "Argument number is " << input_num << ", but GatherGpuV2FwdKernel needs 2.";
     }
     input_shapes_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
     indices_shapes_ = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);

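The Launch body above hands the device launcher three collapsed output dimensions (dims_[0], dims_[1], dims_[2]) plus input_dim1, the size of the input along axis_. The hunk elides where dims_ is computed; below is a minimal sketch of the usual flattening, assuming a hypothetical Reshape() helper and the input_shapes_/indices_shapes_ members visible in Init():

// Hypothetical sketch (not part of this hunk): derive the 3-D launch dimensions
// from the shapes collected in Init().
void Reshape() {
  size_t dim_before_axis = 1;    // product of input dims in front of axis_
  for (size_t i = 0; i < IntToSize(axis_); ++i) {
    dim_before_axis *= input_shapes_[i];
  }
  size_t dim_of_indices = 1;     // total number of gathered indices
  for (size_t i = 0; i < indices_shapes_.size(); ++i) {
    dim_of_indices *= indices_shapes_[i];
  }
  size_t dim_after_axis = 1;     // product of input dims behind axis_
  for (size_t i = IntToSize(axis_) + 1; i < input_shapes_.size(); ++i) {
    dim_after_axis *= input_shapes_[i];
  }
  dims_[0] = dim_before_axis;
  dims_[1] = dim_of_indices;
  dims_[2] = dim_after_axis;
}

With that layout, a gather of any rank reduces to a 3-D problem: everything before the axis, the gathered indices, everything after the axis.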
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/gather.cu → mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/gatherv2.cu

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -15,10 +15,10 @@
 */

 #include <iostream>
-#include "backend/kernel_compiler/gpu/cuda_impl/gather.cuh"
+#include "backend/kernel_compiler/gpu/cuda_impl/gatherv2.cuh"
 #include "runtime/device/gpu/cuda_common.h"
 template <typename T, typename S>
-__global__ void GatherKernel(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1,
+__global__ void GatherV2Kernel(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1,
                              size_t output_dim2, size_t input_dim1) {
   int num = output_dim0 * output_dim1 * output_dim2;
   int i, j, k;
@@ -39,16 +39,16 @@ __global__ void GatherKernel(T *input, S *indices, T *output, size_t output_dim0
   return;
 }
 template <typename T, typename S>
-void Gather(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1, size_t output_dim2,
+void GatherV2(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1, size_t output_dim2,
             size_t input_dim1, cudaStream_t stream) {
   int size = output_dim0 * output_dim1 * output_dim2;
-  GatherKernel<<<GET_BLOCKS(size), GET_THREADS, 0, stream>>>(input, indices, output, output_dim0, output_dim1,
+  GatherV2Kernel<<<GET_BLOCKS(size), GET_THREADS, 0, stream>>>(input, indices, output, output_dim0, output_dim1,
                                                              output_dim2, input_dim1);
   return;
 }

-template void Gather<float, int>(float *input, int *indices, float *output, size_t output_dim0, size_t output_dim1,
+template void GatherV2<float, int>(float *input, int *indices, float *output, size_t output_dim0, size_t output_dim1,
                                  size_t output_dim2, size_t input_dim1, cudaStream_t stream);

-template void Gather<half, int>(half *input, int *indices, half *output, size_t output_dim0, size_t output_dim1,
+template void GatherV2<half, int>(half *input, int *indices, half *output, size_t output_dim0, size_t output_dim1,
                                 size_t output_dim2, size_t input_dim1, cudaStream_t stream);

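Only the names of GatherV2Kernel and GatherV2 change in this file; the kernel body sits outside the hunks. For orientation, here is a self-contained sketch (an assumption about the elided body, not the committed implementation) of the index arithmetic a gather kernel with this signature performs, viewing the output as (output_dim0, output_dim1, output_dim2) and the input as (output_dim0, input_dim1, output_dim2):

// Sketch only: each thread maps its flat output offset back to (i, j, k)
// and copies input[i][indices[j]][k] to output[i][j][k] in a grid-stride loop.
template <typename T, typename S>
__global__ void GatherV2KernelSketch(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1,
                                     size_t output_dim2, size_t input_dim1) {
  size_t num = output_dim0 * output_dim1 * output_dim2;
  for (size_t id = blockIdx.x * blockDim.x + threadIdx.x; id < num; id += blockDim.x * gridDim.x) {
    size_t i = id / (output_dim1 * output_dim2);  // position in the dims before the axis
    size_t j = (id / output_dim2) % output_dim1;  // which gathered index
    size_t k = id % output_dim2;                  // position in the dims after the axis
    size_t in_id = i * input_dim1 * output_dim2 + static_cast<size_t>(indices[j]) * output_dim2 + k;
    output[id] = input[in_id];
  }
}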
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/gather.cuh → mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/gatherv2.cuh

@@ -1,5 +1,5 @@
 /**
- * Copyright 2019 Huawei Technologies Co., Ltd
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -17,7 +17,7 @@
 #ifndef MINDSPORE_GATHER_GPU_CU_H
 #define MINDSPORE_GATHER_GPU_CU_H
 template <typename T, typename S>
-void Gather(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1, size_t output_dim2,
-            size_t input_dim1, cudaStream_t stream);
+void GatherV2(T *input, S *indices, T *output, size_t output_dim0, size_t output_dim1,
+              size_t output_dim2, size_t input_dim1, cudaStream_t stream);

 #endif

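Outside MindSpore's kernel registry, the launcher declared above can also be driven directly from host code. A hedged usage sketch follows (the build setup, include path, and sizes are assumptions for illustration; it relies on the explicit float/int instantiation in gatherv2.cu and is compiled with nvcc):

// Illustrative host-side usage, not part of the commit. Gathers rows {2, 0}
// from a 4x3 float matrix along axis 0, so output_dim0 = 1, output_dim1 = 2,
// output_dim2 = 3, and input_dim1 = 4 (input size along the axis).
#include <cuda_runtime.h>
#include <vector>
#include "backend/kernel_compiler/gpu/cuda_impl/gatherv2.cuh"

int main() {
  const size_t output_dim0 = 1, output_dim1 = 2, output_dim2 = 3, input_dim1 = 4;
  std::vector<float> h_input = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  std::vector<int> h_indices = {2, 0};
  std::vector<float> h_output(output_dim0 * output_dim1 * output_dim2);

  float *d_input = nullptr, *d_output = nullptr;
  int *d_indices = nullptr;
  cudaMalloc(&d_input, h_input.size() * sizeof(float));
  cudaMalloc(&d_indices, h_indices.size() * sizeof(int));
  cudaMalloc(&d_output, h_output.size() * sizeof(float));
  cudaMemcpy(d_input, h_input.data(), h_input.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_indices, h_indices.data(), h_indices.size() * sizeof(int), cudaMemcpyHostToDevice);

  // Launch on the default stream.
  GatherV2<float, int>(d_input, d_indices, d_output, output_dim0, output_dim1, output_dim2, input_dim1, nullptr);

  cudaMemcpy(h_output.data(), d_output, h_output.size() * sizeof(float), cudaMemcpyDeviceToHost);
  // h_output now holds {6, 7, 8, 0, 1, 2}: rows 2 and 0 of the input.
  cudaFree(d_input);
  cudaFree(d_indices);
  cudaFree(d_output);
  return 0;
}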