@@ -90,6 +90,12 @@ ConvBiasForwardImpl::AlgoPack::AlgoPack() {
    for (auto&& algo : uint4_int4_nchw64_imma) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : int4_int4_nhwc_imma) {
        all_algos.push_back(&algo);
    }
    for (auto&& algo : uint4_int4_nhwc_imma) {
        all_algos.push_back(&algo);
    }
#endif
#endif
    fill_dp4a_algos();
@@ -247,6 +253,34 @@ void ConvBiasForwardImpl::AlgoPack::fill_imma_algos() {
        uint4_int4_nchw64_imma.emplace_back(
                AlgoParam{256, 128, 128, 64, 64, 128});
    }
    {
        using AlgoParam = AlgoInt4Int4NHWCIMMAImplicitGemm::AlgoParam;
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 32});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 16});
        int4_int4_nhwc_imma.emplace_back(AlgoParam{128, 32, 64, 64, 32, 64, 8});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 32});
        int4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 16});
        int4_int4_nhwc_imma.emplace_back(AlgoParam{128, 64, 64, 64, 64, 64, 8});
    }
    {
        using AlgoParam = AlgoUInt4Int4NHWCIMMAImplicitGemm::AlgoParam;
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 32});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 16});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 32, 64, 64, 32, 64, 8});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 32});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 16});
        uint4_int4_nhwc_imma.emplace_back(
                AlgoParam{128, 64, 64, 64, 64, 64, 8});
    }
#endif
}
#endif
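For orientation, a minimal sketch (inferred from the dispatch macros later in the patch, not itself part of the change) of how one of the AlgoParam entries above is consumed: the first six numbers become the CUTLASS threadblock and warp tiles, and the last one is the width of a global-memory access in int4 elements.

    // assumption: field order matches the AlgoParam struct declared in algo.h
    // AlgoParam{128, 32, 64, 64, 32, 64, 32} roughly corresponds to
    using ThreadBlockShape = cutlass::gemm::GemmShape<128, 32, 64>;  // threadblock_m/n/k
    using WarpShape = cutlass::gemm::GemmShape<64, 32, 64>;          // warp_m/n/k
    constexpr int access_size = 32;  // int4 elements per global load, 32 x 4 bit = 16 bytes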
@@ -63,6 +63,8 @@ public:
        CUDA_IMPLICIT_GEMM_IMMA_NCHW32_INT8,
        CUDA_IMPLICIT_GEMM_IMMA_NCHW64_INT4_INT4,
        CUDA_IMPLICIT_GEMM_IMMA_NCHW64_UINT4_INT4,
        CUDA_IMPLICIT_GEMM_IMMA_NHWC_INT4_INT4,
        CUDA_IMPLICIT_GEMM_IMMA_NHWC_UINT4_INT4,
        CUDA_BFLOAT16,
        CUDA_IMPLICIT_GEMM_SASS_NCHW4_DOTPROD_INT8,
        CUDA_IMPLICIT_GEMM_1X1_SASS_NCHW4_DOTPROD_INT8,

@@ -879,6 +881,133 @@ public:
    MEGDNN_DECL_ALGO_TYPE(CUDA_IMPLICIT_GEMM_IMMA_NCHW64_UINT4_INT4)
private:
    DTypeEnum src_dtype() const override { return DTypeEnum::Quantized4Asymm; }
    std::tuple<void*, void*> prepare_filter_bias(
            const ExecArgs& args) const override;
    std::tuple<float, float, float, float, float> get_constants(
            const ExecArgs& args) const override;
    void do_exec(const ExecArgs& args, void* filter_ptr, void* bias_ptr,
                 void* z_ptr, convolution::ConvParam kern_param,
                 uint32_t nonlinear_mode, float alpha, float beta, float gamma,
                 float delta, float theta, cudaStream_t stream) const override;
    void update_bias(const ExecArgs& args, void* updated_bias,
                     void* reduce_filter_ptr, void* reduce_workspace) const;
};

class ConvBiasForwardImpl::AlgoInt4NHWCIMMAImplicitGemmBase : public AlgoBase {
public:
    struct AlgoParam {
        int threadblock_m;
        int threadblock_n;
        int threadblock_k;
        int warp_m;
        int warp_n;
        int warp_k;
        int access_size;
    };

    AlgoInt4NHWCIMMAImplicitGemmBase(AlgoParam algo_param)
            : m_algo_param(algo_param) {}

    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    const char* name() const override { return m_name.c_str(); }
    std::string param() const override;
    bool is_available(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;
    std::string to_string(AlgoParam algo_param);

protected:
    virtual DTypeEnum src_dtype() const = 0;
    // return filter_ptr, bias_ptr
    virtual std::tuple<void*, void*> prepare_filter_bias(
            const ExecArgs& args) const = 0;
    // return alpha, beta, gamma, delta, theta
    virtual std::tuple<float, float, float, float, float> get_constants(
            const ExecArgs& args) const = 0;
    virtual void do_exec(const ExecArgs& args, void* filter_ptr, void* bias_ptr,
                         void* z_ptr, convolution::ConvParam kern_param,
                         uint32_t nonlinear_mode, float alpha, float beta,
                         float gamma, float delta, float theta,
                         cudaStream_t stream) const = 0;
    void reorder_filter(const ExecArgs& args, int interleaved,
                        void* reordered_filter) const;

    std::string m_name;
    AlgoParam m_algo_param;
};
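// Editorial note: the two concrete algorithms that follow differ only in the
// source dtype they accept (QuantizedS4 vs Quantized4Asymm) and in the extra
// zero-point handling the asymmetric variant needs (the bias update and the
// delta/theta constants); everything else is shared through this base class.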
class ConvBiasForwardImpl::AlgoInt4Int4NHWCIMMAImplicitGemm final
        : public AlgoInt4NHWCIMMAImplicitGemmBase {
public:
    using Base = AlgoInt4NHWCIMMAImplicitGemmBase;
    using AlgoParam = Base::AlgoParam;

    AlgoInt4Int4NHWCIMMAImplicitGemm(AlgoParam algo_param) : Base{algo_param} {
        m_name = ConvBias::algo_name<ConvBias::DirectParam>(
                ssprintf("INT4_INT4_NHWC_IMMA_IMPLICIT_GEMM_%s",
                         to_string(m_algo_param).c_str()),
                ConvBias::DirectParam{});
    }

    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    size_t get_preprocess_workspace_in_bytes(
            const SizeArgs& args) const override;
    SmallVector<TensorLayout> deduce_preprocessed_filter_layout(
            const SizeArgs& args) const override;
    void exec_preprocess(const ExecArgs& args) const override;

    MEGDNN_DECL_ALGO_TYPE(CUDA_IMPLICIT_GEMM_IMMA_NHWC_INT4_INT4)

private:
    DTypeEnum src_dtype() const override { return DTypeEnum::QuantizedS4; }
    std::tuple<void*, void*> prepare_filter_bias(
            const ExecArgs& args) const override;
    std::tuple<float, float, float, float, float> get_constants(
            const ExecArgs& args) const override;
    void do_exec(const ExecArgs& args, void* filter_ptr, void* bias_ptr,
                 void* z_ptr, convolution::ConvParam kern_param,
                 uint32_t nonlinear_mode, float alpha, float beta, float gamma,
                 float delta, float theta, cudaStream_t stream) const override;
};

class ConvBiasForwardImpl::AlgoUInt4Int4NHWCIMMAImplicitGemm final
        : public AlgoInt4NHWCIMMAImplicitGemmBase {
public:
    using Base = AlgoInt4NHWCIMMAImplicitGemmBase;
    using AlgoParam = Base::AlgoParam;

    AlgoUInt4Int4NHWCIMMAImplicitGemm(AlgoParam algo_param) : Base{algo_param} {
        m_name = ConvBias::algo_name<ConvBias::DirectParam>(
                ssprintf("UINT4_INT4_NHWC_IMMA_IMPLICIT_GEMM_%s",
                         to_string(m_algo_param).c_str()),
                ConvBias::DirectParam{});
    }

    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    size_t get_preprocess_workspace_in_bytes(
            const SizeArgs& args) const override;
    SmallVector<TensorLayout> deduce_preprocessed_filter_layout(
            const SizeArgs& args) const override;
    void exec_preprocess(const ExecArgs& args) const override;

    MEGDNN_DECL_ALGO_TYPE(CUDA_IMPLICIT_GEMM_IMMA_NHWC_UINT4_INT4)

private:
    DTypeEnum src_dtype() const override { return DTypeEnum::Quantized4Asymm; }

@@ -955,6 +1084,8 @@ public:
    std::vector<AlgoInt8NCHW32IMMAImplicitGemm> int8_nchw32_imma;
    std::vector<AlgoInt4Int4NCHW64IMMAImplicitGemm> int4_int4_nchw64_imma;
    std::vector<AlgoUInt4Int4NCHW64IMMAImplicitGemm> uint4_int4_nchw64_imma;
    std::vector<AlgoInt4Int4NHWCIMMAImplicitGemm> int4_int4_nhwc_imma;
    std::vector<AlgoUInt4Int4NHWCIMMAImplicitGemm> uint4_int4_nhwc_imma;
#endif
    std::vector<std::unique_ptr<AlgoGroupConvGeneral>> gconv_refhold;
    AlgoBFloat16 bfloat16;
@@ -321,7 +321,8 @@ void megdnn::cuda::cutlass_wrapper::
                ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \
                cutlass::conv::threadblock:: \
                        ConvolutionFpropNCxHWxThreadblockSwizzle, \
-               stage_, 4, aligned_, NeedLoadFromConstMem>; \
+               stage_, 4, aligned_, NeedLoadFromConstMem, \
+               cutlass::arch::OpMultiplyAdd>; \
        typename Convolution::ConvolutionParameter conv_param( \
                param.n, param.hi, param.wi, param.ci, param.co, param.fh, \
                param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \

@@ -582,7 +583,8 @@ void megdnn::cuda::cutlass_wrapper::
                ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \
                cutlass::conv::threadblock:: \
                        ConvolutionFpropNCxHWxThreadblockSwizzle, \
-               stages_, 4, aligned_, NeedLoadFromConstMem>; \
+               stages_, 4, aligned_, NeedLoadFromConstMem, \
+               cutlass::arch::OpMultiplyAdd>; \
        typename Convolution::ConvolutionParameter conv_param( \
                param.n, param.hi, param.wi, param.ci, param.co, param.fh, \
                param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \

@@ -1037,4 +1039,262 @@ INST(true);
INST(false);
#undef INST
/* ====== cutlass kernel wrapper for int4 x int4 nhwc layout ====== */
#if MEGDNN_TEGRA_X1
template <bool NeedLoadFromConstMem>
void megdnn::cuda::cutlass_wrapper::
        do_conv_bias_int4_int4_implicit_gemm_imma_nhwc(
                const int8_t* /* d_src */, const int8_t* /* d_filter */,
                const int32_t* /* d_bias */, const int8_t* /* d_z */,
                int8_t* /* d_dst */, int* /* workspace */,
                const convolution::ConvParam& /* param */,
                uint32_t /* nonlinear_mode */, float /* alpha */,
                float /* beta */, float /* gamma */, float /* scale */,
                const GemmCoord& /* threadblock_shape */,
                const GemmCoord& /* warp_shape */,
                const int32_t /* access_size */, cudaStream_t /* stream */) {}
#else
template <bool NeedLoadFromConstMem>
void megdnn::cuda::cutlass_wrapper::
        do_conv_bias_int4_int4_implicit_gemm_imma_nhwc(
                const int8_t* d_src, const int8_t* d_filter,
                const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst,
                int* workspace, const convolution::ConvParam& param,
                uint32_t nonlinear_mode, float alpha, float beta, float gamma,
                float scale, const GemmCoord& threadblock_shape,
                const GemmCoord& warp_shape, const int32_t access_size,
                cudaStream_t stream) {
#define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \
                                        threadblock_k_, warp_m_, warp_n_, \
                                        warp_k_, access_size_) \
    if (threadblock_shape.m() == threadblock_m_ && \
        threadblock_shape.n() == threadblock_n_ && \
        threadblock_shape.k() == threadblock_k_ && \
        warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \
        warp_shape.k() == warp_k_ && access_size == access_size_) { \
        using ThreadBlockShape = \
                cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \
                                         threadblock_k_>; \
        using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \
        using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; \
        using Convolution = cutlass::conv::device::Convolution< \
                cutlass::int4b_t, cutlass::layout::TensorNHWC, \
                cutlass::int4b_t, cutlass::layout::TensorNCxHWx<access_size_>, \
                ElementOutput, cutlass::layout::TensorNHWC, int32_t, \
                cutlass::layout::TensorNHWC, int32_t, \
                cutlass::conv::ConvType::kConvolution, \
                cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, \
                ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \
                cutlass::conv::threadblock:: \
                        ConvolutionFpropNHWCThreadblockSwizzle, \
                2, access_size_, access_size_, NeedLoadFromConstMem, \
                cutlass::arch::OpMultiplyAddSaturate, \
                cutlass::conv::ImplicitGemmMode::GEMM_TN>; \
        typename Convolution::ConvolutionParameter conv_param( \
                param.n, param.hi, param.wi, param.ci, param.co, param.fh, \
                param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \
                param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \
        return cutlass_convolution_wrapper<Convolution>( \
                reinterpret_cast<const cutlass::int4b_t*>(d_src), \
                reinterpret_cast<const cutlass::int4b_t*>(d_filter), d_bias, \
                reinterpret_cast<const cutlass::int4b_t*>(d_z), \
                reinterpret_cast<cutlass::int4b_t*>(d_dst), workspace, \
                conv_param, epilogue, stream); \
    }
#define DISPATCH_KERNEL \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 64, 64, 32, 64, 32); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 64, 64, 32, 64, 16); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 64, 64, 32, 64, 8); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 64, 64, 32); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 64, 64, 16); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 64, 64, 8); \
    megdnn_assert(false, \
                  "unsupported threadblock shape (%dx%dx%d) and warp shape " \
                  "(%dx%dx%d) and access_size (%d)", \
                  threadblock_shape.m(), threadblock_shape.n(), \
                  threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \
                  warp_shape.k(), access_size);
    using ElementOutput = cutlass::int4b_t;
    using ElementAccumulator = int32_t;
    using ElementBias = int32_t;
    using ElementCompute = float;
    using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode;
    switch (nonlinear_mode) {
        case NonlineMode::IDENTITY: {
            using EpilogueOp =
                    cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
                            ElementOutput, 8, ElementAccumulator, ElementBias,
                            ElementCompute>;
            typename EpilogueOp::Params epilogue{alpha, beta, gamma};
            DISPATCH_KERNEL;
        }
        case NonlineMode::RELU: {
            using EpilogueOp = cutlass::epilogue::thread::
                    BiasAddLinearCombinationReluClamp<
                            ElementOutput, 8, ElementAccumulator, ElementBias,
                            ElementCompute>;
            typename EpilogueOp::Params epilogue{alpha, beta, gamma, 0};
            DISPATCH_KERNEL;
        }
        case NonlineMode::H_SWISH: {
            using EpilogueOp = cutlass::epilogue::thread::
                    BiasAddLinearCombinationHSwishClamp<
                            ElementOutput, 8, ElementAccumulator, ElementBias,
                            ElementCompute>;
            typename EpilogueOp::Params epilogue{alpha, beta, gamma, scale};
            DISPATCH_KERNEL;
        }
        default:
            megdnn_assert(false,
                          "unsupported nonlinear mode for conv bias operator");
    }
#undef DISPATCH_KERNEL_WITH_TILE_SHAPE
#undef DISPATCH_KERNEL
}
#endif

#define INST(need_load_from_const_mem) \
    template void megdnn::cuda::cutlass_wrapper:: \
            do_conv_bias_int4_int4_implicit_gemm_imma_nhwc< \
                    need_load_from_const_mem>( \
                    const int8_t* d_src, const int8_t* d_filter, \
                    const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, \
                    int* workspace, const convolution::ConvParam& param, \
                    uint32_t nonlinear_mode, float alpha, float beta, \
                    float gamma, float scale, \
                    const GemmCoord& threadblock_shape, \
                    const GemmCoord& warp_shape, const int32_t access_size, \
                    cudaStream_t stream);
INST(true);
INST(false);
#undef INST
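// Editorial note: the DISPATCH_KERNEL table above must stay in sync with the
// AlgoParam list registered in fill_imma_algos(); a threadblock/warp shape or
// access_size without a matching DISPATCH_KERNEL_WITH_TILE_SHAPE entry falls
// through to the megdnn_assert.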
#if MEGDNN_TEGRA_X1
template <bool NeedLoadFromConstMem>
void megdnn::cuda::cutlass_wrapper::
        do_conv_bias_uint4_int4_implicit_gemm_imma_nhwc(
                const uint8_t* /* d_src */, const int8_t* /* d_filter */,
                const int32_t* /* d_bias */, const uint8_t* /* d_z */,
                uint8_t* /* d_dst */, int* /* workspace */,
                const convolution::ConvParam& /* param */,
                uint32_t /* nonlinear_mode */, float /* alpha */,
                float /* beta */, float /* gamma */, float /* delta */,
                float /* theta */, float /* scale */,
                uint8_t /* src_zero_point */,
                const GemmCoord& /* threadblock_shape */,
                const GemmCoord& /* warp_shape */,
                const int32_t /* access_size */, cudaStream_t /* stream */) {}
#else
template <bool NeedLoadFromConstMem>
void megdnn::cuda::cutlass_wrapper::
        do_conv_bias_uint4_int4_implicit_gemm_imma_nhwc(
                const uint8_t* d_src, const int8_t* d_filter,
                const int32_t* d_bias, const uint8_t* d_z, uint8_t* d_dst,
                int* workspace, const convolution::ConvParam& param,
                uint32_t nonlinear_mode, float alpha, float beta, float gamma,
                float delta, float theta, float /* scale */,
                uint8_t src_zero_point, const GemmCoord& threadblock_shape,
                const GemmCoord& warp_shape, const int32_t access_size,
                cudaStream_t stream) {
#define DISPATCH_KERNEL_WITH_TILE_SHAPE(threadblock_m_, threadblock_n_, \
                                        threadblock_k_, warp_m_, warp_n_, \
                                        warp_k_, access_size_) \
    if (threadblock_shape.m() == threadblock_m_ && \
        threadblock_shape.n() == threadblock_n_ && \
        threadblock_shape.k() == threadblock_k_ && \
        warp_shape.m() == warp_m_ && warp_shape.n() == warp_n_ && \
        warp_shape.k() == warp_k_ && access_size == access_size_) { \
        using ThreadBlockShape = \
                cutlass::gemm::GemmShape<threadblock_m_, threadblock_n_, \
                                         threadblock_k_>; \
        using WarpShape = cutlass::gemm::GemmShape<warp_m_, warp_n_, warp_k_>; \
        using InstructionShape = cutlass::gemm::GemmShape<8, 8, 32>; \
        using Convolution = cutlass::conv::device::Convolution< \
                cutlass::uint4b_t, cutlass::layout::TensorNHWC, \
                cutlass::int4b_t, cutlass::layout::TensorNCxHWx<access_size_>, \
                ElementOutput, cutlass::layout::TensorNHWC, int32_t, \
                cutlass::layout::TensorNHWC, int32_t, \
                cutlass::conv::ConvType::kConvolution, \
                cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, \
                ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, \
                cutlass::conv::threadblock:: \
                        ConvolutionFpropNHWCThreadblockSwizzle, \
                2, access_size_, access_size_, NeedLoadFromConstMem, \
                cutlass::arch::OpMultiplyAddSaturate, \
                cutlass::conv::ImplicitGemmMode::GEMM_TN>; \
        typename Convolution::ConvolutionParameter conv_param( \
                param.n, param.hi, param.wi, param.ci, param.co, param.fh, \
                param.fw, param.ho, param.wo, param.ph, param.pw, param.sh, \
                param.sw, 1, 1, cutlass::conv::Mode::kCrossCorrelation); \
        return cutlass_convolution_wrapper<Convolution>( \
                reinterpret_cast<const cutlass::uint4b_t*>(d_src), \
                reinterpret_cast<const cutlass::int4b_t*>(d_filter), d_bias, \
                reinterpret_cast<const cutlass::uint4b_t*>(d_z), \
                reinterpret_cast<cutlass::uint4b_t*>(d_dst), workspace, \
                conv_param, epilogue, stream, {src_zero_point}); \
    }
#define DISPATCH_KERNEL \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 64, 64, 32, 64, 32); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 64, 64, 32, 64, 16); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 32, 64, 64, 32, 64, 8); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 64, 64, 32); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 64, 64, 16); \
    DISPATCH_KERNEL_WITH_TILE_SHAPE(128, 64, 64, 64, 64, 64, 8); \
    megdnn_assert(false, \
                  "unsupported threadblock shape (%dx%dx%d) and warp shape " \
                  "(%dx%dx%d) and access_size (%d)", \
                  threadblock_shape.m(), threadblock_shape.n(), \
                  threadblock_shape.k(), warp_shape.m(), warp_shape.n(), \
                  warp_shape.k(), access_size);
    using ElementOutput = cutlass::uint4b_t;
    using ElementAccumulator = int32_t;
    using ElementBias = int32_t;
    using ElementCompute = float;
    using NonlineMode = megdnn::param_enumv::ConvBias::NonlineMode;
    switch (nonlinear_mode) {
        case NonlineMode::IDENTITY: {
            using EpilogueOp =
                    cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
                            ElementOutput, 8, ElementAccumulator, ElementBias,
                            ElementCompute>;
            typename EpilogueOp::Params epilogue{alpha, beta, gamma,
                                                 delta + theta};
            DISPATCH_KERNEL;
        }
        case NonlineMode::RELU: {
            using EpilogueOp = cutlass::epilogue::thread::
                    BiasAddLinearCombinationReluClamp<
                            ElementOutput, 8, ElementAccumulator, ElementBias,
                            ElementCompute>;
            typename EpilogueOp::Params epilogue{alpha, beta, gamma,
                                                 0,     delta, theta};
            DISPATCH_KERNEL;
        }
        default:
            megdnn_assert(false,
                          "unsupported nonlinear mode for conv bias operator");
    }
#undef DISPATCH_KERNEL_WITH_TILE_SHAPE
#undef DISPATCH_KERNEL
}
#endif

#define INST(need_load_from_const_mem) \
    template void megdnn::cuda::cutlass_wrapper:: \
            do_conv_bias_uint4_int4_implicit_gemm_imma_nhwc< \
                    need_load_from_const_mem>( \
                    const uint8_t* d_src, const int8_t* d_filter, \
                    const int32_t* d_bias, const uint8_t* d_z, uint8_t* d_dst, \
                    int* workspace, const convolution::ConvParam& param, \
                    uint32_t nonlinear_mode, float alpha, float beta, \
                    float gamma, float delta, float theta, float scale, \
                    uint8_t src_zero_point, \
                    const GemmCoord& threadblock_shape, \
                    const GemmCoord& warp_shape, const int32_t access_size, \
                    cudaStream_t stream);
INST(true);
INST(false);
#undef INST

// vim: syntax=cuda.doxygen
@@ -103,6 +103,25 @@ void do_conv_bias_int8_implicit_gemm_dp4a_ncdiv4hw4_nhwc(
        float scale, const GemmCoord& threadblock_shape,
        const GemmCoord& warp_shape, int stages, cudaStream_t stream);

template <bool NeedLoadFromConstMem>
void do_conv_bias_int4_int4_implicit_gemm_imma_nhwc(
        const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const int8_t* d_z, int8_t* d_dst, int* workspace,
        const convolution::ConvParam& param, uint32_t nonlinear_mode,
        float alpha, float beta, float gamma, float scale,
        const GemmCoord& threadblock_shape, const GemmCoord& warp_shape,
        const int32_t access_size, cudaStream_t stream);

template <bool NeedLoadFromConstMem>
void do_conv_bias_uint4_int4_implicit_gemm_imma_nhwc(
        const uint8_t* d_src, const int8_t* d_filter, const int32_t* d_bias,
        const uint8_t* d_z, uint8_t* d_dst, int* workspace,
        const convolution::ConvParam& param, uint32_t nonlinear_mode,
        float alpha, float beta, float gamma, float delta, float theta,
        float scale, uint8_t src_zero_point, const GemmCoord& threadblock_shape,
        const GemmCoord& warp_shape, const int32_t access_size,
        cudaStream_t stream);

}  // namespace cutlass_wrapper
}  // namespace cuda
}  // namespace megdnn
@@ -0,0 +1,122 @@
/**
 * \file dnn/src/cuda/conv_bias/implicit_gemm_int4_int4_nhwc_imma.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.
 */

#include "./algo.h"
#include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh"

using namespace megdnn;
using namespace cuda;
using namespace convolution;

#if CUDA_VERSION >= 10020
size_t
ConvBiasForwardImpl::AlgoInt4Int4NHWCIMMAImplicitGemm::get_workspace_in_bytes(
        const SizeArgs& args) const {
    if (args.preprocessed_filter) {
        return 0;
    } else {
        return args.filter_layout->span().dist_byte();
    }
}

size_t ConvBiasForwardImpl::AlgoInt4Int4NHWCIMMAImplicitGemm::
        get_preprocess_workspace_in_bytes(const SizeArgs& args) const {
    return 0;
}

SmallVector<TensorLayout> ConvBiasForwardImpl::
        AlgoInt4Int4NHWCIMMAImplicitGemm::deduce_preprocessed_filter_layout(
                const SizeArgs& args) const {
    return {args.filter_layout->collapse_contiguous()};
}

void ConvBiasForwardImpl::AlgoInt4Int4NHWCIMMAImplicitGemm::exec_preprocess(
        const ExecArgs& args) const {
    megdnn_assert(args.preprocessed_filter->tensors.size() == 1);
    void* filter_ptr = args.preprocessed_filter->tensors[0].raw_ptr;
    reorder_filter(args, m_algo_param.access_size, filter_ptr);
}

std::tuple<void*, void*>
ConvBiasForwardImpl::AlgoInt4Int4NHWCIMMAImplicitGemm::prepare_filter_bias(
        const ExecArgs& args) const {
    void* filter_ptr = nullptr;
    if (args.preprocessed_filter) {
        megdnn_assert(args.preprocessed_filter->tensors.size() == 1);
        filter_ptr = args.preprocessed_filter->tensors[0].raw_ptr;
    } else {
        filter_ptr = reinterpret_cast<void*>(args.workspace.raw_ptr);
        reorder_filter(args, m_algo_param.access_size, filter_ptr);
    }
    void* bias_ptr = args.bias_tensor->raw_ptr;
    return {filter_ptr, bias_ptr};
}

std::tuple<float, float, float, float, float>
ConvBiasForwardImpl::AlgoInt4Int4NHWCIMMAImplicitGemm::get_constants(
        const ExecArgs& args) const {
    float src_scale = args.src_layout->dtype.param<dtype::QuantizedS4>().scale,
          filter_scale =
                  args.filter_layout->dtype.param<dtype::QuantizedS4>().scale,
          bias_scale =
                  args.bias_layout->dtype.param<dtype::QuantizedS32>().scale,
          dst_scale = args.dst_layout->dtype.param<dtype::QuantizedS4>().scale;
    float alpha = src_scale * filter_scale / dst_scale,
          beta = bias_scale / dst_scale, gamma = 0.f, delta = 0.f, theta = 0.f;
    if (args.z_layout->ndim > 0) {
        float z_scale = args.z_layout->dtype.param<dtype::QuantizedS4>().scale;
        gamma = z_scale / dst_scale;
    }
    return {alpha, beta, gamma, delta, theta};
}
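// Editorial note: these constants follow the usual dequantize-requantize
// argument: dst_q * s_dst ~= acc * (s_src * s_filter) + bias_q * s_bias
// + z_q * s_z, so dividing by s_dst gives alpha = s_src * s_filter / s_dst,
// beta = s_bias / s_dst and gamma = s_z / s_dst; delta and theta stay zero for
// the symmetric int4 case and are only used by the asymmetric variant.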
void ConvBiasForwardImpl::AlgoInt4Int4NHWCIMMAImplicitGemm::do_exec(
        const ExecArgs& args, void* filter_ptr, void* bias_ptr, void* z_ptr,
        ConvParam kern_param, uint32_t nonlinear_mode, float alpha, float beta,
        float gamma, float delta, float theta, cudaStream_t stream) const {
    float dst_scale = args.dst_layout->dtype.param<dtype::QuantizedS4>().scale;
    cutlass_wrapper::GemmCoord threadblock_shape{m_algo_param.threadblock_m,
                                                 m_algo_param.threadblock_n,
                                                 m_algo_param.threadblock_k};
    cutlass_wrapper::GemmCoord warp_shape{
            m_algo_param.warp_m, m_algo_param.warp_n, m_algo_param.warp_k};
    if (kern_param.fh == 1 && kern_param.fw == 1) {
        cutlass_wrapper::do_conv_bias_int4_int4_implicit_gemm_imma_nhwc<false>(
                reinterpret_cast<int8_t*>(args.src_tensor->raw_ptr),
                reinterpret_cast<int8_t*>(filter_ptr),
                reinterpret_cast<int32_t*>(bias_ptr),
                reinterpret_cast<int8_t*>(z_ptr),
                reinterpret_cast<int8_t*>(args.dst_tensor->raw_ptr), nullptr,
                kern_param, nonlinear_mode, alpha, beta, gamma, dst_scale,
                threadblock_shape, warp_shape, m_algo_param.access_size,
                stream);
    } else {
        cutlass_wrapper::do_conv_bias_int4_int4_implicit_gemm_imma_nhwc<true>(
                reinterpret_cast<int8_t*>(args.src_tensor->raw_ptr),
                reinterpret_cast<int8_t*>(filter_ptr),
                reinterpret_cast<int32_t*>(bias_ptr),
                reinterpret_cast<int8_t*>(z_ptr),
                reinterpret_cast<int8_t*>(args.dst_tensor->raw_ptr), nullptr,
                kern_param, nonlinear_mode, alpha, beta, gamma, dst_scale,
                threadblock_shape, warp_shape, m_algo_param.access_size,
                stream);
    }
}
#endif

// vim: syntax=cpp.doxygen
@@ -60,9 +60,22 @@ bool ConvBiasForwardImpl::AlgoInt4NCHW64IMMAImplicitGemmBase::is_available(
        args.dst_layout->dtype.enumv() != src_dtype())
        return false;

    // uint4 does not support the H_SWISH activation
    if (src_dtype() == DTypeEnum::Quantized4Asymm &&
        param.nonlineMode == NonlineMode::H_SWISH)
        return false;

    if (!is_compute_capability_required(7, 5))
        return false;

    size_t fh = args.filter_layout->operator[](1),
           fw = args.filter_layout->operator[](2);
    // param buffer size is 4K, use 3.4K to store precomputed offset
    size_t kMaxFilterPixels = 848 / (2 * m_algo_param.warp_k / 64) - 2;
    if (fh * fw > kMaxFilterPixels)
        return false;

    return true;
}
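A quick sanity check of the new bound (editorial note): 3.4KB of the 4KB parameter buffer holds roughly 3.4 * 1024 / 4 ≈ 870 int32 offsets, rounded down to 848 in the code; with warp_k = 64 this allows 848 / (2 * 64 / 64) - 2 = 422 filter pixels, i.e. filters up to about 20x20.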
@@ -108,7 +121,7 @@ void ConvBiasForwardImpl::AlgoInt4NCHW64IMMAImplicitGemmBase::exec(
std::string ConvBiasForwardImpl::AlgoInt4NCHW64IMMAImplicitGemmBase::to_string(
        AlgoParam algo_param) {
-    return ssprintf("%uX%uX%u_%uX%uX%u", algo_param.threadblock_m,
+    return ssprintf("%dX%dX%d_%dX%dX%d", algo_param.threadblock_m,
                    algo_param.threadblock_n, algo_param.threadblock_k,
                    algo_param.warp_m, algo_param.warp_n, algo_param.warp_k);
}
@@ -0,0 +1,159 @@
/**
 * \file dnn/src/cuda/conv_bias/implicit_gemm_int4_nhwc_imma_base.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.
 */
#include "./algo.h"
#include "src/common/conv_bias.h"
#include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh"
#include "src/cuda/conv_bias/reduce_filter.cuh"
#include "src/cuda/convolution_helper/parameter.cuh"
#include "src/cuda/utils.h"

using namespace megdnn;
using namespace cuda;
using namespace convolution;

#if CUDA_VERSION >= 10020
std::string ConvBiasForwardImpl::AlgoInt4NHWCIMMAImplicitGemmBase::param()
        const {
    std::string ret;
    serialize_write_pod(m_algo_param, ret);
    return ret;
}

bool ConvBiasForwardImpl::AlgoInt4NHWCIMMAImplicitGemmBase::is_available(
        const SizeArgs& args) const {
    if (args.bias_layout->ndim <= 0)
        return false;
    using Param = param::ConvBias;
    using Format = Param::Format;
    using Sparse = Param::Sparse;
    using Mode = Param::Mode;
    using NonlineMode = megdnn::param::ConvBias::NonlineMode;
    auto&& param = args.opr->param();
    if (!check_bias_share_in_channel(*(args.bias_layout), param.format))
        return false;
    if (param.format != Format::NHWC || param.sparse != Sparse::DENSE ||
        param.mode != Mode::CROSS_CORRELATION)
        return false;
    if (param.nonlineMode != NonlineMode::IDENTITY &&
        param.nonlineMode != NonlineMode::RELU &&
        param.nonlineMode != NonlineMode::H_SWISH)
        return false;
    if (args.src_layout->dtype.enumv() != src_dtype() ||
        args.filter_layout->dtype.enumv() != DTypeEnum::QuantizedS4 ||
        args.bias_layout->dtype.enumv() != DTypeEnum::QuantizedS32 ||
        args.dst_layout->dtype.enumv() != src_dtype())
        return false;
    // uint4 does not support the H_SWISH activation
    if (src_dtype() == DTypeEnum::Quantized4Asymm &&
        param.nonlineMode == NonlineMode::H_SWISH)
        return false;
    if (!is_compute_capability_required(7, 5))
        return false;
    size_t co = args.filter_layout->operator[](0),
           ci = args.filter_layout->operator[](3),
           fh = args.filter_layout->operator[](1),
           fw = args.filter_layout->operator[](2);
    // param buffer size is 4K, use 3.4K to store precomputed offset
    size_t kMaxFilterPixels =
            848 / (m_algo_param.warp_k / m_algo_param.access_size) - 1;
    if (fh * fw > kMaxFilterPixels)
        return false;
    // co should be aligned to 8, and ci should be aligned to
    // algo_param.access_size
    if ((co % 8 != 0) || (ci % m_algo_param.access_size != 0))
        return false;
    return true;
}
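// Editorial note: with warp_k == 64 the bound above evaluates to
// 848 / (64 / access_size) - 1, i.e. 423 filter pixels for access_size == 32
// but only 105 for access_size == 8, so a narrower global-memory access also
// restricts the largest supported filter.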
void ConvBiasForwardImpl::AlgoInt4NHWCIMMAImplicitGemmBase::exec(
        const ExecArgs& args) const {
    auto&& param = args.opr->param();
    auto&& fm = args.filter_meta;
    size_t n = args.src_layout->operator[](0),
           ci = args.src_layout->operator[](3),
           hi = args.src_layout->operator[](1),
           wi = args.src_layout->operator[](2);
    size_t co = args.dst_layout->operator[](3),
           ho = args.dst_layout->operator[](1),
           wo = args.dst_layout->operator[](2);
    UNPACK_CONV_PARAMETER(fm, param);
    MARK_USED_VAR
    void* filter_ptr = nullptr;
    void* bias_ptr = nullptr;
    void* z_ptr = nullptr;
    std::tie(filter_ptr, bias_ptr) = prepare_filter_bias(args);
    if (args.z_layout->ndim > 0)
        z_ptr = args.z_tensor->raw_ptr;

    float alpha, beta, gamma, delta, theta;
    std::tie(alpha, beta, gamma, delta, theta) = get_constants(args);

    ConvParam kern_param;
    kern_param.n = n, kern_param.co = co, kern_param.ci = ci,
    kern_param.hi = hi, kern_param.wi = wi, kern_param.ho = ho,
    kern_param.wo = wo, kern_param.ph = ph, kern_param.pw = pw,
    kern_param.sh = sh, kern_param.sw = sw, kern_param.fh = fh,
    kern_param.fw = fw;

    uint32_t nonlinear_mode = static_cast<uint32_t>(param.nonlineMode);
    cudaStream_t stream = cuda_stream(args.opr->handle());

    do_exec(args, filter_ptr, bias_ptr, z_ptr, kern_param, nonlinear_mode,
            alpha, beta, gamma, delta, theta, stream);
}

std::string ConvBiasForwardImpl::AlgoInt4NHWCIMMAImplicitGemmBase::to_string(
        AlgoParam algo_param) {
    return ssprintf("%dX%dX%d_%dX%dX%d_%d", algo_param.threadblock_m,
                    algo_param.threadblock_n, algo_param.threadblock_k,
                    algo_param.warp_m, algo_param.warp_n, algo_param.warp_k,
                    algo_param.access_size);
}
void ConvBiasForwardImpl::AlgoInt4NHWCIMMAImplicitGemmBase::reorder_filter(
        const ExecArgs& args, const int interleaved,
        void* reordered_filter) const {
    size_t co = args.filter_layout->operator[](0),
           ci = args.filter_layout->operator[](3),
           fh = args.filter_layout->operator[](1),
           fw = args.filter_layout->operator[](2);

    // reformat the filter from nhwc to ncxhwx
    TensorLayout exec_src{
            {co, fh, fw, ci / interleaved, (size_t)interleaved / 2},
            dtype::Int8()};
    TensorLayout exec_dst{
            {co, ci / interleaved, fh, fw, (size_t)interleaved / 2},
            dtype::Int8()};

    exec_src = exec_src.dimshuffle({0, 3, 1, 2, 4});

    auto&& relayout = args.opr->handle()->create_operator<RelayoutForward>();
    relayout->exec({args.filter_tensor->raw_ptr, exec_src},
                   {reordered_filter, exec_dst});
}
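// Editorial note: two 4-bit values share one byte, which is why the relayout
// above views the filter as Int8 with an innermost dimension of
// interleaved / 2 bytes; the dimshuffle {0, 3, 1, 2, 4} then turns the
// (co, fh, fw, ci/interleaved, interleaved/2) view of the NHWC filter into the
// NCxHWx order expected by the kernels.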
#endif

// vim: syntax=cpp.doxygen
@@ -75,8 +75,9 @@ bool ConvBiasForwardImpl::AlgoInt8NCHW32IMMAImplicitGemm::is_available(
    // only support sm_75 or later, platform should have tensorcore int8
    // support
    available &= is_compute_capability_required(7, 5);
-    // FIXME: too large filter size is not supported now
-    available &= fh * fw <= 49;
+    // FIXME: too large filter size is not supported now
+    size_t kMaxFilterPixels = 848 / (2 * m_algo_param.warp_k / 32) - 2;
+    available &= fh * fw <= kMaxFilterPixels;

    return available;
}
@@ -95,8 +95,10 @@ bool ConvBiasForwardImpl::AlgoInt8NCHW4DotProdImplicitGemm::is_available(
    // only support sm_61 or later, platform should have fast native int8
    // support
    available &= is_compute_capability_required(6, 1);
-    // FIXME: too large filter size is not supported now
-    available &= fh * fw <= 49;
+    // FIXME: too large filter size is not supported now
+    size_t kMaxFilterPixels = 848 / (2 * m_algo_param.warp_k / 4) - 2;
+    available &= fh * fw <= kMaxFilterPixels;

    return available;
}
@@ -0,0 +1,186 @@
/**
 * \file dnn/src/cuda/conv_bias/implicit_gemm_uint4_int4_nhwc_imma.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.
 */

#include "./algo.h"
#include "src/cuda/conv_bias/cutlass_convolution_wrapper.cuh"
#include "src/cuda/conv_bias/reduce_filter.cuh"
#include "src/cuda/utils.h"

using namespace megdnn;
using namespace cuda;
using namespace convolution;

#if CUDA_VERSION >= 10020
size_t
ConvBiasForwardImpl::AlgoUInt4Int4NHWCIMMAImplicitGemm::get_workspace_in_bytes(
        const SizeArgs& args) const {
    if (args.preprocessed_filter) {
        return 0;
    } else {
        size_t ws_filter = args.filter_layout->span().dist_byte(),
               ws_bias = args.bias_layout->span().dist_byte(),
               ws_reduce_filter = get_preprocess_workspace_in_bytes(args);
        return ws_filter + ws_bias + ws_reduce_filter;
    }
}

size_t ConvBiasForwardImpl::AlgoUInt4Int4NHWCIMMAImplicitGemm::
        get_preprocess_workspace_in_bytes(const SizeArgs& args) const {
    size_t co = args.filter_layout->operator[](0),
           ci = args.filter_layout->operator[](3),
           fh = args.filter_layout->operator[](1),
           fw = args.filter_layout->operator[](2);
    size_t ws_size_reduce_filter = co * sizeof(int32_t);
    size_t A = co, B = ci * fh * fw / 8, C = 1;
    ws_size_reduce_filter += do_dispatch_reduce_workspace_in_bytes(A, B, C);
    return ws_size_reduce_filter;
}

SmallVector<TensorLayout> ConvBiasForwardImpl::
        AlgoUInt4Int4NHWCIMMAImplicitGemm::deduce_preprocessed_filter_layout(
                const SizeArgs& args) const {
    return {args.filter_layout->collapse_contiguous(),
            args.bias_layout->collapse_contiguous()};
}

void ConvBiasForwardImpl::AlgoUInt4Int4NHWCIMMAImplicitGemm::exec_preprocess(
        const ExecArgs& args) const {
    megdnn_assert(args.preprocessed_filter->tensors.size() == 2);
    void* filter_ptr = args.preprocessed_filter->tensors[0].raw_ptr;
    void* bias_ptr = args.preprocessed_filter->tensors[1].raw_ptr;
    void* reduce_filter_ptr = reinterpret_cast<void*>(args.workspace.raw_ptr);
    void* reduce_workspace = reinterpret_cast<void*>(
            args.workspace.raw_ptr + args.bias_layout->span().dist_byte());
    reorder_filter(args, m_algo_param.access_size, filter_ptr);
    update_bias(args, bias_ptr, reduce_filter_ptr, reduce_workspace);
}

std::tuple<void*, void*>
ConvBiasForwardImpl::AlgoUInt4Int4NHWCIMMAImplicitGemm::prepare_filter_bias(
        const ExecArgs& args) const {
    void* filter_ptr = nullptr;
    void* bias_ptr = nullptr;
    if (args.preprocessed_filter) {
        megdnn_assert(args.preprocessed_filter->tensors.size() == 2);
        filter_ptr = args.preprocessed_filter->tensors[0].raw_ptr;
        bias_ptr = args.preprocessed_filter->tensors[1].raw_ptr;
        return {filter_ptr, bias_ptr};
    } else {
        filter_ptr = reinterpret_cast<void*>(args.workspace.raw_ptr);
        bias_ptr =
                reinterpret_cast<void*>(args.workspace.raw_ptr +
                                        args.filter_layout->span().dist_byte());
        void* reduce_filter_ptr =
                reinterpret_cast<void*>(args.workspace.raw_ptr +
                                        args.filter_layout->span().dist_byte() +
                                        args.bias_layout->span().dist_byte());
        void* reduce_workspace =
                reinterpret_cast<void*>(args.workspace.raw_ptr +
                                        args.filter_layout->span().dist_byte() +
                                        args.bias_layout->span().dist_byte() +
                                        args.bias_layout->span().dist_byte());
        reorder_filter(args, m_algo_param.access_size, filter_ptr);
        update_bias(args, bias_ptr, reduce_filter_ptr, reduce_workspace);
    }
    return {filter_ptr, bias_ptr};
}

std::tuple<float, float, float, float, float>
ConvBiasForwardImpl::AlgoUInt4Int4NHWCIMMAImplicitGemm::get_constants(
        const ExecArgs& args) const {
    float src_scale =
                  args.src_layout->dtype.param<dtype::Quantized4Asymm>().scale,
          filter_scale =
                  args.filter_layout->dtype.param<dtype::QuantizedS4>().scale,
          bias_scale =
                  args.bias_layout->dtype.param<dtype::QuantizedS32>().scale,
          dst_scale =
                  args.dst_layout->dtype.param<dtype::Quantized4Asymm>().scale;
    uint8_t dst_zero =
            args.dst_layout->dtype.param<dtype::Quantized4Asymm>().zero_point;
    float alpha = src_scale * filter_scale / dst_scale,
          beta = bias_scale / dst_scale, gamma = 0.f, delta = 0.f,
          theta = dst_zero;
    if (args.z_layout->ndim > 0) {
        float z_scale =
                args.z_layout->dtype.param<dtype::Quantized4Asymm>().scale;
        gamma = z_scale / dst_scale;
        uint8_t z_zero =
                args.z_layout->dtype.param<dtype::Quantized4Asymm>().zero_point;
        delta = -z_zero * gamma;
    }
    return {alpha, beta, gamma, delta, theta};
}
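// Editorial note: compared with the signed variant, the zero points enter
// here: theta carries the destination zero point, and delta = -z_zero * gamma
// cancels the zero point of the quantized z input so that only its scaled
// value contributes to the epilogue.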
void ConvBiasForwardImpl::AlgoUInt4Int4NHWCIMMAImplicitGemm::do_exec(
        const ExecArgs& args, void* filter_ptr, void* bias_ptr, void* z_ptr,
        ConvParam kern_param, uint32_t nonlinear_mode, float alpha, float beta,
        float gamma, float delta, float theta, cudaStream_t stream) const {
    float dst_scale =
            args.dst_layout->dtype.param<dtype::Quantized4Asymm>().scale;
    uint8_t src_zero =
            args.src_layout->dtype.param<dtype::Quantized4Asymm>().zero_point;
    cutlass_wrapper::GemmCoord threadblock_shape{m_algo_param.threadblock_m,
                                                 m_algo_param.threadblock_n,
                                                 m_algo_param.threadblock_k};
    cutlass_wrapper::GemmCoord warp_shape{
            m_algo_param.warp_m, m_algo_param.warp_n, m_algo_param.warp_k};
    if (kern_param.fh == 1 && kern_param.fw == 1) {
        cutlass_wrapper::do_conv_bias_uint4_int4_implicit_gemm_imma_nhwc<false>(
                reinterpret_cast<uint8_t*>(args.src_tensor->raw_ptr),
                reinterpret_cast<int8_t*>(filter_ptr),
                reinterpret_cast<int32_t*>(bias_ptr),
                reinterpret_cast<uint8_t*>(z_ptr),
                reinterpret_cast<uint8_t*>(args.dst_tensor->raw_ptr), nullptr,
                kern_param, nonlinear_mode, alpha, beta, gamma, delta, theta,
                dst_scale, src_zero, threadblock_shape, warp_shape,
                m_algo_param.access_size, stream);
    } else {
        cutlass_wrapper::do_conv_bias_uint4_int4_implicit_gemm_imma_nhwc<true>(
                reinterpret_cast<uint8_t*>(args.src_tensor->raw_ptr),
                reinterpret_cast<int8_t*>(filter_ptr),
                reinterpret_cast<int32_t*>(bias_ptr),
                reinterpret_cast<uint8_t*>(z_ptr),
                reinterpret_cast<uint8_t*>(args.dst_tensor->raw_ptr), nullptr,
                kern_param, nonlinear_mode, alpha, beta, gamma, delta, theta,
                dst_scale, src_zero, threadblock_shape, warp_shape,
                m_algo_param.access_size, stream);
    }
}

void ConvBiasForwardImpl::AlgoUInt4Int4NHWCIMMAImplicitGemm::update_bias(
        const ExecArgs& args, void* updated_bias, void* reduce_filter_ptr,
        void* reduce_workspace) const {
    size_t co = args.filter_layout->operator[](0),
           ci = args.filter_layout->operator[](3),
           fh = args.filter_layout->operator[](1),
           fw = args.filter_layout->operator[](2);

    auto&& stream = cuda_stream(args.opr->handle());

    int src_zero_point =
            args.src_tensor->layout.dtype.param<dtype::Quantized4Asymm>()
                    .zero_point;
    do_dispatch_reduce_filter_and_update_bias_4bit<true>(
            reinterpret_cast<uint8_t*>(args.filter_tensor->raw_ptr),
            args.bias_tensor->compatible_ptr<int32_t>(), co, ci * fh * fw / 8,
            reinterpret_cast<int32_t*>(updated_bias),
            reinterpret_cast<int32_t*>(reduce_workspace), src_zero_point,
            stream);
}
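// Editorial note: folding the source zero point into the bias relies on the
// identity conv(x_q - zp, w) == conv(x_q, w) - zp * sum(w); the reduction
// above computes the per-output-channel filter sum once at preprocess time so
// the main kernel can consume the quantized source values directly.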
#endif

// vim: syntax=cpp.doxygen
@@ -0,0 +1,59 @@
#if !MEGDNN_TEGRA_X1
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl"

// kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator
using Convolution = typename cutlass::conv::device::Convolution<
        cutlass::int4b_t,
        cutlass::layout::TensorNHWC,
        cutlass::int4b_t,
        cutlass::layout::TensorNCxHWx<16>,
        cutlass::int4b_t,
        cutlass::layout::TensorNHWC,
        int32_t,
        cutlass::layout::TensorNHWC,
        int32_t,
        cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp,
        cutlass::arch::Sm75,
        cutlass::gemm::GemmShape<128, 32, 64>,
        cutlass::gemm::GemmShape<64, 32, 64>,
        cutlass::gemm::GemmShape<8, 8, 32>,
        cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
                cutlass::int4b_t,
                8,
                int32_t,
                int32_t,
                float>,
        cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle,
        2,
        16,
        16,
        false,
        cutlass::arch::OpMultiplyAddSaturate,
        cutlass::conv::ImplicitGemmMode::GEMM_TN>;

template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst,
        int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream,
        typename Convolution::ExtraParam extra_param);
#pragma GCC diagnostic pop
#endif
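The remaining instantiation files follow the same pattern as the one above and only vary the filter layout (TensorNCxHWx<8/16/32>), the threadblock tile (128x32x64 or 128x64x64) and the matching alignment parameters; presumably one explicit instantiation is emitted per tile/layout combination so the kernels can be compiled as separate translation units.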
@@ -0,0 +1,59 @@
#if !MEGDNN_TEGRA_X1
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl"

// kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator
using Convolution = typename cutlass::conv::device::Convolution<
        cutlass::int4b_t,
        cutlass::layout::TensorNHWC,
        cutlass::int4b_t,
        cutlass::layout::TensorNCxHWx<32>,
        cutlass::int4b_t,
        cutlass::layout::TensorNHWC,
        int32_t,
        cutlass::layout::TensorNHWC,
        int32_t,
        cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp,
        cutlass::arch::Sm75,
        cutlass::gemm::GemmShape<128, 32, 64>,
        cutlass::gemm::GemmShape<64, 32, 64>,
        cutlass::gemm::GemmShape<8, 8, 32>,
        cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
                cutlass::int4b_t,
                8,
                int32_t,
                int32_t,
                float>,
        cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle,
        2,
        32,
        32,
        false,
        cutlass::arch::OpMultiplyAddSaturate,
        cutlass::conv::ImplicitGemmMode::GEMM_TN>;

template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst,
        int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream,
        typename Convolution::ExtraParam extra_param);
#pragma GCC diagnostic pop
#endif
@@ -0,0 +1,59 @@
#if !MEGDNN_TEGRA_X1
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl"

// kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator
using Convolution = typename cutlass::conv::device::Convolution<
        cutlass::int4b_t,
        cutlass::layout::TensorNHWC,
        cutlass::int4b_t,
        cutlass::layout::TensorNCxHWx<8>,
        cutlass::int4b_t,
        cutlass::layout::TensorNHWC,
        int32_t,
        cutlass::layout::TensorNHWC,
        int32_t,
        cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp,
        cutlass::arch::Sm75,
        cutlass::gemm::GemmShape<128, 32, 64>,
        cutlass::gemm::GemmShape<64, 32, 64>,
        cutlass::gemm::GemmShape<8, 8, 32>,
        cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
                cutlass::int4b_t,
                8,
                int32_t,
                int32_t,
                float>,
        cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle,
        2,
        8,
        8,
        false,
        cutlass::arch::OpMultiplyAddSaturate,
        cutlass::conv::ImplicitGemmMode::GEMM_TN>;

template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst,
        int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream,
        typename Convolution::ExtraParam extra_param);
#pragma GCC diagnostic pop
#endif
@@ -0,0 +1,59 @@
#if !MEGDNN_TEGRA_X1
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl"

// kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator
using Convolution = typename cutlass::conv::device::Convolution<
        cutlass::int4b_t,
        cutlass::layout::TensorNHWC,
        cutlass::int4b_t,
        cutlass::layout::TensorNCxHWx<16>,
        cutlass::int4b_t,
        cutlass::layout::TensorNHWC,
        int32_t,
        cutlass::layout::TensorNHWC,
        int32_t,
        cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp,
        cutlass::arch::Sm75,
        cutlass::gemm::GemmShape<128, 64, 64>,
        cutlass::gemm::GemmShape<64, 64, 64>,
        cutlass::gemm::GemmShape<8, 8, 32>,
        cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
                cutlass::int4b_t,
                8,
                int32_t,
                int32_t,
                float>,
        cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle,
        2,
        16,
        16,
        false,
        cutlass::arch::OpMultiplyAddSaturate,
        cutlass::conv::ImplicitGemmMode::GEMM_TN>;

template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst,
        int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream,
        typename Convolution::ExtraParam extra_param);
#pragma GCC diagnostic pop
#endif
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_hswish_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_identity_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_identity_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_identity_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_identity_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_identity_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_identity_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
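// Editorial sketch: within each tile configuration, the identity / relu / hswish
// families in this series differ only in the epilogue functor named in the kernel
// string; layouts, tile shapes, and access widths repeat verbatim. The mapping below
// is an illustrative restatement of that pattern — the Activation enum and the
// EpilogueFor helper are hypothetical and not part of the generated code.
enum class Activation { kIdentity, kRelu, kHSwish };

template <Activation act>
struct EpilogueFor;

template <>
struct EpilogueFor<Activation::kIdentity> {
    using type = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
            cutlass::int4b_t, 8, int32_t, int32_t, float>;
};

template <>
struct EpilogueFor<Activation::kRelu> {
    using type = cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp<
            cutlass::int4b_t, 8, int32_t, int32_t, float>;
};

template <>
struct EpilogueFor<Activation::kHSwish> {
    using type = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
            cutlass::int4b_t, 8, int32_t, int32_t, float>;
};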
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_1x1_relu_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
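// Editorial sketch: a minimal host-side driver for one of the explicitly
// instantiated wrappers above. Only the wrapper's signature is taken from this
// patch; the function name, how buffers and parameters are obtained, and the
// default-constructed ExtraParam are illustrative assumptions. It presumes the
// `Convolution` alias and the wrapper declaration from one of the generated
// files are already in scope (e.g. via the .cuinl include).
void launch_int4_nhwc_conv_sketch(
        const Convolution::ElementSrc* d_src,        // int4 activations, NHWC, on device
        const Convolution::ElementFilter* d_filter,  // int4 weights in the interleaved filter layout
        const Convolution::ElementBias* d_bias,      // int32 bias
        const Convolution::ElementDst* d_z,          // optional residual input (may be nullptr)
        Convolution::ElementDst* d_dst,              // int4 output, NHWC
        int* workspace,                              // device scratch buffer
        const Convolution::ConvolutionParameter& conv_param,
        const Convolution::EpilogueOutputOp::Params& epilogue,
        cudaStream_t stream) {
    // The explicit instantiation guarantees this symbol is emitted for the
    // chosen Convolution type, so this call only needs the declaration.
    megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
            d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param,
            epilogue, stream, Convolution::ExtraParam{});
}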
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_128x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_256x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_hswish_s4_256x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_128x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
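For orientation, a minimal sketch (not part of the patch) of how the implicit-GEMM problem size is usually derived from the convolution parameters for the fprop instances above; the struct and field names below are hypothetical, and only the M/N/K arithmetic is the standard mapping. The GEMM_NT / GEMM_TN template argument in these instances is taken here, as an assumption, to control only which operand is treated as transposed (how src/filter tiles are loaded), not the problem size itself.

#include <cstdint>
#include <cstdio>

// Hypothetical plain struct; the real kernels take Convolution::ConvolutionParameter.
struct ConvShape {
    int n, ci, hi, wi;   // input:  batch, channels, height, width
    int co, fh, fw;      // filter: output channels, kernel height/width
    int ho, wo;          // output spatial size (already computed from stride/padding)
};

// Standard implicit-GEMM mapping for forward convolution (fprop):
//   M = N * OH * OW   (one GEMM row per output pixel)
//   N = CO            (one GEMM column per output channel)
//   K = CI * FH * FW  (reduction over the receptive field)
inline void implicit_gemm_extent(const ConvShape& c, int64_t& M, int64_t& N, int64_t& K) {
    M = int64_t(c.n) * c.ho * c.wo;
    N = c.co;
    K = int64_t(c.ci) * c.fh * c.fw;
}

int main() {
    ConvShape c{16, 64, 56, 56, 128, 3, 3, 56, 56};
    int64_t M, N, K;
    implicit_gemm_extent(c, M, N, K);
    std::printf("M=%lld N=%lld K=%lld\n", (long long)M, (long long)N, (long long)K);
}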
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
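The switch from TensorNCxHWx<64> to TensorNHWC for the activation tensor changes how a 4-bit element is addressed. Below is a self-contained sketch of the two offset computations, assuming the usual packing of two int4 values per byte; the function names are illustrative and not taken from the patch.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Element offset (in int4 units, i.e. nibbles) of (n, c, h, w) in NHWC layout:
// channels are stored innermost and fully contiguous.
inline int64_t offset_nhwc(int n, int c, int h, int w, int C, int H, int W) {
    return ((int64_t(n) * H + h) * W + w) * C + c;
}

// Element offset of (n, c, h, w) in NC64HW64 (TensorNCxHWx<64>): channels are
// grouped into blocks of 64, and each 64-channel block is stored innermost.
inline int64_t offset_nc64hw64(int n, int c, int h, int w, int C, int H, int W) {
    assert(C % 64 == 0);
    return (((int64_t(n) * (C / 64) + c / 64) * H + h) * W + w) * 64 + c % 64;
}

int main() {
    // Two int4 elements share one byte, so byte offset = element offset / 2.
    int C = 128, H = 7, W = 7;
    int64_t e_nhwc = offset_nhwc(0, 65, 3, 4, C, H, W);
    int64_t e_blk  = offset_nc64hw64(0, 65, 3, 4, C, H, W);
    std::printf("nhwc byte %lld, nc64hw64 byte %lld\n",
                (long long)(e_nhwc / 2), (long long)(e_blk / 2));
}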
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
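These NHWC tilings are noticeably smaller than the 256x128x128 / 128x128x128 NC64HW64 tiles they replace. A quick arithmetic check of how many warps each threadblock shape implies, using the usual CUTLASS convention that the warp count is the ratio of the threadblock and warp GemmShapes; the helper below is illustrative only.

#include <cstdio>

// Hypothetical helper: warps per threadblock for a given tiling, computed as
// the componentwise ratio of the threadblock and warp GemmShapes.
constexpr int warps_per_block(int tb_m, int tb_n, int tb_k,
                              int wp_m, int wp_n, int wp_k) {
    return (tb_m / wp_m) * (tb_n / wp_n) * (tb_k / wp_k);
}

int main() {
    // GemmShape<128, 32, 64> with GemmShape<64, 32, 64> warps -> 2 warps,
    // GemmShape<128, 64, 64> with GemmShape<64, 64, 64> warps -> 2 warps,
    // i.e. 64 threads per CTA for these NHWC tilings (assuming 32-thread warps).
    std::printf("%d %d\n",
                warps_per_block(128, 32, 64, 64, 32, 64),
                warps_per_block(128, 64, 64, 64, 64, 64));
}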
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_256x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_identity_s4_256x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_128x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
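The only difference between the identity, relu and hswish instances is the epilogue functor. The scalar reference below is what such a bias-add linear-combination epilogue is assumed to compute per output element, inferred from the functor names rather than from the CUTLASS source: scale the accumulator, add scaled bias and residual, apply the activation, then round and clamp to the signed 4-bit range.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical scalar model of BiasAddLinearCombination{Identity,Relu,HSwish}Clamp
// for a signed 4-bit destination; the alpha/beta/gamma names follow the usual
// linear-combination convention and are an assumption, not the real API.
enum class Act { kIdentity, kRelu, kHSwish };

inline int8_t epilogue_ref(int32_t acc, int32_t bias, int32_t z,
                           float alpha, float beta, float gamma, Act act) {
    float v = alpha * float(acc) + beta * float(bias) + gamma * float(z);
    switch (act) {
        case Act::kRelu:   v = std::max(v, 0.f); break;
        case Act::kHSwish: v = v * std::min(std::max(v + 3.f, 0.f), 6.f) / 6.f; break;
        default: break;
    }
    // Round to nearest and clamp to the representable int4 range [-8, 7].
    int q = int(std::nearbyint(v));
    return int8_t(std::min(std::max(q, -8), 7));
}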
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_256x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_s4_i8832fprop_relu_s4_256x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::int4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_identity_u4_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_identity_u4_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_identity_u4_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_identity_u4_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_identity_u4_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_identity_u4_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
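The u4_s4 instances write an unsigned 4-bit destination, which corresponds to an asymmetrically quantized type with a zero point, while the filter stays signed 4-bit. A short sketch of the value mappings, assuming the conventional scale/zero-point formulation; nothing here is taken from the patch itself.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Asymmetric unsigned 4-bit quantization: real = scale * (stored - zero_point),
// with stored values clamped to the unsigned range [0, 15].
inline uint8_t quantize_u4(float real, float scale, int zero_point) {
    int q = int(std::nearbyint(real / scale)) + zero_point;
    return uint8_t(std::min(std::max(q, 0), 15));
}

inline float dequantize_u4(uint8_t stored, float scale, int zero_point) {
    return scale * (int(stored) - zero_point);
}

// Symmetric signed 4-bit (the s4 side of these kernels) uses no zero point
// and the range [-8, 7].
inline int8_t quantize_s4(float real, float scale) {
    int q = int(std::nearbyint(real / scale));
    return int8_t(std::min(std::max(q, -8), 7));
}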
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_1x1_relu_u4_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| false, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_128x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_256x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_identity_u4_s4_256x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationClamp< | |||||
| cutlass::uint4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x32x64_64x32x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x32x64_64x32x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x32x64_64x32x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 32, 64>, | |||||
| cutlass::gemm::GemmShape<64, 32, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x64x64_64x64x64_2_nhwc_nc16hw16" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<16>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 16, | |||||
| 16, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x64x64_64x64x64_2_nhwc_nc32hw32" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<32>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_128x64x64_64x64x64_2_nhwc_nc8hw8" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorNCxHWx<8>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNHWC, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<128, 64, 64>, | |||||
| cutlass::gemm::GemmShape<64, 64, 64>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 8, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNHWCThreadblockSwizzle, | |||||
| 2, | |||||
| 8, | |||||
| 8, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_TN>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
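// ---------------------------------------------------------------------------
// Annotation (not generator output): each explicit instantiation in these files
// exists so the host-side algorithm can launch the corresponding tile
// configuration through the wrapper declared in
// implicit_gemm_conv_bias_cutlass_wrapper.cuinl. A minimal usage sketch follows;
// the function name launch_uint4_nhwc_sketch is a placeholder and only the call
// shape mirrors the instantiated wrapper signature.
namespace {
void launch_uint4_nhwc_sketch(
        const Convolution::ElementSrc* d_src,
        const Convolution::ElementFilter* d_filter,
        const Convolution::ElementBias* d_bias,
        const Convolution::ElementDst* d_z, Convolution::ElementDst* d_dst,
        int* workspace, Convolution::ConvolutionParameter const& conv_param,
        Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream, Convolution::ExtraParam extra_param) {
    // Forward everything to the explicitly instantiated kernel wrapper; the
    // template argument selects the uint4/int4 NHWC tile configuration defined
    // in the file above.
    megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
            d_src, d_filter, d_bias, d_z, d_dst, workspace, conv_param,
            epilogue, stream, extra_param);
}
}  // namespace
// ---------------------------------------------------------------------------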
| @@ -1,55 +0,0 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_256x128x128_64x64x128_2_nc64hw64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
| @@ -0,0 +1,59 @@ | |||||
| #if !MEGDNN_TEGRA_X1 | |||||
| // ignore warning of cutlass | |||||
| #pragma GCC diagnostic push | |||||
| #pragma GCC diagnostic ignored "-Wunused-parameter" | |||||
| #pragma GCC diagnostic ignored "-Wstrict-aliasing" | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | |||||
| // kernel instance "cutlass_tensorop_u4_i8832fprop_relu_u4_s4_256x128x128_64x64x128_2_nc64hw64_c64rsk64" generated by cutlass generator | |||||
| using Convolution = | |||||
| typename cutlass::conv::device::Convolution< | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| cutlass::int4b_t, | |||||
| cutlass::layout::TensorCxRSKx<64>, | |||||
| cutlass::uint4b_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::layout::TensorNCxHWx<64>, | |||||
| int32_t, | |||||
| cutlass::conv::ConvType::kConvolution, | |||||
| cutlass::arch::OpClassTensorOp, | |||||
| cutlass::arch::Sm75, | |||||
| cutlass::gemm::GemmShape<256, 128, 128>, | |||||
| cutlass::gemm::GemmShape<64, 64, 128>, | |||||
| cutlass::gemm::GemmShape<8, 8, 32>, | |||||
| cutlass::epilogue::thread::BiasAddLinearCombinationReluClamp< | |||||
| cutlass::uint4b_t, | |||||
| 16, | |||||
| int32_t, | |||||
| int32_t, | |||||
| float | |||||
| >, | |||||
| cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle, | |||||
| 2, | |||||
| 32, | |||||
| 32, | |||||
| true, | |||||
| cutlass::arch::OpMultiplyAddSaturate, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( | |||||
| const typename Convolution::ElementSrc* d_src, | |||||
| const typename Convolution::ElementFilter* d_filter, | |||||
| const typename Convolution::ElementBias* d_bias, | |||||
| const typename Convolution::ElementDst* d_z, | |||||
| typename Convolution::ElementDst* d_dst, | |||||
| int* workspace, | |||||
| typename Convolution::ConvolutionParameter const& conv_param, | |||||
| typename Convolution::EpilogueOutputOp::Params const& epilogue, | |||||
| cudaStream_t stream, | |||||
| typename Convolution::ExtraParam extra_param); | |||||
| #pragma GCC diagnostic pop | |||||
| #endif | |||||
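For orientation, the three GemmShape parameters in the two tensor-op instances above (256x128x128, 64x64x128, 8x8x32) follow CUTLASS's usual threadblock-tile / warp-tile / instruction-shape layering; the device::Convolution template itself comes from MegEngine's CUTLASS fork, so the standalone sketch below only illustrates that layering convention and the arithmetic it implies, not this fork's internals. Only the tile shapes and the SM75 sub-byte m8n8k32 tensor-core shape are taken from the instances above; the rest is plain arithmetic and is marked as assumption in the comments.

// Illustrative sketch only: tiling arithmetic implied by CUTLASS's
// (ThreadblockShape, WarpShape, InstructionShape) convention, assuming the
// three GemmShape parameters of the instances above play those roles.
#include <cstdio>

int main() {
    // Threadblock tile (M, N, K) from GemmShape<256, 128, 128>
    constexpr int kTbM = 256, kTbN = 128, kTbK = 128;
    // Warp tile (M, N, K) from GemmShape<64, 64, 128>
    constexpr int kWarpM = 64, kWarpN = 64, kWarpK = 128;
    // Tensor-op shape from GemmShape<8, 8, 32>: the SM75 m8n8k32 MMA for s4/u4
    constexpr int kInstM = 8, kInstN = 8, kInstK = 32;

    // Warps per threadblock: (256/64) * (128/64) * (128/128) = 4 * 2 * 1 = 8,
    // i.e. 256 threads per CTA.
    constexpr int kWarps = (kTbM / kWarpM) * (kTbN / kWarpN) * (kTbK / kWarpK);

    // MMA instructions issued per warp for one 128-deep K slice of the CTA tile:
    // (64/8) * (64/8) * (128/32) = 8 * 8 * 4 = 256.
    constexpr int kMmaPerWarp =
            (kWarpM / kInstM) * (kWarpN / kInstN) * (kWarpK / kInstK);

    static_assert(kWarps == 8, "256x128x128 CTA tile over 64x64x128 warp tiles");
    static_assert(kMmaPerWarp == 256, "64x64x128 warp tile over 8x8x32 MMAs");

    std::printf("warps per CTA: %d, MMAs per warp per K slice: %d\n",
                kWarps, kMmaPerWarp);
    return 0;
}

Under that assumption, a 256x128x128 threadblock tile is covered by 8 warps of 64x64x128 each, and every warp issues 256 m8n8k32 tensor-core MMAs per 128-deep slice of K.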
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_128x128x32_64x32x32_2_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_128x128x32_64x32x32_2_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 16, | 16, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_128x32x32_64x32x32_2_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_128x32x32_64x32x32_2_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 16, | 16, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_128x64x32_64x32x32_2_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_128x64x32_64x32x32_2_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 16, | 16, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_16x128x16_16x128x16_1_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_16x128x16_16x128x16_1_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 8, | 8, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_16x64x8_16x64x8_2_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_16x64x8_16x64x8_2_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 4, | 4, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_32x128x32_32x64x32_2_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_32x128x32_32x64x32_2_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 16, | 16, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_32x32x32_32x32x32_2_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_32x32x32_32x32x32_2_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 16, | 16, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_32x64x32_32x64x32_2_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_32x64x32_32x64x32_2_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 16, | 16, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_64x128x32_64x32x32_2_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_64x128x32_64x32x32_2_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 16, | 16, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||
| @@ -7,7 +7,7 @@ | |||||
| #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | #include "src/cuda/conv_bias/implicit_gemm_conv_bias_cutlass_wrapper.cuinl" | ||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_64x32x32_64x32x32_2_nc4hw4_nchw" generated by cutlass generator | |||||
| // kernel instance "cutlass_simt_f32_ifprop_1x1_hswish_s8_64x32x32_64x32x32_2_nc4hw4_c4rsk4_nchw" generated by cutlass generator | |||||
| using Convolution = | using Convolution = | ||||
| typename cutlass::conv::device::Convolution< | typename cutlass::conv::device::Convolution< | ||||
| int8_t, | int8_t, | ||||
| @@ -37,7 +37,8 @@ using Convolution = | |||||
| 4, | 4, | ||||
| 16, | 16, | ||||
| false, | false, | ||||
| cutlass::arch::OpMultiplyAdd>; | |||||
| cutlass::arch::OpMultiplyAdd, | |||||
| cutlass::conv::ImplicitGemmMode::GEMM_NT>; | |||||