Browse Source

!12628 Add bond, angle and dihedral modules of Sponge

From: @zhangxinfeng3
Reviewed-by: 
Signed-off-by:
tags/v1.2.0-rc1
mindspore-ci-bot Gitee 5 years ago
parent
commit
be2019c5be
55 changed files with 3822 additions and 0 deletions
  1. +65
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_atom_energy_impl.cu
  2. +26
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh
  3. +63
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_energy_impl.cu
  4. +25
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_energy_impl.cuh
  5. +83
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_impl.cu
  6. +25
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_impl.cuh
  7. +89
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cu
  8. +26
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh
  9. +56
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cu
  10. +26
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh
  11. +55
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cu
  12. +26
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh
  13. +60
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cu
  14. +25
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh
  15. +67
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cu
  16. +26
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh
  17. +67
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cu
  18. +26
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh
  19. +94
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh
  20. +83
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cu
  21. +26
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh
  22. +83
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_energy_impl.cu
  23. +26
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh
  24. +119
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cu
  25. +26
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh
  26. +124
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cu
  27. +27
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh
  28. +34
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_atom_energy_kernel.cc
  29. +106
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_atom_energy_kernel.h
  30. +34
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_energy_kernel.cc
  31. +106
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_energy_kernel.h
  32. +34
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_force_kernel.cc
  33. +106
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_force_kernel.h
  34. +35
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_force_with_atom_energy_kernel.cc
  35. +108
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_force_with_atom_energy_kernel.h
  36. +33
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_atom_energy_cuda_gpu_kernel.cc
  37. +108
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_atom_energy_cuda_gpu_kernel.h
  38. +33
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_energy_cuda_gpu_kernel.cc
  39. +109
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_energy_cuda_gpu_kernel.h
  40. +33
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_cuda_gpu_kernel.cc
  41. +109
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_cuda_gpu_kernel.h
  42. +34
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_energy_kernel.cc
  43. +108
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_energy_kernel.h
  44. +34
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_virial_kernel.cc
  45. +108
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_virial_kernel.h
  46. +38
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_atom_energy_kernel.cc
  47. +126
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_atom_energy_kernel.h
  48. +38
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_energy_kernel.cc
  49. +126
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_energy_kernel.h
  50. +38
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_kernel.cc
  51. +126
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_kernel.h
  52. +39
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_with_atom_energy_kernel.cc
  53. +128
    -0
      mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_with_atom_energy_kernel.h
  54. +16
    -0
      mindspore/ops/operations/__init__.py
  55. +431
    -0
      mindspore/ops/operations/sponge_ops.py

+ 65
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_atom_energy_impl.cu View File

@@ -0,0 +1,65 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
// One thread per angle term (atoms a-b-c with b as the vertex): computes the
// harmonic angle energy k * (theta - theta0)^2 and accumulates it into the
// per-atom energy of atom a. atomicAdd is required because several angles can
// list the same first atom.
__global__ void AngleAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
                                      const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
                                      const float *angle_theta0, float *atom_energy) {
  int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (angle_i < angle_numbers) {
    int atom_i = atom_a[angle_i];
    int atom_j = atom_b[angle_i];
    int atom_k = atom_c[angle_i];
    float theta0 = angle_theta0[angle_i];
    float k = angle_k[angle_i];
    // Displacements from the vertex atom j to atoms i and k, unwrapped by
    // Get_Periodic_Displacement (presumably minimum-image under the periodic
    // box scaled by scaler[0] — confirm in common_sponge.cuh).
    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
    float rij_2 = 1. / (drij * drij);
    float rkj_2 = 1. / (drkj * drkj);
    float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
    // cos(theta) = (drij . drkj) / (|drij||drkj|), clamped away from +/-1 so
    // acosf stays well-defined despite float rounding.
    float costheta = drij * drkj * rij_1_rkj_1;
    costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
    float theta = acosf(costheta);
    float dtheta = theta - theta0;
    // Note: the convention here is E = k * dtheta^2 (k absorbs the usual 1/2).
    atomicAdd(&atom_energy[atom_i], k * dtheta * dtheta);
  }
}
// Host-side launcher for AngleAtomEnergyKernel.
//
// uint_crd_f / scaler_f are flat device buffers reinterpreted as the packed
// vector structs the kernel expects; all pointers are device memory. The
// kernel is enqueued on `stream`; no synchronization or error checking is
// done here (consistent with the other sponge launchers).
void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                     const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
                     cudaStream_t stream) {
  if (angle_numbers <= 0) {
    return;  // nothing to do; also avoids an invalid zero-block kernel launch
  }
  size_t thread_per_block = 128;
  // Integer ceiling division: exact for every count (the previous
  // ceilf(float / 128) can round incorrectly once angle_numbers exceeds 2^24)
  // and keeps thread_per_block as the single source of truth.
  size_t block_per_grid = (static_cast<size_t>(angle_numbers) + thread_per_block - 1) / thread_per_block;
  UNSIGNED_INT_VECTOR *uint_crd =
    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
  AngleAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a,
                                                                         atom_b, atom_c, angle_k, angle_theta0, ene);
  return;
}

void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                     const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
                     cudaStream_t stream);

+ 26
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh View File

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
// Accumulates harmonic angle energies k * (theta - theta0)^2 into per-atom
// energies `ene` on the GPU; all pointers are device memory (see the .cu file).
void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                     const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
                     cudaStream_t stream);
#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_IMPL_H_

+ 63
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_energy_impl.cu View File

@@ -0,0 +1,63 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
// One thread per angle term (atoms a-b-c with b as the vertex): computes the
// harmonic angle energy k * (theta - theta0)^2 and stores it per-angle in
// angle_energy[angle_i]. No atomics are needed because each angle owns its
// own output slot.
__global__ void AngleEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
                                  const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
                                  const float *angle_theta0, float *angle_energy) {
  int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (angle_i < angle_numbers) {
    int atom_i = atom_a[angle_i];
    int atom_j = atom_b[angle_i];
    int atom_k = atom_c[angle_i];
    float theta0 = angle_theta0[angle_i];
    float k = angle_k[angle_i];
    // Displacements from the vertex atom j to atoms i and k (periodic
    // unwrapping via Get_Periodic_Displacement — see common_sponge.cuh).
    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
    float rij_2 = 1. / (drij * drij);
    float rkj_2 = 1. / (drkj * drkj);
    float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
    // cos(theta), clamped away from +/-1 so acosf stays well-defined.
    float costheta = drij * drkj * rij_1_rkj_1;
    costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
    float theta = acosf(costheta);
    float dtheta = theta - theta0;
    // Convention: E = k * dtheta^2 (k absorbs the usual 1/2 factor).
    angle_energy[angle_i] = k * dtheta * dtheta;
  }
}
// Host-side launcher for AngleEnergyKernel: one thread per angle, per-angle
// energies written to `ene`. All pointers are device memory; the kernel is
// enqueued on `stream` with no synchronization or error checking here.
void AngleEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
                 const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene, cudaStream_t stream) {
  if (angle_numbers <= 0) {
    return;  // nothing to do; also avoids an invalid zero-block kernel launch
  }
  size_t thread_per_block = 128;
  // Integer ceiling division: exact for every count (the previous
  // ceilf(float / 128) can round incorrectly above 2^24 elements) and reuses
  // thread_per_block instead of a duplicated literal 128.
  size_t block_per_grid = (static_cast<size_t>(angle_numbers) + thread_per_block - 1) / thread_per_block;
  UNSIGNED_INT_VECTOR *uint_crd =
    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
  AngleEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a, atom_b,
                                                                     atom_c, angle_k, angle_theta0, ene);
  return;
}

void AngleEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
                 const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene, cudaStream_t stream);

+ 25
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_energy_impl.cuh View File

@@ -0,0 +1,25 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
// Computes per-angle harmonic energies k * (theta - theta0)^2 into `ene` on
// the GPU; all pointers are device memory (see the .cu file).
void AngleEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
                 const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene, cudaStream_t stream);
#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ENERGY_IMPL_H_

+ 83
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_impl.cu View File

@@ -0,0 +1,83 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
// One thread per angle term (atoms a-b-c with b as the vertex): computes the
// harmonic angle forces dE/dr for E = k * (theta - theta0)^2 and accumulates
// them into frc with atomicAdd (atoms are shared between angle terms).
__global__ void AngleForceKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
                                 const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
                                 const float *angle_theta0, VECTOR *frc) {
  int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (angle_i < angle_numbers) {
    int atom_i = atom_a[angle_i];
    int atom_j = atom_b[angle_i];
    int atom_k = atom_c[angle_i];
    float theta0 = angle_theta0[angle_i];
    float k = angle_k[angle_i];
    // Displacements from the vertex atom j to atoms i and k (periodic
    // unwrapping via Get_Periodic_Displacement — see common_sponge.cuh).
    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
    float rij_2 = 1. / (drij * drij);
    float rkj_2 = 1. / (drkj * drkj);
    float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
    // cos(theta), clamped away from +/-1 so acosf (and sinf(theta) below,
    // which is divided by) stays well-behaved.
    float costheta = drij * drkj * rij_1_rkj_1;
    costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
    float theta = acosf(costheta);
    float dtheta = theta - theta0;
    // k is reused as the scalar prefactor -dE/dtheta / sin(theta)
    // = -2 k dtheta / sin(theta), shared by both force terms below.
    k = -2 * k * dtheta / sinf(theta);
    float common_factor_cross = k * rij_1_rkj_1;
    float common_factor_self = k * costheta;
    // Analytic gradient of cos(theta) w.r.t. the two bond vectors.
    VECTOR fi = common_factor_self * rij_2 * drij - common_factor_cross * drkj;
    VECTOR fk = common_factor_self * rkj_2 * drkj - common_factor_cross * drij;
    atomicAdd(&frc[atom_i].x, fi.x);
    atomicAdd(&frc[atom_i].y, fi.y);
    atomicAdd(&frc[atom_i].z, fi.z);
    atomicAdd(&frc[atom_k].x, fk.x);
    atomicAdd(&frc[atom_k].y, fk.y);
    atomicAdd(&frc[atom_k].z, fk.z);
    // Newton's third law: the vertex atom j takes -(fi + fk) so the three
    // forces of this angle term sum to zero. fi is reused as scratch.
    fi = -fi - fk;
    atomicAdd(&frc[atom_j].x, fi.x);
    atomicAdd(&frc[atom_j].y, fi.y);
    atomicAdd(&frc[atom_j].z, fi.z);
  }
}
// Host-side launcher for AngleForceKernel: one thread per angle, forces
// accumulated into the flat device buffer frc_f (viewed as VECTORs). All
// pointers are device memory; the kernel is enqueued on `stream` with no
// synchronization or error checking here.
void AngleForce(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
                const int *atom_c, const float *angle_k, const float *angle_theta0, float *frc_f, cudaStream_t stream) {
  if (angle_numbers <= 0) {
    return;  // nothing to do; also avoids an invalid zero-block kernel launch
  }
  size_t thread_per_block = 128;
  // Integer ceiling division: exact for every count (the previous
  // ceilf(float / 128) can round incorrectly above 2^24 elements) and reuses
  // thread_per_block instead of a duplicated literal 128.
  size_t block_per_grid = (static_cast<size_t>(angle_numbers) + thread_per_block - 1) / thread_per_block;
  UNSIGNED_INT_VECTOR *uint_crd =
    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
  AngleForceKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a, atom_b,
                                                                    atom_c, angle_k, angle_theta0, frc);
  return;
}

void AngleForce(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
                const int *atom_c, const float *angle_k, const float *angle_theta0, float *frc_f, cudaStream_t stream);

+ 25
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_impl.cuh View File

@@ -0,0 +1,25 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
// Accumulates harmonic angle forces into the per-atom force buffer frc_f on
// the GPU; all pointers are device memory (see the .cu file).
void AngleForce(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
                const int *atom_c, const float *angle_k, const float *angle_theta0, float *frc_f, cudaStream_t stream);
#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_IMPL_H_

+ 89
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cu View File

@@ -0,0 +1,89 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
// Fused variant of AngleForceKernel: one thread per angle term computes both
// the harmonic angle forces (accumulated into frc) and the angle energy
// k * (theta - theta0)^2 (accumulated into atom_energy of the first atom).
// Fusing the two saves recomputing theta per pass.
__global__ void AngleForceWithAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
                                               const VECTOR *scaler, const int *atom_a, const int *atom_b,
                                               const int *atom_c, const float *angle_k, const float *angle_theta0,
                                               VECTOR *frc, float *atom_energy) {
  int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (angle_i < angle_numbers) {
    int atom_i = atom_a[angle_i];
    int atom_j = atom_b[angle_i];
    int atom_k = atom_c[angle_i];
    float theta0 = angle_theta0[angle_i];
    float k = angle_k[angle_i];
    // k2 keeps the original force constant: k itself is overwritten below
    // with the force prefactor, but the energy term still needs plain k.
    float k2 = k;
    // Displacements from the vertex atom j to atoms i and k (periodic
    // unwrapping via Get_Periodic_Displacement — see common_sponge.cuh).
    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
    float rij_2 = 1. / (drij * drij);
    float rkj_2 = 1. / (drkj * drkj);
    float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
    // cos(theta), clamped away from +/-1 so acosf and the sinf division stay
    // well-behaved.
    float costheta = drij * drkj * rij_1_rkj_1;
    costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
    float theta = acosf(costheta);
    float dtheta = theta - theta0;
    // Scalar force prefactor -2 k dtheta / sin(theta), shared by both terms.
    k = -2 * k * dtheta / sinf(theta);
    float common_factor_cross = k * rij_1_rkj_1;
    float common_factor_self = k * costheta;
    VECTOR fi = common_factor_self * rij_2 * drij - common_factor_cross * drkj;
    VECTOR fk = common_factor_self * rkj_2 * drkj - common_factor_cross * drij;
    atomicAdd(&frc[atom_i].x, fi.x);
    atomicAdd(&frc[atom_i].y, fi.y);
    atomicAdd(&frc[atom_i].z, fi.z);
    atomicAdd(&frc[atom_k].x, fk.x);
    atomicAdd(&frc[atom_k].y, fk.y);
    atomicAdd(&frc[atom_k].z, fk.z);
    // Newton's third law: vertex atom j takes -(fi + fk); fi reused as scratch.
    fi = -fi - fk;
    atomicAdd(&frc[atom_j].x, fi.x);
    atomicAdd(&frc[atom_j].y, fi.y);
    atomicAdd(&frc[atom_j].z, fi.z);
    // Whole angle energy credited to the first atom, using the saved k2.
    atomicAdd(&atom_energy[atom_i], k2 * dtheta * dtheta);
  }
}
// Host-side launcher for AngleForceWithAtomEnergyKernel: one thread per
// angle, forces into frc_f and per-atom energies into ene. All pointers are
// device memory; the kernel is enqueued on `stream` with no synchronization
// or error checking here.
void AngleForceWithAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                              const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0,
                              float *frc_f, float *ene, cudaStream_t stream) {
  if (angle_numbers <= 0) {
    return;  // nothing to do; also avoids an invalid zero-block kernel launch
  }
  size_t thread_per_block = 128;
  // Integer ceiling division: exact for every count (the previous
  // ceilf(float / 128) can round incorrectly above 2^24 elements) and reuses
  // thread_per_block instead of a duplicated literal 128.
  size_t block_per_grid = (static_cast<size_t>(angle_numbers) + thread_per_block - 1) / thread_per_block;
  UNSIGNED_INT_VECTOR *uint_crd =
    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
  AngleForceWithAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
    angle_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, angle_k, angle_theta0, frc, ene);
  return;
}

void AngleForceWithAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                              const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0,
                              float *frc_f, float *ene, cudaStream_t stream);

+ 26
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh View File

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
// Fused GPU pass: accumulates harmonic angle forces into frc_f and the angle
// energies into per-atom `ene`; all pointers are device memory (see .cu file).
void AngleForceWithAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                              const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0,
                              float *frc_f, float *ene, cudaStream_t stream);
#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_IMPL_H_

+ 56
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cu View File

@@ -0,0 +1,56 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"

// One thread per bond term (atoms a-b): computes the harmonic bond energy
// k * (r - r0)^2 and accumulates it into the per-atom energy of atom a via
// atomicAdd (several bonds may share an atom).
__global__ void BondAtomEnergyCudaKernel(const int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
                                         const VECTOR *scaler, const int *atom_a, const int *atom_b,
                                         const float *bond_k, const float *bond_r0, float *atom_ene) {
  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (bond_i < bond_numbers) {
    int atom_i = atom_a[bond_i];
    int atom_j = atom_b[bond_i];

    float k = bond_k[bond_i];
    float r0 = bond_r0[bond_i];

    // Periodic displacement between the bonded atoms (see common_sponge.cuh).
    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);

    // norm3df computes sqrt(x^2 + y^2 + z^2) in a single CUDA intrinsic.
    float r1 = norm3df(dr.x, dr.y, dr.z);
    float tempf = r1 - r0;

    // Convention: E = k * (r - r0)^2 (k absorbs the usual 1/2 factor); whole
    // bond energy is credited to the first atom of the pair.
    atomicAdd(&atom_ene[atom_i], k * tempf * tempf);
  }
}

// Host-side launcher for BondAtomEnergyCudaKernel: one thread per bond,
// energies accumulated into per-atom `atom_ene`. All pointers are device
// memory; the kernel is enqueued on `stream` with no synchronization or
// error checking here.
void BondAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                    const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene,
                    cudaStream_t stream) {
  if (bond_numbers <= 0) {
    return;  // nothing to do; also avoids an invalid zero-block kernel launch
  }
  size_t thread_per_block = 128;
  // Integer ceiling division: exact for every count (the previous
  // ceilf(float / 128) can round incorrectly above 2^24 elements) and reuses
  // thread_per_block instead of a duplicated literal 128.
  size_t block_per_grid = (static_cast<size_t>(bond_numbers) + thread_per_block - 1) / thread_per_block;
  UNSIGNED_INT_VECTOR *uint_crd =
    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));

  BondAtomEnergyCudaKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a,
                                                                            atom_b, bond_k, bond_r0, atom_ene);
  return;
}

void BondAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                    const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene, cudaStream_t stream);

+ 26
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh View File

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ATOM_ENERGY_CUDA_GPU_IMPL_H_
// Fixed mismatched include guard: the #define previously dropped "CUDA_"
// (…_ATOM_ENERGY_GPU_IMPL_H_), so the guard macro was never actually defined
// and the header was not protected against repeated inclusion.
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ATOM_ENERGY_CUDA_GPU_IMPL_H_

#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"

// Accumulates harmonic bond energies k * (r - r0)^2 into per-atom `atom_ene`
// on the GPU; all pointers are device memory (see the .cu file).
void BondAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                    const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene, cudaStream_t stream);

#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ATOM_ENERGY_CUDA_GPU_IMPL_H_

+ 55
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cu View File

@@ -0,0 +1,55 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"

// One thread per bond term (atoms a-b): computes the harmonic bond energy
// k * (r - r0)^2 and stores it per-bond in bond_ene[bond_i]. No atomics are
// needed because each bond owns its own output slot.
__global__ void BondEnergyCudaKernel(const int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
                                     const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
                                     float *bond_ene) {
  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (bond_i < bond_numbers) {
    int atom_i = atom_a[bond_i];
    int atom_j = atom_b[bond_i];

    float k = bond_k[bond_i];
    float r0 = bond_r0[bond_i];

    // Periodic displacement between the bonded atoms (see common_sponge.cuh).
    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);

    // norm3df computes sqrt(x^2 + y^2 + z^2) in a single CUDA intrinsic.
    float r1 = norm3df(dr.x, dr.y, dr.z);
    float tempf = r1 - r0;

    // Convention: E = k * (r - r0)^2 (k absorbs the usual 1/2 factor).
    bond_ene[bond_i] = k * tempf * tempf;
  }
}

// Host-side launcher for BondEnergyCudaKernel: one thread per bond, per-bond
// energies written to `bond_ene`. All pointers are device memory; the kernel
// is enqueued on `stream` with no synchronization or error checking here.
void BondEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
                const float *bond_k, const float *bond_r0, float *bond_ene, cudaStream_t stream) {
  if (bond_numbers <= 0) {
    return;  // nothing to do; also avoids an invalid zero-block kernel launch
  }
  size_t thread_per_block = 128;
  // Integer ceiling division: exact for every count (the previous
  // ceilf(float / 128) can round incorrectly above 2^24 elements) and reuses
  // thread_per_block instead of a duplicated literal 128.
  size_t block_per_grid = (static_cast<size_t>(bond_numbers) + thread_per_block - 1) / thread_per_block;
  UNSIGNED_INT_VECTOR *uint_crd =
    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));

  BondEnergyCudaKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a, atom_b,
                                                                        bond_k, bond_r0, bond_ene);
  return;
}

void BondEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
                const float *bond_k, const float *bond_r0, float *bond_ene, cudaStream_t stream);

+ 26
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh View File

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ENERGY_CUDA_GPU_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ENERGY_CUDA_GPU_IMPL_H_

#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"

// Computes per-bond harmonic energies k * (r - r0)^2 into `bond_ene` on the
// GPU; all pointers are device memory (see the .cu file).
void BondEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
                const float *bond_k, const float *bond_r0, float *bond_ene, cudaStream_t stream);

#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ENERGY_CUDA_GPU_IMPL_H_

+ 60
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cu View File

@@ -0,0 +1,60 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"

// Harmonic-bond force kernel: one thread per bond.
// For bond i between atoms (atom_a[i], atom_b[i]) with stiffness k and
// equilibrium length r0, the pair force is f = 2*k*(1 - r0/|dr|)*dr.
// Contributions are accumulated with atomicAdd because several bonds may
// share an atom and threads would otherwise race on frc[].
__global__ void BondForceCudaKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
                                    const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
                                    VECTOR *frc) {
  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (bond_i < bond_numbers) {
    int atom_i = atom_a[bond_i];
    int atom_j = atom_b[bond_i];

    float k = bond_k[bond_i];
    float r0 = bond_r0[bond_i];
    // Minimum-image displacement between the two bonded atoms.
    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
    // rnorm3df gives the reciprocal length 1/|dr| in a single intrinsic.
    float r_1 = rnorm3df(dr.x, dr.y, dr.z);
    float tempf = 1.0 - r0 * r_1;

    VECTOR f = 2 * tempf * k * dr;
    // Newton's third law: -f on atom_i, +f on atom_j.
    atomicAdd(&frc[atom_i].x, -f.x);
    atomicAdd(&frc[atom_i].y, -f.y);
    atomicAdd(&frc[atom_i].z, -f.z);

    atomicAdd(&frc[atom_j].x, f.x);
    atomicAdd(&frc[atom_j].y, f.y);
    atomicAdd(&frc[atom_j].z, f.z);
  }
}

// Host launcher for BondForceCudaKernel: reinterprets the flat float/int
// device buffers as vector types and dispatches one thread per bond.
void BondForce(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
               const float *bond_k, const float *bond_r0, float *frc_f, cudaStream_t stream) {
  const size_t threads = 128;
  const size_t blocks = ceilf(static_cast<float>(bond_numbers) / 128);
  const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f);
  const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f);
  VECTOR *frc = reinterpret_cast<VECTOR *>(frc_f);
  BondForceCudaKernel<<<blocks, threads, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a, atom_b, bond_k, bond_r0,
                                                      frc);
}

void BondForce(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *frc_f, cudaStream_t stream);

+ 25
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh View File

@@ -0,0 +1,25 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_CUDA_GPU_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_CUDA_GPU_IMPL_H_

#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"

void BondForce(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *frc_f, cudaStream_t stream);
#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_CUDA_GPU_IMPL_H_

+ 67
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cu View File

@@ -0,0 +1,67 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"

// Harmonic-bond force + per-atom energy kernel: one thread per bond.
// Force: f = 2*k*(|dr| - r0)/|dr| * dr; energy: k*(|dr| - r0)^2, accumulated
// on the first atom of the bond only (so summing atom_energy gives the total
// bond energy exactly once per bond). All accumulation is atomic because
// bonds can share atoms.
__global__ void BondForceWithAtomEnergyKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
                                              const VECTOR *scaler, const int *atom_a, const int *atom_b,
                                              const float *bond_k, const float *bond_r0, VECTOR *frc,
                                              float *atom_energy) {
  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (bond_i < bond_numbers) {
    int atom_i = atom_a[bond_i];
    int atom_j = atom_b[bond_i];

    float k = bond_k[bond_i];
    float r0 = bond_r0[bond_i];

    // Minimum-image displacement between the two bonded atoms.
    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);

    float abs_r = norm3df(dr.x, dr.y, dr.z);
    float r_1 = 1. / abs_r;
    // tempf is the bond stretch (|dr| - r0), reused for force and energy.
    float tempf = abs_r - r0;
    VECTOR f = 2 * tempf * r_1 * k * dr;

    // Newton's third law: -f on atom_i, +f on atom_j.
    atomicAdd(&frc[atom_i].x, -f.x);
    atomicAdd(&frc[atom_i].y, -f.y);
    atomicAdd(&frc[atom_i].z, -f.z);

    atomicAdd(&frc[atom_j].x, f.x);
    atomicAdd(&frc[atom_j].y, f.y);
    atomicAdd(&frc[atom_j].z, f.z);

    atomicAdd(&atom_energy[atom_i], k * tempf * tempf);
  }
}

// Host launcher for BondForceWithAtomEnergyKernel: reinterprets the flat
// device buffers as vector types and dispatches one thread per bond.
void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                             const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e,
                             cudaStream_t stream) {
  const size_t threads = 128;
  const size_t blocks = ceilf(static_cast<float>(bond_numbers) / 128);
  const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f);
  const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f);
  VECTOR *frc = reinterpret_cast<VECTOR *>(frc_f);
  BondForceWithAtomEnergyKernel<<<blocks, threads, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a, atom_b, bond_k,
                                                                bond_r0, frc, atom_e);
}
void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e,
cudaStream_t stream);

+ 26
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh View File

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_ENERGY_IMPL_H_

#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"

void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e,
cudaStream_t stream);
#endif

+ 67
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cu View File

@@ -0,0 +1,67 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"

// Harmonic-bond force + per-atom virial kernel: one thread per bond.
// Force: f = 2*k*(|dr| - r0)/|dr| * dr; the accumulated scalar is
// |dr| * k * (|dr| - r0), added to the first atom of the bond only
// (equal to dr.f / 2 for this bond). Accumulation is atomic because
// bonds can share atoms.
__global__ void BondForceWithAtomVirialKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
                                              const VECTOR *scaler, const int *atom_a, const int *atom_b,
                                              const float *bond_k, const float *bond_r0, VECTOR *frc,
                                              float *atom_virial) {
  int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (bond_i < bond_numbers) {
    int atom_i = atom_a[bond_i];
    int atom_j = atom_b[bond_i];

    float k = bond_k[bond_i];
    float r0 = bond_r0[bond_i];

    // Minimum-image displacement between the two bonded atoms.
    VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);

    float abs_r = norm3df(dr.x, dr.y, dr.z);
    float r_1 = 1. / abs_r;
    // tempf folds the stiffness into the stretch: k * (|dr| - r0).
    float tempf = (abs_r - r0) * k;
    VECTOR f = 2 * tempf * r_1 * dr;

    // Newton's third law: -f on atom_i, +f on atom_j.
    atomicAdd(&frc[atom_i].x, -f.x);
    atomicAdd(&frc[atom_i].y, -f.y);
    atomicAdd(&frc[atom_i].z, -f.z);

    atomicAdd(&frc[atom_j].x, f.x);
    atomicAdd(&frc[atom_j].y, f.y);
    atomicAdd(&frc[atom_j].z, f.z);

    atomicAdd(&atom_virial[atom_i], abs_r * tempf);
  }
}

// Host launcher for BondForceWithAtomVirialKernel: reinterprets the flat
// device buffers as vector types and dispatches one thread per bond.
void BondForceWithAtomVirial(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                             const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_v,
                             cudaStream_t stream) {
  const size_t threads = 128;
  const size_t blocks = ceilf(static_cast<float>(bond_numbers) / 128);
  const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f);
  const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f);
  VECTOR *frc = reinterpret_cast<VECTOR *>(frc_f);
  BondForceWithAtomVirialKernel<<<blocks, threads, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a, atom_b, bond_k,
                                                                bond_r0, frc, atom_v);
}
void BondForceWithAtomVirial(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_v,
cudaStream_t stream);

+ 26
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh View File

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_IMPL_H_

#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"

void BondForceWithAtomVirial(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_v,
cudaStream_t stream);
#endif

+ 94
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh View File

@@ -0,0 +1,94 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_SPONGE_COMMONHW_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_SPONGE_COMMONHW_H_

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>

#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "runtime/device/gpu/cuda_common.h"

#define CONSTANT_Pi 3.1415926535897932

// 3-component float vector used for coordinates, displacements and forces.
struct VECTOR {
  float x;
  float y;
  float z;
};
// Coordinates stored in the compressed unsigned-integer metric of the
// periodic box (each axis mapped onto the full unsigned int range).
struct UNSIGNED_INT_VECTOR {
  unsigned int uint_x;
  unsigned int uint_y;
  unsigned int uint_z;
};
// Minimum-image displacement a - b: unsigned subtraction wraps modulo 2^32
// and the cast to signed int maps the difference into [-2^31, 2^31), which
// realizes the periodic minimum-image convention; scaler converts from the
// compressed integer metric back to real-space lengths per axis.
__device__ __host__ static inline VECTOR Get_Periodic_Displacement(const UNSIGNED_INT_VECTOR uvec_a,
                                                                   const UNSIGNED_INT_VECTOR uvec_b,
                                                                   const VECTOR scaler) {
  VECTOR dr;
  dr.x = (static_cast<int>(uvec_a.uint_x - uvec_b.uint_x)) * scaler.x;
  dr.y = (static_cast<int>(uvec_a.uint_y - uvec_b.uint_y)) * scaler.y;
  dr.z = (static_cast<int>(uvec_a.uint_z - uvec_b.uint_z)) * scaler.z;
  return dr;
}

// Component-wise vector addition.
__device__ __host__ static inline VECTOR operator+(const VECTOR &veca, const VECTOR &vecb) {
  VECTOR vec;
  vec.x = veca.x + vecb.x;
  vec.y = veca.y + vecb.y;
  vec.z = veca.z + vecb.z;
  return vec;
}
// Dot (inner) product.
__device__ __host__ static inline float operator*(const VECTOR &veca, const VECTOR &vecb) {
  return veca.x * vecb.x + veca.y * vecb.y + veca.z * vecb.z;
}
// Scalar-vector multiplication.
__device__ __host__ static inline VECTOR operator*(const float &a, const VECTOR &vecb) {
  VECTOR vec;
  vec.x = a * vecb.x;
  vec.y = a * vecb.y;
  vec.z = a * vecb.z;
  return vec;
}

// Component-wise vector subtraction.
__device__ __host__ static inline VECTOR operator-(const VECTOR &veca, const VECTOR &vecb) {
  VECTOR vec;
  vec.x = veca.x - vecb.x;
  vec.y = veca.y - vecb.y;
  vec.z = veca.z - vecb.z;
  return vec;
}

// Unary negation.
__device__ __host__ static inline VECTOR operator-(const VECTOR &vecb) {
  VECTOR vec;
  vec.x = -vecb.x;
  vec.y = -vecb.y;
  vec.z = -vecb.z;
  return vec;
}

// Cross product. NOTE: '^' has LOWER precedence than '*' in C++, so callers
// must parenthesize mixed expressions, e.g. (a ^ b) * c.
__device__ __host__ static inline VECTOR operator^(const VECTOR &veca, const VECTOR &vecb) {
  VECTOR vec;
  vec.x = veca.y * vecb.z - veca.z * vecb.y;
  vec.y = veca.z * vecb.x - veca.x * vecb.z;
  vec.z = veca.x * vecb.y - veca.y * vecb.x;
  return vec;
}

#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_SPONGE_COMMONHW_H_

+ 83
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cu View File

@@ -0,0 +1,83 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
// Dihedral (torsion) per-atom energy kernel: one thread per dihedral.
// Energy term: pk + cos(pn*phi)*gamc + sin(pn*phi)*gams, accumulated on the
// first atom of the dihedral only (so summing ene gives each term once).
// ipn is accepted for a signature uniform with the force kernels but is not
// needed by the energy expression.
__global__ void DihedralAtomEnergyKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
                                         const VECTOR *scaler, const int *atom_a, const int *atom_b, const int *atom_c,
                                         const int *atom_d, const int *ipn, const float *pk, const float *gamc,
                                         const float *gams, const float *pn, float *ene) {
  int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (dihedral_i < dihedral_numbers) {
    int atom_i = atom_a[dihedral_i];
    int atom_j = atom_b[dihedral_i];
    int atom_k = atom_c[dihedral_i];
    int atom_l = atom_d[dihedral_i];
    float temp_pk = pk[dihedral_i];
    float temp_pn = pn[dihedral_i];
    float temp_gamc = gamc[dihedral_i];
    float temp_gams = gams[dihedral_i];
    // Minimum-image displacements of the three bonds forming the dihedral.
    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
    VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
    // Normals of the (i,j,k) and (j,k,l) planes.
    VECTOR r1 = drij ^ drkj;
    VECTOR r2 = drkl ^ drkj;
    float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
    float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
    float r1_1_r2_1 = r1_1 * r2_1;
    // cos(phi), clamped away from +/-1 so acosf stays well-conditioned.
    float phi = r1 * r2 * r1_1_r2_1;
    phi = fmaxf(-0.999999, fminf(phi, 0.999999));
    phi = acosf(phi);
    float sign = (r2 ^ r1) * drkj;
    // BUGFIX: copysignf returns its result; the original call discarded it,
    // so phi never received the sign of the torsion.
    phi = copysignf(phi, sign);
    phi = CONSTANT_Pi - phi;
    float nphi = temp_pn * phi;
    float cos_nphi = cosf(nphi);
    float sin_nphi = sinf(nphi);
    atomicAdd(&ene[atom_i], (temp_pk + cos_nphi * temp_gamc + sin_nphi * temp_gams));
  }
}
// Host launcher for DihedralAtomEnergyKernel: reinterprets the flat device
// buffers as vector types and dispatches one thread per dihedral.
void DihedralAtomEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                        const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
                        const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream) {
  const size_t threads = 128;
  const size_t blocks = ceilf(static_cast<float>(dihedral_numbers) / 128);
  const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f);
  const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f);
  DihedralAtomEnergyKernel<<<blocks, threads, 0, stream>>>(dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c,
                                                           atom_d, ipn, pk, gamc, gams, pn, ene);
}
void DihedralAtomEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream);

+ 26
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh View File

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void DihedralAtomEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream);
#endif

+ 83
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_energy_impl.cu View File

@@ -0,0 +1,83 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
// Dihedral (torsion) per-term energy kernel: one thread per dihedral.
// Writes pk + cos(pn*phi)*gamc + sin(pn*phi)*gams into ene[dihedral_i]
// (no atomics needed: each thread owns its own output slot).
// ipn is accepted for a signature uniform with the force kernels but is not
// needed by the energy expression.
__global__ void DihedralEnergyKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
                                     const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
                                     const int *ipn, const float *pk, const float *gamc, const float *gams,
                                     const float *pn, float *ene) {
  int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (dihedral_i < dihedral_numbers) {
    int atom_i = atom_a[dihedral_i];
    int atom_j = atom_b[dihedral_i];
    int atom_k = atom_c[dihedral_i];
    int atom_l = atom_d[dihedral_i];
    float temp_pk = pk[dihedral_i];
    float temp_pn = pn[dihedral_i];
    float temp_gamc = gamc[dihedral_i];
    float temp_gams = gams[dihedral_i];
    // Minimum-image displacements of the three bonds forming the dihedral.
    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
    VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
    // Normals of the (i,j,k) and (j,k,l) planes.
    VECTOR r1 = drij ^ drkj;
    VECTOR r2 = drkl ^ drkj;
    float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
    float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
    float r1_1_r2_1 = r1_1 * r2_1;
    // cos(phi), clamped away from +/-1 so acosf stays well-conditioned.
    float phi = r1 * r2 * r1_1_r2_1;
    phi = fmaxf(-0.999999, fminf(phi, 0.999999));
    phi = acosf(phi);
    float sign = (r2 ^ r1) * drkj;
    // BUGFIX: copysignf returns its result; the original call discarded it,
    // so phi never received the sign of the torsion.
    phi = copysignf(phi, sign);
    phi = CONSTANT_Pi - phi;
    float nphi = temp_pn * phi;
    float cos_nphi = cosf(nphi);
    float sin_nphi = sinf(nphi);
    ene[dihedral_i] = (temp_pk + cos_nphi * temp_gamc + sin_nphi * temp_gams);
  }
}
// Host launcher for DihedralEnergyKernel: reinterprets the flat device
// buffers as vector types and dispatches one thread per dihedral.
void DihedralEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                    const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
                    const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream) {
  const size_t threads = 128;
  const size_t blocks = ceilf(static_cast<float>(dihedral_numbers) / 128);
  const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f);
  const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f);
  DihedralEnergyKernel<<<blocks, threads, 0, stream>>>(dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c,
                                                       atom_d, ipn, pk, gamc, gams, pn, ene);
}
void DihedralEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream);

+ 26
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh View File

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void DihedralEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream);
#endif

+ 119
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cu View File

@@ -0,0 +1,119 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
// Dihedral (torsion) force kernel: one thread per dihedral.
// Computes dE/dphi for the term pk + cos(pn*phi)*gamc + sin(pn*phi)*gams and
// distributes the resulting forces onto the four atoms via atomicAdd (atoms
// may belong to several dihedrals).
__global__ void DihedralForceKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
                                    const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
                                    const int *ipn, const float *pk, const float *gamc, const float *gams,
                                    const float *pn, VECTOR *frc) {
  int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (dihedral_i < dihedral_numbers) {
    int atom_i = atom_a[dihedral_i];
    int atom_j = atom_b[dihedral_i];
    int atom_k = atom_c[dihedral_i];
    int atom_l = atom_d[dihedral_i];
    int temp_ipn = ipn[dihedral_i];
    float temp_pk = pk[dihedral_i];
    float temp_pn = pn[dihedral_i];
    float temp_gamc = gamc[dihedral_i];
    float temp_gams = gams[dihedral_i];
    // Minimum-image displacements of the three bonds forming the dihedral.
    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
    VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
    // Normals of the (i,j,k) and (j,k,l) planes.
    VECTOR r1 = drij ^ drkj;
    VECTOR r2 = drkl ^ drkj;
    float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
    float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
    float r1_2 = r1_1 * r1_1;
    float r2_2 = r2_1 * r2_1;
    float r1_1_r2_1 = r1_1 * r2_1;
    // cos(phi), clamped away from +/-1 so acosf stays well-conditioned.
    float phi = r1 * r2 * r1_1_r2_1;
    phi = fmaxf(-0.999999, fminf(phi, 0.999999));
    phi = acosf(phi);
    float sign = (r2 ^ r1) * drkj;
    // BUGFIX: copysignf returns its result; the original call discarded it,
    // so phi never received the sign of the torsion.
    phi = copysignf(phi, sign);
    phi = CONSTANT_Pi - phi;
    float nphi = temp_pn * phi;
    float cos_phi = cosf(phi);
    float sin_phi = sinf(phi);
    float cos_nphi = cosf(nphi);
    float sin_nphi = sinf(nphi);
    float dE_dphi;
    if (fabsf(sin_phi) < 1e-6) {
      // Near phi = 0 or pi, sin(phi) -> 0; use the analytic limit to avoid
      // dividing by ~0. The mask keeps temp_ipn only when it is odd.
      temp_ipn *= temp_ipn % 2;  // (((temp_ipn - 1) & 1) ^ 1)
      dE_dphi = temp_gamc * (temp_pn - temp_ipn + temp_ipn * cos_phi);
    } else {
      dE_dphi = temp_pn * (temp_gamc * sin_nphi - temp_gams * cos_nphi) / sin_phi;
    }
    // Chain rule: gradients of phi w.r.t. the plane normals, then w.r.t. the
    // four atomic positions.
    VECTOR dphi_dr1 = r1_1_r2_1 * r2 + cos_phi * r1_2 * r1;
    VECTOR dphi_dr2 = r1_1_r2_1 * r1 + cos_phi * r2_2 * r2;
    VECTOR dE_dri = dE_dphi * drkj ^ dphi_dr1;
    VECTOR dE_drl = dE_dphi * dphi_dr2 ^ drkj;
    VECTOR dE_drj_part = dE_dphi * ((drij ^ dphi_dr1) + (drkl ^ dphi_dr2));
    VECTOR fi = dE_dri;
    VECTOR fj = dE_drj_part - dE_dri;
    VECTOR fk = -dE_drl - dE_drj_part;
    VECTOR fl = dE_drl;
    atomicAdd(&frc[atom_i].x, fi.x);
    atomicAdd(&frc[atom_i].y, fi.y);
    atomicAdd(&frc[atom_i].z, fi.z);
    atomicAdd(&frc[atom_j].x, fj.x);
    atomicAdd(&frc[atom_j].y, fj.y);
    atomicAdd(&frc[atom_j].z, fj.z);
    atomicAdd(&frc[atom_k].x, fk.x);
    atomicAdd(&frc[atom_k].y, fk.y);
    atomicAdd(&frc[atom_k].z, fk.z);
    atomicAdd(&frc[atom_l].x, fl.x);
    atomicAdd(&frc[atom_l].y, fl.y);
    atomicAdd(&frc[atom_l].z, fl.z);
  }
}
// Host launcher for DihedralForceKernel: reinterprets the flat device
// buffers as vector types and dispatches one thread per dihedral.
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                   const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
                   const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream) {
  const size_t threads = 128;
  const size_t blocks = ceilf(static_cast<float>(dihedral_numbers) / 128);
  const UNSIGNED_INT_VECTOR *uint_crd = reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f);
  const VECTOR *scaler = reinterpret_cast<const VECTOR *>(scaler_f);
  VECTOR *frc = reinterpret_cast<VECTOR *>(frc_f);
  DihedralForceKernel<<<blocks, threads, 0, stream>>>(dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c,
                                                      atom_d, ipn, pk, gamc, gams, pn, frc);
}
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream);

+ 26
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh View File

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream);
#endif

+ 124
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cu View File

@@ -0,0 +1,124 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
// Dihedral (torsion) force + per-atom energy kernel: one thread per dihedral.
// Forces are distributed onto the four atoms; the energy term
// pk + cos(pn*phi)*gamc + sin(pn*phi)*gams is accumulated on the first atom
// only. All accumulation uses atomicAdd since atoms may belong to several
// dihedrals.
__global__ void DihedralForceWithAtomEnergyKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
                                                  const VECTOR *scaler, const int *atom_a, const int *atom_b,
                                                  const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
                                                  const float *gamc, const float *gams, const float *pn, VECTOR *frc,
                                                  float *ene) {
  int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
  if (dihedral_i < dihedral_numbers) {
    int atom_i = atom_a[dihedral_i];
    int atom_j = atom_b[dihedral_i];
    int atom_k = atom_c[dihedral_i];
    int atom_l = atom_d[dihedral_i];
    int temp_ipn = ipn[dihedral_i];
    float temp_pk = pk[dihedral_i];
    float temp_pn = pn[dihedral_i];
    float temp_gamc = gamc[dihedral_i];
    float temp_gams = gams[dihedral_i];
    // Minimum-image displacements of the three bonds forming the dihedral.
    VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
    VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
    VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
    // Normals of the (i,j,k) and (j,k,l) planes.
    VECTOR r1 = drij ^ drkj;
    VECTOR r2 = drkl ^ drkj;
    float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
    float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
    float r1_2 = r1_1 * r1_1;
    float r2_2 = r2_1 * r2_1;
    float r1_1_r2_1 = r1_1 * r2_1;
    // cos(phi), clamped away from +/-1 so acosf stays well-conditioned.
    float phi = r1 * r2 * r1_1_r2_1;
    phi = fmaxf(-0.999999, fminf(phi, 0.999999));
    phi = acosf(phi);
    float sign = (r2 ^ r1) * drkj;
    // BUGFIX: copysignf returns its result; the original call discarded it,
    // so phi never received the sign of the torsion.
    phi = copysignf(phi, sign);
    phi = CONSTANT_Pi - phi;
    float nphi = temp_pn * phi;
    float cos_phi = cosf(phi);
    float sin_phi = sinf(phi);
    float cos_nphi = cosf(nphi);
    float sin_nphi = sinf(nphi);
    float dE_dphi;
    if (fabsf(sin_phi) < 1e-6) {
      // Near phi = 0 or pi, sin(phi) -> 0; use the analytic limit to avoid
      // dividing by ~0. The mask keeps temp_ipn only when it is odd.
      temp_ipn *= (((temp_ipn - 1) & 1) ^ 1);
      dE_dphi = temp_gamc * (temp_pn - temp_ipn + temp_ipn * cos_phi);
    } else {
      dE_dphi = temp_pn * (temp_gamc * sin_nphi - temp_gams * cos_nphi) / sin_phi;
    }
    // Chain rule: gradients of phi w.r.t. the plane normals, then w.r.t. the
    // four atomic positions.
    VECTOR dphi_dr1 = r1_1_r2_1 * r2 + cos_phi * r1_2 * r1;
    VECTOR dphi_dr2 = r1_1_r2_1 * r1 + cos_phi * r2_2 * r2;
    VECTOR dE_dri = dE_dphi * drkj ^ dphi_dr1;
    VECTOR dE_drl = dE_dphi * dphi_dr2 ^ drkj;
    VECTOR dE_drj_part = dE_dphi * ((drij ^ dphi_dr1) + (drkl ^ dphi_dr2));
    VECTOR fi = dE_dri;
    VECTOR fj = dE_drj_part - dE_dri;
    VECTOR fk = -dE_drl - dE_drj_part;
    VECTOR fl = dE_drl;
    atomicAdd(&frc[atom_i].x, fi.x);
    atomicAdd(&frc[atom_i].y, fi.y);
    atomicAdd(&frc[atom_i].z, fi.z);
    atomicAdd(&frc[atom_j].x, fj.x);
    atomicAdd(&frc[atom_j].y, fj.y);
    atomicAdd(&frc[atom_j].z, fj.z);
    atomicAdd(&frc[atom_k].x, fk.x);
    atomicAdd(&frc[atom_k].y, fk.y);
    atomicAdd(&frc[atom_k].z, fk.z);
    atomicAdd(&frc[atom_l].x, fl.x);
    atomicAdd(&frc[atom_l].y, fl.y);
    atomicAdd(&frc[atom_l].z, fl.z);
    atomicAdd(&ene[atom_i], (temp_pk + cos_nphi * temp_gamc + sin_nphi * temp_gams));
  }
}
// Host-side launcher: reinterprets the flat device buffers as vector types and
// dispatches one CUDA thread per dihedral on the given stream.
void DihedralForceWithAtomEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
                                 const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn,
                                 const float *pk, const float *gamc, const float *gams, const float *pn, float *frc_f,
                                 float *ene, cudaStream_t stream) {
  size_t thread_per_block = 128;
  // Exact integer ceiling division; derives the grid size from
  // thread_per_block instead of repeating the literal 128, and avoids the
  // float rounding of the previous ceilf() computation.
  size_t block_per_grid = (static_cast<size_t>(dihedral_numbers) + thread_per_block - 1) / thread_per_block;
  UNSIGNED_INT_VECTOR *uint_crd =
    const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
  VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
  VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
  DihedralForceWithAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
    dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, frc, ene);
}
void DihedralForceWithAtomEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn,
const float *pk, const float *gamc, const float *gams, const float *pn, float *frc_f,
float *ene, cudaStream_t stream);

+ 27
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh View File

@@ -0,0 +1,27 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_WITH_ATOM_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_WITH_ATOM_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void DihedralForceWithAtomEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn,
const float *pk, const float *gamc, const float *gams, const float *pn, float *frc_f,
float *ene, cudaStream_t stream);
#endif

+ 34
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_atom_energy_kernel.cc View File

@@ -0,0 +1,34 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/sponge/angle/angle_atom_energy_kernel.h"
namespace mindspore {
namespace kernel {
// Register the AngleAtomEnergy GPU kernel with the kernel factory.
// Seven inputs: uint32 coordinates, float32 scaler, three int32 atom-index
// tensors, float32 angle_k and angle_theta0; one float32 energy output.
// Instantiated with T = float, T1 = int.
MS_REG_GPU_KERNEL_TWO(AngleAtomEnergy,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      AngleAtomEnergyGpuKernel, float, int)
}  // namespace kernel
}  // namespace mindspore

+ 106
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_atom_energy_kernel.h View File

@@ -0,0 +1,106 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_KERNEL_H_
#include <cuda_runtime_api.h>
#include <vector>
#include <string>
#include <map>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh"
namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class AngleAtomEnergyGpuKernel : public GpuKernel {
 public:
  AngleAtomEnergyGpuKernel() : ele_uint_crd(1) {}
  ~AngleAtomEnergyGpuKernel() override = default;

  // Caches the "angle_numbers" attribute and the flattened element count of
  // every input tensor, then records the buffer sizes used for allocation.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    angle_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "angle_numbers"));
    ele_uint_crd *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0));
    ele_scaler *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1));
    ele_atom_a *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2));
    ele_atom_b *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3));
    ele_atom_c *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4));
    ele_angle_k *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5));
    ele_angle_theta0 *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6));
    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Forwards the device pointers of all inputs plus the energy output buffer
  // to the CUDA implementation.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto crd_dev = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_dev = GetDeviceAddress<T>(inputs, 1);
    auto a_dev = GetDeviceAddress<const T1>(inputs, 2);
    auto b_dev = GetDeviceAddress<const T1>(inputs, 3);
    auto c_dev = GetDeviceAddress<const T1>(inputs, 4);
    auto k_dev = GetDeviceAddress<T>(inputs, 5);
    auto theta0_dev = GetDeviceAddress<T>(inputs, 6);
    auto ene_dev = GetDeviceAddress<T>(outputs, 0);
    AngleAtomEnergy(angle_numbers, crd_dev, scaler_dev, a_dev, b_dev, c_dev, k_dev, theta0_dev, ene_dev,
                    reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_atom_c * sizeof(T1));
    input_size_list_.push_back(ele_angle_k * sizeof(T));
    input_size_list_.push_back(ele_angle_theta0 * sizeof(T));
    // Energy output, sized from the coordinate tensor's element count.
    output_size_list_.push_back(ele_uint_crd * sizeof(T));
  }

 private:
  // Product of all dimensions, i.e. the flat element count of a shape.
  static size_t ElementCount(const std::vector<size_t> &shape) {
    size_t count = 1;
    for (const size_t dim : shape) {
      count *= dim;
    }
    return count;
  }

  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_atom_c = 1;
  size_t ele_angle_k = 1;
  size_t ele_angle_theta0 = 1;

  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int angle_numbers;
};
} // namespace kernel
} // namespace mindspore
#endif

+ 34
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_energy_kernel.cc View File

@@ -0,0 +1,34 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/sponge/angle/angle_energy_kernel.h"
namespace mindspore {
namespace kernel {
// Register the AngleEnergy GPU kernel with the kernel factory.
// Seven inputs: uint32 coordinates, float32 scaler, three int32 atom-index
// tensors, float32 angle_k and angle_theta0; one float32 energy output.
// Instantiated with T = float, T1 = int.
MS_REG_GPU_KERNEL_TWO(AngleEnergy,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      AngleEnergyGpuKernel, float, int)
}  // namespace kernel
}  // namespace mindspore

+ 106
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_energy_kernel.h View File

@@ -0,0 +1,106 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_ENERGY_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_ENERGY_KERNEL_H_
#include <cuda_runtime_api.h>
#include <vector>
#include <string>
#include <map>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_energy_impl.cuh"
namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class AngleEnergyGpuKernel : public GpuKernel {
 public:
  AngleEnergyGpuKernel() : ele_uint_crd(1) {}
  ~AngleEnergyGpuKernel() override = default;

  // Caches the "angle_numbers" attribute and the flattened element count of
  // every input tensor, then records the buffer sizes used for allocation.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    angle_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "angle_numbers"));
    ele_uint_crd *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0));
    ele_scaler *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1));
    ele_atom_a *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2));
    ele_atom_b *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3));
    ele_atom_c *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4));
    ele_angle_k *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5));
    ele_angle_theta0 *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6));
    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Forwards the device pointers of all inputs plus the per-angle energy
  // output buffer to the CUDA implementation.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto crd_dev = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_dev = GetDeviceAddress<T>(inputs, 1);
    auto a_dev = GetDeviceAddress<const T1>(inputs, 2);
    auto b_dev = GetDeviceAddress<const T1>(inputs, 3);
    auto c_dev = GetDeviceAddress<const T1>(inputs, 4);
    auto k_dev = GetDeviceAddress<T>(inputs, 5);
    auto theta0_dev = GetDeviceAddress<T>(inputs, 6);
    auto ene_dev = GetDeviceAddress<T>(outputs, 0);
    AngleEnergy(angle_numbers, crd_dev, scaler_dev, a_dev, b_dev, c_dev, k_dev, theta0_dev, ene_dev,
                reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_atom_c * sizeof(T1));
    input_size_list_.push_back(ele_angle_k * sizeof(T));
    input_size_list_.push_back(ele_angle_theta0 * sizeof(T));
    // One energy value per angle.
    output_size_list_.push_back(angle_numbers * sizeof(T));
  }

 private:
  // Product of all dimensions, i.e. the flat element count of a shape.
  static size_t ElementCount(const std::vector<size_t> &shape) {
    size_t count = 1;
    for (const size_t dim : shape) {
      count *= dim;
    }
    return count;
  }

  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_atom_c = 1;
  size_t ele_angle_k = 1;
  size_t ele_angle_theta0 = 1;

  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int angle_numbers;
};
} // namespace kernel
} // namespace mindspore
#endif

+ 34
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_force_kernel.cc View File

@@ -0,0 +1,34 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/sponge/angle/angle_force_kernel.h"
namespace mindspore {
namespace kernel {
// Register the AngleForce GPU kernel with the kernel factory.
// Seven inputs: uint32 coordinates, float32 scaler, three int32 atom-index
// tensors, float32 angle_k and angle_theta0; one float32 force output.
// Instantiated with T = float, T1 = int.
MS_REG_GPU_KERNEL_TWO(AngleForce,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      AngleForceGpuKernel, float, int)
}  // namespace kernel
}  // namespace mindspore

+ 106
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_force_kernel.h View File

@@ -0,0 +1,106 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_FORCE_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_FORCE_KERNEL_H_
#include <cuda_runtime_api.h>
#include <vector>
#include <string>
#include <map>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_impl.cuh"
namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class AngleForceGpuKernel : public GpuKernel {
 public:
  AngleForceGpuKernel() : ele_uint_crd(1) {}
  ~AngleForceGpuKernel() override = default;

  // Caches the "angle_numbers" attribute and the flattened element count of
  // every input tensor, then records the buffer sizes used for allocation.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    angle_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "angle_numbers"));
    ele_uint_crd *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0));
    ele_scaler *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1));
    ele_atom_a *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2));
    ele_atom_b *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3));
    ele_atom_c *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4));
    ele_angle_k *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5));
    ele_angle_theta0 *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6));
    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Forwards the device pointers of all inputs plus the force output buffer
  // to the CUDA implementation.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto crd_dev = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_dev = GetDeviceAddress<T>(inputs, 1);
    auto a_dev = GetDeviceAddress<const T1>(inputs, 2);
    auto b_dev = GetDeviceAddress<const T1>(inputs, 3);
    auto c_dev = GetDeviceAddress<const T1>(inputs, 4);
    auto k_dev = GetDeviceAddress<T>(inputs, 5);
    auto theta0_dev = GetDeviceAddress<T>(inputs, 6);
    auto frc_dev = GetDeviceAddress<T>(outputs, 0);
    AngleForce(angle_numbers, crd_dev, scaler_dev, a_dev, b_dev, c_dev, k_dev, theta0_dev, frc_dev,
               reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_atom_c * sizeof(T1));
    input_size_list_.push_back(ele_angle_k * sizeof(T));
    input_size_list_.push_back(ele_angle_theta0 * sizeof(T));
    // Force output, sized from the coordinate tensor's element count.
    output_size_list_.push_back(ele_uint_crd * 3 * sizeof(T));
  }

 private:
  // Product of all dimensions, i.e. the flat element count of a shape.
  static size_t ElementCount(const std::vector<size_t> &shape) {
    size_t count = 1;
    for (const size_t dim : shape) {
      count *= dim;
    }
    return count;
  }

  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_atom_c = 1;
  size_t ele_angle_k = 1;
  size_t ele_angle_theta0 = 1;

  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int angle_numbers;
};
} // namespace kernel
} // namespace mindspore
#endif

+ 35
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_force_with_atom_energy_kernel.cc View File

@@ -0,0 +1,35 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/sponge/angle/angle_force_with_atom_energy_kernel.h"
namespace mindspore {
namespace kernel {
// Register the AngleForceWithAtomEnergy GPU kernel with the kernel factory.
// Seven inputs: uint32 coordinates, float32 scaler, three int32 atom-index
// tensors, float32 angle_k and angle_theta0; two float32 outputs
// (force and energy).  Instantiated with T = float, T1 = int.
MS_REG_GPU_KERNEL_TWO(AngleForceWithAtomEnergy,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      AngleForceWithAtomEnergyGpuKernel, float, int)
}  // namespace kernel
}  // namespace mindspore

+ 108
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/angle/angle_force_with_atom_energy_kernel.h View File

@@ -0,0 +1,108 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_KERNEL_H_
#include <cuda_runtime_api.h>
#include <vector>
#include <string>
#include <map>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh"
namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class AngleForceWithAtomEnergyGpuKernel : public GpuKernel {
 public:
  AngleForceWithAtomEnergyGpuKernel() : ele_uint_crd(1) {}
  ~AngleForceWithAtomEnergyGpuKernel() override = default;

  // Caches the "angle_numbers" attribute and the flattened element count of
  // every input tensor, then records the buffer sizes used for allocation.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    angle_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "angle_numbers"));
    ele_uint_crd *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0));
    ele_scaler *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1));
    ele_atom_a *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2));
    ele_atom_b *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3));
    ele_atom_c *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4));
    ele_angle_k *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5));
    ele_angle_theta0 *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6));
    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Forwards the device pointers of all inputs plus the force and energy
  // output buffers to the CUDA implementation.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto crd_dev = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_dev = GetDeviceAddress<T>(inputs, 1);
    auto a_dev = GetDeviceAddress<const T1>(inputs, 2);
    auto b_dev = GetDeviceAddress<const T1>(inputs, 3);
    auto c_dev = GetDeviceAddress<const T1>(inputs, 4);
    auto k_dev = GetDeviceAddress<T>(inputs, 5);
    auto theta0_dev = GetDeviceAddress<T>(inputs, 6);
    auto frc_dev = GetDeviceAddress<T>(outputs, 0);
    auto ene_dev = GetDeviceAddress<T>(outputs, 1);
    AngleForceWithAtomEnergy(angle_numbers, crd_dev, scaler_dev, a_dev, b_dev, c_dev, k_dev, theta0_dev, frc_dev,
                             ene_dev, reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_atom_c * sizeof(T1));
    input_size_list_.push_back(ele_angle_k * sizeof(T));
    input_size_list_.push_back(ele_angle_theta0 * sizeof(T));
    // Force and energy outputs, both sized from the coordinate tensor.
    output_size_list_.push_back(ele_uint_crd * 3 * sizeof(T));
    output_size_list_.push_back(ele_uint_crd * sizeof(T));
  }

 private:
  // Product of all dimensions, i.e. the flat element count of a shape.
  static size_t ElementCount(const std::vector<size_t> &shape) {
    size_t count = 1;
    for (const size_t dim : shape) {
      count *= dim;
    }
    return count;
  }

  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_atom_c = 1;
  size_t ele_angle_k = 1;
  size_t ele_angle_theta0 = 1;

  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int angle_numbers;
};
} // namespace kernel
} // namespace mindspore
#endif

+ 33
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_atom_energy_cuda_gpu_kernel.cc View File

@@ -0,0 +1,33 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/sponge/bond/bond_atom_energy_cuda_gpu_kernel.h"

namespace mindspore {
namespace kernel {
// Register the BondAtomEnergy GPU kernel with the kernel factory.
// Six inputs: uint32 coordinates, float32 scaler, two int32 atom-index
// tensors, float32 bond_k and bond_r0; one float32 energy output.
// Instantiated with T = float, T1 = int.
MS_REG_GPU_KERNEL_TWO(BondAtomEnergy,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      BondAtomEnergyCudaGpuKernel, float, int)

}  // namespace kernel
}  // namespace mindspore

+ 108
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_atom_energy_cuda_gpu_kernel.h View File

@@ -0,0 +1,108 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ATOM_ENERGY_CUDA_GPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ATOM_ENERGY_CUDA_GPU_KERNEL_H_

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh"

#include <cuda_runtime_api.h>
#include <map>
#include <string>
#include <vector>

#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"

namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class BondAtomEnergyCudaGpuKernel : public GpuKernel {
 public:
  BondAtomEnergyCudaGpuKernel() : ele_uint_crd(1) {}
  ~BondAtomEnergyCudaGpuKernel() override = default;

  // Caches the "bond_numbers" attribute and the flattened element count of
  // every input tensor, then records the buffer sizes used for allocation.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));
    ele_uint_crd *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0));
    ele_scaler *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1));
    ele_atom_a *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2));
    ele_atom_b *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3));
    ele_bond_k *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4));
    ele_bond_r0 *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5));
    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Forwards the device pointers of all inputs plus the atom-energy output
  // buffer to the CUDA implementation.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto crd_dev = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_dev = GetDeviceAddress<T>(inputs, 1);
    auto a_dev = GetDeviceAddress<const T1>(inputs, 2);
    auto b_dev = GetDeviceAddress<const T1>(inputs, 3);
    auto k_dev = GetDeviceAddress<T>(inputs, 4);
    auto r0_dev = GetDeviceAddress<T>(inputs, 5);
    auto ene_dev = GetDeviceAddress<T>(outputs, 0);
    BondAtomEnergy(bond_numbers, crd_dev, scaler_dev, a_dev, b_dev, k_dev, r0_dev, ene_dev,
                   reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_bond_k * sizeof(T));
    input_size_list_.push_back(ele_bond_r0 * sizeof(T));
    // One energy value per bond entry.
    output_size_list_.push_back(bond_numbers * sizeof(T));
  }

 private:
  // Product of all dimensions, i.e. the flat element count of a shape.
  static size_t ElementCount(const std::vector<size_t> &shape) {
    size_t count = 1;
    for (const size_t dim : shape) {
      count *= dim;
    }
    return count;
  }

  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_bond_k = 1;
  size_t ele_bond_r0 = 1;

  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int bond_numbers;
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ATOM_ENERGY_CUDA_GPU_KERNEL_H_

+ 33
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_energy_cuda_gpu_kernel.cc View File

@@ -0,0 +1,33 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/sponge/bond/bond_energy_cuda_gpu_kernel.h"

namespace mindspore {
namespace kernel {
// Register the BondEnergy GPU kernel with T = float, T1 = int.
// Inputs: uint_crd (uint32), scaler (float32), atom_a/atom_b (int32),
// bond_k/bond_r0 (float32); output: per-bond energy (float32).
MS_REG_GPU_KERNEL_TWO(BondEnergy,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      BondEnergyCudaGpuKernel, float, int)

} // namespace kernel
} // namespace mindspore

+ 109
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_energy_cuda_gpu_kernel.h View File

@@ -0,0 +1,109 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ENERGY_CUDA_GPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ENERGY_CUDA_GPU_KERNEL_H_

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh"

#include <cuda_runtime_api.h>
#include <map>
#include <string>
#include <vector>

#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"

namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class BondEnergyCudaGpuKernel : public GpuKernel {
 public:
  // In-class member initializers cover all state; no explicit ctor body needed.
  BondEnergyCudaGpuKernel() = default;
  ~BondEnergyCudaGpuKernel() override = default;

  // Reads the "bond_numbers" op attribute and caches the flattened element
  // count of each of the six input tensors for buffer sizing. Always returns true.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));

    auto shape_uint_crd = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
    auto shape_scaler = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
    auto shape_atom_a = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
    auto shape_atom_b = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
    auto shape_bond_k = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
    auto shape_bond_r0 = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);

    // Flatten each shape into a total element count.
    for (const auto &dim : shape_uint_crd) ele_uint_crd *= dim;
    for (const auto &dim : shape_scaler) ele_scaler *= dim;
    for (const auto &dim : shape_atom_a) ele_atom_a *= dim;
    for (const auto &dim : shape_atom_b) ele_atom_b *= dim;
    for (const auto &dim : shape_bond_k) ele_bond_k *= dim;
    for (const auto &dim : shape_bond_r0) ele_bond_r0 *= dim;

    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Launches the BondEnergy CUDA kernel: one energy value per bond.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto uint_crd_f = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
    auto atom_a = GetDeviceAddress<const T1>(inputs, 2);
    auto atom_b = GetDeviceAddress<const T1>(inputs, 3);
    auto bond_k = GetDeviceAddress<T>(inputs, 4);
    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);

    auto bond_ene = GetDeviceAddress<T>(outputs, 0);

    BondEnergy(bond_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0, bond_ene,
               reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_bond_k * sizeof(T));
    input_size_list_.push_back(ele_bond_r0 * sizeof(T));

    output_size_list_.push_back(bond_numbers * sizeof(T));
  }

 private:
  // Flattened element counts of the six inputs, computed in Init().
  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_bond_k = 1;
  size_t ele_bond_r0 = 1;

  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int bond_numbers = 0;  // fixed: was uninitialized until Init() ran
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_ENERGY_CUDA_GPU_KERNEL_H_

+ 33
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_cuda_gpu_kernel.cc View File

@@ -0,0 +1,33 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/sponge/bond/bond_force_cuda_gpu_kernel.h"

namespace mindspore {
namespace kernel {
// Register the BondForce GPU kernel with T = float, T1 = int.
// Inputs: uint_crd (uint32), scaler (float32), atom_a/atom_b (int32),
// bond_k/bond_r0 (float32); output: force array (float32).
MS_REG_GPU_KERNEL_TWO(BondForce,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      BondForceCudaGpuKernel, float, int)

} // namespace kernel
} // namespace mindspore

+ 109
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_cuda_gpu_kernel.h View File

@@ -0,0 +1,109 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_CUDA_GPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_CUDA_GPU_KERNEL_H_

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh"

#include <cuda_runtime_api.h>
#include <map>
#include <string>
#include <vector>

#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"

namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class BondForceCudaGpuKernel : public GpuKernel {
 public:
  // In-class member initializers cover all state; no explicit ctor body needed.
  BondForceCudaGpuKernel() = default;
  ~BondForceCudaGpuKernel() override = default;

  // Reads the "bond_numbers" op attribute and caches the flattened element
  // count of each of the six input tensors for buffer sizing. Always returns true.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));

    auto shape_uint_crd = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
    auto shape_scaler = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
    auto shape_atom_a = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
    auto shape_atom_b = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
    auto shape_bond_k = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
    auto shape_bond_r0 = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);

    // Flatten each shape into a total element count.
    for (const auto &dim : shape_uint_crd) ele_uint_crd *= dim;
    for (const auto &dim : shape_scaler) ele_scaler *= dim;
    for (const auto &dim : shape_atom_a) ele_atom_a *= dim;
    for (const auto &dim : shape_atom_b) ele_atom_b *= dim;
    for (const auto &dim : shape_bond_k) ele_bond_k *= dim;
    for (const auto &dim : shape_bond_r0) ele_bond_r0 *= dim;

    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Launches the BondForce CUDA kernel on the given stream.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto uint_crd_f = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
    auto atom_a = GetDeviceAddress<const T1>(inputs, 2);
    auto atom_b = GetDeviceAddress<const T1>(inputs, 3);
    auto bond_k = GetDeviceAddress<T>(inputs, 4);
    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);

    auto frc_f = GetDeviceAddress<T>(outputs, 0);

    BondForce(bond_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0, frc_f,
              reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_bond_k * sizeof(T));
    input_size_list_.push_back(ele_bond_r0 * sizeof(T));

    // Force output: 3 components (x, y, z) per entry.
    output_size_list_.push_back(bond_numbers * 3 * sizeof(T));
  }

 private:
  // Flattened element counts of the six inputs, computed in Init().
  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_bond_k = 1;
  size_t ele_bond_r0 = 1;

  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int bond_numbers = 0;  // fixed: was uninitialized until Init() ran
};
} // namespace kernel
} // namespace mindspore
#endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_CUDA_GPU_KERNEL_H_

+ 34
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_energy_kernel.cc View File

@@ -0,0 +1,34 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_energy_kernel.h"

namespace mindspore {
namespace kernel {
// Register the BondForceWithAtomEnergy GPU kernel with T = float, T1 = int.
// Inputs: uint_crd (uint32), scaler (float32), atom_a/atom_b (int32),
// bond_k/bond_r0 (float32); outputs: force and atom energy (both float32).
MS_REG_GPU_KERNEL_TWO(BondForceWithAtomEnergy,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      BondForceWithAtomEnergyGpuKernel, float, int)

} // namespace kernel
} // namespace mindspore

+ 108
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_energy_kernel.h View File

@@ -0,0 +1,108 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_ENERGY_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_ENERGY_KERNEL_H_

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh"

#include <cuda_runtime_api.h>
#include <map>
#include <string>
#include <vector>

#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"

namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class BondForceWithAtomEnergyGpuKernel : public GpuKernel {
 public:
  // In-class member initializers cover all state; no explicit ctor body needed.
  BondForceWithAtomEnergyGpuKernel() = default;
  ~BondForceWithAtomEnergyGpuKernel() override = default;

  // Reads the "bond_numbers" op attribute and caches the flattened element
  // count of each of the six input tensors for buffer sizing. Always returns true.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));

    auto shape_uint_crd = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
    auto shape_scaler = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
    auto shape_atom_a = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
    auto shape_atom_b = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
    auto shape_bond_k = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
    auto shape_bond_r0 = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);

    // Flatten each shape into a total element count.
    for (const auto &dim : shape_uint_crd) ele_uint_crd *= dim;
    for (const auto &dim : shape_scaler) ele_scaler *= dim;
    for (const auto &dim : shape_atom_a) ele_atom_a *= dim;
    for (const auto &dim : shape_atom_b) ele_atom_b *= dim;
    for (const auto &dim : shape_bond_k) ele_bond_k *= dim;
    for (const auto &dim : shape_bond_r0) ele_bond_r0 *= dim;

    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Launches the BondForceWithAtomEnergy CUDA kernel: writes both the force
  // array and the per-atom energy array in one pass.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto uint_crd_f = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
    auto atom_a = GetDeviceAddress<const T1>(inputs, 2);
    auto atom_b = GetDeviceAddress<const T1>(inputs, 3);
    auto bond_k = GetDeviceAddress<T>(inputs, 4);
    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);

    auto frc_f = GetDeviceAddress<T>(outputs, 0);
    auto atom_e = GetDeviceAddress<T>(outputs, 1);
    BondForceWithAtomEnergy(bond_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0, frc_f, atom_e,
                            reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_bond_k * sizeof(T));
    input_size_list_.push_back(ele_bond_r0 * sizeof(T));

    // Outputs: force (3 components per entry) and per-atom energy.
    output_size_list_.push_back(bond_numbers * 3 * sizeof(T));
    output_size_list_.push_back(bond_numbers * sizeof(T));
  }

 private:
  // Flattened element counts of the six inputs, computed in Init().
  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_bond_k = 1;
  size_t ele_bond_r0 = 1;

  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int bond_numbers = 0;  // fixed: was uninitialized until Init() ran
};
} // namespace kernel
} // namespace mindspore
#endif

+ 34
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_virial_kernel.cc View File

@@ -0,0 +1,34 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_virial_kernel.h"

namespace mindspore {
namespace kernel {
// Register the BondForceWithAtomVirial GPU kernel with T = float, T1 = int.
// Inputs: uint_crd (uint32), scaler (float32), atom_a/atom_b (int32),
// bond_k/bond_r0 (float32); outputs: force and atom virial (both float32).
MS_REG_GPU_KERNEL_TWO(BondForceWithAtomVirial,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      BondForceWithAtomVirialGpuKernel, float, int)

} // namespace kernel
} // namespace mindspore

+ 108
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/bond/bond_force_with_atom_virial_kernel.h View File

@@ -0,0 +1,108 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONG_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_KERNEL_H_

#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh"

#include <cuda_runtime_api.h>
#include <map>
#include <string>
#include <vector>

#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"

namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class BondForceWithAtomVirialGpuKernel : public GpuKernel {
 public:
  // In-class member initializers cover all state; no explicit ctor body needed.
  BondForceWithAtomVirialGpuKernel() = default;
  ~BondForceWithAtomVirialGpuKernel() override = default;

  // Reads the "bond_numbers" op attribute and caches the flattened element
  // count of each of the six input tensors for buffer sizing. Always returns true.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    bond_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "bond_numbers"));

    auto shape_uint_crd = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
    auto shape_scaler = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
    auto shape_atom_a = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
    auto shape_atom_b = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
    auto shape_bond_k = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
    auto shape_bond_r0 = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);

    // Flatten each shape into a total element count.
    for (const auto &dim : shape_uint_crd) ele_uint_crd *= dim;
    for (const auto &dim : shape_scaler) ele_scaler *= dim;
    for (const auto &dim : shape_atom_a) ele_atom_a *= dim;
    for (const auto &dim : shape_atom_b) ele_atom_b *= dim;
    for (const auto &dim : shape_bond_k) ele_bond_k *= dim;
    for (const auto &dim : shape_bond_r0) ele_bond_r0 *= dim;

    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Launches the BondForceWithAtomVirial CUDA kernel: writes both the force
  // array and the per-atom virial array in one pass.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto uint_crd_f = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
    auto atom_a = GetDeviceAddress<const T1>(inputs, 2);
    auto atom_b = GetDeviceAddress<const T1>(inputs, 3);
    auto bond_k = GetDeviceAddress<T>(inputs, 4);
    auto bond_r0 = GetDeviceAddress<T>(inputs, 5);

    auto frc_f = GetDeviceAddress<T>(outputs, 0);
    auto atom_v = GetDeviceAddress<T>(outputs, 1);
    BondForceWithAtomVirial(bond_numbers, uint_crd_f, scaler_f, atom_a, atom_b, bond_k, bond_r0, frc_f, atom_v,
                            reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_bond_k * sizeof(T));
    input_size_list_.push_back(ele_bond_r0 * sizeof(T));

    // Outputs: force (3 components per entry) and per-atom virial.
    output_size_list_.push_back(bond_numbers * 3 * sizeof(T));
    output_size_list_.push_back(bond_numbers * sizeof(T));
  }

 private:
  // Flattened element counts of the six inputs, computed in Init().
  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_bond_k = 1;
  size_t ele_bond_r0 = 1;

  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int bond_numbers = 0;  // fixed: was uninitialized until Init() ran
};
} // namespace kernel
} // namespace mindspore
#endif

+ 38
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_atom_energy_kernel.cc View File

@@ -0,0 +1,38 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/sponge/dihedral/dihedral_atom_energy_kernel.h"
namespace mindspore {
namespace kernel {
// Register the DihedralAtomEnergy GPU kernel with T = float, T1 = int.
// Inputs: uint_crd (uint32), scaler (float32), atom_a..atom_d and ipn (int32),
// pk/gamc/gams/pn (float32); output: per-atom energy (float32).
MS_REG_GPU_KERNEL_TWO(DihedralAtomEnergy,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      DihedralAtomEnergyGpuKernel, float, int)
} // namespace kernel
} // namespace mindspore

+ 126
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_atom_energy_kernel.h View File

@@ -0,0 +1,126 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_KERNEL_H_
#include <cuda_runtime_api.h>
#include <vector>
#include <string>
#include <map>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh"
namespace mindspore {
namespace kernel {
template <typename T, typename T1>
class DihedralAtomEnergyGpuKernel : public GpuKernel {
 public:
  // In-class member initializers cover all state; no explicit ctor body needed.
  DihedralAtomEnergyGpuKernel() = default;
  ~DihedralAtomEnergyGpuKernel() override = default;

  // Reads the "dihedral_numbers" op attribute and caches the flattened element
  // count of each of the eleven input tensors for buffer sizing. Always returns true.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    dihedral_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "dihedral_numbers"));
    auto shape_uint_crd = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
    auto shape_scaler = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);
    auto shape_atom_a = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2);
    auto shape_atom_b = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3);
    auto shape_atom_c = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);
    auto shape_atom_d = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5);
    auto shape_ipn = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);
    auto shape_pk = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7);
    auto shape_gamc = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8);
    auto shape_gams = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9);
    auto shape_pn = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10);
    // Flatten each shape into a total element count.
    for (const auto &dim : shape_uint_crd) ele_uint_crd *= dim;
    for (const auto &dim : shape_scaler) ele_scaler *= dim;
    for (const auto &dim : shape_atom_a) ele_atom_a *= dim;
    for (const auto &dim : shape_atom_b) ele_atom_b *= dim;
    for (const auto &dim : shape_atom_c) ele_atom_c *= dim;
    for (const auto &dim : shape_atom_d) ele_atom_d *= dim;
    for (const auto &dim : shape_ipn) ele_ipn *= dim;
    for (const auto &dim : shape_pk) ele_pk *= dim;
    for (const auto &dim : shape_gamc) ele_gamc *= dim;
    for (const auto &dim : shape_gams) ele_gams *= dim;
    for (const auto &dim : shape_pn) ele_pn *= dim;
    InitSizeLists();
    return true;
  }
  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }
  // Launches the DihedralAtomEnergy CUDA kernel on the given stream.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto uint_crd_f = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
    auto atom_a = GetDeviceAddress<const T1>(inputs, 2);
    auto atom_b = GetDeviceAddress<const T1>(inputs, 3);
    auto atom_c = GetDeviceAddress<const T1>(inputs, 4);
    auto atom_d = GetDeviceAddress<const T1>(inputs, 5);
    auto ipn = GetDeviceAddress<const T1>(inputs, 6);
    auto pk = GetDeviceAddress<T>(inputs, 7);
    auto gamc = GetDeviceAddress<T>(inputs, 8);
    auto gams = GetDeviceAddress<T>(inputs, 9);
    auto pn = GetDeviceAddress<T>(inputs, 10);
    auto ene = GetDeviceAddress<T>(outputs, 0);
    DihedralAtomEnergy(dihedral_numbers, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn,
                       ene, reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }
 protected:
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_atom_c * sizeof(T1));
    input_size_list_.push_back(ele_atom_d * sizeof(T1));
    input_size_list_.push_back(ele_ipn * sizeof(T1));
    input_size_list_.push_back(ele_pk * sizeof(T));
    input_size_list_.push_back(ele_gamc * sizeof(T));
    input_size_list_.push_back(ele_gams * sizeof(T));
    input_size_list_.push_back(ele_pn * sizeof(T));
    // NOTE(review): output is sized from the uint_crd element count, which is
    // the full coordinate buffer size — likely 3x the per-atom energy array.
    // Kept as-is to preserve behavior; confirm against the CUDA impl.
    output_size_list_.push_back(ele_uint_crd * sizeof(T));
  }
 private:
  // Flattened element counts of the eleven inputs, computed in Init().
  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_atom_c = 1;
  size_t ele_atom_d = 1;
  size_t ele_ipn = 1;
  size_t ele_pk = 1;
  size_t ele_gamc = 1;
  size_t ele_gams = 1;
  size_t ele_pn = 1;
  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int dihedral_numbers = 0;  // fixed: was uninitialized until Init() ran
};
} // namespace kernel
} // namespace mindspore
#endif

+ 38
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_energy_kernel.cc View File

@@ -0,0 +1,38 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/sponge/dihedral/dihedral_energy_kernel.h"
namespace mindspore {
namespace kernel {
// Registers the DihedralEnergy GPU kernel with the kernel factory.
// The eleven input attributes follow the operator's input order declared in
// sponge_ops.py: uint32 atom coordinates, a float32 scaler, five int32
// tensors (atom_a, atom_b, atom_c, atom_d, ipn) and four float32 dihedral
// parameter tensors (pk, gamc, gams, pn); the single output is float32.
// The template is instantiated as DihedralEnergyGpuKernel<float, int>.
MS_REG_GPU_KERNEL_TWO(DihedralEnergy,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      DihedralEnergyGpuKernel, float, int)
} // namespace kernel
} // namespace mindspore

+ 126
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_energy_kernel.h View File

@@ -0,0 +1,126 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_KERNEL_H_
#include <cuda_runtime_api.h>
#include <vector>
#include <string>
#include <map>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh"
namespace mindspore {
namespace kernel {
// GPU kernel wrapper that launches the SPONGE DihedralEnergy CUDA routine.
// It consumes eleven input tensors (atom coordinates, a coordinate scaler,
// four atom-index tensors and five dihedral parameter tensors) and produces
// one energy value per dihedral.
template <typename T, typename T1>
class DihedralEnergyGpuKernel : public GpuKernel {
 public:
  DihedralEnergyGpuKernel() : ele_uint_crd(1) {}
  ~DihedralEnergyGpuKernel() override = default;

  // Reads the "dihedral_numbers" attribute, caches the flat element count of
  // every input tensor, and fills the input/output size lists.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    dihedral_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "dihedral_numbers"));
    ele_uint_crd *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0));
    ele_scaler *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1));
    ele_atom_a *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2));
    ele_atom_b *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3));
    ele_atom_c *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4));
    ele_atom_d *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5));
    ele_ipn *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6));
    ele_pk *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7));
    ele_gamc *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8));
    ele_gams *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9));
    ele_pn *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10));
    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Resolves device pointers for all tensors and launches the CUDA
  // implementation on the caller-provided stream.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto uint_crd_f = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
    auto atom_a = GetDeviceAddress<const T1>(inputs, 2);
    auto atom_b = GetDeviceAddress<const T1>(inputs, 3);
    auto atom_c = GetDeviceAddress<const T1>(inputs, 4);
    auto atom_d = GetDeviceAddress<const T1>(inputs, 5);
    auto ipn = GetDeviceAddress<const T1>(inputs, 6);
    auto pk = GetDeviceAddress<T>(inputs, 7);
    auto gamc = GetDeviceAddress<T>(inputs, 8);
    auto gams = GetDeviceAddress<T>(inputs, 9);
    auto pn = GetDeviceAddress<T>(inputs, 10);
    auto ene = GetDeviceAddress<T>(outputs, 0);
    DihedralEnergy(dihedral_numbers, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, ene,
                   reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  // Byte sizes mirror the element counts gathered in Init(); the output holds
  // one energy value per dihedral.
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_atom_c * sizeof(T1));
    input_size_list_.push_back(ele_atom_d * sizeof(T1));
    input_size_list_.push_back(ele_ipn * sizeof(T1));
    input_size_list_.push_back(ele_pk * sizeof(T));
    input_size_list_.push_back(ele_gamc * sizeof(T));
    input_size_list_.push_back(ele_gams * sizeof(T));
    input_size_list_.push_back(ele_pn * sizeof(T));
    output_size_list_.push_back(dihedral_numbers * sizeof(T));
  }

 private:
  // Flat element count of a tensor shape (product of all dimensions).
  template <typename ShapeVector>
  static size_t ElementCount(const ShapeVector &shape) {
    size_t total = 1;
    for (size_t dim = 0; dim < shape.size(); ++dim) {
      total *= shape[dim];
    }
    return total;
  }

  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_atom_c = 1;
  size_t ele_atom_d = 1;
  size_t ele_ipn = 1;
  size_t ele_pk = 1;
  size_t ele_gamc = 1;
  size_t ele_gams = 1;
  size_t ele_pn = 1;
  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int dihedral_numbers;
};
} // namespace kernel
} // namespace mindspore
#endif

+ 38
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_kernel.cc View File

@@ -0,0 +1,38 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_kernel.h"
namespace mindspore {
namespace kernel {
// Registers the DihedralForce GPU kernel with the kernel factory.
// Input attribute order matches the operator declaration in sponge_ops.py:
// uint32 atom coordinates, a float32 scaler, five int32 tensors
// (atom_a, atom_b, atom_c, atom_d, ipn) and four float32 dihedral parameter
// tensors (pk, gamc, gams, pn); the single float32 output is the force.
// The template is instantiated as DihedralForceGpuKernel<float, int>.
MS_REG_GPU_KERNEL_TWO(DihedralForce,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      DihedralForceGpuKernel, float, int)
} // namespace kernel
} // namespace mindspore

+ 126
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_kernel.h View File

@@ -0,0 +1,126 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_KERNEL_H_
#include <cuda_runtime_api.h>
#include <vector>
#include <string>
#include <map>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh"
namespace mindspore {
namespace kernel {
// GPU kernel wrapper that launches the SPONGE DihedralForce CUDA routine.
// It consumes eleven input tensors (atom coordinates, a coordinate scaler,
// four atom-index tensors and five dihedral parameter tensors) and writes
// the per-atom force contribution of all dihedral terms.
template <typename T, typename T1>
class DihedralForceGpuKernel : public GpuKernel {
 public:
  DihedralForceGpuKernel() : ele_uint_crd(1) {}
  ~DihedralForceGpuKernel() override = default;

  // Reads the "dihedral_numbers" attribute, caches the flat element count of
  // every input tensor, and fills the input/output size lists.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    dihedral_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "dihedral_numbers"));
    ele_uint_crd *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0));
    ele_scaler *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1));
    ele_atom_a *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2));
    ele_atom_b *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3));
    ele_atom_c *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4));
    ele_atom_d *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5));
    ele_ipn *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6));
    ele_pk *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7));
    ele_gamc *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8));
    ele_gams *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9));
    ele_pn *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10));
    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Resolves device pointers for all tensors and launches the CUDA
  // implementation on the caller-provided stream.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto uint_crd_f = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
    auto atom_a = GetDeviceAddress<const T1>(inputs, 2);
    auto atom_b = GetDeviceAddress<const T1>(inputs, 3);
    auto atom_c = GetDeviceAddress<const T1>(inputs, 4);
    auto atom_d = GetDeviceAddress<const T1>(inputs, 5);
    auto ipn = GetDeviceAddress<const T1>(inputs, 6);
    auto pk = GetDeviceAddress<T>(inputs, 7);
    auto gamc = GetDeviceAddress<T>(inputs, 8);
    auto gams = GetDeviceAddress<T>(inputs, 9);
    auto pn = GetDeviceAddress<T>(inputs, 10);
    auto frc_f = GetDeviceAddress<T>(outputs, 0);
    DihedralForce(dihedral_numbers, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn,
                  frc_f, reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  // Byte sizes mirror the element counts gathered in Init().
  // NOTE(review): if uint_crd is [N, 3], ele_uint_crd already equals 3*N, so
  // "ele_uint_crd * 3" over-allocates the [N, 3] force output by a factor of
  // three. Kept as-is to preserve behavior - confirm against the framework's
  // size-list contract before changing.
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_atom_c * sizeof(T1));
    input_size_list_.push_back(ele_atom_d * sizeof(T1));
    input_size_list_.push_back(ele_ipn * sizeof(T1));
    input_size_list_.push_back(ele_pk * sizeof(T));
    input_size_list_.push_back(ele_gamc * sizeof(T));
    input_size_list_.push_back(ele_gams * sizeof(T));
    input_size_list_.push_back(ele_pn * sizeof(T));
    output_size_list_.push_back(ele_uint_crd * 3 * sizeof(T));
  }

 private:
  // Flat element count of a tensor shape (product of all dimensions).
  template <typename ShapeVector>
  static size_t ElementCount(const ShapeVector &shape) {
    size_t total = 1;
    for (size_t dim = 0; dim < shape.size(); ++dim) {
      total *= shape[dim];
    }
    return total;
  }

  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_atom_c = 1;
  size_t ele_atom_d = 1;
  size_t ele_ipn = 1;
  size_t ele_pk = 1;
  size_t ele_gamc = 1;
  size_t ele_gams = 1;
  size_t ele_pn = 1;
  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int dihedral_numbers;
};
} // namespace kernel
} // namespace mindspore
#endif

+ 39
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_with_atom_energy_kernel.cc View File

@@ -0,0 +1,39 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_with_atom_energy_kernel.h"
namespace mindspore {
namespace kernel {
// Registers the DihedralForceWithAtomEnergy GPU kernel with the kernel
// factory. Input attribute order matches the other dihedral operators:
// uint32 atom coordinates, a float32 scaler, five int32 tensors
// (atom_a, atom_b, atom_c, atom_d, ipn) and four float32 dihedral parameter
// tensors (pk, gamc, gams, pn). Two float32 outputs: the force and the
// per-atom energy. Instantiated as DihedralForceWithAtomEnergyGpuKernel<float, int>.
MS_REG_GPU_KERNEL_TWO(DihedralForceWithAtomEnergy,
                      KernelAttr()
                        .AddInputAttr(kNumberTypeUInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeInt32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddInputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32)
                        .AddOutputAttr(kNumberTypeFloat32),
                      DihedralForceWithAtomEnergyGpuKernel, float, int)
} // namespace kernel
} // namespace mindspore

+ 128
- 0
mindspore/ccsrc/backend/kernel_compiler/gpu/sponge/dihedral/dihedral_force_with_atom_energy_kernel.h View File

@@ -0,0 +1,128 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_WITH_ATOM_ENERGY_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_WITH_ATOM_ENERGY_KERNEL_H_
#include <cuda_runtime_api.h>
#include <vector>
#include <string>
#include <map>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "runtime/device/gpu/cuda_common.h"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_with_atom_energy_impl.cuh"
namespace mindspore {
namespace kernel {
// GPU kernel wrapper that launches the SPONGE DihedralForceWithAtomEnergy
// CUDA routine, producing both the dihedral force and the per-atom energy
// contribution in a single pass over the eleven input tensors.
template <typename T, typename T1>
class DihedralForceWithAtomEnergyGpuKernel : public GpuKernel {
 public:
  DihedralForceWithAtomEnergyGpuKernel() : ele_uint_crd(1) {}
  ~DihedralForceWithAtomEnergyGpuKernel() override = default;

  // Reads the "dihedral_numbers" attribute, caches the flat element count of
  // every input tensor, and fills the input/output size lists.
  bool Init(const CNodePtr &kernel_node) override {
    kernel_node_ = kernel_node;
    dihedral_numbers = static_cast<int>(GetAttr<int64_t>(kernel_node, "dihedral_numbers"));
    ele_uint_crd *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0));
    ele_scaler *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1));
    ele_atom_a *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 2));
    ele_atom_b *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 3));
    ele_atom_c *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4));
    ele_atom_d *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 5));
    ele_ipn *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6));
    ele_pk *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 7));
    ele_gamc *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 8));
    ele_gams *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 9));
    ele_pn *= ElementCount(AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 10));
    InitSizeLists();
    return true;
  }

  const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
  const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; }
  const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; }

  // Resolves device pointers for all tensors and launches the CUDA
  // implementation on the caller-provided stream.
  bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &,
              const std::vector<AddressPtr> &outputs, void *stream_ptr) override {
    auto uint_crd_f = GetDeviceAddress<const T1>(inputs, 0);
    auto scaler_f = GetDeviceAddress<T>(inputs, 1);
    auto atom_a = GetDeviceAddress<const T1>(inputs, 2);
    auto atom_b = GetDeviceAddress<const T1>(inputs, 3);
    auto atom_c = GetDeviceAddress<const T1>(inputs, 4);
    auto atom_d = GetDeviceAddress<const T1>(inputs, 5);
    auto ipn = GetDeviceAddress<const T1>(inputs, 6);
    auto pk = GetDeviceAddress<T>(inputs, 7);
    auto gamc = GetDeviceAddress<T>(inputs, 8);
    auto gams = GetDeviceAddress<T>(inputs, 9);
    auto pn = GetDeviceAddress<T>(inputs, 10);
    auto frc_f = GetDeviceAddress<T>(outputs, 0);
    auto ene = GetDeviceAddress<T>(outputs, 1);
    DihedralForceWithAtomEnergy(dihedral_numbers, uint_crd_f, scaler_f, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc,
                                gams, pn, frc_f, ene, reinterpret_cast<cudaStream_t>(stream_ptr));
    return true;
  }

 protected:
  // Byte sizes mirror the element counts gathered in Init().
  // NOTE(review): if uint_crd is [N, 3], ele_uint_crd already equals 3*N, so
  // the two output sizes look 3x larger than a [N, 3] force / [N] energy
  // tensor would need. Kept as-is to preserve behavior - confirm against the
  // framework's size-list contract before changing.
  void InitSizeLists() override {
    input_size_list_.push_back(ele_uint_crd * sizeof(T1));
    input_size_list_.push_back(ele_scaler * sizeof(T));
    input_size_list_.push_back(ele_atom_a * sizeof(T1));
    input_size_list_.push_back(ele_atom_b * sizeof(T1));
    input_size_list_.push_back(ele_atom_c * sizeof(T1));
    input_size_list_.push_back(ele_atom_d * sizeof(T1));
    input_size_list_.push_back(ele_ipn * sizeof(T1));
    input_size_list_.push_back(ele_pk * sizeof(T));
    input_size_list_.push_back(ele_gamc * sizeof(T));
    input_size_list_.push_back(ele_gams * sizeof(T));
    input_size_list_.push_back(ele_pn * sizeof(T));
    output_size_list_.push_back(ele_uint_crd * 3 * sizeof(T));
    output_size_list_.push_back(ele_uint_crd * sizeof(T));
  }

 private:
  // Flat element count of a tensor shape (product of all dimensions).
  template <typename ShapeVector>
  static size_t ElementCount(const ShapeVector &shape) {
    size_t total = 1;
    for (size_t dim = 0; dim < shape.size(); ++dim) {
      total *= shape[dim];
    }
    return total;
  }

  size_t ele_uint_crd = 1;
  size_t ele_scaler = 1;
  size_t ele_atom_a = 1;
  size_t ele_atom_b = 1;
  size_t ele_atom_c = 1;
  size_t ele_atom_d = 1;
  size_t ele_ipn = 1;
  size_t ele_pk = 1;
  size_t ele_gamc = 1;
  size_t ele_gams = 1;
  size_t ele_pn = 1;
  std::vector<size_t> input_size_list_;
  std::vector<size_t> output_size_list_;
  std::vector<size_t> workspace_size_list_;
  int dihedral_numbers;
};
} // namespace kernel
} // namespace mindspore
#endif

+ 16
- 0
mindspore/ops/operations/__init__.py View File

@@ -99,6 +99,9 @@ from ._embedding_cache_ops import (CacheSwapHashmap, SearchCacheIdx, CacheSwapTa
SubAndFilter,
MapUniform, DynamicAssign, PadAndShift)
from .quantum_ops import PQC
from .sponge_ops import (BondForce, BondEnergy, BondAtomEnergy, BondForceWithAtomEnergy, BondForceWithAtomVirial,
DihedralForce, DihedralEnergy, DihedralAtomEnergy, DihedralForceWithAtomEnergy,
AngleForce, AngleEnergy, AngleAtomEnergy, AngleForceWithAtomEnergy)

__all__ = [
'Unique',
@@ -421,6 +424,19 @@ __all__ = [
"Range",
"IndexAdd",
"PQC",
"BondForce",
"BondEnergy",
"BondAtomEnergy",
"BondForceWithAtomEnergy",
"BondForceWithAtomVirial",
"DihedralForce",
"DihedralEnergy",
"DihedralAtomEnergy",
"DihedralForceWithAtomEnergy",
"AngleForce",
"AngleEnergy",
"AngleAtomEnergy",
"AngleForceWithAtomEnergy",
]

__all__.sort()

+ 431
- 0
mindspore/ops/operations/sponge_ops.py View File

@@ -0,0 +1,431 @@
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Operators for sponge."""
from ..primitive import PrimitiveWithInfer, prim_attr_register
from ..._checkparam import Validator as validator
from ...common import dtype as mstype
class BondForce(PrimitiveWithInfer):
    """
    BondForce:

    Calculate the force exerted by the simple harmonic bond on the
    corresponding atoms. Assume the number of harmonic bonds is M and
    the number of atoms is N.

    Inputs:
        - **uint_crd_f** (Tensor, uint32) - [N, 3], the unsigned int coordinate
          value of each atom.
        - **scaler_f** (Tensor, float32) - [3, 1], the 3-D scale factor (x, y, z)
          between the real space float coordinates and the unsigned int coordinates.
        - **atom_a** (Tensor, int32) - [M, 1], the first atom index of each bond.
        - **atom_b** (Tensor, int32) - [M, 1], the second atom index of each bond.
        - **bond_k** (Tensor, float32) - [M, 1], the force constant of each bond.
        - **bond_r0** (Tensor, float32) - [M, 1], the equilibrium length of each bond.

    Outputs:
        - **frc_f** (float32 Tensor) - [N, 3], the force felt by each atom.

    Supported Platforms:
        ``GPU``

    Examples:
    """

    @prim_attr_register
    def __init__(self, bond_numbers):
        # The bond count is stored both on the instance and as a primitive
        # attribute so the GPU kernel can read it at compile time.
        self.bond_numbers = bond_numbers
        bond_io = ['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0']
        self.init_prim_io_names(inputs=bond_io, outputs=['frc_f'])
        self.add_prim_attr('bond_numbers', self.bond_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type):
        """Validate every input dtype and return the output (force) dtype."""
        dtype_checks = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('bond_k_type', bond_k_type, mstype.float32),
            ('bond_r0_type', bond_r0_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_checks:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return bond_r0_type
class BondEnergy(PrimitiveWithInfer):
    """
    BondEnergy:

    Calculate the harmonic potential energy between each bonded atom pair.
    Assume our system has N atoms and M harmonic bonds.

    Inputs:
        Same as operator BondForce().

    Outputs:
        - **bond_ene** (Tensor, float32) - [M, 1], the harmonic potential
          energy for each bond.

    Supported Platforms:
        ``GPU``

    Examples:
    """

    @prim_attr_register
    def __init__(self, bond_numbers):
        # The bond count is stored both on the instance and as a primitive
        # attribute so the GPU kernel can read it at compile time.
        self.bond_numbers = bond_numbers
        bond_io = ['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0']
        self.init_prim_io_names(inputs=bond_io, outputs=['bond_ene'])
        self.add_prim_attr('bond_numbers', self.bond_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type):
        """Validate every input dtype and return the output (energy) dtype."""
        dtype_checks = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('bond_k_type', bond_k_type, mstype.float32),
            ('bond_r0_type', bond_r0_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_checks:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return bond_r0_type
class BondAtomEnergy(PrimitiveWithInfer):
    """
    BondAtomEnergy:

    Add the potential energy caused by simple harmonic bonds to the total
    potential energy of each atom.

    Inputs:
        Same as operator BondForce().

    Outputs:
        - **atom_ene** (Tensor, float32) - [N, 1], the accumulated potential
          energy for each atom.

    Supported Platforms:
        ``GPU``

    Examples:
    """

    @prim_attr_register
    def __init__(self, bond_numbers):
        # The bond count is stored both on the instance and as a primitive
        # attribute so the GPU kernel can read it at compile time.
        self.bond_numbers = bond_numbers
        bond_io = ['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0']
        self.init_prim_io_names(inputs=bond_io, outputs=['atom_ene'])
        self.add_prim_attr('bond_numbers', self.bond_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type):
        """Validate every input dtype and return the output (energy) dtype."""
        dtype_checks = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('bond_k_type', bond_k_type, mstype.float32),
            ('bond_r0_type', bond_r0_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_checks:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return bond_r0_type
class BondForceWithAtomEnergy(PrimitiveWithInfer):
    """
    BondForceWithAtomEnergy:

    Calculate bond force and harmonic potential energy together.

    Inputs:
        Same as operator BondForce().

    Outputs:
        - **frc_f** (Tensor, float32) - [N, 3], Same as operator BondForce().
        - **atom_e** (Tensor, float32) - [N, 1], Same as atom_ene in operator
          BondAtomEnergy().

    Supported Platforms:
        ``GPU``

    Examples:
    """

    @prim_attr_register
    def __init__(self, bond_numbers):
        # The bond count is stored both on the instance and as a primitive
        # attribute so the GPU kernel can read it at compile time.
        self.bond_numbers = bond_numbers
        bond_io = ['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0']
        self.init_prim_io_names(inputs=bond_io, outputs=['frc_f', 'atom_e'])
        self.add_prim_attr('bond_numbers', self.bond_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type):
        """Validate every input dtype and return the two output dtypes (force, energy)."""
        dtype_checks = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('bond_k_type', bond_k_type, mstype.float32),
            ('bond_r0_type', bond_r0_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_checks:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return bond_r0_type, bond_r0_type
class BondForceWithAtomVirial(PrimitiveWithInfer):
    """
    BondForceWithAtomVirial:

    Calculate bond force and the virial coefficient caused by simple harmonic
    bond for each atom together.

    Inputs:
        Same as operator BondForce().

    Outputs:
        - **frc_f** (Tensor, float32) - [N, 3], Same as operator BondForce().
        - **atom_v** (Tensor, float32) - [N, 1], the accumulated virial
          coefficient for each atom.

    Supported Platforms:
        ``GPU``

    Examples:
    """

    @prim_attr_register
    def __init__(self, bond_numbers):
        # The bond count is stored both on the instance and as a primitive
        # attribute so the GPU kernel can read it at compile time.
        self.bond_numbers = bond_numbers
        bond_io = ['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'bond_k', 'bond_r0']
        self.init_prim_io_names(inputs=bond_io, outputs=['frc_f', 'atom_v'])
        self.add_prim_attr('bond_numbers', self.bond_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, bond_k_type, bond_r0_type):
        """Validate every input dtype and return the two output dtypes (force, virial)."""
        dtype_checks = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('bond_k_type', bond_k_type, mstype.float32),
            ('bond_r0_type', bond_r0_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_checks:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return bond_r0_type, bond_r0_type
class DihedralForce(PrimitiveWithInfer):
    """
    DihedralForce:

    Calculate the force exerted by the dihedral terms on the corresponding
    atoms.

    Inputs (dtypes as validated below; shapes follow the bond operators'
    conventions - confirm against the GPU kernel):
        - **uint_crd_f** (Tensor, uint32) - the unsigned int coordinates.
        - **scaler_f** (Tensor, float32) - the coordinate scale factor.
        - **atom_a**, **atom_b**, **atom_c**, **atom_d** (Tensor, int32) - the
          four atom indices of each dihedral.
        - **ipn** (Tensor, int32) - dihedral parameter.
        - **pk**, **gamc**, **gams**, **pn** (Tensor, float32) - dihedral
          parameters.

    Outputs:
        - **frc_f** (Tensor, float32) - the force felt by each atom.

    Supported Platforms:
        ``GPU``
    """

    @prim_attr_register
    def __init__(self, dihedral_numbers):
        # The dihedral count is stored both on the instance and as a primitive
        # attribute so the GPU kernel can read it at compile time.
        self.dihedral_numbers = dihedral_numbers
        dihedral_io = ['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'atom_d', 'ipn', 'pk',
                       'gamc', 'gams', 'pn']
        self.init_prim_io_names(inputs=dihedral_io, outputs=['frc_f'])
        self.add_prim_attr('dihedral_numbers', self.dihedral_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, atom_d_type,
                    ipn_type, pk_type, gamc_type, gams_type, pn_type):
        """Validate every input dtype and return the output (force) dtype."""
        dtype_checks = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('atom_c_type', atom_c_type, mstype.int32),
            ('atom_d_type', atom_d_type, mstype.int32),
            ('ipn_type', ipn_type, mstype.int32),
            ('pk_type', pk_type, mstype.float32),
            ('gamc_type', gamc_type, mstype.float32),
            ('gams_type', gams_type, mstype.float32),
            ('pn_type', pn_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_checks:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return pn_type
class DihedralEnergy(PrimitiveWithInfer):
    """
    DihedralEnergy:

    Calculate the potential energy of each dihedral term.

    Inputs:
        Same as operator DihedralForce().

    Outputs:
        - **ene** (Tensor, float32) - the energy of each dihedral term.

    Supported Platforms:
        ``GPU``
    """

    @prim_attr_register
    def __init__(self, dihedral_numbers):
        # The dihedral count is stored both on the instance and as a primitive
        # attribute so the GPU kernel can read it at compile time.
        self.dihedral_numbers = dihedral_numbers
        dihedral_io = ['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'atom_d', 'ipn', 'pk',
                       'gamc', 'gams', 'pn']
        self.init_prim_io_names(inputs=dihedral_io, outputs=['ene'])
        self.add_prim_attr('dihedral_numbers', self.dihedral_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, atom_d_type,
                    ipn_type, pk_type, gamc_type, gams_type, pn_type):
        """Validate every input dtype and return the output (energy) dtype."""
        dtype_checks = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('atom_c_type', atom_c_type, mstype.int32),
            ('atom_d_type', atom_d_type, mstype.int32),
            ('ipn_type', ipn_type, mstype.int32),
            ('pk_type', pk_type, mstype.float32),
            ('gamc_type', gamc_type, mstype.float32),
            ('gams_type', gams_type, mstype.float32),
            ('pn_type', pn_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_checks:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return pn_type
class DihedralAtomEnergy(PrimitiveWithInfer):
    """
    Accumulate the dihedral-term energy onto each atom.

    ``dihedral_numbers`` (int) is the number of dihedral terms; it is also
    registered as a primitive attribute. Inputs are the coordinates, scaler,
    four atom-index tensors and per-dihedral parameters; the output ``ene``
    (float32) carries the accumulated per-atom energy.
    """

    @prim_attr_register
    def __init__(self, dihedral_numbers):
        self.dihedral_numbers = dihedral_numbers
        self.init_prim_io_names(
            inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'atom_d', 'ipn', 'pk',
                    'gamc', 'gams', 'pn'],
            outputs=['ene'])
        self.add_prim_attr('dihedral_numbers', self.dihedral_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, atom_d_type,
                    ipn_type, pk_type, gamc_type, gams_type, pn_type):
        """Check every input dtype and propagate pn's float32 type to ene."""
        # Data-driven check list: order matches the declared input order so
        # error reporting is identical to a straight-line sequence of checks.
        dtype_rules = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('atom_c_type', atom_c_type, mstype.int32),
            ('atom_d_type', atom_d_type, mstype.int32),
            ('ipn_type', ipn_type, mstype.int32),
            ('pk_type', pk_type, mstype.float32),
            ('gamc_type', gamc_type, mstype.float32),
            ('gams_type', gams_type, mstype.float32),
            ('pn_type', pn_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_rules:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return pn_type
class DihedralForceWithAtomEnergy(PrimitiveWithInfer):
    """
    Fused dihedral kernel: compute per-atom force and per-atom energy in one pass.

    ``dihedral_numbers`` (int) is the number of dihedral terms; it is also
    registered as a primitive attribute. Inputs match :class:`DihedralForce`;
    outputs are ``frc_f`` and ``ene`` (both float32).
    """

    @prim_attr_register
    def __init__(self, dihedral_numbers):
        self.dihedral_numbers = dihedral_numbers
        self.init_prim_io_names(
            inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'atom_d', 'ipn', 'pk',
                    'gamc', 'gams', 'pn'],
            outputs=['frc_f', 'ene'])
        self.add_prim_attr('dihedral_numbers', self.dihedral_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, atom_d_type,
                    ipn_type, pk_type, gamc_type, gams_type, pn_type):
        """Check every input dtype; both outputs inherit pn's float32 type."""
        # Ordered to match the declared inputs so validation errors surface in
        # the same sequence as an explicit check-per-line implementation.
        dtype_rules = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('atom_c_type', atom_c_type, mstype.int32),
            ('atom_d_type', atom_d_type, mstype.int32),
            ('ipn_type', ipn_type, mstype.int32),
            ('pk_type', pk_type, mstype.float32),
            ('gamc_type', gamc_type, mstype.float32),
            ('gams_type', gams_type, mstype.float32),
            ('pn_type', pn_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_rules:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return pn_type, pn_type
class AngleForce(PrimitiveWithInfer):
    """
    Calculate the force contributed by each 3-atom angle term.

    Args:
        angle_numbers (int): number of angle terms (exposed as the
            ``angle_numbers`` primitive attribute for the GPU kernel).

    Inputs:
        - **uint_crd_f** (Tensor, uint32) - unsigned-int atom coordinates.
        - **scaler_f** (Tensor, float32) - coordinate scaling factors.
        - **atom_a/atom_b/atom_c** (Tensor, int32) - the three atom indices of
          every angle (atom_b presumably the vertex — confirm against kernel).
        - **angle_k** (Tensor, float32) - angle force constants.
        - **angle_theta0** (Tensor, float32) - equilibrium angle values.

    Outputs:
        - **frc_f** (Tensor, float32) - per-atom force contribution.
    """

    @prim_attr_register
    def __init__(self, angle_numbers):
        # Fail fast on a non-integer count instead of inside the kernel launch.
        validator.check_value_type('angle_numbers', angle_numbers, int, self.name)
        self.angle_numbers = angle_numbers
        self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'angle_k',
                                        'angle_theta0'],
                                outputs=['frc_f'])
        self.add_prim_attr('angle_numbers', self.angle_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, angle_k_type,
                    angle_theta0_type):
        """Validate every input dtype; the output dtype follows angle_k (float32)."""
        validator.check_tensor_dtype_valid('uint_crd_f_dtype', uint_crd_f_dtype, [mstype.uint32], self.name)
        validator.check_tensor_dtype_valid('scaler_f_type', scaler_f_type, [mstype.float32], self.name)
        validator.check_tensor_dtype_valid('atom_a_type', atom_a_type, [mstype.int32], self.name)
        validator.check_tensor_dtype_valid('atom_b_type', atom_b_type, [mstype.int32], self.name)
        validator.check_tensor_dtype_valid('atom_c_type', atom_c_type, [mstype.int32], self.name)
        validator.check_tensor_dtype_valid('angle_k_type', angle_k_type, [mstype.float32], self.name)
        validator.check_tensor_dtype_valid('angle_theta0_type', angle_theta0_type, [mstype.float32], self.name)
        return angle_k_type
class AngleEnergy(PrimitiveWithInfer):
    """
    Calculate the energy of each 3-atom angle term.

    ``angle_numbers`` (int) is the number of angle terms; it is also registered
    as a primitive attribute. Inputs are the coordinates, scaler, three
    atom-index tensors, force constants and equilibrium angles; the output
    ``ene`` (float32) holds the per-term energies.
    """

    @prim_attr_register
    def __init__(self, angle_numbers):
        self.angle_numbers = angle_numbers
        self.init_prim_io_names(
            inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'angle_k',
                    'angle_theta0'],
            outputs=['ene'])
        self.add_prim_attr('angle_numbers', self.angle_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, angle_k_type,
                    angle_theta0_type):
        """Check every input dtype and propagate angle_k's float32 type to ene."""
        # Same check order as an explicit per-line sequence, so the first
        # invalid input raises the identical validator error.
        dtype_rules = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('atom_c_type', atom_c_type, mstype.int32),
            ('angle_k_type', angle_k_type, mstype.float32),
            ('angle_theta0_type', angle_theta0_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_rules:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return angle_k_type
class AngleAtomEnergy(PrimitiveWithInfer):
    """
    Accumulate the angle-term energy onto each atom.

    ``angle_numbers`` (int) is the number of angle terms; it is also registered
    as a primitive attribute. Inputs match :class:`AngleEnergy`; the output
    ``ene`` (float32) carries the accumulated per-atom energy.
    """

    @prim_attr_register
    def __init__(self, angle_numbers):
        self.angle_numbers = angle_numbers
        self.init_prim_io_names(
            inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'angle_k',
                    'angle_theta0'],
            outputs=['ene'])
        self.add_prim_attr('angle_numbers', self.angle_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, angle_k_type,
                    angle_theta0_type):
        """Check every input dtype and propagate angle_k's float32 type to ene."""
        # Ordered to match the declared inputs so validation errors surface in
        # the same sequence as a straight-line check implementation.
        dtype_rules = (
            ('uint_crd_f_dtype', uint_crd_f_dtype, mstype.uint32),
            ('scaler_f_type', scaler_f_type, mstype.float32),
            ('atom_a_type', atom_a_type, mstype.int32),
            ('atom_b_type', atom_b_type, mstype.int32),
            ('atom_c_type', atom_c_type, mstype.int32),
            ('angle_k_type', angle_k_type, mstype.float32),
            ('angle_theta0_type', angle_theta0_type, mstype.float32),
        )
        for arg_name, arg_dtype, expected in dtype_rules:
            validator.check_tensor_dtype_valid(arg_name, arg_dtype, [expected], self.name)
        return angle_k_type
class AngleForceWithAtomEnergy(PrimitiveWithInfer):
    """
    Fused angle kernel: compute per-atom force and per-atom energy in one pass.

    Args:
        angle_numbers (int): number of angle terms (exposed as the
            ``angle_numbers`` primitive attribute for the GPU kernel).

    Inputs match :class:`AngleForce` (coordinates, scaler, three atom-index
    tensors, force constants, equilibrium angles); outputs are ``frc_f`` and
    ``ene`` (both float32).
    """

    @prim_attr_register
    def __init__(self, angle_numbers):
        # Fail fast on a non-integer count instead of inside the kernel launch.
        validator.check_value_type('angle_numbers', angle_numbers, int, self.name)
        self.angle_numbers = angle_numbers
        self.init_prim_io_names(inputs=['uint_crd_f', 'scaler_f', 'atom_a', 'atom_b', 'atom_c', 'angle_k',
                                        'angle_theta0'],
                                outputs=['frc_f', 'ene'])
        self.add_prim_attr('angle_numbers', self.angle_numbers)

    def infer_dtype(self, uint_crd_f_dtype, scaler_f_type, atom_a_type, atom_b_type, atom_c_type, angle_k_type,
                    angle_theta0_type):
        """Validate every input dtype; both outputs inherit angle_k's float32 type."""
        validator.check_tensor_dtype_valid('uint_crd_f_dtype', uint_crd_f_dtype, [mstype.uint32], self.name)
        validator.check_tensor_dtype_valid('scaler_f_type', scaler_f_type, [mstype.float32], self.name)
        validator.check_tensor_dtype_valid('atom_a_type', atom_a_type, [mstype.int32], self.name)
        validator.check_tensor_dtype_valid('atom_b_type', atom_b_type, [mstype.int32], self.name)
        validator.check_tensor_dtype_valid('atom_c_type', atom_c_type, [mstype.int32], self.name)
        validator.check_tensor_dtype_valid('angle_k_type', angle_k_type, [mstype.float32], self.name)
        validator.check_tensor_dtype_valid('angle_theta0_type', angle_theta0_type, [mstype.float32], self.name)
        return angle_k_type, angle_k_type

Loading…
Cancel
Save