You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

AvgPool.py 6.5 kB

4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170
  1. from __future__ import absolute_import
  2. import numpy as np
  3. from .Node import Op
  4. from .._base import DNNL_LIB
  5. from ..cpu_links import avg_pool as cpu_avg_pool
  6. from ..gpu_links import CuDNN_average_pooling2d
  7. from ..cpu_links import avg_pool_gradient as cpu_avg_pool_gradient
  8. from ..gpu_links import CuDNN_average_pooling2d_gradient
  9. class Avg_Pool2dOp(Op):
  10. def __init__(self, node_A, kernel_H, kernel_W, padding, stride, ctx=None):
  11. super().__init__(Avg_Pool2dOp, [node_A], ctx)
  12. self.padding = padding
  13. self.stride = stride
  14. self.kernel_H = kernel_H
  15. self.kernel_W = kernel_W
  16. def np_average_pooling(self, input, kernel_H, kernel_W, padding=0, stride=1):
  17. N, C, H, W = input.shape
  18. assert((H + 2 * padding - kernel_H) % stride == 0)
  19. assert((W + 2 * padding - kernel_W) % stride == 0)
  20. pooled_H = (H + 2 * padding - kernel_H) // stride + 1
  21. pooled_W = (W + 2 * padding - kernel_W) // stride + 1
  22. pooled_layer = np.zeros(
  23. shape=(N, C, pooled_H, pooled_W), dtype=np.float32)
  24. pooling_size = kernel_H * kernel_W
  25. for n in range(N):
  26. for c in range(C):
  27. for h in range(pooled_H):
  28. for w in range(pooled_W):
  29. hs = h * stride - padding
  30. ws = w * stride - padding
  31. hend = min(hs + kernel_H, H)
  32. wend = min(ws + kernel_W, W)
  33. hs = max(hs, 0)
  34. ws = max(ws, 0)
  35. for i in range(hs, hend):
  36. for j in range(ws, wend):
  37. pooled_layer[n][c][h][w] += input[n][c][i][j]
  38. pooled_layer[n][c][h][w] /= pooling_size
  39. return pooled_layer
  40. def compute(self, input_vals, output_val, stream_handle=None):
  41. if self.on_cpu:
  42. if DNNL_LIB['DnnlAvgPool']:
  43. cpu_avg_pool(
  44. input_vals[0], self.kernel_H, self.kernel_W, output_val, self.padding, self.stride)
  45. else:
  46. output_val[:] = self.np_average_pooling(
  47. input_vals[0].asnumpy(), self.kernel_H, self.kernel_W, self.padding, self.stride)
  48. else:
  49. CuDNN_average_pooling2d(
  50. input_vals[0], self.kernel_H, self.kernel_W, output_val, self.padding, self.stride, stream_handle)
  51. def gradient(self, output_grad):
  52. return [avg_pool2d_gradient_op(self, output_grad, self.inputs[0], self.kernel_H, self.kernel_W, self.padding, self.stride, ctx=self.raw_ctx)]
  53. def infer_shape(self, input_shapes):
  54. """Need to handle input_vals[0].shape != input_vals[1].shape"""
  55. assert len(input_shapes) == 1
  56. N, C, H, W = input_shapes[0]
  57. p_H = (H + 2 * self.padding - self.kernel_H) // self.stride + 1
  58. p_W = (W + 2 * self.padding - self.kernel_W) // self.stride + 1
  59. return (N, C, p_H, p_W)
  60. class Avg_Pool2d_GradientOp(Op):
  61. def __init__(self, node_out, node_out_gradient, node_in, kernel_H, kernel_W, padding, stride, ctx=None):
  62. super().__init__(Avg_Pool2d_GradientOp, [
  63. node_out, node_out_gradient, node_in], ctx)
  64. self.padding = padding
  65. self.stride = stride
  66. self.kernel_H = kernel_H
  67. self.kernel_W = kernel_W
  68. def np_average_pooling_gradient(self, gradient_y, kernel_H, kernel_W, padding=0, stride=1):
  69. N, C, pooled_H, pooled_W = gradient_y.shape
  70. H = (pooled_H - 1) * stride + kernel_H - 2 * padding
  71. W = (pooled_W - 1) * stride + kernel_W - 2 * padding
  72. gradient_x = np.zeros(shape=(N, C, H, W), dtype=np.float32)
  73. pooling_size = kernel_H * kernel_W
  74. for n in range(N):
  75. for c in range(C):
  76. for h in range(pooled_H):
  77. for w in range(pooled_W):
  78. hs = h * stride - padding
  79. ws = w * stride - padding
  80. hend = min(hs + kernel_H, H)
  81. wend = min(ws + kernel_W, W)
  82. hs = max(hs, 0)
  83. ws = max(ws, 0)
  84. for i in range(hs, hend):
  85. for j in range(ws, wend):
  86. gradient_x[n][c][i][j] += gradient_y[n][c][h][w] / \
  87. pooling_size
  88. return gradient_x
  89. def compute(self, input_vals, output_val, stream_handle=None):
  90. if self.on_cpu:
  91. if DNNL_LIB['DnnlAvgPool_Gradient']:
  92. cpu_avg_pool_gradient(
  93. input_vals[1], self.kernel_H, self.kernel_W, output_val, self.padding, self.stride)
  94. else:
  95. output_val[:] = self.np_average_pooling_gradient(
  96. input_vals[1].asnumpy(), self.kernel_H, self.kernel_W, self.padding, self.stride)
  97. else:
  98. CuDNN_average_pooling2d_gradient(
  99. input_vals[0], input_vals[1], input_vals[2], self.kernel_H, self.kernel_W, output_val, self.padding, self.stride, stream_handle)
  100. def gradient(self, output_grad):
  101. raise NotImplementedError
  102. def infer_shape(self, input_shapes):
  103. assert len(input_shapes) == 3
  104. return input_shapes[2]
  105. def avg_pool2d_op(node_A, kernel_H, kernel_W, padding, stride, ctx=None):
  106. """Average pooling node.
  107. Parameters:
  108. ----
  109. node_A : Node
  110. Input node.
  111. kernel_H :
  112. Kernel height.
  113. kernel_W :
  114. Kernel width.
  115. padding :
  116. Padding size.
  117. stride :
  118. Stride size.
  119. Returns:
  120. ----
  121. A new Node instance created by Op.
  122. """
  123. return Avg_Pool2dOp(node_A, kernel_H, kernel_W, padding, stride, ctx=ctx)
  124. def avg_pool2d_gradient_op(node_out, node_out_gradient, node_in, kernel_H, kernel_W, padding, stride, ctx=None):
  125. """Gradient node of average pooling.
  126. Parameters:
  127. ----
  128. node_out : Node
  129. Output node of average pooling.
  130. node_out_gradient : Node
  131. Previous gradient node.
  132. node_in : Node
  133. Input node of average pooling.
  134. kernel_H :
  135. Kernel height.
  136. kernel_W :
  137. Kernel width.
  138. padding :
  139. Padding size.
  140. stride :
  141. Stride size.
  142. Returns:
  143. ----
  144. A new Node instance created by Op.
  145. """
  146. return Avg_Pool2d_GradientOp(node_out, node_out_gradient, node_in, kernel_H, kernel_W, padding, stride, ctx=ctx)