You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

MaxPool.py 6.8 kB

4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184
  1. from __future__ import absolute_import
  2. import numpy as np
  3. from .Node import Op
  4. from .._base import DNNL_LIB
  5. from ..cpu_links import max_pool as cpu_max_pooling
  6. from ..cpu_links import max_pool_gradient as cpu_max_pooling_gradient
  7. from ..gpu_links import CuDNN_max_pooling2d
  8. from ..gpu_links import CuDNN_max_pooling2d_gradient
  9. def np_max_pooling(input, kernel_H, kernel_W, padding=0, stride=1):
  10. N, C, H, W = input.shape
  11. assert((H + 2 * padding - kernel_H) % stride == 0)
  12. assert((W + 2 * padding - kernel_W) % stride == 0)
  13. pooled_H = (H + 2 * padding - kernel_H) // stride + 1
  14. pooled_W = (W + 2 * padding - kernel_W) // stride + 1
  15. pooled_layer = np.zeros(shape=(N, C, pooled_H, pooled_W), dtype=np.float32)
  16. pooling_size = kernel_H * kernel_W
  17. for n in range(N):
  18. for c in range(C):
  19. for h in range(pooled_H):
  20. for w in range(pooled_W):
  21. hs = h * stride - padding
  22. ws = w * stride - padding
  23. hend = min(hs + kernel_H, H)
  24. wend = min(ws + kernel_W, W)
  25. hs = max(hs, 0)
  26. ws = max(ws, 0)
  27. hargmax = hs
  28. wargmax = ws
  29. for i in range(hs, hend):
  30. for j in range(ws, wend):
  31. if input[n][c][i][j] > input[n][c][hargmax][wargmax]:
  32. hargmax = i
  33. wargmax = j
  34. pooled_layer[n][c][h][w] = input[n][c][hargmax][wargmax]
  35. return pooled_layer
  36. def np_max_pooling_gradient(input, gradient_y, kernel_H, kernel_W, padding=0, stride=1):
  37. N, C, pooled_H, pooled_W = gradient_y.shape
  38. H = (pooled_H - 1) * stride + kernel_H - 2 * padding
  39. W = (pooled_W - 1) * stride + kernel_W - 2 * padding
  40. gradient_x = np.zeros(shape=(N, C, H, W), dtype=np.float32)
  41. pooling_size = kernel_H * kernel_W
  42. for n in range(N):
  43. for c in range(C):
  44. for h in range(pooled_H):
  45. for w in range(pooled_W):
  46. hs = h * stride - padding
  47. ws = w * stride - padding
  48. hend = min(hs + kernel_H, H)
  49. wend = min(ws + kernel_W, W)
  50. hs = max(hs, 0)
  51. ws = max(ws, 0)
  52. hargmax = hs
  53. wargmax = ws
  54. for i in range(hs, hend):
  55. for j in range(ws, wend):
  56. if input[n][c][i][j] > input[n][c][hargmax][wargmax]:
  57. hargmax = i
  58. wargmax = j
  59. gradient_x[n][c][hargmax][wargmax] += gradient_y[n][c][h][w]
  60. return gradient_x
  61. class Max_Pool2dOp(Op):
  62. def __init__(self, node_A, kernel_H, kernel_W, padding, stride, ctx=None):
  63. super().__init__(Max_Pool2dOp, [node_A], ctx)
  64. self.padding = padding
  65. self.stride = stride
  66. self.kernel_H = kernel_H
  67. self.kernel_W = kernel_W
  68. def compute(self, input_vals, output_val, stream_handle=None):
  69. if self.on_cpu:
  70. if DNNL_LIB['DnnlMaxPool']:
  71. cpu_max_pooling(
  72. input_vals[0], self.kernel_H, self.kernel_W, output_val, self.padding, self.stride)
  73. else:
  74. output_val[:] = np_max_pooling(input_vals[0].asnumpy(
  75. ), self.kernel_H, self.kernel_W, self.padding, self.stride)
  76. else:
  77. CuDNN_max_pooling2d(
  78. input_vals[0], self.kernel_H, self.kernel_W, output_val, self.padding, self.stride, stream_handle)
  79. def gradient(self, output_grad):
  80. return [max_pool2d_gradient_op(self, output_grad, self.inputs[0], self.kernel_H, self.kernel_W, self.padding, self.stride, ctx=self.raw_ctx)]
  81. def infer_shape(self, input_shapes):
  82. """Need to handle input_vals[0].shape != input_vals[1].shape"""
  83. assert len(input_shapes) == 1
  84. N, C, H, W = input_shapes[0]
  85. p_H = (H + 2 * self.padding - self.kernel_H) // self.stride + 1
  86. p_W = (W + 2 * self.padding - self.kernel_W) // self.stride + 1
  87. return (N, C, p_H, p_W)
  88. class Max_Pool2d_GradientOp(Op):
  89. def __init__(self, node_out, node_out_gradient, node_in, kernel_H, kernel_W, padding, stride, ctx=None):
  90. super().__init__(Max_Pool2d_GradientOp, [
  91. node_out, node_out_gradient, node_in], ctx)
  92. self.padding = padding
  93. self.stride = stride
  94. self.kernel_H = kernel_H
  95. self.kernel_W = kernel_W
  96. def compute(self, input_vals, output_val, stream_handle=None):
  97. if self.on_cpu:
  98. if DNNL_LIB['DnnlMaxPool_Gradient']:
  99. cpu_max_pooling_gradient(
  100. input_vals[2], input_vals[1], self.kernel_H, self.kernel_W, output_val, self.padding, self.stride)
  101. else:
  102. output_val[:] = np_max_pooling_gradient(input_vals[2].asnumpy(
  103. ), input_vals[1].asnumpy(), self.kernel_H, self.kernel_W, self.padding, self.stride)
  104. else:
  105. CuDNN_max_pooling2d_gradient(
  106. input_vals[0], input_vals[1], input_vals[2], self.kernel_H, self.kernel_W, output_val, self.padding, self.stride, stream_handle)
  107. def gradient(self, output_grad):
  108. raise NotImplementedError
  109. def infer_shape(self, input_shapes):
  110. assert len(input_shapes) == 3
  111. return input_shapes[2]
  112. def max_pool2d_op(node_A, kernel_H, kernel_W, padding, stride, ctx=None):
  113. """Make a new instance of Max_Pool2dOp and call the instance.
  114. Parameters:
  115. ----
  116. node_A : Node
  117. Input Node
  118. kernel_H : scalar value
  119. Size of pool(height)
  120. kernel_W : scalar value
  121. Size of pool(width)
  122. padding : scalar value
  123. Padding edge
  124. stride : scalar value
  125. Step Length of the kernel
  126. Returns:
  127. ----
  128. A new Node instance created by Op.
  129. """
  130. return Max_Pool2dOp(node_A, kernel_H, kernel_W, padding, stride, ctx=ctx)
  131. def max_pool2d_gradient_op(node_out, node_out_gradient, node_in, kernel_H, kernel_W, padding, stride, ctx=None):
  132. """Make a new instance of Max_Pool2d_GradientOp and call the instance.
  133. Parameters:
  134. ----
  135. node_out : Node
  136. Output Node
  137. node_out_gradient : Node
  138. Gradient array
  139. node_in : Node
  140. Input Node
  141. kernel_H : scalar value
  142. Size of pool(height)
  143. kernel_W : scalar value
  144. Size of pool(width)
  145. padding : scalar value
  146. Padding edge
  147. stride : scalar value
  148. Step Length of the kernel
  149. Returns:
  150. ----
  151. A new Node instance created by Op.
  152. """
  153. return Max_Pool2d_GradientOp(node_out, node_out_gradient, node_in, kernel_H, kernel_W, padding, stride, ctx=ctx)