You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

finemapping.py 5.0 kB

8 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130
  1. #coding=utf-8
  2. import cv2
  3. import numpy as np
  4. from skimage.filters import (threshold_otsu, threshold_niblack,
  5. threshold_sauvola)
  6. import niblack_thresholding as nt
  7. import deskew
  8. def fitLine_ransac(pts,zero_add = 0 ):
  9. if len(pts)>=2:
  10. [vx, vy, x, y] = cv2.fitLine(pts, cv2.DIST_HUBER, 0, 0.01, 0.01)
  11. lefty = int((-x * vy / vx) + y)
  12. righty = int(((136- x) * vy / vx) + y)
  13. return lefty+30+zero_add,righty+30+zero_add
  14. return 0,0
  15. #精定位算法
  16. def findContoursAndDrawBoundingBox(image_rgb):
  17. line_upper = [];
  18. line_lower = [];
  19. line_experiment = []
  20. grouped_rects = []
  21. gray_image = cv2.cvtColor(image_rgb,cv2.COLOR_BGR2GRAY)
  22. # for k in np.linspace(-1.5, -0.2,10):
  23. for k in np.linspace(-50, 0, 15):
  24. # thresh_niblack = threshold_niblack(gray_image, window_size=21, k=k)
  25. # binary_niblack = gray_image > thresh_niblack
  26. # binary_niblack = binary_niblack.astype(np.uint8) * 255
  27. binary_niblack = cv2.adaptiveThreshold(gray_image,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,17,k)
  28. # cv2.imshow("image1",binary_niblack)
  29. # cv2.waitKey(0)
  30. imagex, contours, hierarchy = cv2.findContours(binary_niblack.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
  31. for contour in contours:
  32. bdbox = cv2.boundingRect(contour)
  33. if (bdbox[3]/float(bdbox[2])>0.7 and bdbox[3]*bdbox[2]>100 and bdbox[3]*bdbox[2]<1200) or (bdbox[3]/float(bdbox[2])>3 and bdbox[3]*bdbox[2]<100):
  34. # cv2.rectangle(rgb,(bdbox[0],bdbox[1]),(bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]),(255,0,0),1)
  35. line_upper.append([bdbox[0],bdbox[1]])
  36. line_lower.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]])
  37. line_experiment.append([bdbox[0],bdbox[1]])
  38. line_experiment.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]])
  39. # grouped_rects.append(bdbox)
  40. rgb = cv2.copyMakeBorder(image_rgb,30,30,0,0,cv2.BORDER_REPLICATE)
  41. leftyA, rightyA = fitLine_ransac(np.array(line_lower),3)
  42. rows,cols = rgb.shape[:2]
  43. # rgb = cv2.line(rgb, (cols - 1, rightyA), (0, leftyA), (0, 0, 255), 1,cv2.LINE_AA)
  44. leftyB, rightyB = fitLine_ransac(np.array(line_upper),-3)
  45. rows,cols = rgb.shape[:2]
  46. # rgb = cv2.line(rgb, (cols - 1, rightyB), (0, leftyB), (0,255, 0), 1,cv2.LINE_AA)
  47. pts_map1 = np.float32([[cols - 1, rightyA], [0, leftyA],[cols - 1, rightyB], [0, leftyB]])
  48. pts_map2 = np.float32([[136,36],[0,36],[136,0],[0,0]])
  49. mat = cv2.getPerspectiveTransform(pts_map1,pts_map2)
  50. image = cv2.warpPerspective(rgb,mat,(136,36),flags=cv2.INTER_CUBIC)
  51. image = deskew.fastDeskew(image)
  52. return image
  53. #多级
  54. def findContoursAndDrawBoundingBox2(image_rgb):
  55. line_upper = [];
  56. line_lower = [];
  57. line_experiment = []
  58. grouped_rects = []
  59. gray_image = cv2.cvtColor(image_rgb,cv2.COLOR_BGR2GRAY)
  60. for k in np.linspace(-1.6, -0.2,10):
  61. # for k in np.linspace(-15, 0, 15):
  62. # #
  63. # thresh_niblack = threshold_niblack(gray_image, window_size=21, k=k)
  64. # binary_niblack = gray_image > thresh_niblack
  65. # binary_niblack = binary_niblack.astype(np.uint8) * 255
  66. binary_niblack = nt.niBlackThreshold(gray_image,19,k)
  67. # cv2.imshow("binary_niblack_opencv",binary_niblack_)
  68. # cv2.imshow("binary_niblack_skimage", binary_niblack)
  69. # cv2.waitKey(0)
  70. imagex, contours, hierarchy = cv2.findContours(binary_niblack.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
  71. for contour in contours:
  72. bdbox = cv2.boundingRect(contour)
  73. if (bdbox[3]/float(bdbox[2])>0.7 and bdbox[3]*bdbox[2]>100 and bdbox[3]*bdbox[2]<1000) or (bdbox[3]/float(bdbox[2])>3 and bdbox[3]*bdbox[2]<100):
  74. # cv2.rectangle(rgb,(bdbox[0],bdbox[1]),(bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]),(255,0,0),1)
  75. line_upper.append([bdbox[0],bdbox[1]])
  76. line_lower.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]])
  77. line_experiment.append([bdbox[0],bdbox[1]])
  78. line_experiment.append([bdbox[0]+bdbox[2],bdbox[1]+bdbox[3]])
  79. # grouped_rects.append(bdbox)
  80. rgb = cv2.copyMakeBorder(image_rgb,30,30,0,0,cv2.BORDER_REPLICATE)
  81. leftyA, rightyA = fitLine_ransac(np.array(line_lower),2)
  82. rows,cols = rgb.shape[:2]
  83. # rgb = cv2.line(rgb, (cols - 1, rightyA), (0, leftyA), (0, 0, 255), 1,cv2.LINE_AA)
  84. leftyB, rightyB = fitLine_ransac(np.array(line_upper),-4)
  85. rows,cols = rgb.shape[:2]
  86. # rgb = cv2.line(rgb, (cols - 1, rightyB), (0, leftyB), (0,255, 0), 1,cv2.LINE_AA)
  87. pts_map1 = np.float32([[cols - 1, rightyA], [0, leftyA],[cols - 1, rightyB], [0, leftyB]])
  88. pts_map2 = np.float32([[136,36],[0,36],[136,0],[0,0]])
  89. mat = cv2.getPerspectiveTransform(pts_map1,pts_map2)
  90. image = cv2.warpPerspective(rgb,mat,(136,36),flags=cv2.INTER_CUBIC)
  91. image = deskew.fastDeskew(image)
  92. return image

高性能开源中文车牌识别框架