问题
I have converted a sudoku image into a sudoku grid using OpenCV.
Now I want to extract each box from the image — what is the best way to do this?
As far as I know, I am trying to find the intersection points of the lines to locate the corners of each box.
class SudokuSolverPlay:
    """Locate the grid lines of a sudoku puzzle in a photo using OpenCV.

    Pipeline: grayscale -> mask the sudoku region -> edge/line detection.
    Several stages are stubs in this excerpt; only their contracts are
    documented.
    """

    def __init__(self, image):
        # Keep the original image; every processing stage works on copies.
        # (Bug fix: the original __init__ had no body, but solveSudoku
        # reads self.__originalImg.)
        self.__originalImg = image

    def __preProcess(self, img):
        """Return a grayscale version of *img*."""

    def __maskSudoku(self, img):
        """Return *img* masked down to the sudoku region."""

    def __dectactEdge(self, img):
        """Return an image containing the detected sudoku grid lines."""

        def drawLines(src, dest, iteration=1):
            # Run a probabilistic Hough transform and draw every detected
            # segment onto *dest*.  Feeding *dest* back in as the next
            # source lets repeated passes bridge broken grid lines.
            minLineLength = 100
            src = cv2.convertScaleAbs(src)
            for _ in range(iteration):
                lines = cv2.HoughLinesP(image=src, rho=1, theta=np.pi / 180,
                                        threshold=100, lines=np.array([]),
                                        minLineLength=minLineLength,
                                        maxLineGap=100)
                # lines has shape (n, 1, 4); unpack each segment directly
                # instead of indexing coordinate-by-coordinate.
                for x1, y1, x2, y2 in lines[:, 0]:
                    cv2.line(dest, (x1, y1), (x2, y2), 255, 1, cv2.LINE_AA)
                src = cv2.convertScaleAbs(dest)

        def findVerticalLines(img):
            # Sobel with dx=1 responds to vertical edges; closing with a
            # tall narrow kernel then joins them into solid vertical lines.
            # Bug fix: the blur was computed but never used -- it now
            # feeds the Sobel gradient as intended.
            blurred = cv2.GaussianBlur(img, (5, 5), 0)
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 10))
            grad = cv2.Sobel(blurred, cv2.CV_64F, 1, 0)
            grad = cv2.convertScaleAbs(grad)
            cv2.normalize(grad, grad, 0, 255, cv2.NORM_MINMAX)
            return cv2.morphologyEx(grad, cv2.MORPH_CLOSE, kernel, iterations=1)

        def findHorizontalLines(img):
            # Mirror of findVerticalLines ("same as above, only args
            # different"): dy=1 gradient plus a short wide closing kernel
            # keeps only horizontal strokes.
            blurred = cv2.GaussianBlur(img, (5, 5), 0)
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 2))
            grad = cv2.Sobel(blurred, cv2.CV_64F, 0, 1)
            grad = cv2.convertScaleAbs(grad)
            cv2.normalize(grad, grad, 0, 255, cv2.NORM_MINMAX)
            return cv2.morphologyEx(grad, cv2.MORPH_CLOSE, kernel, iterations=1)

        img1 = np.zeros(img.shape)
        edges = cv2.Canny(img, 50, 150, apertureSize=3)
        laplacian = cv2.Laplacian(edges, cv2.CV_64F)
        drawLines(laplacian, img1, iteration=1)
        # NOTE(review): these results are computed but not yet combined into
        # the returned image -- presumably the next pipeline step.
        sby = findVerticalLines(img1)
        sbx = findHorizontalLines(img1)
        return img1

    def solveSudoku(self):
        """Run the full pipeline on the image given at construction time."""
        gray = self.__preProcess(self.__originalImg)
        masked = self.__maskSudoku(gray)
        # Bug fix: the original called self.__dectactGrid, which does not
        # exist; the edge-detection method is named __dectactEdge.
        grid = self.__dectactEdge(masked)
if __name__ == '__main__':
    # Entry point: load the sample puzzle and run the detection pipeline.
    source_image = cv2.imread('sudoku1.jpg')
    SudokuSolverPlay(source_image).solveSudoku()
Here findVerticalLines()
and findHorizontalLines()
are not able to detect horizontal and vertical lines properly.
- original image
- masked image
- Canny edge detection
- hough line transform
- horizontal lines
- Vertical Lines
回答1:
One way to solve this is to do a morphological operation to find vertical and horizontal lines from the Canny edge image, then do a connected-component analysis to find the boxes. I have done a sample version below. You can fine-tune it further to make it better. I started with the masked image as input.
### reading input image
gray_scale=cv2.imread('masked_image.jpg',0)
Performing canny edge detection and adding a dilation layer
# Binarise with Canny, then thicken the edges slightly so small breaks in
# the grid lines are bridged before the morphological passes below.
img_bin = cv2.Canny(gray_scale, 50, 110)
dilation_kernel = np.ones((3, 3), np.uint8)
img_bin = cv2.dilate(img_bin, dilation_kernel, iterations=1)
Now, the dilated binary image looks like this.
assuming minimum box size would be 20*20
line_min_width = 20
finding horizontal lines
# A 1xN opening erases everything except horizontal runs that are at
# least line_min_width pixels long.
horizontal_kernel = np.ones((1, line_min_width), np.uint8)
img_bin_h = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, horizontal_kernel)
finding vertical lines
# An Nx1 opening keeps only vertical runs of at least line_min_width
# pixels.
vertical_kernel = np.ones((line_min_width, 1), np.uint8)
img_bin_v = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, vertical_kernel)
merging and adding a dilation layer to close small gaps
# Combine both line maps, then dilate once more to close the tiny gaps
# left where horizontal and vertical strokes meet.
img_bin_final = img_bin_h | img_bin_v
closing_kernel = np.ones((3, 3), np.uint8)
img_bin_final = cv2.dilate(img_bin_final, closing_kernel, iterations=1)
applying connected component analysis
ret, labels, stats,centroids = cv2.connectedComponentsWithStats(~img_bin_final, connectivity=8, ltype=cv2.CV_32S)
visualising Connected component image
As you can see, we have detected some text as boxes too; we can easily remove them with simple filter conditions. Here I'm filtering with the condition that the area must be at least 1000 pixels.
drawing rectangles on the detected boxes.
### labels 0 and 1 are the background and residue components, which we
### do not require, hence stats[2:]
# NOTE(review): `image` is not defined in this snippet -- presumably the
# original colour image to draw on; confirm against the caller.
for x, y, w, h, area in stats[2:]:
    # Text blobs are small; requiring >1000 px of area keeps only real
    # grid boxes.
    if area > 1000:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # Optional labelling:
        # cv2.putText(image, 'box', (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
final output image
This answer is based on my solution to find checkboxes/tables in an image using OpenCV. You can find a detailed explanation in my blog at Towards Data Science. Hope this will take you closer to a solution.
Happy coding :)
-- edit 1
code to do connected component visualisation
def imshow_components(labels):
    """Render a connected-component label map as a colour image.

    Each label id is mapped to a distinct hue; the background (label 0)
    is painted black so the components stand out.
    """
    # Spread the label ids over OpenCV's hue range (0-179).
    hue = np.uint8(179 * labels / np.max(labels))
    # Full saturation and value make every label a vivid colour.
    full = 255 * np.ones_like(hue)
    hsv_img = cv2.merge([hue, full, full])
    # HSV -> BGR so the result can be shown/saved with the usual calls.
    colour_img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)
    # Force background pixels (hue 0) to black.
    colour_img[hue == 0] = 0
    return colour_img
来源:https://stackoverflow.com/questions/65717860/extract-boxes-from-sudoku-in-opencv