Tags: python, opencv, image-processing, computer-vision, houghlinesp

Is there a robust way to cleanly detect lines in Dots And Boxes game with opencv?


I'm currently developing an OpenCV method that can extract the last placed moves on a game of Dots And Boxes. The most straight forward method in my eyes was to detect the lines normally and make sure it doesn't have excess lines. For this I have used the HoughBundler class proposed here.

This works for simple input images like this one:

this one

or this one

this one

However when I make the lines more complicated like this:

like this

it makes the wrong connections:

Here

and

here

the lines are detected and counted correctly: 4 and 5 lines respectively.

However this is the output of the more complicated input:

this

As you can see the lines are combined so that it connects points that shouldn't be connected. It has left me puzzled as to why this is. Following is my code.

import cv2
import numpy as np
import math

MAX_KERNEL_LENGTH = 11
MIN_SIZE = 150
ZOOM_FACTOR = 0.80
src = None

#HoughBundler from stackoverflow:
class HoughBundler:     
    def __init__(self,min_distance=5,min_angle=2):
        self.min_distance = min_distance
        self.min_angle = min_angle
    
    def get_orientation(self, line):
        orientation = math.atan2(abs((line[3] - line[1])), abs((line[2] - line[0])))
        return math.degrees(orientation)

    def check_is_line_different(self, line_1, groups, min_distance_to_merge, min_angle_to_merge):
        for group in groups:
            for line_2 in group:
                if self.get_distance(line_2, line_1) < min_distance_to_merge:
                    orientation_1 = self.get_orientation(line_1)
                    orientation_2 = self.get_orientation(line_2)
                    if abs(orientation_1 - orientation_2) < min_angle_to_merge:
                        group.append(line_1)
                        return False
        return True

    def distance_point_to_line(self, point, line):
        px, py = point
        x1, y1, x2, y2 = line

        def line_magnitude(x1, y1, x2, y2):
            line_magnitude = math.sqrt(math.pow((x2 - x1), 2) + math.pow((y2 - y1), 2))
            return line_magnitude

        lmag = line_magnitude(x1, y1, x2, y2)
        if lmag < 0.00000001:
            distance_point_to_line = 9999
            return distance_point_to_line

        u1 = (((px - x1) * (x2 - x1)) + ((py - y1) * (y2 - y1)))
        u = u1 / (lmag * lmag)

        if (u < 0.00001) or (u > 1):
            #// closest point does not fall within the line segment, take the shorter distance
            #// to an endpoint
            ix = line_magnitude(px, py, x1, y1)
            iy = line_magnitude(px, py, x2, y2)
            if ix > iy:
                distance_point_to_line = iy
            else:
                distance_point_to_line = ix
        else:
            # Intersecting point is on the line, use the formula
            ix = x1 + u * (x2 - x1)
            iy = y1 + u * (y2 - y1)
            distance_point_to_line = line_magnitude(px, py, ix, iy)

        return distance_point_to_line

    def get_distance(self, a_line, b_line):
        dist1 = self.distance_point_to_line(a_line[:2], b_line)
        dist2 = self.distance_point_to_line(a_line[2:], b_line)
        dist3 = self.distance_point_to_line(b_line[:2], a_line)
        dist4 = self.distance_point_to_line(b_line[2:], a_line)

        return min(dist1, dist2, dist3, dist4)

    def merge_lines_into_groups(self, lines):
        groups = []  # all lines groups are here
        # first line will create new group every time
        groups.append([lines[0]])
        # if line is different from existing gropus, create a new group
        for line_new in lines[1:]:
            if self.check_is_line_different(line_new, groups, self.min_distance, self.min_angle):
                groups.append([line_new])

        return groups

    def merge_line_segments(self, lines):
        orientation = self.get_orientation(lines[0])
      
        if(len(lines) == 1):
            return np.block([[lines[0][:2], lines[0][2:]]])

        points = []
        for line in lines:
            points.append(line[:2])
            points.append(line[2:])
        if 45 < orientation <= 90:
            #sort by y
            points = sorted(points, key=lambda point: point[1])
        else:
            #sort by x
            points = sorted(points, key=lambda point: point[0])

        return np.block([[points[0],points[-1]]])

    def process_lines(self, lines):
        lines_horizontal  = []
        lines_vertical  = []
  
        for line_i in [l[0] for l in lines]:
            orientation = self.get_orientation(line_i)
            # if vertical
            if 45 < orientation <= 90:
                lines_vertical.append(line_i)
            else:
                lines_horizontal.append(line_i)

        lines_vertical  = sorted(lines_vertical , key=lambda line: line[1])
        lines_horizontal  = sorted(lines_horizontal , key=lambda line: line[0])
        merged_lines_all = []

        # for each cluster in vertical and horizantal lines leave only one line
        for i in [lines_horizontal, lines_vertical]:
            if len(i) > 0:
                groups = self.merge_lines_into_groups(i)
                merged_lines = []
                for group in groups:
                    merged_lines.append(self.merge_line_segments(group))
                merged_lines_all.extend(merged_lines)
        return np.asarray(merged_lines_all)


#globals:
img = cv2.imread('gridwithmoves.png')
src = img
# NOTE(review): these are aliases, not copies — src/cpy/img share one pixel
# buffer; use img.copy() if independent images are intended.
cpy = img
cv2.imshow('BotsAndBoxes', img)

#finding and zooming to paper:

dst = cv2.medianBlur(img, MAX_KERNEL_LENGTH)
# cv2.imshow(window_name,dst)
gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 0, 400, apertureSize=3)
# cv2.imshow("Bounding", edges)
cnts = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns (contours, hierarchy).
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    zoomx, zoomy, zoomw, zoomh = cv2.boundingRect(c)
    if zoomw >= MIN_SIZE and zoomh >= MIN_SIZE:
        # Scale the origin out and the extent in by ZOOM_FACTOR to crop
        # inside the paper border.
        zoomx = int(zoomx / ZOOM_FACTOR)
        zoomy = int(zoomy / ZOOM_FACTOR)
        zoomw = int(zoomw * ZOOM_FACTOR)
        zoomh = int(zoomh * ZOOM_FACTOR)
        img = img[zoomy:zoomy+zoomh, zoomx:zoomx+zoomw]
        break
gray = cv2.cvtColor(cpy[zoomy:zoomy+zoomh, zoomx:zoomx+zoomw], cv2.COLOR_BGR2GRAY)

#detecting dots:

edges = cv2.Canny(gray, 75, 150, 3)
cv2.imshow("linesEdges", edges)
# cv2.imshow("src", src)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
threshed = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 5, 2)
cnts = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
xcnts = []
for cnt in cnts:
    x, y, w, h = cv2.boundingRect(cnt)
    # Tiny contours (area under 15 px) are taken to be the grid dots.
    if 0 < cv2.contourArea(cnt) < 15:
        xcnts.append(cnt)
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

#detecting lines:

lines = cv2.HoughLinesP(edges, 3, np.pi/360, 35, maxLineGap=4) #changing this makes simpler images not work
count_lines = 0
lines_cpy = []
for line in lines:
    x1, y1, x2, y2 = line[0]
    draw = True
    # Discard segments whose midpoint lands on a dot contour.
    for cnt in cnts:
        x, y, w, h = cv2.boundingRect(cnt)
        if 0 < cv2.contourArea(cnt) < 15:
            if x - w < (x1 + x2) / 2 < x + w and y - h < (y1 + y2) / 2 < y + h:
                draw = False
    if draw:
        # Keep only segments confirmed by at least one other nearby segment.
        # BUG FIX: the inner loop used to shadow the outer `line` variable.
        for other in lines:
            x3, y3, x4, y4 = other[0]
            # BUG FIX: the original compared NumPy scalars with `is`, which
            # tests object identity and is False even for equal coordinates,
            # so the self-comparison was never skipped. Use value equality.
            if x1 == x3 and y1 == y3 and x2 == x4 and y2 == y4:
                continue
            if abs(x1 - x3) < 5 and abs(x2 - x4) < 5 and abs(y1 - y3) < 5 and abs(y2 - y4) < 5:
                lines_cpy.append(other)


#grouping lines:
bundler = HoughBundler(min_distance=18, min_angle=40) #This bundles the lines incorrectly when input image is more complex
lines_cpy = bundler.process_lines(lines_cpy)
lines_cpy2 = []
for line in lines_cpy:
    x1, y1, x2, y2 = line[0]
    # Drop near-degenerate merged segments (shorter than 10 px in both axes).
    if abs(x1 - x2) < 10 and abs(y1 - y2) < 10:
        pass
    else:
        lines_cpy2.append(line)
        count_lines += 1
print(count_lines)
print(lines_cpy2)

#draw the lines that are left:
for line in lines_cpy2:
    x1, y1, x2, y2 = line[0]
    cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
print("\nDots number: {}".format(len(xcnts)))
cv2.imshow("linesDetected", img)

#wait for user to quit:
while True:
    key = cv2.waitKey(1)
    if key == 27: # exit on ESC
        break
cv2.destroyAllWindows()

The part where I am detecting lines seems to almost work perfectly. However the grouping of the lines makes some lines that aren't supposed to be there. Is there a better algorithm for grouping lines in this scenario or are my arguments just off?


Solution

  • Instead of using Hough Lines, FastLineDetector or LineSegmentDetector you can directly use the contour that you already have and filter for the points and edges. Just for the case that you are not satisfied by the results of the present algorithms and you want more control over what actually is happening. This method can also be helpful in other use cases. Hope it helps.

    import cv2
    import numpy as np
    import numpy.linalg as la
    
    MAX_KERNEL_LENGTH = 11
    MIN_SIZE = 150
    ZOOM_FACTOR = 0.80
    
    
    def get_angle(p1, p2, p3):
        """Return the angle (degrees) at vertex p2 formed by points p1 and p3."""
        arm_a = np.subtract(p2, p1)
        arm_b = np.subtract(p2, p3)
        cosine = np.inner(arm_a, arm_b) / la.norm(arm_a) / la.norm(arm_b)
        # Clamp against floating-point overshoot before arccos.
        return np.rad2deg(np.arccos(np.clip(cosine, -1.0, 1.0)))
    
    
    def get_angles(p, d):
        """Pair each point of closed polyline p with the angle it forms with its
        d-th neighbours on either side (indices wrap around cyclically)."""
        n = len(p)
        paired = []
        for i in range(n):
            angle = get_angle(p[(i - d) % n], p[i], p[(i + d) % n])
            paired.append((p[i], angle))
        return paired
    
    
    def remove_straight(p, d):
        """Keep only corner points: drop points whose local angle is within
        d degrees of straight (180). Neighbour distance is fixed at 2."""
        corners = []
        for point, angle in get_angles(p, 2):
            if angle < (180 - d):
                corners.append(point)
        return corners
    
    
    img = cv2.imread('img.png')

    # Finding and zooming to paper: blur, edge-detect, then crop to the first
    # sufficiently large contour's (re-scaled) bounding box.
    dst = cv2.medianBlur(img, MAX_KERNEL_LENGTH)
    gray = cv2.cvtColor(dst,cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray,0,400,apertureSize = 3)
    cnts = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns (contours, hierarchy).
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        zoomx, zoomy, zoomw, zoomh = cv2.boundingRect(c)
        if (zoomw >= MIN_SIZE and zoomh >= MIN_SIZE):
            # Scale the origin out and the extent in by ZOOM_FACTOR to crop
            # inside the page border.
            zoomx = int(zoomx/ZOOM_FACTOR)
            zoomy = int(zoomy/ZOOM_FACTOR)
            zoomw = int(zoomw*ZOOM_FACTOR)
            zoomh = int(zoomh*ZOOM_FACTOR)
            img = img[zoomy:zoomy+zoomh, zoomx:zoomx+zoomw]
            break

    # Detecting dots: adaptive threshold, then split contours by area — tiny
    # ones (area < 15 px) are the grid dots, the rest are drawn-line contours.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    threshed = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 5, 2)
    cnts = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
    xcnts = []  # dot contours
    lcnts = []  # line (non-dot) contours

    for cnt in cnts:
        x,y,w,h = cv2.boundingRect(cnt)
        if 0 < cv2.contourArea(cnt) < 15:
            xcnts.append(cnt)
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        else:
            lcnts.append(cnt)

    # For every line contour: strip near-straight points so only corners
    # remain, then draw the long edges between consecutive corners.
    for cnt in lcnts:
        pts = [v[0] for v in cnt]         # get pts from contour
        pts = remove_straight(pts, 60)    # consider bends of less than 60deg deviation as straight

        n = len(pts)
        for i in range(n):
            p1 = pts[i]
            p2 = pts[(i + 1) % n]
            # No abs() here on purpose: keeps only one traversal direction of
            # each contour edge so every line is drawn once.
            if (p1[0] - p2[0] > 4) or (p1[1] - p2[1] > 4):  # filter for long lines, only one direction
                cv2.circle(img, p1, 3, (0, 255, 255), 1)
                cv2.circle(img, p2, 3, (0, 255, 255), 1)
                cv2.line(img, p1, p2, (0, 255, 255), 1)

    cv2.imwrite("out.png", img)
    

    (resulting output image)