Tags: python, opencv, detect, edge-detection

Detect drops of water using OpenCV


I am trying to detect drops inside the water. As a first step I detect the edges, but there are light spots in the image that are also detected as drops.

Original Image

Drops that I want to detect

Note that the drops are white, surrounded by a dark layer.

My code:

import cv2
import numpy as np

def unsharp_mask(img, blur_size=(5, 5), imgWeight=1.5, gaussianWeight=-0.5):
    # sharpen by subtracting a weighted Gaussian blur from the image
    gaussian = cv2.GaussianBlur(img, blur_size, 0)
    return cv2.addWeighted(img, imgWeight, gaussian, gaussianWeight, 0)

def clahe(img, clip_limit = 2.0):
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(5,5))
    return clahe.apply(img)


def get_sobel(img, size=3):
    # second derivative in x; pass the kernel size as ksize
    # (the fifth positional argument of cv2.Sobel is dst, not ksize)
    sobelx64f = cv2.Sobel(img, cv2.CV_64F, 2, 0, ksize=size)
    abs_sobel64f = np.absolute(sobelx64f)
    return np.uint8(abs_sobel64f)

img = cv2.imread("img_brightened.jpg")
# save color copy for visualizing
imgc = img.copy()
# resize image to make the analytics easier (a form of filtering)
resize_times = 1.5
img = cv2.resize(img, None, fx=1 / resize_times, fy=1 / resize_times)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow("Input", img)

# use sobel operator to evaluate high frequencies
sobel = get_sobel(img)
# experimentally calculated function - needs refining
clip_limit = (-2.556) * np.sum(sobel)/(img.shape[0] * img.shape[1]) + 26.557
# don't apply clahe if there is enough high freq to find blobs
if(clip_limit < 1.0):
    clip_limit = 0.1
# limit clahe if there's not enough details - needs more tests
if(clip_limit > 8.0):
    clip_limit = 8

# apply clahe and unsharp mask to improve high frequencies as much as possible
img = clahe(img, clip_limit)
img = unsharp_mask(img)

# filter the image to ensure edge continuity and perform Canny
img_blurred = cv2.GaussianBlur(img, (5, 5), 0)
canny = cv2.Canny(img_blurred, 100, 255)
cv2.imshow("Output", canny)
cv2.waitKey(0)

Result

[Canny edge output: the light spots are also picked up]
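
Since the drops I want are white blobs surrounded by a dark layer, one extra filtering idea (only a rough sketch, not part of the code above) would be to keep just those Canny contours whose interior is clearly brighter than a thin ring around them. It assumes OpenCV 4's two-value findContours return and that the edges close well enough to be filled; the function name, ring_width and margin are made up and would need tuning:

import cv2
import numpy as np

def keep_bright_blobs_with_dark_ring(gray, edges, ring_width=3, margin=15):
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    kept = []
    for cnt in contours:
        blob = np.zeros_like(gray)
        cv2.drawContours(blob, [cnt], -1, 255, -1)              # filled candidate blob
        kernel = np.ones((2 * ring_width + 1, 2 * ring_width + 1), np.uint8)
        ring = cv2.dilate(blob, kernel) - blob                  # thin ring just outside the blob
        if cv2.countNonZero(blob) == 0 or cv2.countNonZero(ring) == 0:
            continue
        inner = cv2.mean(gray, mask=blob)[0]                    # mean intensity inside the blob
        outer = cv2.mean(gray, mask=ring)[0]                    # mean intensity of the ring
        if inner > outer + margin:                              # bright centre, dark surround
            kept.append(cnt)
    return kept

# e.g. with the grayscale image and Canny output from the code above:
# drops = keep_bright_blobs_with_dark_ring(img, canny)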


Solution

  • I used the code from https://github.com/kavyamusty/Shading-removal-of-images/blob/master/Article%20submission.ipynb, which removes the shading first, and then cv2.HoughCircles to find the circles.

    The code is as follows:

    import cv2
    import numpy as np
    import matplotlib.pyplot as plt
    def max_filtering(N, I_temp):
        wall = np.full((I_temp.shape[0]+(N//2)*2, I_temp.shape[1]+(N//2)*2), -1)
        wall[(N//2):wall.shape[0]-(N//2), (N//2):wall.shape[1]-(N//2)] = I_temp.copy()
        temp = np.full((I_temp.shape[0]+(N//2)*2, I_temp.shape[1]+(N//2)*2), -1)
        for y in range(0,wall.shape[0]):
            for x in range(0,wall.shape[1]):
                if wall[y,x]!=-1:
                    window = wall[y-(N//2):y+(N//2)+1,x-(N//2):x+(N//2)+1]
                    num = np.amax(window)
                    temp[y,x] = num
        A = temp[(N//2):wall.shape[0]-(N//2), (N//2):wall.shape[1]-(N//2)].copy()
        return A
    
    def min_filtering(N, A):
        wall_min = np.full((A.shape[0]+(N//2)*2, A.shape[1]+(N//2)*2), 300)
        wall_min[(N//2):wall_min.shape[0]-(N//2), (N//2):wall_min.shape[1]-(N//2)] = A.copy()
        temp_min = np.full((A.shape[0]+(N//2)*2, A.shape[1]+(N//2)*2), 300)
        for y in range(0,wall_min.shape[0]):
            for x in range(0,wall_min.shape[1]):
                if wall_min[y,x]!=300:
                    window_min = wall_min[y-(N//2):y+(N//2)+1,x-(N//2):x+(N//2)+1]
                    num_min = np.amin(window_min)
                    temp_min[y,x] = num_min
        B = temp_min[(N//2):wall_min.shape[0]-(N//2), (N//2):wall_min.shape[1]-(N//2)].copy()
        return B
    
    def background_subtraction(I, B):
        O = I - B
        norm_img = cv2.normalize(O, None, 0,255, norm_type=cv2.NORM_MINMAX)
        return norm_img
    
    def min_max_filtering(M, N, I):
        if M == 0:
            #max_filtering
            A = max_filtering(N, I)
            #min_filtering
            B = min_filtering(N, A)
            #subtraction
            normalised_img = background_subtraction(I, B)
        elif M == 1:
            #min_filtering
            A = min_filtering(N, I)
            #max_filtering
            B = max_filtering(N, A)
            #subtraction
            normalised_img = background_subtraction(I, B)
        return normalised_img
    
    # Read Image 
    img = cv2.imread(r"D:/Image.jpg")
    # Copy origin image 
    cimg = img.copy()
    
    # Initialization array of uint8 
    img_remove_shadow = np.zeros(np.shape(img), dtype="uint8")
    
    for i in range(np.shape(img)[2]):
        img_remove_shadow[:, :, i] = np.array(min_max_filtering(M = 0, N = 20, I = img[:, :, i]))
    
    # Using median blur 
    img = cv2.medianBlur(img_remove_shadow,5)
    
    # Change to gray image
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    
    cv2.imshow("Removing Shadow", img)
    
    
    
    # Find circles 
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=1, minDist=5,
                               param1=40, param2=23, minRadius=5, maxRadius=20)
    
    circles = np.uint16(np.around(circles))
    
    for i in circles[0,:]:
        # draw the outer circle
        cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
        # draw the center of the circle
        cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
        
    cv2.imshow('detected circles',cimg)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    

    The result is shown below:

    Image after removing the shadow

    The detection result

    PS: The code takes 11.74 s to run; I would appreciate it if someone could optimize it.
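
    The nested Python loops in max_filtering and min_filtering are the main cost. An N x N max filter is the same operation as a grayscale dilation with an N x N kernel, and a min filter is an erosion, so one possible speed-up (a sketch only, not benchmarked here; the border handling differs slightly from the -1/300 padding above) is to let OpenCV's morphology functions do the work:

    def min_max_filtering_fast(M, N, I):
        # same window as the loops above: N//2 pixels on each side of the centre pixel
        k = 2 * (N // 2) + 1
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (k, k))
        if M == 0:
            A = cv2.dilate(I, kernel)   # max filtering
            B = cv2.erode(A, kernel)    # min filtering -> background estimate
        else:
            A = cv2.erode(I, kernel)    # min filtering
            B = cv2.dilate(A, kernel)   # max filtering -> background estimate
        # same subtraction + normalisation as background_subtraction, done in a
        # signed type so negative differences survive until the rescaling
        O = I.astype(np.int16) - B.astype(np.int16)
        return cv2.normalize(O, None, 0, 255, norm_type=cv2.NORM_MINMAX)

    The call inside the channel loop would then become img_remove_shadow[:, :, i] = min_max_filtering_fast(M=0, N=20, I=img[:, :, i]).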