Background: I currently have many grayscale images of steel. Some of them have a high overall brightness, while others are unevenly lit. An example is shown in the figure below. How can I extract their outlines more reliably?
My method: First, I identify the approximate left and right boundaries to reduce interference from background noise, and then I crop to that region (a simplified sketch of this boundary step is shown after the snippet below). The code for processing the cropped part is as follows:
m = img.shape[0] // 2
count = mayregion
# Sample the middle row plus the last (or first) row to estimate brightness
if clip_top:
    last_row = img_three[-1, :]
    mid_row = img_three[m, :]
    mean_pixel = (np.mean(last_row) + np.mean(mid_row)) // 2
    count = (np.sum(last_row > 240) + np.sum(mid_row > 240))
else:
    last_row = img_three[0, :]
    mid_row = img_three[m, :]
    mean_pixel = (np.mean(last_row) + np.mean(mid_row)) // 2
    count = (np.sum(last_row > 240) + np.sum(mid_row > 240))
# Choose the binarization threshold k from the measured brightness
if mean_pixel > 240:
    k = 100
elif mean_pixel > 210:
    k = 80
else:
    k = 30
img_three_color = cv2.cvtColor(img_three, cv2.COLOR_GRAY2BGR)
img_three = cv2.GaussianBlur(img, (7, 7), 1)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
img_open = cv2.morphologyEx(img_three, cv2.MORPH_OPEN, kernel)
img_close = cv2.morphologyEx(img_open, cv2.MORPH_CLOSE, kernel)
_, binary = cv2.threshold(img_close, k, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
filtered_contours = [cnt for cnt in contours if cv2.contourArea(cnt) >= 100]
cv2.drawContours(img_three_color, filtered_contours, -1, (0, 255, 0), 10)
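For reference, the boundary search and cropping step that runs before the snippet above looks roughly like this (simplified sketch, not my exact code; the file name and the 0.5 factor in boundary_thresh are placeholders):

import cv2
import numpy as np

img = cv2.imread('steel.png', cv2.IMREAD_GRAYSCALE)
col_means = img.mean(axis=0)                   # average brightness of each column
boundary_thresh = 0.5 * col_means.max()        # columns brighter than this are treated as steel
cols = np.where(col_means > boundary_thresh)[0]
left, right = cols[0], cols[-1]                # approximate left/right boundaries
img_three = img[:, left:right + 1]             # cropped region used by the snippet above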
However, the contours obtained with this method still have some drawbacks.
Given the inhomogeneous appearance of your object and the noise in the background, I would go for an approach that pairs global thresholding with adaptive thresholding for the borders. Prior to processing, it is a good idea to harmonize the images to a certain minimum average value to compensate for global brightness differences.
As your images typically seem to be longer than the field of view and extend past one edge, adaptive thresholding does not work for the pixels on that edge. Manually closing that border prevents the background from leaking into the foreground mask.
See the code and result below as a starting point. I did not yet optimize the scaling factor s, threshold t, target mean m, and structuring element size k.
import cv2
import numpy as np
s = 8        # downsampling factor
t = 127      # global threshold
m = 160      # minimum target mean brightness
k = 120 / s  # structuring element size, scaled to the downsampled image
def close_edges(img_binary):
    # Close mask along image edges if partially present
    h, w = img_binary.shape
    # Top edge
    if np.any(img_binary[0, :]):
        first = np.argmax(img_binary[0, :] > 0)
        last = w - np.argmax(img_binary[0, ::-1] > 0) - 1
        img_binary[0, first:last + 1] = 255
    # Bottom edge
    if np.any(img_binary[-1, :]):
        first = np.argmax(img_binary[-1, :] > 0)
        last = w - np.argmax(img_binary[-1, ::-1] > 0) - 1
        img_binary[-1, first:last + 1] = 255
    # Left edge
    if np.any(img_binary[:, 0]):
        first = np.argmax(img_binary[:, 0] > 0)
        last = h - np.argmax(img_binary[::-1, 0] > 0) - 1
        img_binary[first:last + 1, 0] = 255
    # Right edge
    if np.any(img_binary[:, -1]):
        first = np.argmax(img_binary[:, -1] > 0)
        last = h - np.argmax(img_binary[::-1, -1] > 0) - 1
        img_binary[first:last + 1, -1] = 255
    return img_binary
img = cv2.imread('input.jpg', cv2.IMREAD_GRAYSCALE)
# Downsample the image to speed up processing
img = cv2.resize(img, (img.shape[1] // s, img.shape[0] // s), interpolation=cv2.INTER_LINEAR)
# Adjust brightness to a minimum target mean value
mean_value = np.mean(img)
if mean_value < m:
img = img.astype(np.float32) + (m - mean_value)
img = np.clip(img, 0, 255).astype(np.uint8)
# Use adaptive thresholding to create a binary image
img_binary = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 21, 0)
img_binary = np.where(img > t, 255, img_binary).astype(np.uint8)
# Connected components analysis to keep the largest component only
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(img_binary, connectivity=8)
# Keep only the largest component
largest_label = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA]) # Skip the background label (0)
img_binary = np.where(labels == largest_label, 255, 0).astype(np.uint8)
img_binary = close_edges(img_binary)
# Morphologically close the mask to fill small holes
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (int(k), int(k)))
img_binary = cv2.morphologyEx(img_binary, cv2.MORPH_CLOSE, kernel)
# Find contours in the binary image
contours, _ = cv2.findContours(img_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
img_three_color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.drawContours(img_three_color, contours, -1, (0, 255, 0), 2)
cv2.imshow('Contour', img_three_color)
cv2.waitKey(0)
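Since the contours are found on the downsampled image, you may want to scale them back up by s before using them at full resolution. A minimal sketch, assuming the original un-resized image is reloaded as img_full:

# Scale the contours back to the original resolution and draw them on the full-size image
img_full = cv2.imread('input.jpg', cv2.IMREAD_GRAYSCALE)
contours_full = [(cnt.astype(np.float32) * s).astype(np.int32) for cnt in contours]
img_full_color = cv2.cvtColor(img_full, cv2.COLOR_GRAY2BGR)
cv2.drawContours(img_full_color, contours_full, -1, (0, 255, 0), 2 * s)
cv2.imshow('Contour (full resolution)', img_full_color)
cv2.waitKey(0)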