I am trying to monitor the positional change of a rotating and moving sample head by comparing still camera images before and after a movement command is given. My plan is to do this using OpenCV and bounding boxes.
I wrote some code to do this but am struggling to select the specific bounding box I want. There are positional changes and potentially lighting changes between images, so I'm not sure I can just tune the threshold setting to make only the sample show up. Since the sample size will not change, I was planning on filtering my array of bounding boxes by area so that I display and compare only the one that covers the sample.
I tried building an array of contour areas using the cv2.contourArea() function but kept getting the error: OpenCV(4.7.0) :-1: error: (-5:Bad argument) in function 'contourArea' - contour is not a numerical tuple. It seems like I am not going about this correctly.
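For reference, the kind of area filter I'm aiming for is sketched below; as far as I can tell cv2.contourArea() expects a single contour at a time, so I suspect I was handing it the wrong object. The area limits are just placeholders for the known sample size, and thresh is the thresholded image from my pasted code.
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
sample_boxes = []
for cntr in contours:
    area = cv2.contourArea(cntr)   # one contour at a time, not the whole list
    if 25000 < area < 30000:       # placeholder range for the expected sample area
        box = np.int0(cv2.boxPoints(cv2.minAreaRect(cntr)))
        sample_boxes.append(box)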
Any help would be much appreciated. Thanks. Code pasted below.
import numpy as np
import matplotlib.pyplot as plt
import cv2
# read the image
img = cv2.imread("Test_IMG_Rev1\IMG_3_ROT100.jpg")
# Convert to Grayscale and RGB
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# Apply Thresholding on the grayscale image
ret,thresh = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY)
# Plot Images
fig = plt.figure(figsize=(20,20))
ax = plt.subplot(1, 2, 1)
plt.imshow(img_rgb)
ax.set_title("Original Image")
ax = plt.subplot(1, 2, 2)
plt.imshow(thresh)
ax.set_title("Threshold Image")
#
plt.show()
# Get contours (Rotated)
result1 = img.copy()
contours1 = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours1 = contours1[0] if len(contours1) == 2 else contours1[1]
for cntr1 in contours1:
    rect = cv2.minAreaRect(cntr1)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    cv2.drawContours(result1, [box], 0, (0, 0, 255), 2)
#
plt.figure(figsize=(10,10))
img_result1 = cv2.cvtColor(result1, cv2.COLOR_BGR2RGB)
plt.imshow(img_result1)
I got it to work, thanks for the help. Filtering on cv2.contourArea() inside the contour loop (one contour at a time) picks out just the box around the sample. Full code pasted below.
import numpy as np
import matplotlib.pyplot as plt
import cv2
import math as eq
from mpl_toolkits.axes_grid1 import ImageGrid
# read the image(s)
img = cv2.imread("Test_IMG_Rev1\IMG_1.jpg")
img_C = cv2.imread("Test_IMG_Rev1\IMG_3_ROT100.jpg")
#
res = img.shape
res_x = res[1] # image width in pixels
res_y = res[0] # image height in pixels
#
conv = (1/25400) # 1/25400 in per um (1 in = 25,400 um), for converting pixel measurements to physical units
# Convert to Grayscale and RGB
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#
img_gray_C = cv2.cvtColor(img_C, cv2.COLOR_BGR2GRAY)
img_rgb_C = cv2.cvtColor(img_C, cv2.COLOR_BGR2RGB)
# Apply Thresholding on the grayscale image
ret,thresh = cv2.threshold(img_gray,127,255,cv2.THRESH_BINARY)
ret_C,thresh_C = cv2.threshold(img_gray_C,127,255,cv2.THRESH_BINARY)
# Get contours (Rotated), Image 1
box_Ax = []
#
result_A = img.copy()
contours_A = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours_A = contours_A[0] if len(contours_A) == 2 else contours_A[1]
for cntr_A in contours_A:
    area_A = cv2.contourArea(cntr_A)
    # keep only the contour whose area matches the known sample size
    if 26000 < area_A < 30000:
        rect_A = cv2.minAreaRect(cntr_A)
        box_A = cv2.boxPoints(rect_A)
        box_A = np.int0(box_A)
        box_Ax.append(box_A)
        cv2.drawContours(result_A, [box_A], 0, (0, 0, 255), 5)
#
plt.figure(figsize=(10,10))
img_result_A = cv2.cvtColor(result_A, cv2.COLOR_BGR2RGB)
plt.imshow(img_result_A)
#
# Get contours (Rotated), Image 2 (Comparison Image)
box_Bx = []
#
result_B = img_C.copy()
contours_B = cv2.findContours(thresh_C, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours_B = contours_B[0] if len(contours_B) == 2 else contours_B[1]
for cntr_B in contours_B:
    area_B = cv2.contourArea(cntr_B)
    # same area filter as above, applied to the comparison image
    if 26000 < area_B < 30000:
        rect_B = cv2.minAreaRect(cntr_B)
        box_B = cv2.boxPoints(rect_B)
        box_B = np.int0(box_B)
        box_Bx.append(box_B)
        cv2.drawContours(result_B, [box_B], 0, (0, 0, 255), 5)
#
plt.figure(figsize=(10,10))
img_result_B = cv2.cvtColor(result_B, cv2.COLOR_BGR2RGB)
plt.imshow(img_result_B)
Here is the image comparison code as well for reference.
# Calculate Rotation and Position Offset
Img_A_index = np.ravel(box_Ax)
Img_B_index = np.ravel(box_Bx)
#
# Box centre = midpoint between opposite corners of the flattened box array
# (corner 0 at indices 0,1 and corner 2 at indices 4,5)
CoordX_A = ((Img_A_index[0])+(Img_A_index[4]))/2
CoordY_A = ((Img_A_index[1])+(Img_A_index[5]))/2
#
CoordX_B = ((Img_B_index[0])+(Img_B_index[4]))/2
CoordY_B = ((Img_B_index[1])+(Img_B_index[5]))/2
#
X_offset = (CoordX_A-CoordX_B)
Y_offset = (CoordY_A-CoordY_B)
#
# Angle of the box edge between corner 0 (indices 0,1) and corner 3 (indices 6,7), in radians
Angle_A = eq.atan(abs(Img_A_index[6] - Img_A_index[0]) / abs(Img_A_index[7] - Img_A_index[1]))
Angle_B = eq.atan(abs(Img_B_index[6] - Img_B_index[0]) / abs(Img_B_index[7] - Img_B_index[1]))
Angle_offset = (Angle_A - Angle_B)
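Two notes for anyone reusing this: eq.atan() returns radians and will raise a ZeroDivisionError whenever the two corners happen to share the same y-coordinate, and cv2.minAreaRect() already returns the box centre, so the position offset can also be read straight from rect_A and rect_B instead of indexing the ravelled corner arrays. A rough sketch of that variant, assuming rect_A and rect_B from the loops above are still in scope for the selected contour:
# Offsets from the minAreaRect centres; atan2 handles a zero y-difference safely
(cx_A, cy_A), _, _ = rect_A
(cx_B, cy_B), _, _ = rect_B
X_offset_px = cx_A - cx_B
Y_offset_px = cy_A - cy_B
#
edge_A = eq.atan2(Img_A_index[6] - Img_A_index[0], Img_A_index[7] - Img_A_index[1])
edge_B = eq.atan2(Img_B_index[6] - Img_B_index[0], Img_B_index[7] - Img_B_index[1])
Angle_offset_deg = eq.degrees(edge_A - edge_B)
#
print("Offset (px):", X_offset_px, Y_offset_px, "Rotation (deg):", Angle_offset_deg)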