python, opencv, machine-learning, computer-vision, openvino

Troubleshoot OpenVINO model output for cv2.rectangle


I am experimenting with this OpenVINO Model Zoo model for person detection. What I am stuck on is whether I am handling the model architecture correctly. This is the info for the model input and model output (all three links are to the same page, just different locations).

import cv2
import numpy as np
import matplotlib.pyplot as plt
from openvino.runtime import Core

MODEL = "person-detection-asl-0001"
PRECISION = "FP16"
MODEL_PATH = "./person-detection-asl-0001/"
CHAR = "/"
FILE_TYPE = ".xml"
FULL_MODEL_STR = MODEL_PATH + CHAR + PRECISION + CHAR + MODEL + FILE_TYPE

print("testing model")
print(FULL_MODEL_STR)

ie_core = Core()


def model_init(model_path):
    model = ie_core.read_model(model=model_path)
    compiled_model = ie_core.compile_model(model=model, device_name="CPU")
    input_keys = compiled_model.input(0)
    output_keys = compiled_model.output(0)
    return input_keys, output_keys, compiled_model


input_key, output_keys, compiled_model = model_init(FULL_MODEL_STR)
print("COMPILED MODEL: ", compiled_model)


# Get input size - Recognition.
height, width = list(input_key.shape)[2:]
print("MODEL DIMENSIONS: ", (height, width))

image = cv2.imread("./ben_sarah.JPG")
# cv2.imshow("image",image)


image_mod = cv2.resize(image, (width, height))
image_mod = image_mod.transpose((2, 0, 1))
image_mod = image_mod.reshape(1, 3, height, width)


# Run inference.
boxes = compiled_model([image_mod])[compiled_model.output('boxes')]
print(f"{MODEL} BOXES.SHAPE: {boxes.shape}")

def postprocess(result, image):

    aligns = image.shape

    detections = result.reshape(-1, 5)
    for i, detection in enumerate(detections):
        xmin, ymin, xmax, ymax, confidence = detection
        if confidence > 0.2:
            xmin = int(max((xmin * image.shape[1]), 10))
            ymin = int(max((ymin * image.shape[0]), 10))
            xmax = int(min((xmax * image.shape[1]), image.shape[1] - 10))
            ymax = int(min((ymax * image.shape[0]), image.shape[0] - 10))

            conf = round(confidence, 2)
            print(f"conf: {conf:.2f}")
            print((xmin, ymin),(xmax, ymax))

            # For bounding box
            cv2.rectangle(image, (xmin, ymin),
                          (xmax, ymax), (255, 255, 255), 5)

            # For the text background
            # Finds space required
            (w, h), _ = cv2.getTextSize(
                f"{conf:.2f}", cv2.FONT_HERSHEY_SIMPLEX, 1.7, 1)

            # Prints the text.
            cv2.rectangle(image, (xmin, ymin + h + 5),
                          (xmin + w + 5, ymin), (255, 255, 255), -1)
            cv2.putText(image, f"{conf:.2f}", (xmin, ymin + h),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.7, (0, 0, 0), 3)

    return image

final = postprocess(boxes, image)
cv2.imwrite(f"./outputs/{PRECISION}-{MODEL}.png", final)
cv2.imshow("final", final)

The code runs, but the coordinates the model output gives me for drawing a box around a detected person are not correct.

For example, in the postprocess function I think I am doing something wrong, because:

print(f"conf: {conf:.2f}")
print((xmin, ymin),(xmax, ymax))

returns:

conf: 0.50
(29012, 127753) (1270, 950)

The numbers (29012, 127753) that are supposed to represent (xmin, ymin) aren't correct; they are outside the coordinates of the entire image. The original image.shape is (960, 1280, 3).
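
If it helps, this is a quick check I could add right after the inference call to see whether the raw values in boxes are normalized (0 to 1) or already in pixels (this snippet is hypothetical and not in the script above):

# Hypothetical debug check, not part of the original script: inspect the raw
# detections before any scaling to see if the coordinates are normalized or in pixels.
raw_detections = boxes.reshape(-1, 5)
print("coordinate range:", raw_detections[:, :4].min(), raw_detections[:, :4].max())
print("first raw detection:", raw_detections[0])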


Solution

  • Values like 29012 are larger than the image itself, which suggests the model already returns pixel coordinates rather than normalized ones, so they should not be multiplied by image.shape again. I guess you also want a margin of at least ten pixels from the image edges for drawing the bounding box, so you need:

        xmin = int(max(xmin, 10))
        ymin = int(max(ymin, 10))
        xmax = int(min(xmax, image.shape[1] - 10))
        ymax = int(min(ymax, image.shape[0] - 10))

    And if xmin, ymin, xmax and ymax are already integers, you don't need the int() wrapper. A full sketch of the corrected postprocess is shown after this list.
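
Putting the pieces together, a corrected postprocess could look like the sketch below. It is only a sketch of the change suggested above: it assumes the same imports and variables as the question (cv2 and the boxes output), and it assumes the box coordinates are already pixels of the image being drawn on; if they turn out to be relative to the resized network input, they would additionally need to be scaled by image.shape[1] / width and image.shape[0] / height.

def postprocess(result, image):
    # Each detection row is [xmin, ymin, xmax, ymax, confidence].
    detections = result.reshape(-1, 5)
    for xmin, ymin, xmax, ymax, confidence in detections:
        if confidence > 0.2:
            # Clamp to the image with a 10 px margin instead of multiplying
            # by the image dimensions.
            xmin = int(max(xmin, 10))
            ymin = int(max(ymin, 10))
            xmax = int(min(xmax, image.shape[1] - 10))
            ymax = int(min(ymax, image.shape[0] - 10))

            conf = round(float(confidence), 2)
            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 255, 255), 5)

            # Confidence label with a filled background, as in the question.
            (w, h), _ = cv2.getTextSize(f"{conf:.2f}", cv2.FONT_HERSHEY_SIMPLEX, 1.7, 1)
            cv2.rectangle(image, (xmin, ymin + h + 5), (xmin + w + 5, ymin), (255, 255, 255), -1)
            cv2.putText(image, f"{conf:.2f}", (xmin, ymin + h),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.7, (0, 0, 0), 3)

    return image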