I'm trying to integrate an IDS uEye camera with OpenCV, and it mostly works for now.
The problem I'm facing is that when I view the camera image through the IDS SDK, I get the full image. But using OpenCV's VideoCapture, I only get the top left quarter of the image.
I've included an image of a rectangle split into quarters to clarify what the full image should be (the entire rectangle) and what I'm getting from VideoCapture (the top left quarter only).
I've already tried to adjust the image width and height via cap.set, and since the VideoCapture line comes after setting the uEye camera's parameters, I'm rather certain it's not a settings issue with the camera and more to do with VideoCapture itself.
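For reference, the resize attempt looked roughly like this (a minimal sketch; the exact property values are assumptions based on the sensor's full resolution mentioned later):
// Sketch of the cap.set attempt (1280x1024 is assumed to be the sensor's full resolution)
VideoCapture cap(0);
cap.set(cv::CAP_PROP_FRAME_WIDTH, 1280);   // request the full sensor width
cap.set(cv::CAP_PROP_FRAME_HEIGHT, 1024);  // request the full sensor height
// The frames read afterwards still only contained the top left quarter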
char strCamFileName[256];
char* pcImageMemory;
int memId;
int nRet = 0;
SENSORINFO sInfo;
IplImage* img;
HIDS hCam = 0; // index 0 means taking first camera available
RECT rc;
MSG msg;
Mat frame(MaxImageSizeY, MaxImageSizeX, CV_8UC1);
nRet = is_InitCamera(&hCam, hWndDisplay);
if (nRet != IS_SUCCESS)
{
cout << endl << "Error Connecting to Camera" << endl;
cout << "Closing program..." << endl;
return 0;
}
else
{
cout << endl << "Camera initialisation was successful!" << endl << endl;
}
// you can query information about the sensor type of the camera
nRet = is_GetSensorInfo(hCam, &sInfo);
if (nRet == IS_SUCCESS)
{
cout << "Cameramodel: \t\t" << sInfo.strSensorName << endl;
cout << "Maximum image width: \t" << sInfo.nMaxWidth << endl;
cout << "Maximum image height: \t" << sInfo.nMaxHeight << endl << endl << endl;
}
MaxImageSizeX = sInfo.nMaxWidth;
MaxImageSizeY = sInfo.nMaxHeight;
DisplayWidth = MaxImageSizeX;
DisplayHeight = MaxImageSizeY;
int nColorMode = IS_COLORMODE_CBYCRY;
int nBitsPerPixel = 32;
// Get number of available formats and size of list
UINT count;
UINT bytesNeeded = sizeof(IMAGE_FORMAT_LIST);
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_GET_NUM_ENTRIES, &count, sizeof(count));
bytesNeeded += (count - 1) * sizeof(IMAGE_FORMAT_INFO);
void* ptr = malloc(bytesNeeded);
// Create and fill list
IMAGE_FORMAT_LIST* pformatList = (IMAGE_FORMAT_LIST*)ptr;
pformatList->nSizeOfListEntry = sizeof(IMAGE_FORMAT_INFO);
pformatList->nNumListElements = count;
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_GET_LIST, pformatList, bytesNeeded);
// Prepare for creating image buffers
char* pMem = NULL;
int memID = 0;
// Set each format and then capture an image
IMAGE_FORMAT_INFO formatInfo;
// Allocate image mem for current format, set format
nRet = is_AllocImageMem(hCam, MaxImageSizeX, MaxImageSizeY, nBitsPerPixel, &pMem, &memID);
nRet = is_SetImageMem(hCam, pMem, memID);
nRet = is_ImageFormat(hCam, IMGFRMT_CMD_SET_FORMAT, &formatInfo.nFormatID, sizeof(formatInfo.nFormatID));
// Sets the color mode to be used when image data are saved or displayed by the graphics card
is_SetColorMode(hCam, nColorMode);
// allocates an image memory for an image, activates it and sets the way in which the images will be displayed on the screen
int nMemoryId;
is_AllocImageMem(hCam, MaxImageSizeX, MaxImageSizeY, nBitsPerPixel, &pcImageMemory, &nMemoryId);
is_SetImageMem(hCam, pcImageMemory, nMemoryId);
is_SetDisplayMode(hCam, IS_SET_DM_DIB);
is_HotPixel(hCam, IS_HOTPIXEL_DISABLE_CORRECTION, NULL, NULL);
IS_RECT AAOI; // IS_RECT type variable for Auto AOI parameters
AAOI.s32X = MaxImageSizeX / 3 | IS_AOI_IMAGE_POS_ABSOLUTE;
AAOI.s32Width = MaxImageSizeX / 3;
AAOI.s32Y = MaxImageSizeY / 3 | IS_AOI_IMAGE_POS_ABSOLUTE;
AAOI.s32Height = MaxImageSizeY / 3;
double enable = 1;
double disable = 0;
is_SetAutoParameter(hCam, IS_SET_AUTO_SPEED, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_FRAMERATE, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_GAIN, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &disable, 0);
is_AOI(hCam, IS_AOI_AUTO_BRIGHTNESS_SET_AOI, &AAOI, sizeof(AAOI));
is_AOI(hCam, IS_AOI_AUTO_WHITEBALANCE_SET_AOI, &AAOI, sizeof(AAOI));
VideoCapture cap; //--- INITIALIZE VIDEOCAPTURE
int deviceID = 0; // 0 = open default camera
int apiID = cv::CAP_ANY; // 0 = autodetect default API
if (cap.open(deviceID, apiID))
{
cout << "cap opened" << endl;
}
else
{
cout << "cap not opened" << endl;
}
cout << "Press 1 to capture image" << endl
<< "Press 2 to use (last) captured image" << endl;
cap.read(frame);
From what I know, VideoCapture should be able to obtain the entire image from the camera, right? I'm honestly just really confused why VideoCapture cuts off 3/4 of the image, and I would appreciate any help.
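One way to narrow down where the quarter-sized frame comes from is to print the resolution VideoCapture actually reports against the frame it delivers (a small diagnostic sketch, separate from the code above):
// Diagnostic sketch: compare the resolution VideoCapture reports with the frame it delivers
VideoCapture check(0, cv::CAP_ANY);
if (check.isOpened())
{
    cout << "Reported width:  " << check.get(cv::CAP_PROP_FRAME_WIDTH) << endl;
    cout << "Reported height: " << check.get(cv::CAP_PROP_FRAME_HEIGHT) << endl;
    Mat probe;
    if (check.read(probe))
        cout << "Delivered frame: " << probe.cols << " x " << probe.rows << endl;
}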
Alright, I found out the problem... Again, I left out too much code in the original post (because there's a lot of irrelevant code related to USB stuff), so I'll include the most important part I left out here.
double enable = 1;
double disable = 0;
is_SetAutoParameter(hCam, IS_SET_AUTO_SPEED, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_GAIN, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_FRAMERATE, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SHUTTER, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_GAIN, &disable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_WHITEBALANCE, &enable, 0);
is_SetAutoParameter(hCam, IS_SET_ENABLE_AUTO_SENSOR_SHUTTER, &disable, 0);
is_AOI(hCam, IS_AOI_AUTO_BRIGHTNESS_SET_AOI, &AAOI, sizeof(AAOI));
is_AOI(hCam, IS_AOI_AUTO_WHITEBALANCE_SET_AOI, &AAOI, sizeof(AAOI));
//// Acquires a single image from the camera
//is_FreezeVideo(hCam, IS_WAIT);
//// Output an image from an image memory in the specified window
//int nRenderMode = IS_RENDER_FIT_TO_WINDOW;
//is_RenderBitmap(hCam, nMemoryId, hWndDisplay, nRenderMode);
is_ExitCamera(hCam); // exit camera so that OpenCV can access as camera parameters have been set
CalibSet CS; // declaring variable 'CS' under the class 'CalibSet'
Mat livemap1, livemap2;
FileStorage tfs(inputCalibFile, FileStorage::READ); // Read the settings
if (!tfs.isOpened())
{
cout << "Could not open the calibration file: \"" << inputCalibFile << "\"" << endl;
return -1;
}
tfs["camera_matrix"] >> CS.CamMat;
tfs["distortion_coefficients"] >> CS.DistCoeff;
tfs["image_width"] >> CS.image.width;
tfs["image_height"] >> CS.image.height;
tfs.release(); // close Settings file
So, basically, what the class CalibSet does is hold the values from a .xml file that are extracted after the undistortion calibration. More about that here: Camera calibration data retrieval.
But the issue that prevented cap.set from working was likely these last few lines: tfs["image_width"] >> CS.image.width; and tfs["image_height"] >> CS.image.height;, which take the values stored under "image_width" and "image_height" and put them into the respective variables in the class CalibSet.
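The CalibSet class itself isn't shown in the post; a minimal sketch of what such a holder might look like (the layout is an assumption, only the members used above come from the original code):
// Hypothetical sketch of the CalibSet holder class (only CamMat, DistCoeff and image appear in the post)
class CalibSet
{
public:
    Mat CamMat;     // camera matrix read from the calibration .xml
    Mat DistCoeff;  // distortion coefficients read from the calibration .xml
    Size image;     // calibrated image size (image.width / image.height)
};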
And guess what... the width and height in the .xml file were 640x480...
I changed that portion of the .xml to the expected 1280x1024, and the live feed from the camera was fixed; I finally got the full image instead of the 1/4 I was getting before.
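For completeness, once the file carries the correct resolution, the values read into CS.image can be handed straight to the capture so the live feed and the calibration stay consistent (a sketch, assuming the cap object opened earlier in the question):
// Sketch: push the calibrated size to the capture (assumes cap from the question and CS filled above)
cap.set(cv::CAP_PROP_FRAME_WIDTH, CS.image.width);   // 1280 after fixing the .xml
cap.set(cv::CAP_PROP_FRAME_HEIGHT, CS.image.height); // 1024 after fixing the .xml
Mat live;
if (cap.read(live))
    cout << "Live frame: " << live.cols << " x " << live.rows << endl; // should now be 1280 x 1024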