#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
Mat imgOriginal, imgDilate, imgCanny, imgGray, imgBlur, imgWrap, imgCrop, imgScan;
vector<Point> initialPoints, docPoints;
float w = 590, h = 360;
// Pre-processing: grayscale -> blur -> Canny edges -> dilation
Mat preProcessing(Mat img)
{
    cvtColor(img, imgGray, COLOR_BGR2GRAY);
    GaussianBlur(imgGray, imgBlur, Size(3, 3), 3, 0);
    Canny(imgBlur, imgCanny, 25, 75);
    Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
    dilate(imgCanny, imgDilate, kernel);
    return imgDilate;
}
// Return the corners of the largest 4-point contour (the document outline)
vector<Point> getContours(Mat image)
{
    vector<vector<Point>> contours;
    vector<Vec4i> hierarchy;
    findContours(image, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    vector<vector<Point>> conPoly(contours.size());
    vector<Rect> boundRect(contours.size());
    vector<Point> biggest;
    int maxArea = 0;
    for (int i = 0; i < contours.size(); i++)
    {
        int area = contourArea(contours[i]);
        cout << area << endl;
        if (area > 1000)
        {
            float peri = arcLength(contours[i], true);
            approxPolyDP(contours[i], conPoly[i], 0.02 * peri, true);
            if (area > maxArea && conPoly[i].size() == 4)
            {
                //drawContours(imgOriginal, conPoly, i, Scalar(255, 0, 255), 5);
                biggest = { conPoly[i][0], conPoly[i][1], conPoly[i][2], conPoly[i][3] };
                maxArea = area;
            }
        }
    }
    return biggest;
}
void drawPoints(vector<Point> points, Scalar color)
{
    for (int i = 0; i < points.size(); i++)
    {
        circle(imgOriginal, points[i], 10, color, FILLED);
        putText(imgOriginal, to_string(i), points[i], FONT_HERSHEY_PLAIN, 2, color, 2);
    }
}
// Reorder the four corners to: top-left, top-right, bottom-left, bottom-right
// (smallest x+y, largest x-y, smallest x-y, largest x+y)
vector<Point> reorder(vector<Point> points)
{
    vector<Point> newPoints;
    vector<int> sumPoints, subPoints;
    for (int i = 0; i < 4; i++)
    {
        sumPoints.push_back(points[i].x + points[i].y);
        subPoints.push_back(points[i].x - points[i].y);
    }
    newPoints.push_back(points[min_element(sumPoints.begin(), sumPoints.end()) - sumPoints.begin()]); // 0: top-left
    newPoints.push_back(points[max_element(subPoints.begin(), subPoints.end()) - subPoints.begin()]); // 1: top-right
    newPoints.push_back(points[min_element(subPoints.begin(), subPoints.end()) - subPoints.begin()]); // 2: bottom-left
    newPoints.push_back(points[max_element(sumPoints.begin(), sumPoints.end()) - sumPoints.begin()]); // 3: bottom-right
    return newPoints;
}
// Warp the document corners to a flat w x h image
Mat getWarp(Mat img, vector<Point> points, float w, float h)
{
    Point2f src[4] = { points[0], points[1], points[2], points[3] };
    Point2f des[4] = { {0.0f, 0.0f}, {w, 0.0f}, {0.0f, h}, {w, h} };
    Mat matrix = getPerspectiveTransform(src, des);
    warpPerspective(img, imgWrap, matrix, Size(w, h));
    return imgWrap;
}
At this point the error occurs, in adaptiveThreshold: Unhandled exception at 0x00007FFFB9423B29 in Opencv.exe: Microsoft C++ exception: cv::Exception at memory location
Mat Scanner(Mat img)
{
    adaptiveThreshold(img, imgScan, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 3, 11); // Error: the cv::Exception is thrown on this line
    return imgScan;
}
The error occurs only in this part of the code (the Scanner function above); the rest of the code works fine. The exception is thrown only by the adaptiveThreshold call.
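One quick way to see what adaptiveThreshold is actually receiving is to print the channel count and depth of the Mat passed into Scanner, right before the call in main(). This is only a small diagnostic sketch using the existing variables; adaptiveThreshold expects an 8-bit single-channel (CV_8UC1) source.

    // Diagnostic sketch: show what Scanner() receives before the failing call
    cout << "imgCrop channels: " << imgCrop.channels()
         << ", 8-bit depth: " << (imgCrop.depth() == CV_8U) << endl;
    Scanner(imgCrop);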
int main()
{
    string path = "resource/document5.jpg";
    imgOriginal = imread(path);
    resize(imgOriginal, imgOriginal, Size(1080, 720));
    // Image processing
    imgDilate = preProcessing(imgOriginal);
    // Get contours
    initialPoints = getContours(imgDilate);
    // Reorder / draw points
    docPoints = reorder(initialPoints);
    //drawPoints(docPoints, Scalar(0, 255, 0));
    //drawPoints(initialPoints, Scalar(0, 0, 255));
    imgWrap = getWarp(imgOriginal, docPoints, w, h);
    // Crop
    Rect roi(5, 5, w - (2 * 5), h - (2 * 5));
    imgCrop = imgWrap(roi);
    // Scan
    Scanner(imgCrop);
    imshow("imgOriginal contours", imgOriginal);
    imshow("imgDilate", imgDilate);
    imshow("imgWrap", imgWrap);
    imshow("imgCrop", imgCrop);
    imshow("imgScan", imgScan);
    waitKey(0);
    return 0;
}
In the Scanner part I use adaptiveThreshold, but it throws: Unhandled exception at 0x00007FFFB9423B29 in Opencv.exe: Microsoft C++ exception: cv::Exception at memory location
You have to convert the input image to grayscale before calling adaptiveThreshold. adaptiveThreshold only accepts an 8-bit single-channel image, but imgCrop is still a 3-channel BGR image, which is why the cv::Exception is thrown. Use cvtColor to convert it first.
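A minimal sketch of the fix, keeping the original threshold parameters and adding the conversion inside Scanner (the intermediate Mat name imgScanGray is just illustrative):

    Mat Scanner(Mat img)
    {
        Mat imgScanGray;
        // adaptiveThreshold requires an 8-bit single-channel source,
        // but the imgCrop passed from main() is still 3-channel BGR, so convert first.
        cvtColor(img, imgScanGray, COLOR_BGR2GRAY);
        adaptiveThreshold(imgScanGray, imgScan, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 3, 11);
        return imgScan;
    }

Note that blockSize (the 3 here) must stay an odd value greater than 1; the parameter values above are simply kept from the question. The call Scanner(imgCrop) in main() does not need to change, since imgScan is still written as a global and shown with imshow.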