I'm developing an app that uses OMR (Optical Mark Recognition) to read bubble sheets. I'm using the OpenCV API, but I'm having trouble with HoughLinesP: when I call it, it returns an empty result. I used Android's Log.e to print the total, cols and rows of the image before and after the HoughLinesP call.
My code is a conversion of a C++ program to Java; both use OpenCV.
Before the call I get the following result:
After the call I get the following result:
My Java code:
private void scanImage(){
Mat img = Imgcodecs.imread(mediaStorageDir().getPath() + "/" + "test2.jpg", Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
Log.e("[CANAIS]", String.valueOf(img.channels()));
Size sz = new Size(3,3);
Imgproc.GaussianBlur(img, img, sz, 0);
Imgproc.adaptiveThreshold(img, img, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 75, 10);
Core.bitwise_not(img, img);
Mat img2 = new Mat();
Imgproc.cvtColor(img, img2, Imgproc.COLOR_GRAY2RGB);
Mat img3 = new Mat();
Imgproc.cvtColor(img, img3, Imgproc.COLOR_GRAY2RGB);
MatOfInt4 lines = new MatOfInt4();
Log.e("[ORIGINAL IMAGE]", "" + img.total() + "||" + img.rows() + "||" + img.cols());
Imgproc.HoughLinesP(img, lines, 1, Math.PI/180,80,400,10);
Log.e("[LINES IMAGE]", "" + lines.total() + "||" + lines.rows() + "||" + lines.cols());
for(int i = 0; i < lines.total(); i++){
// each row of lines holds one segment as (x1, y1, x2, y2)
double[] l = lines.get(i, 0);
Point pr = new Point(l[0], l[1]);
Point ps = new Point(l[2], l[3]);
Scalar scalar = new Scalar(0,0,255);
Imgproc.line(img2, pr, ps, scalar, 3, Imgproc.LINE_AA, 0);
}
showImage(img2);
LinkedList<Point> corners = new LinkedList<>();
for(int i = 0; i < lines.total(); i++){
for(int x = i + 1; x < lines.total(); x++){
// pass the two segments as (x1, y1, x2, y2) arrays, like the C++ Vec4i version
Point pt = computeIntersect(lines.get(i, 0), lines.get(x, 0));
if(pt.x >= 0 && pt.y >= 0 && pt.x < img.cols() && pt.y < img.rows()){
corners.addLast(pt);
}
}
}
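// Get mass center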
Point center = new Point(0,0);
for(int i = 0; i < corners.size(); i++){
center.x += corners.get(i).x;
center.y += corners.get(i).y;
}
center.x *= 1./ corners.size();
center.y *= 1./ corners.size();
sortCorners(corners, center);
Rect r = Imgproc.boundingRect(new MatOfPoint(corners.toArray(new Point[0])));
Log.e("[RECT]", r.toString());
Mat quad = Mat.zeros(r.height, r.width, CvType.CV_8UC3);
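// Corners of the destination image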
LinkedList<Point> quad_pts = new LinkedList<>();
quad_pts.addLast(new Point(0,0));
quad_pts.addLast(new Point(quad.cols(),0));
quad_pts.addLast(new Point(quad.cols(),quad.rows()));
quad_pts.addLast(new Point(0,quad.rows()));
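// Get transformation matrix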
Mat transmtx = Imgproc.getPerspectiveTransform(Converters.vector_Point2f_to_Mat(corners), Converters.vector_Point2f_to_Mat(quad_pts));
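// Apply perspective transformation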
Imgproc.warpPerspective(img3, quad, transmtx, quad.size());
showImage(quad);
Mat cimg = new Mat();
Imgproc.cvtColor(quad, cimg, Imgproc.COLOR_BGR2GRAY);
Mat circles = new Mat();
Imgproc.HoughCircles(cimg, circles, Imgproc.CV_HOUGH_GRADIENT, 1, img.rows()/8, 100, 75, 0,0);
for(int i = 0; i < circles.cols(); i++){
// circles comes back as a 1 x N mat; each entry is (x, y, radius)
double[] c = circles.get(0, i);
// circle center
Point center1 = new Point(Math.round(c[0]), Math.round(c[1]));
Imgproc.circle(quad, center1, 3, new Scalar(0,255,0), -1, 8, 0);
}
double averR = 0;
LinkedList<Double> row = new LinkedList<>();
LinkedList<Double> col = new LinkedList<>();
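// Find rows and columns of circles for interpolation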
for(int i = 0; i < circles.cols(); i++){
boolean found = false;
double[] c = circles.get(0, i);
int rrr = (int) Math.round(c[2]);
averR += rrr;
int xx = (int) Math.round(c[0]);
int yy = (int) Math.round(c[1]);
for(int j=0; j < row.size(); j++){
double y2 = row.get(j);
if(yy - rrr < y2 && yy + rrr > y2){
found = true;
break;
}
}
if(!found){
row.addLast(Double.valueOf(yy));
}
found = false;
for(int j=0; j < col.size(); j++){
double x2 = col.get(j);
if(xx - rrr < x2 && xx + rrr > x2){
found = true;
break;
}
}
if(!found){
col.addLast(Double.valueOf(xx));
}
}
averR /= circles.total();
// sort the row/col coordinates numerically (Collator only compares Strings)
Collections.sort(row);
Collections.sort(col);
for(int i=0;i<row.size();i++){
double max = 0;
double y = row.get(i);
int ind = -1;
for(int j=0;j<col.size();j++){
double x = col.get(j);
Point c = new Point(x,y);
//Use an actual circle if it exists
for(int k=0;k<circles.cols();k++){
double x2 = circles.get(0, k)[0];
double y2 = circles.get(0, k)[1];
if(Math.abs(y2-y)<averR && Math.abs(x2-x)<averR){
x = x2;
y = y2;
}
}
// circle outline
Imgproc.circle(quad, c, (int) Math.round(averR), new Scalar(0,0,255), 3, 8, 0);
Rect rect = new Rect((int) (x - averR), (int) (y - averR), (int) (2 * averR), (int) (2 * averR));
// take the bubble's bounding square as a sub-matrix, like cimg(rect) in the C++ version
Mat submat = cimg.submat(rect);
double p = (double) Core.countNonZero(submat) / (submat.size().width * submat.size().height);
if(p>=0.3 && p>max){
max = p;
ind = j;
}
}
if(ind==-1)
Log.e("[N SEI]", (i + 1) + ":-");
else
Log.e("[N SEI]", (i + 1) + ":" + (char) ('A' + ind));
}
}
private Point computeIntersect(double[] a, double[] b){
// a and b each hold one segment as (x1, y1, x2, y2), like Vec4i in the C++ version
int x1 = (int) a[0], y1 = (int) a[1], x2 = (int) a[2], y2 = (int) a[3];
int x3 = (int) b[0], y3 = (int) b[1], x4 = (int) b[2], y4 = (int) b[3];
float d = ((float)(x1 - x2) * (y3-y4)) - ((y1-y2) * (x3-x4));
if(d != 0){
Point pt = new Point();
pt.x = ((x1*y2 - y1*x2) * (x3-x4) - (x1-x2) * (x3*y4 - y3*x4)) / d;
pt.y = ((x1*y2 - y1*x2) * (y3-y4) - (y1-y2) * (x3*y4 - y3*x4)) / d;
return pt;
}
else
return new Point(-1, -1);
}
private void sortCorners(LinkedList<Point> corners, Point center){
LinkedList<Point> top = new LinkedList<>();
LinkedList<Point> bot = new LinkedList<>();
Log.e("[CORNERS SIZE]", String.valueOf(corners.size()));
for(int i = 0; i < corners.size(); i++){
if(corners.get(i).y < center.y)
top.addLast(corners.get(i));
else
bot.addLast(corners.get(i));
}
// sort both halves left to right by x, as the C++ comparator does
Comparator<Point> byX = new Comparator<Point>() {
@Override
public int compare(Point o1, Point o2) {
return Double.compare(o1.x, o2.x);
}
};
Collections.sort(top, byX);
Collections.sort(bot, byX);
Log.e("[TOP SIZE]", String.valueOf(top.size()));
Point t1 = top.get(0);
Point tr = top.get(top.size() - 1);
Point b1 = bot.get(0);
Point br = bot.get(bot.size() - 1);
corners.clear();
corners.addLast(t1);
corners.addLast(tr);
corners.addLast(br);
corners.addLast(b1);
}
The C++ code:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <vector>
#include <algorithm>
//g++ main.cpp -o main -I /usr/local/include/opencv -lopencv_core -lopencv_imgproc -lopencv_highgui
using namespace cv;
using namespace std;
cv::Point2f computeIntersect(cv::Vec4i a, cv::Vec4i b)
{
int x1 = a[0], y1 = a[1], x2 = a[2], y2 = a[3];
int x3 = b[0], y3 = b[1], x4 = b[2], y4 = b[3];
if (float d = ((float)(x1-x2) * (y3-y4)) - ((y1-y2) * (x3-x4)))
{
cv::Point2f pt;
pt.x = ((x1*y2 - y1*x2) * (x3-x4) - (x1-x2) * (x3*y4 - y3*x4)) / d;
pt.y = ((x1*y2 - y1*x2) * (y3-y4) - (y1-y2) * (x3*y4 - y3*x4)) / d;
return pt;
}
else
return cv::Point2f(-1, -1);
}
bool comparator2(double a,double b){
return a<b;
}
bool comparator3(Vec3f a,Vec3f b){
return a[0]<b[0];
}
bool comparator(Point2f a,Point2f b){
return a.x<b.x;
}
void sortCorners(std::vector<cv::Point2f>& corners, cv::Point2f center)
{
std::vector<cv::Point2f> top, bot;
for (int i = 0; i < corners.size(); i++)
{
if (corners[i].y < center.y)
top.push_back(corners[i]);
else
bot.push_back(corners[i]);
}
sort(top.begin(),top.end(),comparator);
sort(bot.begin(),bot.end(),comparator);
cv::Point2f tl = top[0];
cv::Point2f tr = top[top.size()-1];
cv::Point2f bl = bot[0];
cv::Point2f br = bot[bot.size()-1];
corners.clear();
corners.push_back(tl);
corners.push_back(tr);
corners.push_back(br);
corners.push_back(bl);
}
int main(int argc, char* argv[]){
Mat img = imread("example.jpg",0);
cv::Size size(3,3);
cv::GaussianBlur(img,img,size,0);
adaptiveThreshold(img, img,255,CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY,75,10);
cv::bitwise_not(img, img);
cv::Mat img2;
cvtColor(img,img2, CV_GRAY2RGB);
cv::Mat img3;
cvtColor(img,img3, CV_GRAY2RGB);
vector<Vec4i> lines;
HoughLinesP(img, lines, 1, CV_PI/180, 80, 400, 10);
for( size_t i = 0; i < lines.size(); i++ )
{
Vec4i l = lines[i];
line( img2, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA);
}
imshow("example",img2);
std::vector<cv::Point2f> corners;
for (int i = 0; i < lines.size(); i++)
{
for (int j = i+1; j < lines.size(); j++)
{
cv::Point2f pt = computeIntersect(lines[i], lines[j]);
if (pt.x >= 0 && pt.y >= 0 && pt.x < img.cols && pt.y < img.rows)
corners.push_back(pt);
}
}
// Get mass center
cv::Point2f center(0,0);
for (int i = 0; i < corners.size(); i++)
center += corners[i];
center *= (1. / corners.size());
sortCorners(corners, center);
Rect r = boundingRect(corners);
cout<<r<<endl;
cv::Mat quad = cv::Mat::zeros(r.height, r.width, CV_8UC3);
// Corners of the destination image
std::vector<cv::Point2f> quad_pts;
quad_pts.push_back(cv::Point2f(0, 0));
quad_pts.push_back(cv::Point2f(quad.cols, 0));
quad_pts.push_back(cv::Point2f(quad.cols, quad.rows));
quad_pts.push_back(cv::Point2f(0, quad.rows));
// Get transformation matrix
cv::Mat transmtx = cv::getPerspectiveTransform(corners, quad_pts);
// Apply perspective transformation
cv::warpPerspective(img3, quad, transmtx, quad.size());
imshow("example2",quad);
Mat cimg;
cvtColor(quad,cimg, CV_BGR2GRAY);
vector<Vec3f> circles;
HoughCircles(cimg, circles, CV_HOUGH_GRADIENT, 1, img.rows/8, 100, 75, 0, 0 );
for( size_t i = 0; i < circles.size(); i++ ){
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
// circle center
circle( quad, center, 3, Scalar(0,255,0), -1, 8, 0 );
}
imshow("example4",quad);
waitKey();
double averR = 0;
vector<double> row;
vector<double> col;
//Find rows and columns of circles for interpolation
for(int i=0;i<circles.size();i++){
bool found = false;
int r = cvRound(circles[i][2]);
averR += r;
int x = cvRound(circles[i][0]);
int y = cvRound(circles[i][1]);
for(int j=0;j<row.size();j++){
double y2 = row[j];
if(y - r < y2 && y + r > y2){
found = true;
break;
}
}
if(!found){
row.push_back(y);
}
found = false;
for(int j=0;j<col.size();j++){
double x2 = col[j];
if(x - r < x2 && x + r > x2){
found = true;
break;
}
}
if(!found){
col.push_back(x);
}
}
averR /= circles.size();
sort(row.begin(),row.end(),comparator2);
sort(col.begin(),col.end(),comparator2);
for(int i=0;i<row.size();i++){
double max = 0;
double y = row[i];
int ind = -1;
for(int j=0;j<col.size();j++){
double x = col[j];
Point c(x,y);
//Use an actual circle if it exists
for(int k=0;k<circles.size();k++){
double x2 = circles[k][0];
double y2 = circles[k][1];
if(abs(y2-y)<averR && abs(x2-x)<averR){
x = x2;
y = y2;
}
}
// circle outline
circle( quad, c, averR, Scalar(0,0,255), 3, 8, 0 );
Rect rect(x-averR,y-averR,2*averR,2*averR);
Mat submat = cimg(rect);
double p =(double)countNonZero(submat)/(submat.size().width*submat.size().height);
if(p>=0.3 && p>max){
max = p;
ind = j;
}
}
if(ind==-1)printf("%d:-",i+1);
else printf("%d:%c",i+1,'A'+ind);
cout<<endl;
}
// circle outline*/
imshow("example3",quad);
waitKey();
return 0;
}
When I run the app, I get the following error at this line:
sortCorners(corners, center);
Error:
java.lang.IndexOutOfBoundsException
If anyone could tell me where the error is and how I can fix it, I would be grateful.
You need to run the Canny edge detector to convert the gray image to a binary image before running it through the HoughLinesP transform.
From the HoughLinesP documentation:
"Finds line segments in a binary image using the probabilistic Hough transform."