/* Test: line fitting in implementing a virtual studio
using cvHoughLines2() function in OpenCV library
ref.
1) swPark_2000rti.pdf
2) 박승우_1999대한전자공학회지 제36권 S편 제7호 (Seung-Woo Park, 1999, Journal of the Institute of Electronics Engineers of Korea, Vol. 36 Series S, No. 7)
3) http://opencv.willowgarage.com/documentation/feature_detection.html?highlight=cvhoughlines#cvHoughLines2
camera: Logitech QuickCam Pro 4000
2010, lym
*/
#include <OpenCV/OpenCV.h>

#include <cmath>
#include <iostream>
using namespace std;
// non-maximum suppression (NMS)
// Zero out, in place, every pixel of "image" (32-bit float, single channel)
// that is at or below "threshold", or that has a strictly larger neighbour
// inside the (2*kernel+1) x (2*kernel+1) window centred on it.
void nonMaximumSuppression ( IplImage* image, int kernel, int threshold )
{
    const int rows = image->height;
    const int cols = image->width;
    for ( int row = 0; row < rows; ++row )
    {
        for ( int col = 0; col < cols; ++col )
        {
            const float value = CV_IMAGE_ELEM( image, float, row, col );
            // pixels not above the threshold are suppressed outright
            if ( !(value > threshold) )
            {
                CV_IMAGE_ELEM( image, float, row, col ) = 0.0;
                continue;
            }
            // scan the neighbourhood; any strictly larger neighbour kills this pixel
            bool suppressed = false;
            for ( int dy = -kernel; dy <= kernel && !suppressed; ++dy )
            {
                const int ny = row + dy;
                if ( ny < 0 || ny >= rows ) { // border check in y
                    continue;
                }
                for ( int dx = -kernel; dx <= kernel; ++dx )
                {
                    const int nx = col + dx;
                    if ( nx < 0 || nx >= cols ) { // border check in x
                        continue;
                    }
                    if ( value < CV_IMAGE_ELEM( image, float, ny, nx ) )
                    {
                        CV_IMAGE_ELEM( image, float, row, col ) = 0.0;
                        suppressed = true;
                        break;
                    }
                }
            }
        }
    }
}
// non-maximum suppression (NMS)
// Variant that suppresses against a pair of gradient images: a pixel of
// "image" survives only if it is positive AND no neighbour inside the
// (2*kernel+1) x (2*kernel+1) window is larger in either "image" or "image2".
// "image" is modified in place; "image2" is read-only here.
void nonMaximumSuppression2 ( IplImage* image, IplImage* image2, int kernel)
{
    const int rows = image->height;
    const int cols = image->width;
    for ( int row = 0; row < rows; ++row )
    {
        for ( int col = 0; col < cols; ++col )
        {
            const float value = CV_IMAGE_ELEM( image, float, row, col );
            // non-positive responses can never be maxima: clear them immediately
            if ( !(value > 0) )
            {
                CV_IMAGE_ELEM( image, float, row, col ) = 0.0;
                continue;
            }
            bool suppressed = false;
            for ( int dy = -kernel; dy <= kernel && !suppressed; ++dy )
            {
                const int ny = row + dy;
                if ( ny < 0 || ny >= rows ) { // border check in y
                    continue;
                }
                for ( int dx = -kernel; dx <= kernel; ++dx )
                {
                    const int nx = col + dx;
                    if ( nx < 0 || nx >= cols ) { // border check in x
                        continue;
                    }
                    // a larger response in either gradient image suppresses this pixel
                    if ( value < CV_IMAGE_ELEM( image, float, ny, nx )
                      || value < CV_IMAGE_ELEM( image2, float, ny, nx ) )
                    {
                        CV_IMAGE_ELEM( image, float, row, col ) = 0.0;
                        suppressed = true;
                        break;
                    }
                }
            }
        }
    }
}
// in the gradient direction
// Keep an edge pixel of "image1" only if its gradient response is at least as
// large as the response at the same position in the orthogonal direction
// ("image2"); otherwise zero it out in place. "image2" is read-only.
// NOTE(review): this helper is not called anywhere in this file.
void selectEdges( IplImage* image1, IplImage* image2 )
{
    for ( int y = 0; y < image1->height; y++ )
    {
        for ( int x = 0; x < image1->width; x++ )
        {
            // (removed a leftover no-op debug branch: `if (x == width-1) x = x;`)
            float intensity1 = CV_IMAGE_ELEM( image1, float, y, x );
            if ( intensity1 > 0.0 ) { // if the pixel is an edge point surviving NMS
                float intensity2 = CV_IMAGE_ELEM( image2, float, y, x );
                // compare it with the gradient value in the other direction
                if ( intensity1 < intensity2 ) {
                    CV_IMAGE_ELEM( image1, float, y, x ) = 0.0;
                }
            }
        }
    }
}
// draw found lines
// Render at most 100 lines (rho/theta pairs produced by cvHoughLines2 in
// CV_HOUGH_STANDARD mode) onto "image" as red segments long enough
// (+/-1000 px along the line) to span the whole frame.
void drawLines ( CvSeq* lines, IplImage* image )
{
    const int count = MIN( lines->total, 100 );
    for ( int idx = 0; idx < count; ++idx )
    {
        const float* param = (float*)cvGetSeqElem( lines, idx );
        const float rho   = param[0]; // signed distance from the origin
        const float theta = param[1]; // angle of the line normal, radians
        // (cosT, sinT) is the unit normal; the line passes through the foot
        // point (x0, y0) and runs perpendicular to the normal.
        const double cosT = cos(theta), sinT = sin(theta);
        const double x0 = cosT * rho, y0 = sinT * rho;
        CvPoint p1 = cvPoint( cvRound(x0 - 1000*sinT), cvRound(y0 + 1000*cosT) );
        CvPoint p2 = cvPoint( cvRound(x0 + 1000*sinT), cvRound(y0 - 1000*cosT) );
        cvLine( image, p1, p2, CV_RGB(255,0,0), 1, 8 );
    }
}
// calculate intersection points of lines in X and Y to find corner points
// Each line is in Hough normal form: rho = x*cos(theta) + y*sin(theta).
// For every pair (lineX[i], lineY[j]) the 2x2 linear system is solved by
// Cramer's rule and the intersection is drawn into "image" as a circle.
// Fixes vs. original: (near-)parallel pairs (zero determinant) are skipped
// instead of dividing by ~0, and Cy is back-substituted into whichever line
// has the larger sin(theta) so a perfectly vertical lineX (bX == 0) no longer
// causes a division by zero.
void findIntersections ( CvSeq* linesX, CvSeq* linesY, IplImage* image )
// find intersections of lines, "linesX" & "linesY", and draw them in "image"
{
    for( int i = 0; i < MIN(linesX->total,100); i++ )
    {
        float* lineX = (float*)cvGetSeqElem(linesX,i);
        // hoist the per-lineX terms out of the inner loop
        float rhoX = lineX[0];
        float thetaX = lineX[1];
        double aX = cos(thetaX), bX = sin(thetaX);
        for( int j = 0; j < MIN(linesY->total,100); j++ )
        {
            float* lineY = (float*)cvGetSeqElem(linesY,j);
            float rhoY = lineY[0];
            float thetaY = lineY[1];
            double aY = cos(thetaY), bY = sin(thetaY);
            // Cramer's rule denominator; it vanishes when the two lines are
            // (near-)parallel, in which case there is no unique intersection.
            double det = aX*bY - bX*aY;
            if ( fabs(det) < 1e-9 ) {
                continue; // parallel lines: no intersection to draw
            }
            double Cx = ( rhoX*bY - rhoY*bX ) / det;
            // back-substitute into the better-conditioned equation
            double Cy = ( fabs(bX) > fabs(bY) ) ? ( rhoX - aX*Cx ) / bX
                                                : ( rhoY - aY*Cx ) / bY;
            CvPoint c; // the intersection point of lineX[i] and lineY[j]
            c.x = cvRound(Cx);
            c.y = cvRound(Cy);
            // display the points in an image
            cvCircle( image, c, 3, CV_RGB(0,255,100), 2 );
        }
    }
}
int width = 320; // expected capture frame width in pixels; used only for window layout in main()
int height = 240; // expected capture frame height in pixels; used only for window layout in main()
// Capture loop: grab frames from camera #0, filter with a DoG kernel in x and
// y, non-maximum-suppress the gradient maps, run the standard Hough transform
// on each direction, then draw the lines and their intersections on the input.
int main()
{
// per-frame image buffers, allocated once on the first valid frame and reused
IplImage* iplInput = 0; // input image
IplImage* iplGray = 0; // grey image converted from input image
IplImage *iplTemp = 0; // converted image from input image with a change of bit depth
IplImage* iplDoGx = 0, *iplDoGxClone; // filtered image by DoG in x-direction
IplImage* iplDoGy = 0, *iplDoGyClone; // filtered image by DoG in y-direction
IplImage* iplEdgeX = 0, *iplEdgeY = 0; // edge-detected image in each direction, to be used as input in line-fitting
// min/max of each image, used to rescale pixel values into [0, 1]
double minValx, maxValx;
double minValy, maxValy;
double minValt, maxValt;
double threshold = 0.0; // NOTE(review): unused — shadowed by the int 'threshold' declared inside the loop
int kernel = 1; // NMS half-window: neighbours within (2*kernel+1)^2 are compared
// 1x7 derivative-of-Gaussian (DoG) taps; DoGy below is the transposed 7x1 kernel
double h[] = { -1, -7, -15, 0, 15, 7, 1 };
CvMat DoGx = cvMat( 1, 7, CV_64FC1, h );
CvMat* DoGy = cvCreateMat( 7, 1, CV_64FC1 ); // NOTE(review): never released (missing cvReleaseMat)
cvTranspose( &DoGx, DoGy ); // transpose(&DoGx) -> DoGy
// create windows
cvNamedWindow("input");
cvNamedWindow( "temp" );
char title_fx[200], title_fy[200];
sprintf(title_fx, "filtered image by DoGx");
sprintf(title_fy, "filtered image by DoGy");
cvNamedWindow(title_fx);
cvNamedWindow(title_fy);
char title_ex[200], title_ey[200];
sprintf(title_ex, "detected edges in x direction");
sprintf(title_ey, "detected edges in y direction");
cvNamedWindow(title_ex);
cvNamedWindow(title_ey);
// tile the windows in a 2-column, 3-row grid using the expected frame size
cvMoveWindow( "temp", 110+width, 100 );
cvMoveWindow( title_fx, 100, 100+height+30 );
cvMoveWindow( title_fy, 110+width, 100+height+30 );
cvMoveWindow( title_ex, 100, 100+(height+30)*2 );
cvMoveWindow( title_ey, 110+width, 100+(height+30)*2 );
// output information of lines found by Hough transform
CvMemStorage* storageX = cvCreateMemStorage(0), *storageY = cvCreateMemStorage(0); // NOTE(review): never released
CvSeq* linesX = 0, *linesY = 0;
// initialize capture from a camera
CvCapture* capture = cvCaptureFromCAM(0); // capture from video device #0
int count = 0; // number of grabbed frames
while(1) {
// get video frames from the camera
if ( !cvGrabFrame(capture) ) {
printf("Could not grab a frame\n\7");
exit(0);
}
else {
// NOTE(review): this second grab discards the frame grabbed in the condition
// above, so only every other camera frame is processed — confirm intended
cvGrabFrame( capture ); // capture a frame
iplInput = cvRetrieveFrame(capture); // retrieve the captured frame (buffer owned by OpenCV; do not release)
// iplInput = cvLoadImage("P:/input.bmp"); // retrieve the caputred frame
if(iplInput) {
if(0 == count) {
// create an image header and allocate the image data
// (done once; later frames are assumed to keep the same size)
iplGray = cvCreateImage(cvGetSize(iplInput), 8, 1);
iplTemp = cvCreateImage(cvGetSize(iplInput), IPL_DEPTH_32F, 1);
iplDoGx = cvCreateImage(cvGetSize(iplInput), IPL_DEPTH_32F, 1);
iplDoGy = cvCreateImage(cvGetSize(iplInput), IPL_DEPTH_32F, 1);
iplDoGyClone = cvCloneImage(iplDoGy), iplDoGxClone = cvCloneImage(iplDoGx);
iplEdgeX = cvCreateImage(cvGetSize(iplInput), 8, 1);
iplEdgeY = cvCreateImage(cvGetSize(iplInput), 8, 1);
}
// convert the input color image to gray one
cvCvtColor(iplInput, iplGray, CV_BGR2GRAY); // convert an image from one color space to another
// convert one array to another with optional linear transformation
cvConvert(iplGray, iplTemp);
// increase the frame number
count++;
}
// NOTE(review): if the very first retrieve fails (iplInput == 0), the code
// below runs on unallocated buffers — consider guarding with `continue`
// cvShowImage( "input", iplInput );
// convolve an image with the kernel
// void cvFilter2D(const CvArr* src, CvArr* dst, const CvMat* kernel, CvPoint anchor=cvPoint(-1, -1)
cvFilter2D( iplTemp, iplDoGx, &DoGx ); // convolve an image with the DoG kernel in x-direction
cvFilter2D( iplTemp, iplDoGy, DoGy ); // convolve an image with the DoG kernel in y-direction
// take absolute gradient magnitude in each direction
// ref. http://opencv.willowgarage.com/documentation/operations_on_arrays.html?highlight=cvabs#cvAbsDiffS
cvAbs(iplDoGx, iplDoGx); cvAbs(iplDoGy, iplDoGy);
// normalize the pixel values to [0, 1] by dividing by each image's maximum
// http://opencv.willowgarage.com/documentation/operations_on_arrays.html?highlight=cvminmax#cvMinMaxLoc
// void cvMinMaxLoc(const CvArr* arr, double* minVal, double* maxVal, CvPoint* minLoc=NULL, CvPoint* maxLoc=NULL, const CvArr* mask=NULL)¶
cvMinMaxLoc( iplDoGx, &minValx, &maxValx );
cvMinMaxLoc( iplDoGy, &minValy, &maxValy );
cvMinMaxLoc( iplTemp, &minValt, &maxValt );
cvScale( iplDoGx, iplDoGx, 1.0 / maxValx );
cvScale( iplDoGy, iplDoGy, 1.0 / maxValy );
cvScale( iplTemp, iplTemp, 1.0 / maxValt );
// display windows
cvShowImage( "temp", iplTemp );
cvShowImage( title_fx, iplDoGx );
cvShowImage( title_fy, iplDoGy );
// save images to files (every frame — disk I/O on each iteration)
cvSaveImage("gray.bmp", iplGray);
cvSaveImage("temp.bmp", iplTemp);
cvSaveImage("DoGx.bmp", iplDoGx);
cvSaveImage("DoGy.bmp", iplDoGy);
// non-Maximum suppression (NMS)
// void NonMaximumSuppression ( IplImage* image, int kernel, int threshold )
// nonMaximumSuppression ( iplDoGx, kernel, threshold ); nonMaximumSuppression ( iplDoGy, kernel, threshold );
// cvCopyImage(iplDoGy, iplDoGyClone), cvCopyImage(iplDoGx, iplDoGxClone);
// snapshot both gradient maps first, so each NMS pass compares against the
// other direction's pre-suppression values
cvCopy(iplDoGy, iplDoGyClone), cvCopy(iplDoGx, iplDoGxClone);
nonMaximumSuppression2 ( iplDoGx, iplDoGyClone, kernel );
nonMaximumSuppression2 ( iplDoGy, iplDoGxClone, kernel );
// normalize the pixel values again after suppression
// http://opencv.willowgarage.com/documentation/operations_on_arrays.html?highlight=cvminmax#cvMinMaxLoc
// void cvMinMaxLoc(const CvArr* arr, double* minVal, double* maxVal, CvPoint* minLoc=NULL, CvPoint* maxLoc=NULL, const CvArr* mask=NULL)¶
cvMinMaxLoc( iplDoGx, &minValx, &maxValx );
cvMinMaxLoc( iplDoGy, &minValy, &maxValy );
cvMinMaxLoc( iplTemp, &minValt, &maxValt );
cvScale( iplDoGx, iplDoGx, 1.0 / maxValx );
cvScale( iplDoGy, iplDoGy, 1.0 / maxValy );
cvScale( iplTemp, iplTemp, 1.0 / maxValt );
// display windows
cvShowImage( title_ex, iplDoGx );
cvShowImage( title_ey, iplDoGy );
// save images to files
cvSaveImage("edgeX.bmp", iplDoGx);
cvSaveImage("edgeY.bmp", iplDoGy);
// line fitting
// The x-gradient responds to (near-)vertical structure, hence it feeds
// iplEdgeY and vice versa — presumably intentional; note the float maps are
// normalized to [0,1], so the 8-bit conversion yields an essentially binary
// (0/1) edge map — confirm this is what cvHoughLines2 is meant to receive
cvConvert(iplDoGx, iplEdgeY);
cvConvert(iplDoGy, iplEdgeX);
// ref. http://opencv.willowgarage.com/documentation/feature_detection.html?highlight=cvhoughlines#cvHoughLines2
// CvSeq* cvHoughLines2(CvArr* image, void* storage, int method, double rho, double theta, int threshold, double param1=0, double param2=0)
double rho = 1.0; // distance resolution in pixel-related units
double theta = 1.0; // angle resolution in degrees (converted to radians via CV_PI/180 below)
int threshold = 20; // shadows the outer double 'threshold' ("A line is returned by the function if the corresponding accumulator value is greater than threshold.")
linesX = cvHoughLines2(iplEdgeX, storageX, CV_HOUGH_STANDARD, 1.0*rho, CV_PI/180*theta, threshold, 0, 0);
linesY = cvHoughLines2(iplEdgeY, storageY, CV_HOUGH_STANDARD, 1.0*rho, CV_PI/180*theta, threshold, 0, 0);
// linesX = cvHoughLines2(iplEdgeX, storageX, CV_HOUGH_PROBABILISTIC, 1.0*rho, CV_PI/180*1.0, 10, 1.0, 1.0);
// linesY = cvHoughLines2(iplEdgeY, storageY, CV_HOUGH_PROBABILISTIC, 1.0*rho, CV_PI/180*theta, 10, 1.0, 1.0);
cout << "# of found lines = " << linesY->total << " vertical " << linesX->total << " horizontal, " << endl;
// draw found lines
drawLines ( linesX, iplInput );
drawLines ( linesY, iplInput );
// find intersections of lines
// void findIntersections ( CvSeq* linesX, CvSeq* linesY, IplImage* image )
findIntersections( linesX, linesY, iplInput );
cvShowImage( "input", iplInput );
cvSaveImage( "input.bmp", iplInput );
// exit the loop on any key press (10 ms poll also services the GUI)
if( cvWaitKey(10) >= 0 )
break;
}
}
// NOTE(review): the created images, mem storages and DoGy are never released
cvReleaseCapture( &capture ); // release the capture source
cvDestroyWindow( "input" );
cvDestroyWindow( "temp" );
cvDestroyWindow(title_fx);
cvDestroyWindow(title_fy);
cvDestroyWindow(title_ex);
cvDestroyWindow(title_ey);
return 0;
}