Example #1
bool Robot::adjustWorldCoordinate(IplImage* image, double coordAdjustRate)
{
    IplImage *img;
    IplImage *src1;
    if(image->nChannels==3)
    {
        IplImage *hsv_img = get_hsv(image);
        img=worldMap.getField(hsv_img);
        cvReleaseImage(&hsv_img);
        src1=img;
    }
    else
    {
        img=image;
        src1=cvCreateImage(cvGetSize(image),IPL_DEPTH_8U,1);
        cvCopy(img,src1,0); //copy rather than alias: src1 is released below, and
                            //releasing the caller's image would be a use-after-free bug
            //cvCvtColor(img, src1, CV_BGR2GRAY);
    }
		if( img != 0 )
		{
			IplImage* dst = cvCreateImage( cvGetSize(img), 8, 1 );
			IplImage* color_dst = cvCreateImage( cvGetSize(img), 8, 3 );
			CvMemStorage* storage = cvCreateMemStorage(0);
			CvSeq* ls = 0;
			int i;
			cvCanny( src1, dst, 50, 200, 3 );

			cvCvtColor( dst, color_dst, CV_GRAY2BGR );

			ls = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 2, CV_PI/90, 20, 5, 30 );
			//ls = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 5, CV_PI/30, 10, 20, 5 );
			vector<myLine> tmplines;
			for( i = 0; i < ls->total; i++ )
			{
				CvPoint* tmpl = (CvPoint*)cvGetSeqElem(ls,i);
				cvLine( color_dst, tmpl[0], tmpl[1], CV_RGB(255,0,0), 1, 8 );

                cv::Point2f tmpp[2];
                cv::Point2f scrPos(tmpl[0].x,tmpl[0].y);
                cv::Point2f roboPos=worldMap.coord_screen2robot(scrPos,true);
                cv::Point2f worldPos=worldMap.coord_robot2world(roboPos);
                tmpp[0]=worldPos;
                scrPos=cv::Point2f(tmpl[1].x,tmpl[1].y);
                roboPos=worldMap.coord_screen2robot(scrPos,true);
                worldPos=worldMap.coord_robot2world(roboPos);
                tmpp[1]=worldPos;
                myLine templ(tmpp[0],tmpp[1]);
                if(templ.l>LINE_LENGTH_LBOUND)
                    tmplines.push_back(templ);
				//printf("length=%f angle=%f\n",
				//	sqrt(float((tmpl[1].y-tmpl[0].y)*(tmpl[1].y-tmpl[0].y))
				//		+float((tmpl[1].x-tmpl[0].x)*(tmpl[1].x-tmpl[0].x))),
				//	atan2(float(tmpl[1].y-tmpl[0].y),float(tmpl[1].x-tmpl[0].x)));
			}
			//printf("\n");
			cvNamedWindow( "Source", 1 );
			cvShowImage( "Source", img );

			cvNamedWindow( "Hough", 1 );
			cvShowImage( "Hough", color_dst );

			cvWaitKey(10);
			cvReleaseImage(&dst);
			cvReleaseImage(&src1);
			cvReleaseImage(&color_dst);
			cvReleaseMemStorage(&storage);
			if(coordAdjustRate==0)
			{
                for(i=0;i<tmplines.size();++i)
                {
                    lines.push_back(tmplines[i]);
                }
			}
			else if(coordAdjustRate==2)
			{
                for(i=0;i<tmplines.size();++i)
                {
                    lines.push_back(tmplines[i]);
                }
                //vector<double> oris;
                vector<int> lineNums;
                vector<double> lineValues;
                int groupId=0;
			    for(i=0;i<lines.size();++i)
			    {
			        bool classified=false;
			        int j;
			        for(j=0;j<i;++j)
			        {
			            double angle=lines[i].theta-lines[j].theta+CV_PI/4.0;   //to make the process simple, add 45 degree
                                                                                //to turn the cared angles to the middle of a phase
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            int phase=(int)(angle/(CV_PI/2.0));
			            double angle90=angle-CV_PI/2.0*(double)phase;
			            phase%=2;
			            if(fabs(angle90-CV_PI/4.0)<CV_PI/60.0)//subtract the added 45 degree; fabs, since integer abs() would truncate
			            {
			                lines[i].clsId=lines[j].clsId/2*2+phase;
			                ++lineNums[lines[i].clsId];
			                lineValues[lines[i].clsId]+=lines[i].l;
			                classified=true;
			                break;
			            }
			        }
			        if(classified==false)
			        {
			            lines[i].clsId=groupId;
                        lineNums.push_back(1);
                        lineNums.push_back(0);
                        lineValues.push_back(lines[i].l);
                        lineValues.push_back(0);
			            groupId+=2;
			        }
			    }
			    int maxValueGroup=0;
			    double maxValue=0;
			    for(i=0;i<lineNums.size();i+=2)
			    {
			        if(lineValues[i]+lineValues[i+1]>maxValue)
			        {
			            maxValue=lineValues[i]+lineValues[i+1];
			            maxValueGroup=i;
			        }
			    }
			    maxValueGroup/=2;
			    double sumAngle=0;
			    double sumL=0;
			    for(i=0;i<lines.size();++i)
			    {
			        if(lines[i].clsId/2==maxValueGroup)
			        {
			            double angle=lines[i].theta+CV_PI/4.0;//similar strategy, add 45 degree
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            double angle90=angle-CV_PI/2.0*(double)((int)(angle/(CV_PI/2.0)));
			            sumAngle+=(angle90-CV_PI/4.0)*lines[i].l;//subtract 45 degree
			            sumL+=lines[i].l;
			        }
			    }
			    if(sumL==0)
			    {
                    //printf("false 2 sumL=0\n");
			        return false;
			    }
			    mainAngle=sumAngle/sumL;
			    mainGroupId=maxValueGroup;
			    //printf("mainAngle=%f mainGroupId=%d\n",mainAngle,mainGroupId);
			}
			else if(coordAdjustRate==1)
            {
                CvRect bBox=worldMap.getMap_bbox();
                    //printf("in func param=1\n");
                    //printf("tmplines.size=%d\n",tmplines.size());
                for(i=0;i<tmplines.size();++i)
                {
                    cv::Point2f imgPos=world2image(tmplines[i].p[0]);
                    if(!(imgPos.x>bBox.x-BBOX_DELTA && imgPos.x<bBox.x+bBox.width+BBOX_DELTA && imgPos.y>bBox.y-BBOX_DELTA && imgPos.y<bBox.y+bBox.height+BBOX_DELTA))
                        continue;
			        bool classified=false;
			        double minAngle=CV_PI;
			        int minAnglePhase=0;
			        int bestJ=-1;
			        int j;
			        for(j=0;j<lines.size();++j)
			        {
			            if(lines[j].clsId/2!=mainGroupId)
                            continue;
			            double angle=tmplines[i].theta-lines[j].theta+CV_PI/4.0;   //to make the process simple, add 45 degree
                                                                                //to turn the cared angles to the middle of a phase
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            int phase=(int)(angle/(CV_PI/2.0));
			            double angle90=angle-CV_PI/2.0*(double)phase;
			            phase%=2;
			            if(fabs(angle90-CV_PI/4.0)<minAngle)//subtract the added 45 degree
			            {
			                minAngle=fabs(angle90-CV_PI/4.0);
			                bestJ=j;
                                minAnglePhase=phase;
			            }
			        }
			        if(bestJ>-1)
			        {
			            //if(minAngle<CV_PI/6.0)
                        tmplines[i].clsId=mainGroupId*2+minAnglePhase;
                        classified=true;
                        //printf("nearest main ori found. angle diff=%f\n",minAngle);
			        }
			    }
			    double sumAngle=0;
			    double sumL=0;
			    for(i=0;i<tmplines.size();++i)
			    {
			        if(tmplines[i].clsId/2==mainGroupId)
			        {
                    //printf("comparing with a main line..i=%d\n",i);
			            double angle=tmplines[i].theta+CV_PI/4.0;//similar strategy, add 45 degree
			            if(angle<0)
                            angle+=CV_PI*2.0;
			            double angle90=angle-CV_PI/2.0*double((int)(angle/(CV_PI/2.0)));
			            sumAngle+=angle90*tmplines[i].l;//use the 45 degree to balance the unwanted lines
			            sumL+=tmplines[i].l;
			        }
			    }
			    if(sumL<LINE_LENGTH_SUM_LBOUND)
			    {
                    //printf("false sumL=%f<%d\n",sumL,LINE_LENGTH_SUM_LBOUND);
			        return false;
			    }
			    double curAngle=sumAngle/sumL-CV_PI/4.0;//subtract 45 degree
			    ori+=curAngle-mainAngle;
                    //printf("true oriChange=%f\n",curAngle-mainAngle);
            }
		}

    return true;
}
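The 45-degree shift used twice above is the heart of the classification: it folds any angle difference into the middle of a 90-degree phase, so lines nearly parallel or nearly perpendicular to a reference line land in the same group. A minimal standalone sketch of that folding (the helper and main() are illustrative, not part of the original class):

#include <math.h>
#include <stdio.h>
#define PI 3.14159265358979323846

/* Fold an angle difference into [-45,45) degrees of the nearest 90-degree
   phase. Adding 45 degrees first moves the interesting angles (0 and 90)
   to the middle of a phase so the integer division never splits them; the
   phase parity distinguishes parallel (0) from perpendicular (1). */
double fold90(double diff, int *phase)
{
    double angle = diff + PI/4.0;
    if (angle < 0) angle += PI*2.0;
    int p = (int)(angle/(PI/2.0));
    *phase = p % 2;
    return angle - PI/2.0*(double)p - PI/4.0; /* subtract the added 45 again */
}

int main(void)
{
    int phase;
    double r = fold90(92.0*PI/180.0, &phase); /* two lines 92 degrees apart */
    printf("residual=%.1f deg, phase=%d\n", r*180.0/PI, phase); /* 2.0, 1 */
    return 0;
}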
Example #2
bool CvCaptureCAM_DC1394_v2_CPP::initVidereRectifyMaps( const char* info,
    IplImage* ml[2], IplImage* mr[2] )
{
    float identity_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    CvMat l_rect = cvMat(3, 3, CV_32F, identity_data), r_rect = l_rect;
    float l_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    float r_intrinsic_data[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
    CvMat l_intrinsic = cvMat(3, 3, CV_32F, l_intrinsic_data);
    CvMat r_intrinsic = cvMat(3, 3, CV_32F, r_intrinsic_data);
    float l_distortion_data[] = {0,0,0,0,0}, r_distortion_data[] = {0,0,0,0,0};
    CvMat l_distortion = cvMat(1, 5, CV_32F, l_distortion_data);
    CvMat r_distortion = cvMat(1, 5, CV_32F, r_distortion_data);
    IplImage* mx = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
    IplImage* my = cvCreateImage(cvGetSize(ml[0]), IPL_DEPTH_32F, 1);
    int k, j;

    for( k = 0; k < 2; k++ )
    {
        const char* section_name = k == 0 ? "[left_camera]" : "[right_camera]";
        static const char* param_names[] = { "f ", "fy", "Cx", "Cy", "kappa1", "kappa2", "tau1", "tau2", "kappa3", 0 }; // note the comma after "Cy": without it the two literals concatenate into one bogus name
        const char* section_start = strstr( info, section_name );
        CvMat* intrinsic = k == 0 ? &l_intrinsic : &r_intrinsic;
        CvMat* distortion = k == 0 ? &l_distortion : &r_distortion;
        CvMat* rectification = k == 0 ? &l_rect : &r_rect;
        IplImage** dst = k == 0 ? ml : mr;
        if( !section_start )
            break;
        section_start += strlen(section_name);
        for( j = 0; param_names[j] != 0; j++ )
        {
            const char* param_value_start = strstr(section_start, param_names[j]);
            float val=0;
            if(!param_value_start)
                break;
            sscanf(param_value_start + strlen(param_names[j]), "%f", &val);
            if( j < 4 )
                intrinsic->data.fl[j == 0 ? 0 : j == 1 ? 4 : j == 2 ? 2 : 5] = val;
            else
                distortion->data.fl[j - 4] = val;
        }
        if( param_names[j] != 0 )
            break;

        // some sanity check for the principal point
        if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.1 ||
            fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.1 )
        {
            cvScale( intrinsic, intrinsic, 0.5 ); // try the corrected intrinsic matrix for 2x lower resolution (intrinsic is already a CvMat*, so no &)
            if( fabs(mx->width*0.5 - intrinsic->data.fl[2]) > mx->width*0.05 ||
                fabs(my->height*0.5 - intrinsic->data.fl[5]) > my->height*0.05 )
                cvScale( intrinsic, intrinsic, 2 ); // revert it back if the new variant is not much better
            intrinsic->data.fl[8] = 1;
        }

        cvInitUndistortRectifyMap( intrinsic, distortion,
                    rectification, intrinsic, mx, my );
        cvConvertMaps( mx, my, dst[0], dst[1] );
    }

    cvReleaseImage( &mx );
    cvReleaseImage( &my );
    return k >= 2;
}
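The loop above locates each camera section with strstr and then pulls every parameter with another strstr plus sscanf, so the info blob is essentially a flat "name value" list per section. A hypothetical blob in the shape the parser expects (all values invented for illustration; the real Videre calibration string carries additional fields):

const char* info_example =
    "[left_camera]\n"
    "f  600.0\nfy 600.0\nCx 320.0\nCy 240.0\n"
    "kappa1 -0.20\nkappa2 0.05\ntau1 0.0\ntau2 0.0\nkappa3 0.0\n"
    "[right_camera]\n"
    "f  600.0\nfy 600.0\nCx 318.0\nCy 242.0\n"
    "kappa1 -0.21\nkappa2 0.05\ntau1 0.0\ntau2 0.0\nkappa3 0.0\n";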
Example #3
size_t OpenCVImage::getWidth() const
{
    return m_img ? cvGetSize(m_img).width : 0;
}
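The other accessors of this wrapper presumably follow the same null-guard pattern; a sketch of the matching height accessor (assumed, not shown in the source):

size_t OpenCVImage::getHeight() const
{
    return m_img ? cvGetSize(m_img).height : 0; // 0 when no image is attached
}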
Example #4
void face_detect_crop(IplImage * input,IplImage * output)
{

    IplImage * img;
    img = cvCreateImage(cvGetSize(input),IPL_DEPTH_8U,1);
    cvCvtColor(input,img,CV_RGB2GRAY);//convert input to grayscale and store in img
    int face_origin_x,face_origin_y,width,height;//variables to crop face (currently unused)


    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 ); //load the face detection cascade
    storage = cvCreateMemStorage(0);
    int scale = 1;
    CvPoint pt1,pt2;
    int face_number;

    CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,1.1, 2, CV_HAAR_DO_CANNY_PRUNING,cvSize(40, 40) );
    for( face_number = 0; face_number < (faces ? faces->total : 0); face_number++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces, face_number );

        //Specifies the points for rectangle.
        /* pt1_____________

           |              |

           |              |

           |              |

           |_____________pt2 */
        pt1.x = r->x*scale;
        pt2.x = (r->x+r->width)*scale;
        pt1.y = r->y*scale;
        pt2.y = (r->y+r->height)*scale;
        cvRectangle( input, pt1, pt2, CV_RGB(255,255,255), 1, 8, 0 );
        CvRect rs=*r;
        //cvNamedWindow("i-O", 1);
        //cvShowImage("i-O",input);
        //cvWaitKey(0);
        cvSetImageROI(img,rs);
    }
    IplImage * frame;
    CvSize s1= {48,48};
    frame=cvCreateImage(s1,IPL_DEPTH_8U,1);

    cvResize(img,frame);
    cvCvtColor(frame,output,CV_GRAY2RGB);

    CvPoint pt;
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name_eye, 0, 0, 0 ); //load the eye detection cascade
    CvSeq* faces1 = cvHaarDetectObjects( input, cascade, storage,1.1, 2, CV_HAAR_DO_CANNY_PRUNING,cvSize(40, 40) );
    for( face_number = 0; face_number < (faces1 ? faces1->total : 0); face_number++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces1, face_number );
        pt.x = (r->x*scale);
        pt2.x = ((r->x+r->width)*scale);
        pt.y = (r->y*scale);
        pt2.y = ((r->y+r->height)*scale);
        cvRectangle( input, pt, pt2, CV_RGB(0,255,255), 1, 8, 0 );
    }

    cvReleaseImage(&img);   //release intermediates (leaked in the original)
    cvReleaseImage(&frame);
}
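A minimal usage sketch, assuming the cascade globals point at valid Haar XML files and that "person.jpg" exists (both file names are illustrative); the output image must be 48x48 with 3 channels, since that is what the cvResize/cvCvtColor pair above produces:

IplImage* input  = cvLoadImage("person.jpg", CV_LOAD_IMAGE_COLOR);
IplImage* output = cvCreateImage(cvSize(48,48), IPL_DEPTH_8U, 3);
face_detect_crop(input, output);
cvSaveImage("face48.png", output, 0);
cvReleaseImage(&input);
cvReleaseImage(&output);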
Example #5
int track( IplImage* frame, int flag,int Cx,int Cy,int R )
{

    {

        int i, bin_w, c;

        LOGE("#######################Check1############################");

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
            hue = cvCreateImage( cvGetSize(frame), 8, 1 );
            mask = cvCreateImage( cvGetSize(frame), 8, 1 );
            backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
            hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
            histimg = cvCreateImage( cvSize(320,200), 8, 3 );
            cvZero( histimg );
            LOGE("######################Check2###########################");
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, hsv, CV_BGR2HSV );


        {
            int _vmin = vmin, _vmax = vmax;

            cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
                        cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
            cvSplit( hsv, hue, 0, 0, 0 );
            LOGE("###########################Check3######################");
            if(flag==0)
            {
            	LOGE("###############Initialized#############################");
				selection.x=Cx-R;
				selection.y=Cy-R;
				selection.height=2*R;
				selection.width=2*R;
                float max_val = 0.f;
                cvSetImageROI( hue, selection );
                cvSetImageROI( mask, selection );
                cvCalcHist( &hue, hist, 0, mask );
                cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
                cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
                cvResetImageROI( hue );
                cvResetImageROI( mask );
                track_window = selection;
                track_object = 1;

                cvZero( histimg );
                bin_w = histimg->width / hdims;
                for( i = 0; i < hdims; i++ )
                {
                    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
                    CvScalar color = hsv2rgb(i*180.f/hdims);
                    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
                                 cvPoint((i+1)*bin_w,histimg->height - val),
                                 color, -1, 8, 0 );
                }
                LOGE("##############Check4#########################");
            }
            LOGE("##############Check5#########################");
            cvCalcBackProject( &hue, backproject, hist );
            cvAnd( backproject, mask, backproject, 0 );
            cvCamShift( backproject, track_window,
                        cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                        &track_comp, &track_box );
            track_window = track_comp.rect;
            char buffer[50];
            sprintf(buffer,"vals= %d %d and %d",track_window.x,track_window.y,track_window.width);
            LOGE(buffer);
            if( backproject_mode )
                cvCvtColor( backproject, image, CV_GRAY2BGR );
            if( image->origin )
                track_box.angle = -track_box.angle;
            cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
        }

        if( select_object && selection.width > 0 && selection.height > 0 )
        {
            cvSetImageROI( image, selection );
            cvXorS( image, cvScalarAll(255), image, 0 );
            cvResetImageROI( image );
        }

        LOGE("!!!!!!!!!!!!!!!!!!Done Tracking!!!!!!!!!!!!!!!!!!!!!!!!!!!!");


    }



    return 0;
}
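A sketch of how this entry point is presumably driven (the capture loop is illustrative): a call with flag==0 builds the hue histogram from the circle at (Cx,Cy) with radius R, and every later call just tracks it with CamShift.

CvCapture* cap = cvCaptureFromCAM(0);
IplImage* f;
int first = 1;
while ((f = cvQueryFrame(cap)) != NULL) {
    track(f, first ? 0 : 1, 160, 120, 30); /* init once near the image center, then track */
    first = 0;
}
cvReleaseCapture(&cap);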
//////////////////////////////////
// main()
//
int _tmain(int argc, _TCHAR* argv[])
{
//	try_conv();
	if( !initAll() ) 
		exitProgram(-1);

	// Capture and display video frames until a face
	// is detected
	int frame_count = 0;
	while( (char)27!=cvWaitKey(1) )
	{
		//Retrieve next image and 
		// Look for a face in the next video frame
		
		//read into pfd_pVideoFrameCopy
		if (!captureVideoFrame()){
			if (frame_count==0)
				throw exception("Failed before reading anything");
			break; //end of video..
		}
		++frame_count;

		CvSeq* pSeq = 0;
		detectFaces(pfd_pVideoFrameCopy,&pSeq);
		
		//Do some filtration of pSeq into pSeqOut, based on history etc.,
		//update data structures (history, face threads etc.)
		list<Face> & faces_in_this_frame = FdProcessFaces(pfd_pVideoFrameCopy,pSeq);

		//== draw rectangle for each detected face ==
		if (!faces_in_this_frame.empty()){	//faces detected (??)
			int i = 0;
			for(list<Face>::iterator face_itr = faces_in_this_frame.begin(); face_itr != faces_in_this_frame.end(); ++face_itr)
			{
				CvPoint pt1 = cvPoint(face_itr->x,face_itr->y);
				CvPoint pt2 = cvPoint(face_itr->x + face_itr->width,face_itr->y + face_itr->height);
				if (face_itr->frame_id == frame_count) //detected for this frame
					cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i++%3],3,8,0);
				else //from a previous frame
					cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i++%3],1,4,0);
			}
		}else{ //no faces detected
			Sleep(100);
		}

		cvShowImage( DISPLAY_WINDOW, pfd_pVideoFrameCopy );
		cvReleaseImage(&pfd_pVideoFrameCopy);
	
	} //end input while
	cout << "==========================================================" << endl;
	cout << "========== Input finished ================================" << endl;
	cout << "==========================================================" << endl << endl;
	
	cout << "Press a key to continue with history playback" <<endl;
	char cc = fgetc(stdin);


	cout << "==========================================================" << endl;
	cout << "==== Playback history + rectangles +                 =====" << endl;
	cout << "==== create output video(s)						  =====" << endl;
	cout << "==========================================================" << endl << endl;
	list<FDHistoryEntry> & pHistory = FdGetHistorySeq();
	
	//== VIDEO WRITER START =====================
	int isColor = 1;
	int fps     = 12;  // alternatively 25 or 30
	int frameW  = 640; // 744 for firewire cameras
	int frameH  = 480; // 480 for firewire cameras
	CvVideoWriter * playbackVidWriter=cvCreateVideoWriter((OUTPUT_PLAYBACK_VIDEOS_DIR + "\\playback.avi").c_str(),
								PFD_VIDEO_OUTPUT_FORMAT,
							   fps,cvSize(frameW,frameH),isColor);
	CvVideoWriter *  croppedVidWriter = 0;
	if (!playbackVidWriter) {
		cerr << "can't create vid writer" << endl;
		exitProgram(-1);
	}
	bool wasWrittenToVideo = false;
	//== VIDEO WRITER END =====================

	int index = 0;
	// play recorded sequence----------------------------
	// i.e. just what's in the history
	int playback_counter = 0;

	cout << "start finding consensus rect " << endl;
	//find min max
	bool found =false;
	int min_x = INT_MAX,//pFaceRect->x,
		max_x = 0,//pFaceRect->x+pFaceRect->width,
		min_y = INT_MAX,//pFaceRect->y,
		max_y = 0;//pFaceRect->y+pFaceRect->height;
	for (list<FDHistoryEntry>::iterator itr = pHistory.begin() ; itr != pHistory.end(); ++itr)
	{
		CvSeq* pFacesSeq = itr->pFacesSeq;
		assert(pFacesSeq);
		//TODO Might want to convert to Face here
		CvRect * pFaceRect = (CvRect*)cvGetSeqElem(pFacesSeq, 0); //works only on first rec series
		if (pFaceRect){
			found = true;
			if (pFaceRect->x < min_x) min_x = pFaceRect->x;
			if (pFaceRect->x+pFaceRect->width > max_x) max_x = pFaceRect->x + pFaceRect->width;
			
			if (pFaceRect->y < min_y) min_y = pFaceRect->y;
			if (pFaceRect->y+pFaceRect->height > max_y) max_y =  pFaceRect->y+pFaceRect->height;
		}
	}
	//assert(found); //some rect in history..
	CvRect consensus_rect;
	consensus_rect.x = min_x;
	consensus_rect.y = min_y;
	consensus_rect.width  = max_x - min_x;
	consensus_rect.height = max_y - min_y;

	Sleep(3000); //just to make sure that pruneHistory isn't modifying..
	cout << "start playback loop " << endl;
	int k = 0;
	for (list<FDHistoryEntry>::iterator itr = pHistory.begin() ; itr != pHistory.end(); ++itr)
	{
		cout << ++k << endl;
		//cvResetImageROI(history_itr->pFrame);  //now reset by FDFaceThread
		pfd_pVideoFrameCopy = cvCreateImage( cvGetSize(itr->pFrame ), 8, 3 ); //TODO query image for its properties
		cvCopy( itr->pFrame , pfd_pVideoFrameCopy, 0 );
		CvSeq* pFacesSeq = itr->pFacesSeq;
#ifndef NO_RECTS_ON_PLAYBACK
		for(int i = 0 ;i < pFacesSeq->total ;i++){				
			Face * pFaceRect = (Face*)cvGetSeqElem(pFacesSeq, i);
			assert(pFaceRect != NULL);
			CvPoint pt1 = cvPoint(pFaceRect->x,pFaceRect->y);
			CvPoint pt2 = cvPoint(pFaceRect->x + pFaceRect->width,pFaceRect->y + pFaceRect->height);
			if (itr->frame_id == pFaceRect->frame_id)
				cvRectangle( pfd_pVideoFrameCopy, pt1, pt2,	 colorArr[i%3],3,8,0);
			else
				cvRectangle( pfd_pVideoFrameCopy, pt1, pt2, colorArr[i%3],1,4,0);
		}
#endif
		if (pFacesSeq->total > 0) 
		{	
			assert(found);
			//write 1st sequence if exists to cropped vid
			if (!croppedVidWriter)
				croppedVidWriter=cvCreateVideoWriter((OUTPUT_PLAYBACK_VIDEOS_DIR + "\\cropped_playback.avi").c_str(),
									PFD_VIDEO_OUTPUT_FORMAT,
	 						   fps,cvSize(max_x-min_x,max_y-min_y),isColor);
			assert(croppedVidWriter);


			cvResetImageROI(pfd_pVideoFrameCopy);
			cvSetImageROI(pfd_pVideoFrameCopy,consensus_rect);
			//write cropped image to video file
			IplImage *croppedImg = cvCreateImage(cvGetSize(pfd_pVideoFrameCopy),
								   pfd_pVideoFrameCopy->depth,
								   pfd_pVideoFrameCopy->nChannels);	
			assert(croppedImg);
			cvCopy(pfd_pVideoFrameCopy, croppedImg, NULL);
			assert(croppedVidWriter);
			cvWriteFrame(croppedVidWriter,croppedImg);
			cvReleaseImage(&croppedImg);
		}

		cvShowImage( DISPLAY_WINDOW, pfd_pVideoFrameCopy );
		cvResetImageROI(pfd_pVideoFrameCopy); //CROP_PLAYBACK_FACE
		cvWriteFrame(playbackVidWriter,pfd_pVideoFrameCopy);
		if( (char)27==cvWaitKey(1) ) break;//exitProgram(0);
		Sleep(50);	
		++playback_counter;	
	}

	
	cvReleaseVideoWriter(&playbackVidWriter);
	cvReleaseVideoWriter(&croppedVidWriter);
	exitProgram(0);
	//-----------------------------------------------------------
	//-----------------------------------------------------------
	//-----------------------------------------------------------
}
Example #7
void JDDetect(IplImage* sourceImage)
{
	const int max_corners = 3;
	const char* filename = "JINGLONGLONG.jpg";
	int cornerCount = max_corners;//maximum number of corners

	//array holding the detected corner coordinates
	CvPoint2D32f corners[max_corners];

	IplImage*grayImage = 0, *corners1 = 0, *corners2 = 0;

	cvNamedWindow("IMAGE", CV_WINDOW_AUTOSIZE);
	//load the image and run detection
	//sourceImage=cvLoadImage(filename,1);
	grayImage = cvCreateImage(cvGetSize(sourceImage), IPL_DEPTH_8U, 1);

	cvCvtColor(sourceImage,grayImage,CV_RGB2GRAY);	//convert to grayscale; aliasing grayImage to sourceImage
							//(as the original did) leaks this buffer and makes the
							//cvReleaseImage(&grayImage) below free the caller's image
	corners1 = cvCreateImage(cvGetSize(sourceImage), IPL_DEPTH_32F, 1);
	corners2 = cvCreateImage(cvGetSize(sourceImage), IPL_DEPTH_32F, 1);

	//void cvGoodFeaturesToTrack( 
	//	const CvArr* image,
	//	CvArr* eig_image, 
	//	CvArr* temp_image,
	//	CvPoint2D32f* corners,
	//	int* corner_count,
	//	double quality_level,
	//	double min_distance,
	//	const CvArr* mask=NULL
	//	int block_size   =3,
	//	int use_harris =0,
	//	double k = 0.4)
	cvGoodFeaturesToTrack(
		grayImage,          //	const CvArr* image,           
		corners1,           //	CvArr* eig_image,           
		corners2,			//	CvArr* temp_image,
		corners,            //	CvPoint2D32f* corners,        
		&cornerCount,       //	int* corner_count,          
		0.05,               //	double quality_level,    
		10,                  //	double min_distance,   
		0,                  //	const CvArr* mask=NULL
		3,					//	int block_size   =3,
		0,                  //	int use_harris =0,                                              
		0.4					//	double k = 0.4)
		);
	if (cornerCount)
	{
		int i = 0;
		CvScalar color = CV_RGB(0, 0, 255);
		for (i; i<cornerCount; i++)
			cvCircle(
			sourceImage,     //image to draw on
			cvPoint((int)(corners[i].x), (int)(corners[i].y)),           //center
			6,   //radius
			color,//color
			2,    //seems to be the line thickness
			CV_AA,//line type
			0    //number of fractional bits in the center coordinates and radius
			);
	}
	//cvShowImage("IMAGE", sourceImage);

	//cvWaitKey(0);
	//release resources
	cvReleaseImage(&corners1);
	cvReleaseImage(&corners2);
	cvReleaseImage(&grayImage);
	cvDestroyWindow("IMAGE");
}
Example #8
bool Classifier::run(const IplImage *frame, CObjectList *objects, bool scored)
{
    double xDiff = 0, yDiff = 0;
    optical_flow(frame, &xDiff, &yDiff);
    
    totalXDiff += xDiff;
    totalYDiff += yDiff;
    if (!scored)
        return true;
    
    cout << "--------------------------------------" << endl;
    cout << "\t\tRun" << endl;
    
    assert((frame != NULL) && (objects != NULL));
    
    printf("Let's go!\n");
    
    for (int i = 0; i < (int)prevObjects.size(); ++i) {
        if (prevObjects[i].rect.x > -20 && prevObjects[i].rect.x < frame->width 
         && prevObjects[i].rect.y > -20 && prevObjects[i].rect.y < frame->height) {
            objects->push_back(prevObjects[i]);
            cout << prevObjects[i].label << " is now at (" << prevObjects[i].rect.x << ", " << prevObjects[i].rect.y << ")" << endl;
        }
    }
    
    //printf("HEY OPTICAL FLOW!!!! %f %f\n", totalXDiff, totalYDiff);
    
    // move old objects
    for (int i = 0; i < (int)objects->size(); ++i) {
        (*objects)[i].rect.x -= totalXDiff * 3;
        (*objects)[i].rect.y -= totalYDiff * 3;
    }
    
    cout << "Flow: " << totalXDiff << " " << totalYDiff << endl;
    totalYDiff = 0;
    totalXDiff = 0;
    
    
    // Convert to grayscale.
    IplImage *gray  = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
    cvCvtColor(frame, gray, CV_BGR2GRAY);
    
    // Resize by half first, as per the handout.
    double scale = 2.0;
    IplImage *dst = cvCreateImage(cvSize(gray->width  / scale, gray->height  / scale), gray->depth,  gray->nChannels);
    cvResize(gray, dst);

    printf("About to do SURF\n");
    CvSeq *keypoints = 0, *descriptors = 0;
    CvSURFParams params = cvSURFParams(100, SURF_SIZE == 128);
    cvExtractSURF(dst, 0, &keypoints, &descriptors, storage, params);
    
    cout << "desc: " << descriptors->total << endl;
    if (descriptors->total == 0) return false;
    
    vector<float> desc;
    desc.resize(descriptors->total * descriptors->elem_size/sizeof(float));
    cvCvtSeqToArray(descriptors, &desc[0]);
    
    vector<CvSURFPoint> keypts;
    keypts.resize(keypoints->total);
    cvCvtSeqToArray(keypoints, &keypts[0]);
    
    vector<float *> features;
    int where = 0;
    for (int pt = 0; pt < keypoints->total; ++pt) {
        float *f = new float[SURF_SIZE];
        for (int j = 0; j < SURF_SIZE; ++j) {
            f[j] = desc[where];
            ++where;
        }
        features.push_back(f);
    }
    printf("Done SURF\n");

    printf("Clustering...\n");
    vector<int> cluster(features.size());
    for (int i = 0; i < (int)features.size(); ++i) {
        cluster[i] = best_cluster(centers, features[i]);
    }
    printf("Done clustering...\n");
    
    vector<FoundObject> newObjects;
    run_boxscan(dst, cluster, keypts, features, newObjects, objects);
    for (int i = 0; i < (int)newObjects.size(); ++i) {
        if (newObjects[i].object.rect.x > -20 && newObjects[i].object.rect.x < frame->width 
         && newObjects[i].object.rect.y > -20 && newObjects[i].object.rect.y < frame->height) {
            objects->push_back(newObjects[i].object);
            cout << "Found object: " << newObjects[i].object.label << " at (" ;
            cout << newObjects[i].object.rect.x << ", " << newObjects[i].object.rect.y << ")" << endl;
        }
    }
    
    prevObjects = *objects;
    
    for (int i = 0; i < (int)features.size(); ++i)
        delete [] features[i];  // free the per-keypoint buffers (assuming run_boxscan does not retain them)
    cvReleaseImage(&gray);
    cvReleaseImage(&dst);       // dst was leaked in the original
  
    return true;
}
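best_cluster and run_boxscan are project helpers that are not part of this snippet. Presumably best_cluster returns the index of the nearest SURF_SIZE-dimensional cluster center; a sketch under that assumption (the type of centers is a guess):

// Hypothetical: nearest center by squared Euclidean distance.
static int best_cluster(const vector<float*> &centers, const float *f)
{
    int best = 0;
    double bestDist = 1e300;
    for (int c = 0; c < (int)centers.size(); ++c) {
        double d = 0;
        for (int j = 0; j < SURF_SIZE; ++j) {
            double diff = centers[c][j] - f[j];
            d += diff * diff;
        }
        if (d < bestDist) { bestDist = d; best = c; }
    }
    return best;
}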
Example #9
static void
cvTsCalcHist( IplImage** _images, CvHistogram* hist, IplImage* _mask, int* channels )
{
    int x, y, k, cdims;
    union
    {
        float* fl;
        uchar* ptr;
    }
    plane[CV_MAX_DIM];
    int nch[CV_MAX_DIM];
    int dims[CV_MAX_DIM];
    int uniform = CV_IS_UNIFORM_HIST(hist);
    CvSize img_size = cvGetSize(_images[0]);
    CvMat images[CV_MAX_DIM], mask = cvMat(1,1,CV_8U);
    int img_depth = _images[0]->depth;

    cdims = cvGetDims( hist->bins, dims );

    cvZero( hist->bins );

    for( k = 0; k < cdims; k++ )
    {
        cvGetMat( _images[k], &images[k] );
        nch[k] = _images[k]->nChannels;
    }

    if( _mask )
        cvGetMat( _mask, &mask );

    for( y = 0; y < img_size.height; y++ )
    {
        const uchar* mptr = _mask ? &CV_MAT_ELEM(mask, uchar, y, 0 ) : 0;

        if( img_depth == IPL_DEPTH_8U )
            for( k = 0; k < cdims; k++ )
                plane[k].ptr = &CV_MAT_ELEM(images[k], uchar, y, 0 ) + channels[k];
        else
            for( k = 0; k < cdims; k++ )
                plane[k].fl = &CV_MAT_ELEM(images[k], float, y, 0 ) + channels[k];

        for( x = 0; x < img_size.width; x++ )
        {
            float val[CV_MAX_DIM];
            int idx[CV_MAX_DIM];
            
            if( mptr && !mptr[x] )
                continue;
            if( img_depth == IPL_DEPTH_8U )
                for( k = 0; k < cdims; k++ )
                    val[k] = plane[k].ptr[x*nch[k]];
            else
                for( k = 0; k < cdims; k++ )
                    val[k] = plane[k].fl[x*nch[k]];

            idx[cdims-1] = -1;

            if( uniform )
            {
                for( k = 0; k < cdims; k++ )
                {
                    double v = val[k], lo = hist->thresh[k][0], hi = hist->thresh[k][1];
                    idx[k] = cvFloor((v - lo)*dims[k]/(hi - lo));
                    if( idx[k] < 0 || idx[k] >= dims[k] )
                        break;
                }
            }
            else
            {
                for( k = 0; k < cdims; k++ )
                {
                    float v = val[k];
                    float* t = hist->thresh2[k];
                    int j, n = dims[k];

                    for( j = 0; j <= n; j++ )
                        if( v < t[j] )
                            break;
                    if( j <= 0 || j > n )
                        break;
                    idx[k] = j-1;
                }
            }

            if( k < cdims )
                continue;

            (*(float*)cvPtrND( hist->bins, idx ))++;
        }
    }
}
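A worked instance of the uniform-bin lookup above:

/* For a pixel value v = 95 in a histogram with range [lo,hi) = [0,180)
   and dims[k] = 30 bins:
       idx[k] = cvFloor((95 - 0) * 30 / (180 - 0)) = cvFloor(15.83) = 15
   A value outside [lo,hi) yields an out-of-range idx[k], the inner loop
   breaks, and the `if( k < cdims ) continue;` skips that pixel. */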
Example #10
int main(int argc, char* argv[])
{
	IplImage* src = cvLoadImage(argv[1],CV_LOAD_IMAGE_COLOR); // force 3 channels so the BGR->HSV conversion below is valid
	if(!src){
		printf("Could not load image file: %s\n",argv[1]);
		return 1;
	}

	// get HSV form of image
	IplImage* hsvSrc = cvCreateImage(cvGetSize(src),8,3);
	cvCvtColor(src,hsvSrc,CV_BGR2HSV);

	// initialize Histogram
	int numBins = 256;
	CvHistogram *hist = cvCreateHist(1,&numBins,CV_HIST_ARRAY,NULL,1);
	cvClearHist(hist);

	// Separate hsv image into 3 channels
	IplImage* hueCh = cvCreateImage(cvGetSize(hsvSrc),8,1);
	IplImage* satCh = cvCreateImage(cvGetSize(hsvSrc),8,1);
	IplImage* valCh = cvCreateImage(cvGetSize(hsvSrc),8,1);

	cvSplit(hsvSrc,hueCh,satCh,valCh,NULL);


	// **** Rendering Histogram ****

	// --- Hue Channel ---
	cvCalcHist(&hueCh, hist, 0, NULL);
	IplImage* imgHistHue = drawHistogram(hist);
	cvClearHist(hist);

	// --- Sat Channel ---
	cvCalcHist(&satCh, hist, 0, NULL);
	IplImage* imgHistSat = drawHistogram(hist);
	cvClearHist(hist);

	// --- Val Channel ---
	cvCalcHist(&valCh, hist, 0, NULL);
	IplImage* imgHistVal = drawHistogram(hist);
	cvClearHist(hist);

	cvStartWindowThread();

	// display histogram
	cvNamedWindow("Hue",CV_WINDOW_NORMAL);
	cvNamedWindow("Sat",CV_WINDOW_NORMAL);
	cvNamedWindow("Val",CV_WINDOW_NORMAL);

	cvShowImage("Hue",imgHistHue);
	cvShowImage("Sat",imgHistSat);
	cvShowImage("Val",imgHistVal);


	// wait for key press
	cvWaitKey(0);

	// release memory
	cvDestroyAllWindows();
	cvReleaseImage(&src);
	cvReleaseImage(&hsvSrc);	// leaked in the original
	cvReleaseHist(&hist);

	cvReleaseImage(&hueCh);
	cvReleaseImage(&satCh);
	cvReleaseImage(&valCh);

	cvReleaseImage(&imgHistHue);
	cvReleaseImage(&imgHistSat);
	cvReleaseImage(&imgHistVal);

	return 0;
}// end of main
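drawHistogram is not defined in this snippet; a minimal sketch of such a renderer for the 1-D, 256-bin histogram used here (the 100-pixel height and white-on-black style are illustrative):

IplImage* drawHistogram(CvHistogram* hist)
{
	int numBins = 256, height = 100;
	float maxVal = 0.f;
	cvGetMinMaxHistValue(hist, 0, &maxVal, 0, 0);

	IplImage* imgHist = cvCreateImage(cvSize(numBins, height), 8, 1);
	cvZero(imgHist);
	for (int i = 0; i < numBins; i++) {
		float v = (float)cvGetReal1D(hist->bins, i);
		int h = maxVal > 0 ? cvRound(v * height / maxVal) : 0; // scale each bin to the image height
		cvLine(imgHist, cvPoint(i, height), cvPoint(i, height - h),
		       cvScalarAll(255), 1, 8, 0);
	}
	return imgHist;
}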
int main(int argc, char *argv[]) {
   printf("\nentered main\n");
   IplImage* img;
   IplImage* img1;
   CvCapture*  capture;
   int is_data_ready = 0;
   int width, height, key;
   width       = 640;
   height      = 480;
   printf("\nvars declared\n");
   capture = cvCaptureFromCAM(0);
   img = cvQueryFrame(capture);  // the frame buffer is owned by the capture; the extra cvCreateImage here only leaked
   img1 = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
   cvZero(img1);
   printf("\nstart capture\n");
   
/*
*********************************************************************/


   int  imgsize = img1->imageSize;


   int sock;   /* fd for socket connection */
   struct sockaddr_in server;   /* Socket info. for server */
   struct sockaddr_in client;   /* Socket info. about us */
   socklen_t clientLen;   /* Length of client socket struct. */
   struct hostent *hp;   /* Return value from gethostbyname() */
   char buf[BUFFER_SIZE];   /* Received data buffer */
   int i, bytes;   /* loop counter and send() result */
printf("\ndeclaration\n");
   if (argc != 2)
      die("Usage: client hostname");
     
   /* Open 3 sockets and send same message each time. */

   for (i = 0; i < 3; ++i)
   {
      /* Open a socket --- not bound yet. */
      /* Internet TCP type. */
      if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0)
         pdie("Opening stream socket");
      
      /* Prepare to connect to server. */
      bzero((char *) &server, sizeof(server));
      server.sin_family = AF_INET;
      if ((hp = gethostbyname(argv[1])) == NULL) {
         sprintf(buf, "%s: unknown host\n", argv[1]);
         die(buf);
      }
      bcopy(hp->h_addr, &server.sin_addr, hp->h_length);
      server.sin_port = htons((u_short) SERVER_PORT);
      
      /* Try to connect */
      if (connect(sock, (struct sockaddr *) &server, sizeof(server)) < 0)
         pdie("Connecting stream socket");
      
      /* Determine what port client's using. */
      clientLen = sizeof(client);
      if (getsockname(sock, (struct sockaddr *) &client, &clientLen))
         pdie("Getting socket name");
      
      if (clientLen != sizeof(client))
         die("getsockname() overwrote name structure");
      
      printf("Client socket has port %hu\n", ntohs(client.sin_port));
      

      
   while(1)
    {

	/* get a frame from camera */
        img = cvQueryFrame(capture);
        if (!img) break;
        //img->origin = 0;
	
	//pthread_mutex_lock(&mutex);
       	cvCvtColor(img, img1, CV_BGR2GRAY);
	img1->origin = 0;
        is_data_ready = 1;
//	pthread_mutex_unlock(&mutex);


         /* send the grayscaled frame, thread safe */
  //      pthread_mutex_lock(&mutex);
        if (is_data_ready) {
            bytes = send(sock, img1->imageData, imgsize, 0);
            is_data_ready = 0;
        }
    //    pthread_mutex_unlock(&mutex);
 
        /* if something went wrong, restart the connection */
        if (bytes != imgsize) {
            fprintf(stderr, "Connection closed.\n");
            //close(sock);

        }
 
        /* have we terminated yet? */
        pthread_testcancel();
 
        /* no, take a rest for a while */
        usleep(1000);
    }
      
      printf("C: %s\n", buf);
      
      /* Close this connection. */
      close(sock);
   }

   exit(0);

}
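One more note on the send path above: on a stream socket send() may transmit fewer bytes than requested, so `bytes != imgsize` can trigger even on a healthy connection. The usual fix is a loop that drains the buffer; a sketch:

/* Keep calling send() until the whole buffer is out or an error occurs. */
static int sendall(int sock, const char *buf, int len)
{
    int total = 0;
    while (total < len) {
        int n = send(sock, buf + total, len - total, 0);
        if (n <= 0)
            return -1; /* error or peer closed the connection */
        total += n;
    }
    return total;
}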
std::list<Garbage*>
GarbageRecognition::garbageList(IplImage * src, IplImage * model){

	std::list<Garbage*> garbageList;

	//cvNamedWindow("output",CV_WINDOW_AUTOSIZE);
	//object model

	//image for the histogram-based filter
	//could be a parameter

	utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
	CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

	//~ int frameWidth=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);
	//~ int frameHeight=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);



	//gets a frame for setting  image size
	//CvSize srcSize = cvSize(frameWidth,frameHeight);
	CvSize srcSize = cvGetSize(src);

	//images for HSV conversion
	IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
	IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );



	//Image for thresholding
	IplImage * threshImage=cvCreateImage(srcSize,8,1);

	//image for equalization
	IplImage * equalizedImage=cvCreateImage(srcSize,8,1);

	//image for Morphing operations(Dilate-erode)
	IplImage * morphImage=cvCreateImage(srcSize,8,1);

	//image for image smoothing
	IplImage * smoothImage=cvCreateImage(srcSize,8,1);

	//image for contour-finding operations
	IplImage * contourImage=cvCreateImage(srcSize,8,3);

	int frameCounter=1;
	int cont_index=0;

	//convolution kernel for morph operations
	IplConvKernel* element;

	CvRect boundingRect;

	//contours
	CvSeq * contours;

	//Main loop


	frameCounter++;
//	printf("frame number:%d\n",frameCounter);

	//convert image to hsv
	cvCvtColor( src, hsv, CV_BGR2HSV );
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );

	//equalize Saturation Channel image
	cvEqualizeHist(s_plane,equalizedImage);

	//threshold the equalized Saturation channel image
	cvThreshold(equalizedImage,threshImage,THRESHOLD_VALUE,255,
	CV_THRESH_BINARY);

	//apply morphologic operations
	element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
		MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
		CV_SHAPE_RECT, NULL);

	cvDilate(threshImage,morphImage,element,MORPH_DILATE_ITER);
	cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);

	//apply smooth gaussian-filter
	cvSmooth(morphImage,smoothImage,CV_GAUSSIAN,3,0,0,0);

	//get all contours
	contours=myFindContours(smoothImage);

	cont_index=0;
	cvCopy(src,contourImage,0);
	


	while(contours!=NULL){

		CvSeq * aContour=getPolygon(contours);
		utils::Contours * ct = new Contours(aContour);

	
	    /*int	pf = ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER);

		int raf = ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO);

		// int af = ct->areaFilter(MINCONTOUR_AREA,MAXCONTOUR_AREA);
		int baf = ct->boxAreaFilter(BOXFILTER_TOLERANCE);

        int hmf = ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN);
        */


		//apply filters

    
		if( ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER) && 
			ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO) && 
			ct->boxAreaFilter(BOXFILTER_TOLERANCE) && 	
			ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN)){
				
				//if passed filters
				ct->printContour(3,cvScalar(127,127,0,0),
					contourImage);
				
				//get contour bounding box
				boundingRect=cvBoundingRect(ct->getContour(),0);
				cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
						cvPoint(boundingRect.x+boundingRect.width,
						boundingRect.y+boundingRect.height),
						_GREEN,1,8,0);
				//build garbage List
			
				//printf(" c %d,%d\n",boundingRect.x,boundingRect.y);

				utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
					boundingRect.y,boundingRect.width,boundingRect.height);



				utils::Garbage * aGarbage = new utils::Garbage(r);

				garbageList.push_back(aGarbage);


			}

		delete ct;
		cvReleaseMemStorage( &aContour->storage );
		contours=contours->h_next;
		cont_index++;
	}

   // cvShowImage("output",contourImage);
   // cvWaitKey(0);
	delete h;

	cvReleaseHist(&testImageHistogram);
	cvReleaseStructuringElement(&element);	// leaked in the original
	//cvReleaseMemStorage( &contours->storage );
	cvReleaseImage(&threshImage);
	cvReleaseImage(&equalizedImage);
	cvReleaseImage(&morphImage);
	cvReleaseImage(&smoothImage);
	cvReleaseImage(&contourImage);
	
	cvReleaseImage(&hsv);
	cvReleaseImage(&h_plane);
	cvReleaseImage(&s_plane);
	cvReleaseImage(&v_plane);

	return garbageList;
}
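myFindContours and getPolygon are project helpers not shown in this snippet. Presumably the former wraps cvFindContours on the binary image; a sketch under that assumption (retrieval mode and storage handling are guesses):

CvSeq* myFindContours(IplImage* binary)
{
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* contours = 0;
	cvFindContours(binary, storage, &contours, sizeof(CvContour),
	               CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
	return contours; // the caller walks the h_next chain, as garbageList does
}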
Example #13
int main(int argc, char *argv[])
{
	IplImage* img = 0;
	int height,width,step,channels;
	unsigned char *data;

	// load an image
	img= cvLoadImage("kantai.png");
	if(!img){
		printf("Could not load image file: kantai.png\n");
		exit(0);
	}

	// get the image data
	height    = img->height;
	width     = img->width;
	step      = img->widthStep;
	channels  = img->nChannels;
	data      = (uchar *)img->imageData;

	printf("Processing a %dx%d image with %d channels\n",height,width,channels);
	printf("step = %d\n", step);

	IplImage* imgGrayscale = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); // 8-bit grayscale is enough.
	// convert to grayscale.
	cvCvtColor(img, imgGrayscale, CV_BGR2GRAY);

	// Create an image for the outputs
	IplImage* imgSobelX = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 ); // to prevent overflow.
	IplImage* imgSobelY = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
	IplImage* imgSobelAdded = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
	IplImage* imgSobel = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 ); // final image is enough to be an 8-bit plane.


	// Sobel
	cvSobel(imgGrayscale, imgSobelX, 1, 0, 3);
	cvSobel(imgGrayscale, imgSobelY, 0, 1, 3);
	cvAdd(imgSobelX, imgSobelY, imgSobelAdded);
	cvConvertScaleAbs(imgSobelAdded, imgSobel); //scaled to 8-bit level; important for visibility.


	//----------------------- OUTLINE EXTRACTION -------------------------------
	// Normal diff
	IplImage* imgNormDiff = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgNormDiff);
	norm_diff(imgNormDiff);

	// Roberts
	IplImage* imgRoberts = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgRoberts);
	roberts(imgRoberts);

	// Sobel
	IplImage* imgSobel2 = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgSobel2);
	sobel(imgSobel2);

	// Laplacian
	IplImage* imgLap = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgLap);
	laplacian(imgLap);

	//--------------------------- ENHANCEMENT --------------------------------
	// Laplacian
	IplImage* imgLap2 = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage* imgRed = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgGreen = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgBlue = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, imgRed, imgGreen, imgBlue, NULL); // note: OpenCV loads BGR, so "imgRed" actually holds the blue plane (same for the two splits below); harmless here since every channel gets the same filter

	laplacian2(imgBlue);
	laplacian2(imgGreen);
	laplacian2(imgRed);
	cvMerge(imgRed,imgGreen,imgBlue, NULL, imgLap2);

	// Variant
	IplImage* imgVariant = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage* imgRed2 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgGreen2 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgBlue2 = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, imgRed2, imgGreen2, imgBlue2, NULL);

	variant(imgBlue2);
	variant(imgGreen2);
	variant(imgRed2);
	cvMerge(imgRed2,imgGreen2,imgBlue2, NULL, imgVariant);

	// Sobel
	IplImage* imgSobel3 = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage* imgRed3 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgGreen3 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgBlue3 = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, imgRed3, imgGreen3, imgBlue3, NULL);

	sobel2(imgBlue3);
	sobel2(imgGreen3);
	sobel2(imgRed3);
	cvMerge(imgRed3,imgGreen3,imgBlue3, NULL, imgSobel3);




	// create a window
	cvNamedWindow("Original", CV_WINDOW_KEEPRATIO);

	cvNamedWindow("Normal different line", CV_WINDOW_KEEPRATIO);
	cvNamedWindow("Roberts line", CV_WINDOW_FREERATIO);
	cvNamedWindow("Sobel line", CV_WINDOW_FREERATIO);
	cvNamedWindow("Laplacian line", CV_WINDOW_KEEPRATIO);

	cvNamedWindow("Laplacian Color", CV_WINDOW_KEEPRATIO);
	cvNamedWindow("Variant", CV_WINDOW_KEEPRATIO);
	cvNamedWindow("Sobel", CV_WINDOW_KEEPRATIO);
	/*cvNamedWindow( "Sobel-x" );
  cvNamedWindow( "Sobel-y" );
  cvNamedWindow( "Sobel-Added" );
  cvNamedWindow( "Sobel-Added (scaled)" );*/

	// show the image
	cvShowImage("Original", img);
	cvShowImage("Normal different line", imgNormDiff);
	cvShowImage("Roberts line",imgRoberts);
	cvShowImage("Sobel line", imgSobel2);
	cvShowImage("Laplacian line", imgLap);

	cvShowImage("Laplacian Color", imgLap2);
	cvShowImage("Variant", imgVariant);
	cvShowImage("Sobel", imgSobel3);

	/*cvShowImage("Sobel-x", imgSobelX);
  cvShowImage("Sobel-y", imgSobelY);
  cvShowImage("Sobel-Added", imgSobelAdded);
  cvShowImage("Sobel-Added (scaled)", imgSobel);*/

	// wait for a key
	cvWaitKey(0);

	// release the image
	cvReleaseImage(&img);
	cvReleaseImage(&imgGrayscale);
	cvReleaseImage(&imgNormDiff);
	cvReleaseImage(&imgRoberts);
	cvReleaseImage(&imgSobel2);
	cvReleaseImage(&imgLap);

	cvReleaseImage(&imgLap2);
	cvReleaseImage(&imgVariant);
	cvReleaseImage(&imgSobel3);

	cvReleaseImage(&imgRed);   cvReleaseImage(&imgGreen);  cvReleaseImage(&imgBlue);
	cvReleaseImage(&imgRed2);  cvReleaseImage(&imgGreen2); cvReleaseImage(&imgBlue2);
	cvReleaseImage(&imgRed3);  cvReleaseImage(&imgGreen3); cvReleaseImage(&imgBlue3);

	cvReleaseImage(&imgSobelX);
	cvReleaseImage(&imgSobelY);
	cvReleaseImage(&imgSobelAdded);
	cvReleaseImage(&imgSobel);


	return 0;
}
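One caveat in the Sobel pass above: cvAdd sums the signed x/y responses, so edges whose gradients have opposite signs can cancel before cvConvertScaleAbs runs. A sketch of the common |gx| + |gy| variant, which could replace the cvAdd/cvConvertScaleAbs pair:

IplImage* absX = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
IplImage* absY = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
IplImage* mag  = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
cvConvertScaleAbs(imgSobelX, absX, 1, 0); /* |gx|, scaled into 8 bits */
cvConvertScaleAbs(imgSobelY, absY, 1, 0); /* |gy| */
cvAdd(absX, absY, mag, NULL);             /* |gx| + |gy| approximates the magnitude */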
Example #14
int main(int argc, char *argv[]) {
	if (argc < 3){
		printf("Usage: %s <image-file-name1> <image-file-name2>\n", argv[0]);
		exit(1);
	}

	IplImage* img1 = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
	if (!img1) {
		printf("Could not load image file: %s\n", argv[1]);
		exit(1);
	}
	IplImage* img1f = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
	cvConvertScale(img1, img1f, 1.0 / 255.0);

	IplImage* img2 = cvLoadImage(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
	if (!img2) {
		printf("Could not load image file: %s\n", argv[2]);
		exit(1);
	}
	IplImage* img2f = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
	cvConvertScale(img2, img2f, 1.0 / 255.0);

	/**
	 * Task: Homographies (5 points)
	 * 
	 * Assuming images are taken with a distortion-free pinhole camera,
	 * views with different image planes but the same projection center
	 * are related by projective mappings, so-called homographies.
	 * 
	 * - Write down a translation as a homography (on paper!).
	 * - Shift the image plane of a test image 20 pixels to the right without
	 *   changing the projection center. Use \code{cvWarpPerspective} for this.
	 * - How many point correspondences are needed, at minimum, to determine a
	 *   projective mapping between two images uniquely up to scale? Why?
	 *   (Answer in writing!)
	 */

/* TODO */
	IplImage* img_moved = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
	cv::Mat matImg1f_task1 = cv::Mat(img1f);
	cv::Mat matImgMoved = cv::Mat(img_moved);

	float data[] = { 1, 0, -20, 0, 1, 0, 0, 0, 1 };
	cv::Mat trans(3, 3, CV_32FC1, data);
	cv::warpPerspective(matImg1f_task1, matImgMoved, trans, matImgMoved.size());
	cv::namedWindow("mainWin", CV_WINDOW_AUTOSIZE);
	cv::Mat img_moved_final(img_moved);
	cv::imshow("mainWin", img_moved_final);
	cvWaitKey(0);

	/**
	 * Task: Panorama (15 points)
	 *
	 * The goal of this task is to construct a panorama from two given images.
	 * \begin{center}
	 * \includegraphics[width = 0.3\linewidth]{left.png}
	 * \includegraphics[width = 0.3\linewidth]{right.png}
	 * \end{center}
	 * 
	 * First, a perspective transformation has to be determined from the given
	 * point correspondences
	 * \begin{center}
	 * \begin{tabular}{|c|c|}
	 * \hline
	 * left image & right image \\
	 * $(x, y)$ & $(x, y)$ \\ \hline \hline
	 * (463, 164) & (225, 179)\\ \hline
	 * (530, 357) & (294, 370)\\ \hline
	 * (618, 357) & (379, 367)\\ \hline
	 * (610, 153) & (369, 168)\\ \hline
	 * \end{tabular}
	 * \end{center}
	 * with which the images can be transformed onto a common image plane.
	 * 
	 * - Compute the transformation from the given point correspondences.
	 *   Use the function \code{cvGetPerspectiveTransform}. What is the
	 *   central idea of the DLT algorithm as presented in the lecture?
	*/

/* TODO */
	CvMat *P = cvCreateMat(3, 3, CV_32FC1);
	CvPoint points1[] = { cvPoint(463, 164), cvPoint(530, 357), cvPoint(618, 357), cvPoint(610, 153) };
	CvPoint points2[] = { cvPoint(225, 179), cvPoint(294, 370), cvPoint(379, 367), cvPoint(369, 168) };
	CvPoint2D32f pt1[4], pt2[4];
	for (int i = 0; i < 4; ++i) {
		pt2[i].x = points2[i].x;
		pt2[i].y = points2[i].y;
		pt1[i].x = points1[i].x;
		pt1[i].y = points1[i].y;
	}
	cvGetPerspectiveTransform(pt1, pt2, P);
	
	/**
	 * - Determine the image size required for the panorama image.
	 */

/* TODO */
	int h = img1f->height - 1;
	int w = img1f->width - 1;
	float p1[] = { 0.0, 0.0, 1.0 };
	float p2[] = { 0.0, (float)(h), 1.0 };
	float p3[] = { (float)(w), (float)(h), 1.0 };
	float p4[] = { (float)(w), 0.0, 1.0 };

	
	cv::Mat P1 = P * cv::Mat(3, 1, CV_32FC1, p1);
	cv::Mat P2 = P * cv::Mat(3, 1, CV_32FC1, p2);
	cv::Mat P3 = P * cv::Mat(3, 1, CV_32FC1, p3);
	cv::Mat P4 = P * cv::Mat(3, 1, CV_32FC1, p4);

	// mustn't be zero
	assert(P1.at<float>(2,0) != 0 && P2.at<float>(2,0) != 0 && P3.at<float>(2,0) != 0 && P4.at<float>(2,0) != 0);

	P1 = P1 / P1.at<float>(2,0);
	P2 = P2 / P2.at<float>(2,0);
	P3 = P3 / P3.at<float>(2,0);
	P4 = P4 / P4.at<float>(2,0);



	/**
	 * - Project the left image into the image plane of the right image. Make
	 *   sure that the left image border is also projected into the panorama
	 *   image.
	 */

/* TODO */
	///////// There is probably an error somewhere around here with the size...
	std::vector<cv::Mat*> matrices;
	matrices.push_back(&P1);
	matrices.push_back(&P2);
	matrices.push_back(&P3);
	matrices.push_back(&P4);
	cv::Point minP(P1.at<float>(0,0), P1.at<float>(1,0)), maxP(P1.at<float>(0,0), P1.at<float>(1,0));
	for(int i = 0; i < matrices.size(); ++i) {
			minP.x = (int)(min(matrices[i]->at<float>(0,0), (float)minP.x));
			minP.y = (int)(min(matrices[i]->at<float>(1,0), (float)minP.y));

			maxP.x = (int)(max(matrices[i]->at<float>(0,0), (float)maxP.x)+1.0);
			maxP.y = (int)(max(matrices[i]->at<float>(1,0), (float)maxP.y)+1.0);
	}

	minP.x = min(minP.x, 0); minP.y = min(minP.y, 0);
	maxP.x = max(maxP.x, img1f->width-1); maxP.y = max(maxP.y, img1f->height-1);
	// create image
	cv::Mat Panorama = cv::Mat(cv::Size(maxP-minP),  CV_32FC1, cv::Scalar(0.0));
	cv::Mat PLeft = cv::Mat(cv::Size(maxP-minP),  CV_32FC1, cv::Scalar(0.0));
	cv::Mat PRight = cv::Mat(cv::Size(maxP-minP),  CV_32FC1, cv::Scalar(0.0));

	cv::Mat matImg1f = cv::Mat( img1f);
	cv::Mat matImg2f = cv::Mat( img2f);
	for(int y=0; y < matImg1f.rows; ++y ) {
		for(int x=0; x < matImg1f.cols; ++x ) {
			PLeft.at<float>(y,x) = matImg1f.at<float>(y,x);
		}
	}
	for(int y=0; y < matImg2f.rows; ++y ) {
		for(int x=0; x < matImg2f.cols; ++x ) {
			PRight.at<float>(y,x) = matImg2f.at<float>(y,x);
		}
	}

	
	cv::imshow("mainWin", PLeft);
	cv::waitKey(0);
	cv::imshow("mainWin", PRight);
	cv::waitKey(0);

	float trans2[] = { 1.0f, 0.0f, (float)-minP.x, 0.0f, 1.0f, (float)-minP.y, 0.0f, 0.0f, 1.0f };
	cv::Mat translation(3,3,CV_32FC1,trans2);
	//translate P
	cv::Mat Pnew = translation*cv::Mat(P);
	cv::warpPerspective(PLeft, Panorama, Pnew, Panorama.size());
	cv::warpPerspective(PRight, PLeft, translation, PLeft.size());
	PRight = PLeft.clone();

	cv::imshow("mainWin", PLeft);
	cv::waitKey(0);
	cv::imshow("mainWin", Panorama);
	cv::waitKey(0);
	/**
	 * - Build the panorama image so that pixels for which two values are
	 *   available get the mean of the two assigned.
	 */

	cv::Mat mask = (Panorama > 0.0) & (PLeft > 0.0);
	cv::imshow("mainWin", mask);
	cv::waitKey(0);

	mask.convertTo(mask,CV_32FC1, 0.5/255.); 
	cv::Mat weighted = cv::Mat(Panorama.size(),  CV_32FC1, cv::Scalar(1.0)) - mask;

	Panorama = Panorama + PLeft;
	cv::multiply(Panorama, weighted, Panorama);

	cv::imshow("mainWin", Panorama);
	cv::waitKey(0);
/* TODO */

	/**
	 * - Display the panorama image.
	 */

/* TODO */

	return 0;
}
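For reference on the DLT question posed in the first task block: each correspondence $x_i \leftrightarrow x_i'$ with $x_i' \sim H x_i$ contributes two independent linear equations in the nine entries of $H$, giving a homogeneous system

$A\,h = 0, \qquad h = (h_{11}, h_{12}, \dots, h_{33})^\top.$

Four point pairs, no three collinear, yield eight equations and fix $H$ up to scale (which is why cvGetPerspectiveTransform takes exactly four points); with more correspondences the DLT takes $h$ as the singular vector of $A$ belonging to the smallest singular value.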
Example #15
	}
	CV_MAT_ELEM( *point_counts,  int, t-1, 0 ) = nc;
      }
      else printf("Bad board! How did this happen?\n");
    }
    // calibrate
    // Initialize the intrinsic matrix such that the two focal
    // lengths have a ratio of 1.0
    CV_MAT_ELEM( *intrinsic_matrix, float, 0, 0 ) = 1.0f;
    CV_MAT_ELEM( *intrinsic_matrix, float, 1, 1 ) = 1.0f;
    
    cvCalibrateCamera2(
		       object_points,
		       image_points,
		       point_counts,
		       cvGetSize(image),
		       intrinsic_matrix,
		       distortion_coeffs,
		       NULL,
		       NULL,
		       0 //			 CV_CALIB_FIX_ASPECT_RATIO
		       );

    calibrated = true;

    cvSave("intrinsic.dat", intrinsic_matrix);
    cvSave("distortion.dat", distortion_coeffs);

    matToScreen(intrinsic_matrix, "intrinsic");
    matToScreen(distortion_coeffs, "distortion");
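For reference, the two CV_MAT_ELEM initializations above seed the focal lengths of the pinhole intrinsic matrix that cvCalibrateCamera2 estimates,

$K = \begin{pmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{pmatrix},$

so starting with $f_x = f_y = 1$ gives the solver an initial aspect ratio of 1.0 (which the commented-out CV_CALIB_FIX_ASPECT_RATIO flag would keep fixed during optimization).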
Example #16
static void
cvTsCalcBackProject( IplImage** images, IplImage* dst, CvHistogram* hist, int* channels )
{
    int x, y, k, cdims;
    union
    {
        float* fl;
        uchar* ptr;
    }
    plane[CV_MAX_DIM];
    int nch[CV_MAX_DIM];
    int dims[CV_MAX_DIM];
    int uniform = CV_IS_UNIFORM_HIST(hist);
    CvSize img_size = cvGetSize(images[0]);
    int img_depth = images[0]->depth;

    cdims = cvGetDims( hist->bins, dims );

    for( k = 0; k < cdims; k++ )
        nch[k] = images[k]->nChannels;

    for( y = 0; y < img_size.height; y++ )
    {
        if( img_depth == IPL_DEPTH_8U )
            for( k = 0; k < cdims; k++ )
                plane[k].ptr = &CV_IMAGE_ELEM(images[k], uchar, y, 0 ) + channels[k];
        else
            for( k = 0; k < cdims; k++ )
                plane[k].fl = &CV_IMAGE_ELEM(images[k], float, y, 0 ) + channels[k];

        for( x = 0; x < img_size.width; x++ )
        {
            float val[CV_MAX_DIM];
            float bin_val = 0;
            int idx[CV_MAX_DIM];
            
            if( img_depth == IPL_DEPTH_8U )
                for( k = 0; k < cdims; k++ )
                    val[k] = plane[k].ptr[x*nch[k]];
            else
                for( k = 0; k < cdims; k++ )
                    val[k] = plane[k].fl[x*nch[k]];
            idx[cdims-1] = -1;

            if( uniform )
            {
                for( k = 0; k < cdims; k++ )
                {
                    double v = val[k], lo = hist->thresh[k][0], hi = hist->thresh[k][1];
                    idx[k] = cvFloor((v - lo)*dims[k]/(hi - lo));
                    if( idx[k] < 0 || idx[k] >= dims[k] )
                        break;
                }
            }
            else
            {
                for( k = 0; k < cdims; k++ )
                {
                    float v = val[k];
                    float* t = hist->thresh2[k];
                    int j, n = dims[k];

                    for( j = 0; j <= n; j++ )
                        if( v < t[j] )
                            break;
                    if( j <= 0 || j > n )
                        break;
                    idx[k] = j-1;
                }
            }

            if( k == cdims )
                bin_val = (float)cvGetRealND( hist->bins, idx );

            if( img_depth == IPL_DEPTH_8U )
            {
                int t = cvRound(bin_val);
                CV_IMAGE_ELEM( dst, uchar, y, x ) = CV_CAST_8U(t);
            }
            else
                CV_IMAGE_ELEM( dst, float, y, x ) = bin_val;
        }
    }
}
Example #17
void *ControlThread(void *unused)
{
    int i=0;
    char fileName[30];
    NvMediaTime pt1 ={0}, pt2 = {0};
    NvU64 ptime1, ptime2;

    IplImage* imgOrigin;
    IplImage* imgCanny;

    // cvCreateImage
    imgOrigin = cvCreateImage(cvSize(RESIZE_WIDTH, RESIZE_HEIGHT), IPL_DEPTH_8U, 3);
    imgCanny = cvCreateImage(cvGetSize(imgOrigin), IPL_DEPTH_8U, 1);

    int angle, speed;
    IplImage* imgResult;    // imgOrigin is already declared and created above
    unsigned char status;

    unsigned int gain;

    CarControlInit();
    PositionControlOnOff_Write(UNCONTROL);
    SpeedControlOnOff_Write(1);

    //speed controller gain set
    //P-gain
    gain = SpeedPIDProportional_Read();        // default value = 10, range : 1~50
    printf("SpeedPIDProportional_Read() = %d \n", gain);
    gain = 20;
    SpeedPIDProportional_Write(gain);

    //I-gain
    gain = SpeedPIDIntegral_Read();        // default value = 10, range : 1~50
    printf("SpeedPIDIntegral_Read() = %d \n", gain);
    gain = 20;
    SpeedPIDIntegral_Write(gain);

    //D-gain
    gain = SpeedPIDDifferential_Read();        // default value = 10, range : 1~50
    printf("SpeedPIDDefferential_Read() = %d \n", gain);
    gain = 20;
    SpeedPIDDifferential_Write(gain);
    angle = 1460;
    SteeringServoControl_Write(angle);
    int flag = 1;
    while(1)
    {
        pthread_mutex_lock(&mutex);
        pthread_cond_wait(&cond, &mutex);


        GetTime(&pt1);
        ptime1 = (NvU64)pt1.tv_sec * 1000000000LL + (NvU64)pt1.tv_nsec;



        Frame2Ipl(imgOrigin); // save image to IplImage structure & resize image from 720x480 to 320x240
        pthread_mutex_unlock(&mutex);


        cvCanny(imgOrigin, imgCanny, 100, 100, 3);

        sprintf(fileName, "captureImage/imgCanny%d.png", i);
        cvSaveImage(fileName , imgCanny, 0);


        //cvCanny(imgOrigin, imgCanny, 100, 100, 3);

        sprintf(fileName, "captureImage/imgyuv%d.png", i);
        cvSaveImage(fileName , imgOrigin, 0);


        //sprintf(fileName, "captureImage/imgOrigin%d.png", i);
        //cvSaveImage(fileName, imgOrigin, 0);


        // TODO : control steering angle based on captured image ---------------

        //speed set
        speed = DesireSpeed_Read();
        printf("DesireSpeed_Read() = %d \n", speed);
        //speed = -10;
        //DesireSpeed_Write(speed);
        if(flag == 1){
            if(greenlight>1000)
            {
                printf("right go\n");
                Winker_Write(LEFT_ON);
                usleep(1000000);
                //Winker_Write(ALL_OFF);
                angle = 1400;
                SteeringServoControl_Write(angle);
                speed = 10;
                DesireSpeed_Write(speed);
                speed = DesireSpeed_Read();
                printf("DesireSpeed_Read() = %d \n", speed);
                sleep(1);
                flag = 0;
            }
            else
            {
                printf("left go\n");
                Winker_Write(RIGHT_ON);
                usleep(10000);
                Winker_Write(ALL_OFF);

                speed = 20;
                DesireSpeed_Write(speed);
                usleep(1300000);
                angle = 1950;
                SteeringServoControl_Write(angle);
                usleep(5000000);
                angle = 1460;
                SteeringServoControl_Write(angle);
                usleep(1000000);
                speed = 0;
                DesireSpeed_Write(speed);
                flag = 0;
            }
        }

        // ---------------------------------------------------------------------

        GetTime(&pt2);
        ptime2 = (NvU64)pt2.tv_sec * 1000000000LL + (NvU64)pt2.tv_nsec;
        printf("--------------------------------operation time=%llu.%09llu[s]\n", (ptime2-ptime1)/1000000000LL, (ptime2-ptime1)%1000000000LL);


        i++;
    }

}
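The thread above synchronizes with the capture thread through a mutex and condition variable; here is a minimal, self-contained sketch of that handshake (names hypothetical, not from this codebase), including the predicate loop that guards against spurious wakeups:

#include <pthread.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
static int frame_ready = 0;

void publish_frame(void)            /* producer (capture) side */
{
    pthread_mutex_lock(&mutex);
    frame_ready = 1;                /* ...write the shared frame here... */
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&mutex);
}

void consume_frame(void)            /* consumer (control) side */
{
    pthread_mutex_lock(&mutex);
    while (!frame_ready)            /* predicate loop: survives spurious wakeups */
        pthread_cond_wait(&cond, &mutex);
    frame_ready = 0;                /* ...copy the shared frame here... */
    pthread_mutex_unlock(&mutex);   /* heavy image processing happens after unlock */
}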
Beispiel #18
0
//--------------------------------HOUGHLINES---------------------------------
void sendtoHoughLines(IplImage * img)
{


	IplImage* src = cvCreateImage( cvGetSize(img), 8, 1 );
	cvCvtColor(img, src, CV_RGB2GRAY);

	//-------------CREATING AN IMAGE NAMED dst FOR SHOWING THE CANNY EDGE RESULT--------------------
	IplImage* dst = cvCreateImage( cvGetSize(src), 8, 1 );

	//---------------CREATING A color_dst IMAGE FOR DISPLAYING THE RESULTING LINES---------------------------
	IplImage* color_dst = cvCreateImage( cvGetSize(src), 8, 3 );

	//---------------CREATING A STORAGE POOL FOR THE LINE SEQUENCE-------------
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* lines = 0;
	int i;

	//---------------APPLYING CANNY ON SRC AND STORING THE EDGE RESULT IN DST-------------
	cvCanny( src, dst, 30, 90, 3 );

	cvDilate( dst, dst, 0, 1 );

	//--------------CONVERTING THE CANNY RESULT DST INTO RGB, STORED IN color_dst--------------
	cvCvtColor( dst, color_dst, CV_GRAY2BGR );

	/*
	//-----probabilistic Hough transform--------
	lines = cvHoughLines2( dst, storage, CV_HOUGH_PROBABILISTIC, 1, CV_PI/180, 30, 30, 0 );

	//---------------ACCESSING THE END POINTS OF EACH LINE AND DRAWING IT ON color_dst--------------
	for( i = 0; i < lines->total; i++ )
	{
		CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i);
		cvLine( color_dst, line[0], line[1], CV_RGB(255,0,0), 3, 8 );
		//printf("\n i = %d x1 = %d y1 = %d x2 = %d y2 = %d ",i,line[0].x,line[0].y,line[1].x,line[1].y);
	}
	*/

	lines = cvHoughLines2( dst, storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 30, 0, 0 );
	for( i = 0; i < MIN(lines->total,100); i++ )
	{
		float* line = (float*)cvGetSeqElem(lines,i);
		float rho = line[0];
		float theta = line[1];

		printf("theta = %f",(theta*180/3.142));
		CvPoint pt1, pt2;
		double a = cos(theta), b = sin(theta);
		double x0 = a*rho, y0 = b*rho;
		printf("a= %f  b=%f  x0=%f  y0=%f rho=%f\n", a,b,x0,y0,rho);
		pt1.x = cvRound(x0 + 1000*(-b));
		pt1.y = cvRound(y0 + 1000*(a));
		pt2.x = cvRound(x0 - 1000*(-b));
		pt2.y = cvRound(y0 - 1000*(a));
		printf("    x1 = %d, y1 = %d",pt1.x,pt1.y);
		printf("    x2 = %d, y2 = %d\n\n",pt2.x,pt2.y);

		//if((theta*180/3.142) < 100 && (theta*180/3.142) > 79 )
		cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8 );
	}
	cvNamedWindow("HoughLinesShow",1);
	cvShowImage("HoughLinesShow",color_dst);
	cvWaitKey(1000);

	// release the work images and the storage pool so each call does not leak
	cvReleaseImage(&src);
	cvReleaseImage(&dst);
	cvReleaseImage(&color_dst);
	cvReleaseMemStorage(&storage);
}
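The endpoint computation inside the loop above, factored into a standalone helper for clarity: a CV_HOUGH_STANDARD line satisfies x*cos(theta) + y*sin(theta) = rho, so (x0, y0) is its closest point to the origin and (-sin(theta), cos(theta)) its direction; walking 1000 pixels each way yields drawable endpoints. The helper name is illustrative.

#include <math.h>
#include <cv.h>

static void rho_theta_to_segment(float rho, float theta, CvPoint* pt1, CvPoint* pt2)
{
    double a = cos(theta), b = sin(theta);
    double x0 = a * rho, y0 = b * rho;   /* foot of the perpendicular from the origin */
    pt1->x = cvRound(x0 + 1000 * (-b));  /* 1000 px along the line one way... */
    pt1->y = cvRound(y0 + 1000 * ( a));
    pt2->x = cvRound(x0 - 1000 * (-b));  /* ...and 1000 px the other way */
    pt2->y = cvRound(y0 - 1000 * ( a));
}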
Beispiel #19
0
int OnNewvision(IplImage* currImageBefore, IplImage* maskImage)
{
	//int pAPosition[6][2]={0};//pointArrayPosition
	//pAPosition[0][0]=0; pAPosition[0][1]=6;
	//pAPosition[1][0]=6; pAPosition[1][1]=0;
	//pAPosition[2][0]=12;pAPosition[2][1]=6;

	//pAPosition[3][0]=0; pAPosition[3][1]=9;
	//pAPosition[4][0]=6; pAPosition[4][1]=12;
	//pAPosition[5][0]=12;pAPosition[5][1]=9;

	//ListPoint pointSet1;
	//pointSet1.Item = (ArrayPoint*)MallocArrayPoint();
	//for (int dIndex = 0; dIndex <3; dIndex++)
	//{//cvPoint(wIndex, hIndex)
	//	pointSet1.Item->push_back(cvPoint(pAPosition[dIndex][0], pAPosition[dIndex][1]));
	//}	
	//
	//cvCircleObj outCircle1;
	//if(pointSet1.Item->size() == 0)
	//	return;
	//FitCircleObj(pointSet1, &outCircle1);

	//ListPoint pointSet2;
	//pointSet2.Item = (ArrayPoint*)MallocArrayPoint();
	//for (int dIndex = 3; dIndex <6; dIndex++)
	//{//cvPoint(wIndex, hIndex)
	//	pointSet2.Item->push_back(cvPoint(pAPosition[dIndex][0], pAPosition[dIndex][1]));
	//}
	//cvCircleObj outCircle2;
	//if(pointSet2.Item->size() == 0)
	//	return;
	//FitCircleObj(pointSet2, &outCircle2);

	//IplImage* currImage1 = cvCreateImage(cvSize(2000, 3000), IPL_DEPTH_8U, 1);
	//memset(currImage1->imageData, 0, currImage1->height*currImage1->widthStep*sizeof(unsigned char));

	//int bwPosition = 0;
	//for (int hIndex = 0; hIndex < currImage1->width; hIndex++)
	//{
	//	int y1 = 0, y2 = 0;
	//	y1 = sqrt( outCircle1.Radius * outCircle1.Radius - hIndex * hIndex);
	//	y2 = sqrt( outCircle2.Radius * outCircle2.Radius - hIndex * hIndex);
	//	for (int wIndex = 0; wIndex < currImage1->height; wIndex++)
	//	{
	//		bwPosition = hIndex*currImage1->widthStep + wIndex;
	//		if( wIndex < y1)
	//			currImage1->imageData[bwPosition] = 0;
	//		else if (wIndex > y1 && wIndex < y2)
	//			currImage1->imageData[bwPosition] = 255;
	//		else
	//			currImage1->imageData[bwPosition] = 0;
	//	}
	//}
	//cvShowImage("currImage1",currImage1);
	//cvWaitKey(0);

	//cvReleaseImage(&currImage1);
	//return;


	int hIndex = 0, wIndex = 0, ImagePosition = 0, TempPosition = 0, colorValue = 0;
	bool leakLight = false;	int LeakLightNum = 5;

	//IplImage* currImageBefore = cvLoadImage("00.bmp", CV_LOAD_IMAGE_GRAYSCALE);

	if (currImageBefore == NULL)
		return 1;

	if (maskImage == NULL)
	{
		return 2;
	}

	IplImage* currImage = cvCreateImage(cvSize(currImageBefore->width, currImageBefore->height), IPL_DEPTH_8U, 1);
	memset(currImage->imageData, 0, currImage->height*currImage->widthStep*sizeof(unsigned char));

	// apply the mask
	cvCopy(currImageBefore, currImage, maskImage);

	//cvShowImage("currImage",currImage);
	//cvWaitKey(0);

	IplImage* EdgeImage = cvCreateImage(cvSize(currImageBefore->width, currImageBefore->height), IPL_DEPTH_8U, 1);
	memset(EdgeImage->imageData, 0, EdgeImage->height*EdgeImage->widthStep*sizeof(unsigned char));

	cvCanny(currImage, EdgeImage, 50, 180, 3);

	//cvShowImage("EdgeImage", EdgeImage);
	//cvWaitKey(0);


	int edgeTempPosition = 0;
	for (int hIndex = 1; hIndex < EdgeImage->height - 1; hIndex++)
	{
		for (int wIndex = 1; wIndex < EdgeImage->width - 1; wIndex++)
		{
			edgeTempPosition = hIndex*EdgeImage->widthStep + wIndex;
			if (EdgeImage->imageData[edgeTempPosition] == 255)
			if (maskImage->imageData[edgeTempPosition + 1] == 0
				|| maskImage->imageData[edgeTempPosition - 1] == 0
				|| maskImage->imageData[edgeTempPosition + maskImage->widthStep] == 0
				|| maskImage->imageData[edgeTempPosition - maskImage->widthStep] == 0)
				EdgeImage->imageData[edgeTempPosition] = 0;
		}
	}

	//cvShowImage("EdgeImage2", EdgeImage);
	////cvSaveImage("E:\\wuxi\\EdgeImage.jpg",EdgeImage);
	//cvWaitKey(0);

	ListPoint pointSet; ListPoint bestPoint; ListPoint tempPoint;
	pointSet.Item = (ArrayPoint*)MallocArrayPoint();
	bestPoint.Item = (ArrayPoint*)MallocArrayPoint();
	tempPoint.Item = (ArrayPoint*)MallocArrayPoint();


	ListPoint pointSet13, pointSet23, pointSet33;
	ListPoint bestPoint13, bestPoint23, bestPoint33;
	pointSet13.Item = (ArrayPoint*)MallocArrayPoint();
	pointSet23.Item = (ArrayPoint*)MallocArrayPoint();
	pointSet33.Item = (ArrayPoint*)MallocArrayPoint();

	bestPoint13.Item = (ArrayPoint*)MallocArrayPoint();
	bestPoint23.Item = (ArrayPoint*)MallocArrayPoint();
	bestPoint33.Item = (ArrayPoint*)MallocArrayPoint();

	IplImage* markImage = cvCreateImage(cvGetSize(currImage), IPL_DEPTH_8U, 1);
	memset(markImage->imageData, 0, markImage->height*markImage->widthStep*sizeof(unsigned char));

	//ArrayPoint* PointArray = (ArrayPoint*)MallocArrayPoint();

	ListRect rectList; ListInt intAreaList;
	rectList.Item = (ArrayRect*)MallocArrayRect();
	intAreaList.Item = (ArrayInt *)MallocArrayInt();

	ExtractAllEdgePointNumForItem(EdgeImage, markImage, cvRect(0, 0, currImageBefore->width, currImageBefore->height), 255, &pointSet);

	// no edge points found; the edges may be of poor quality, or there is no part in view
	if (pointSet.Item->size() == 0 || pointSet.Item->size() < 10)
	{
		cvReleaseImage(&currImageBefore);
		//cvReleaseImage(&maskImage);
		cvReleaseImage(&markImage);
		cvReleaseImage(&currImage);
		cvReleaseImage(&EdgeImage);
		return 3;
	}

	CvPoint PartTempPoint;

	for (int dIndex = 0; dIndex < pointSet.Item->size() / 3; dIndex++)
	{
		PartTempPoint = (*pointSet.Item)[dIndex];

		AddArrayPoint(pointSet13.Item, PartTempPoint);
	}

	cvCircleObj TempCircle;

	memset(markImage->imageData, 0, markImage->height*markImage->widthStep*sizeof(unsigned char));

	// the previous approach ran RANSAC filtering on all points; to save time it now filters the 1/3, 1/3, 1/3 subsets in turn
	//RansacCirclePoint(pointSet, &bestPoint, &tempPoint);
	//if(bestPoint.Item->size() == 0)
	//{
	//	cvReleaseImage(&currImageBefore);
	//	cvReleaseImage(&maskImage);
	//	cvReleaseImage(&markImage);
	//	cvReleaseImage(&currImage);
	//	cvReleaseImage(&EdgeImage);
	//	return;
	//}
	//SortPointsListByXValue(&bestPoint);

	RansacCirclePoint(pointSet13, &bestPoint13, &tempPoint);

	if (bestPoint13.Item->size() == 0)
	{
		cvReleaseImage(&currImageBefore);
		//cvReleaseImage(&maskImage);
		cvReleaseImage(&markImage);
		cvReleaseImage(&currImage);
		cvReleaseImage(&EdgeImage);
		return 4;
	}

	cvCircleObj outCircle;
	cvCircleObj outCircle13, outCircle23, outCircle33;
	//ListPoint bestPoint13, bestPoint23, bestPoint33;
	//bestPoint13.Item = (ArrayPoint*)MallocArrayPoint();	
	//bestPoint23.Item = (ArrayPoint*)MallocArrayPoint();	
	//bestPoint33.Item = (ArrayPoint*)MallocArrayPoint();	

	//for (int dIndex = 0; dIndex< bestPoint.Item->size()/3; dIndex++)
	//{
	//	AddArrayPoint(bestPoint13.Item, (*bestPoint.Item)[dIndex]);
	//}
	FitCircleObj(bestPoint13, &outCircle13);

	if (outCircle13.CirclePoint.x < 0 || outCircle13.CirclePoint.y >0)
	{
		for (int dIndex = pointSet.Item->size() / 3; dIndex < 2 * pointSet.Item->size() / 3; dIndex++)
		{
			PartTempPoint = (*pointSet.Item)[dIndex];

			AddArrayPoint(pointSet23.Item, PartTempPoint);
		}

		RansacCirclePoint(pointSet23, &bestPoint23, &tempPoint);

		if (bestPoint23.Item->size() == 0)
		{
			cvReleaseImage(&currImageBefore);
			//cvReleaseImage(&maskImage);
			cvReleaseImage(&markImage);
			cvReleaseImage(&currImage);
			cvReleaseImage(&EdgeImage);
			return 5;
		}

		FitCircleObj(bestPoint23, &outCircle23);

		if (outCircle23.CirclePoint.x < 0 || outCircle23.CirclePoint.y >0)
		{
			for (int dIndex = 2 * pointSet.Item->size() / 3; dIndex < pointSet.Item->size(); dIndex++)
			{
				PartTempPoint = (*pointSet.Item)[dIndex];

				AddArrayPoint(pointSet33.Item, PartTempPoint);
			}

			RansacCirclePoint(pointSet33, &bestPoint33, &tempPoint);

			if (bestPoint33.Item->size() == 0)
			{
				cvReleaseImage(&currImageBefore);
				//cvReleaseImage(&maskImage);
				cvReleaseImage(&markImage);
				cvReleaseImage(&currImage);
				cvReleaseImage(&EdgeImage);
				return 6;
			}

			FitCircleObj(bestPoint33, &outCircle33);

			if (outCircle33.CirclePoint.x < 0 || outCircle33.CirclePoint.y >0)
			{
				outCircle.CirclePoint.x = 0;
				outCircle.CirclePoint.y = 1;
				outCircle.Radius = 1;
			}
			else
				outCircle = outCircle33;
		}
		else
			outCircle = outCircle23;

	}
	else
		outCircle = outCircle13;

	//FitCircleObj(bestPoint, &outCircle);

	if (outCircle.CirclePoint.y == 1 && outCircle.Radius == 1)
	{
		cvReleaseImage(&currImageBefore);
		//cvReleaseImage(&maskImage);
		cvReleaseImage(&markImage);
		cvReleaseImage(&currImage);
		cvReleaseImage(&EdgeImage);
		return 7;
	}

	ListPoint pointOutCircleSet;
	pointOutCircleSet.Item = (ArrayPoint*)MallocArrayPoint();

	int radiusAdd = 0;
	int radiusMove = 35;

	for (int dIndex = 0; dIndex < VL_MAX(bestPoint.Item->size(), 0); dIndex++)
	{
		CvPoint TempPoint;
		TempPoint.x = ((*bestPoint.Item)[dIndex]).x;
		TempPoint.y = ((*bestPoint.Item)[dIndex]).y + radiusMove;
		AddArrayPoint(pointOutCircleSet.Item, TempPoint);

	}


	ListPoint pointMoreCircleSet;
	pointMoreCircleSet.Item = (ArrayPoint*)MallocArrayPoint();

	for (int wIndex = 0; wIndex < currImageBefore->width; wIndex++)
	{
		CvPoint TempPoint;
		CvPoint TempOutPoint;
		float x = 0, y = 0;
		x = wIndex - outCircle.CirclePoint.x;
		//y = dIndex - outCircle.CirclePoint.y;

		y = sqrt((outCircle.Radius + radiusMove) * (outCircle.Radius + radiusMove) - x * x);

		TempPoint.x = wIndex;
		if (outCircle.CirclePoint.y < 0)
			y = VL_MAX(0, outCircle.CirclePoint.y + y);
		else
			y = VL_MAX(0, outCircle.CirclePoint.y - y);

		TempPoint.y = y;
		if (TempPoint.x >= 0 && TempPoint.y >= 0)
			AddArrayPoint(pointMoreCircleSet.Item, TempPoint);
	}

	SortPointsListByXValue(&pointMoreCircleSet);



	int maskCircleTemp = 0;

	for (int wIndex = 0; wIndex < markImage->width; wIndex++)
	{
		for (int hIndex = 0; hIndex < markImage->height; hIndex++)
		{
			maskCircleTemp = hIndex*markImage->widthStep + wIndex;

			if (hIndex <= ((*pointMoreCircleSet.Item)[wIndex]).y)
			{
				markImage->imageData[maskCircleTemp] = 255;
			}

		}
	}

	int a[4] = { 0 };

	//currImageBefore = cvLoadImage("E:\\wuxi\\leak\\1532.bmp", CV_LOAD_IMAGE_GRAYSCALE);
	getMaxDistance(currImageBefore, &pointMoreCircleSet, a);

	int subPotion = 0;
	for (int hIndex = 0; hIndex < markImage->height; hIndex++)
	{
		for (int wIndex = 0; wIndex < markImage->width; wIndex++)
		{
			subPotion = hIndex*markImage->widthStep + wIndex;

			//currImageBefore->imageData[subPotion] -= maskCircle->imageData[subPotion];
			//if (maskCircle->imageData[subPotion] == 255)
			//	currImageBefore->imageData[subPotion] = 0;
			if (currImageBefore->imageData[subPotion] - markImage->imageData[subPotion] <= 0)
				currImageBefore->imageData[subPotion] = 0;

			if (currImageBefore->imageData[subPotion] - markImage->imageData[subPotion] >= 255)
				currImageBefore->imageData[subPotion] = 255;

		}
	}

	cvThreshold(currImageBefore, markImage, 110, 255, CV_THRESH_BINARY);

	//cvShowImage("currbf", currImageBefore);
	//cvWaitKey(0);


	CvPoint LineCenter;
	CvPoint LineCenter2;
	LineCenter2.x = (int)outCircle.CirclePoint.x;
	LineCenter2.y = (int)outCircle.CirclePoint.y;

	//LineCenter.x = (a[1]+a[0]/2);
	LineCenter.x = a[1] + a[0] / 2;
	LineCenter.y = a[3];


	CvSize imgSize = cvSize(currImageBefore->width, currImageBefore->height);

	CvRect Zone;
	Zone.x = 0;
	Zone.y = 0;
	Zone.width = 0;
	Zone.height = 0;

	int checkLightValue = 55;
	bool Zone0 = false, Zone1 = false, Zone2 = false, Zone3 = false, Zone4 = false;
	// no notch region
	if (a[0] == 0)
	{
		if (getMinYPositon(&pointMoreCircleSet, 0, currImage->width, &Zone, a, imgSize, 0))
		{
			if (getMinYPositon(&pointMoreCircleSet, 0, currImageBefore->width, &Zone, a, imgSize, 0))
				Zone0 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);
			//IplImage* ZoneImg = cvCreateImage(cvSize(Zone.width, Zone.height), IPL_DEPTH_8U, 1);
			//memset(ZoneImg->imageData, 0, ZoneImg->height*ZoneImg->widthStep*sizeof(unsigned char));

			//IplImage* ZoneImg2 = cvCreateImage(cvSize(Zone.width, Zone.height), IPL_DEPTH_8U, 1);
			//memset(ZoneImg2->imageData, 0, ZoneImg2->height*ZoneImg2->widthStep*sizeof(unsigned char));

			//cvSetImageROI(currImageBefore,Zone);

			//cvCopy(currImageBefore,ZoneImg);  

			//cvResetImageROI(currImageBefore); 

			//cvThreshold(ZoneImg, ZoneImg2, 110, 255, CV_THRESH_BINARY);	

			//int leakNum = 0;
			//int bwPosition = 0;
			//for (int hIndex = 0; hIndex < ZoneImg2->height; hIndex++)
			//{
			//	for (int wIndex = 0; wIndex < ZoneImg2->width; wIndex++)
			//	{
			//		bwPosition = hIndex*ZoneImg2->widthStep + wIndex;
			//		if( ZoneImg2->imageData[bwPosition] == 255)
			//			leakNum++;					
			//	}
			//}

			//if (leakNum > LeakLightNum)
			//{
			//	leakLight = true;
			//	//cvShowImage("ZoneImage2", ZoneImg2);
			//	//cvWaitKey(0);
			//}

			//cvReleaseImage(&ZoneImg);
		}
		//getMinYPositon(ListPoint* line, int firstPosition, int lastPosition, CvRect* zone, int* a, int direction)

	}
	else // a notch region exists
	{
		//getMinYPositon
		if (a[1] > currImageBefore->width / 2)
		{
			if (getMinYPositon(&pointMoreCircleSet, a[2], currImageBefore->width, &Zone, a, imgSize, 4))
				Zone4 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);

			if (getMinYPositon(&pointMoreCircleSet, 0, a[1], &Zone, a, imgSize, 3))
				Zone3 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);
		}

		if (a[1] <= currImageBefore->width / 2)
		{
			if (getMinYPositon(&pointMoreCircleSet, 0, a[1], &Zone, a, imgSize, 1))
				Zone1 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);

			if (getMinYPositon(&pointMoreCircleSet, a[2], currImageBefore->width, &Zone, a, imgSize, 2))
				Zone2 = CheckZoneLeak(currImageBefore, Zone, LeakLightNum, checkLightValue);
		}
	}

	cvReleaseImage(&currImageBefore);
	//cvReleaseImage(&maskImage);
	cvReleaseImage(&markImage);
	cvReleaseImage(&currImage);
	cvReleaseImage(&EdgeImage);

	bool lastResult = false;
	if (Zone0 || Zone1 || Zone2 || Zone3 || Zone4)
	{
		lastResult = true;
		return -1;
	}

	lastResult = false;
	return 0;

}
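RansacCirclePoint and FitCircleObj are project-specific, but the geometric core of any such RANSAC iteration is fitting a circle through three sampled points via the perpendicular-bisector equations; a self-contained sketch (the helper and struct are illustrative, not part of this library):

#include <math.h>

typedef struct { double cx, cy, r; } Circle3;

static int circle_from_3pts(double x1, double y1, double x2, double y2,
                            double x3, double y3, Circle3* out)
{
    double a = x2 - x1, b = y2 - y1, c = x3 - x1, d = y3 - y1;
    double e = a * (x1 + x2) + b * (y1 + y2);
    double f = c * (x1 + x3) + d * (y1 + y3);
    double g = 2.0 * (a * (y3 - y2) - b * (x3 - x2));
    if (fabs(g) < 1e-12) return 0;            /* collinear points: no unique circle */
    out->cx = (d * e - b * f) / g;
    out->cy = (a * f - c * e) / g;
    out->r  = sqrt((x1 - out->cx) * (x1 - out->cx) +
                   (y1 - out->cy) * (y1 - out->cy));
    return 1;
}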
Beispiel #20
0
//-------DETECT FACE OF GANDHIJI---------------
void detectAllFaces( IplImage *img )
{
	// -------THIS STRUCTURE GIVES DETAILS OF THE DETECTED FACE-----------
	GandhitplMatch detectedimg[10];
	//cvNamedWindow("displayface",1);
    int k;
 	int cnt=0;
	 CvRect *r1;
	 CvScalar s;
	 int fl=0;
    /* detect faces */
    CvSeq *faces = cvHaarDetectObjects(
            img,
            cascade,
            storage,
            1.1,
            1,
            0 /*CV_HAAR_DO_CANNY_PRUNING*/,
            cvSize( 30, 30 ) );
	globalmaximum=-999;
	globalmaxindex=-1;
    /* for each face found, draw a red box */
    for( k = 0 ; k < ( faces ? faces->total : 0 ) ; k++ ) {
			r1 = ( CvRect* )cvGetSeqElem( faces, k );	
		if((r1->height<100)&&(r1->width<100))
		 {
				fl=1;
				detectedimg[k].faceimg=cvCreateImage(cvGetSize(img),8,3);
				cvCopyImage(img,detectedimg[k].faceimg);
				detectedimg[k].height=r1->height;
				detectedimg[k].width=r1->width;
				detectedimg[k].x=r1->x;
				detectedimg[k].y=r1->y;
				
			//	cvShowImage("displayface",detectedimg[k].faceimg);
				//cvWaitKey(100);
	
				
			/*cvRectangle( img,
						 cvPoint( r1->x, r1->y ),
						 cvPoint( r1->x + r1->width, r1->y + r1->height ),
						 CV_RGB( 255, 0, 0 ), 1, 8, 0 ); */
				//printf("facedetection called");
//				printf("width= %d height= %d",detectedimg[k].faceimg->width,detectedimg[k].faceimg->height);
		for(int i=0;i<img->height;i++)
	{
		//printf("....");
		for(int j=0;j<img->width;j++)
		{
		//	printf("hi %d",j);
			if((j<r1->x || j>r1->x + r1->width)  || (i<r1->y || i>r1->y + r1->height))
			{ 
					
				
				s = cvGet2D(detectedimg[k].faceimg, i, j);
		//	printf("hi.....");
			//	s.
			//	s.val[3]=0.0;
				s.val[0]=0.0;
				s.val[1]=0.0;
				s.val[2]=0.0;
			
	cvSet2D(detectedimg[k].faceimg, i, j, s );
	
			}
			
			
		}
//		printf("over j");

	}

	//cvShowImage("displayface",detectedimg[k].faceimg);
	//cvWaitKey(10);
		




//			printf("width %d height  %d\n",r1->width,r1->height);
		}
		//-------SEND THE DETECTED FACE TO MATCH WITH FACE OF GANDHIJI---------
		gandhijitplMatch(detectedimg[k],k);	
 
	}
		//------KEEP THE MATCHED IMAGE WHOSE MATCH IS GREATER THAN 0.62-----------
	if(faces->total>0 && globalmaximum>0.62)
	{
		GlobalGandhiji[globalcnt].faceimg=detectedimg[globalmaxindex].faceimg;
		GlobalGandhiji[globalcnt].x=detectedimg[globalmaxindex].x;
		GlobalGandhiji[globalcnt].y=detectedimg[globalmaxindex].y;
		GlobalGandhiji[globalcnt].width=detectedimg[globalmaxindex].width;
		GlobalGandhiji[globalcnt].height=detectedimg[globalmaxindex].height;
		GlobalGandhiji[globalcnt].matchval=globalmaximum;

	}
	else
	{
		GlobalGandhiji[globalcnt].matchval=-1;//TO ELIMINATE THE IMAGES

	}
	globalcnt++;
    /* display video */
  //  cvShowImage( "video", img );
	//cvWaitKey(100);
}
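detectAllFaces() relies on the globals cascade and storage being initialized elsewhere; a minimal sketch of that one-time setup (the cascade path is a placeholder):

#include <cv.h>

CvHaarClassifierCascade* cascade = NULL;
CvMemStorage* storage = NULL;

int init_detector(void)
{
    cascade = (CvHaarClassifierCascade*)cvLoad(
        "haarcascade_frontalface_alt.xml", NULL, NULL, NULL);
    storage = cvCreateMemStorage(0);
    if (!cascade || !storage)
        return 0;                 /* cascade file missing or out of memory */
    cvClearMemStorage(storage);   /* clear (not release) before each detection pass */
    return 1;
}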
Beispiel #21
0
IplImage* PlateFinder::FindPlate (IplImage *src) {
	IplImage* plate = NULL;
	IplImage* contourImg = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);	// contour image
	IplImage* grayImg = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);	// grayscale image
	cvCvtColor(src, grayImg, CV_RGB2GRAY);

	IplImage* cloneImg = cvCloneImage(src);	// work on a copy of the source
	
	// image preprocessing
	cvCopy(grayImg, contourImg);
	cvNormalize(contourImg, contourImg, 0, 255, CV_MINMAX);
	ImageRestoration(contourImg);
	
	IplImage* rectImg = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);
	cvMerge(contourImg, contourImg, contourImg, NULL, rectImg); // merge the gray channel into a 3-channel image

	// find the contours in the image
	CvMemStorage *storagePlate = cvCreateMemStorage(0);
	CvSeq *contours = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint), storagePlate);
	cvFindContours(contourImg, storagePlate, &contours, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

	//cvShowImage("contourImg", contourImg);
	

	int xmin, ymin, xmax, ymax, w, h, s, r;
	int count;
	double ratio;	// width-to-height ratio
	CvRect rectPlate; 

	// keep candidate images that may contain a plate
	IplImage** plateArr = new IplImage *[5];
	int j = 0;
	for (int i = 0; i < 5; i++)
	{
		plateArr[i] = NULL;
	}

	while (contours) {
		count = contours->total;
		CvPoint *PointArray = new CvPoint[count];
		cvCvtSeqToArray (contours, PointArray, CV_WHOLE_SEQ);

		for (int i = 0; i < count; i++)
		{
			if (i == 0)
			{
				xmin = xmax = PointArray[i].x;
				ymin = ymax = PointArray[i].y;
			}

			if (PointArray[i].x > xmax) {
				xmax = PointArray[i].x;
			}
			if (PointArray[i].x < xmin)  {
				xmin = PointArray[i].x;
			}

			if (PointArray[i].y > ymax) {
				ymax = PointArray[i].y;
			}
			if (PointArray[i].y < ymin)  {
				ymin = PointArray[i].y;
			}
		}

		w = xmax - xmin;
		h = ymax - ymin;
		s = w * h;

		cvRectangle (rectImg, cvPoint(xmin, ymin), cvPoint(xmax, ymax), RED);

		// discard rectangles with the wrong proportions
		if (s != 0) {
			r = (contourImg->height * contourImg->width) / s;
		} else {
			r = 1000;
		}

		if (w == 0 && h == 0) {
			ratio = 0;
		} else {
			ratio = (double)w/h;
		}

		if (r > 30 && r < 270) {
			// draw a green rectangle
			cvRectangle (rectImg, cvPoint(xmin, ymin), cvPoint(xmax, ymax), GREEN);

			if (ratio > 2.6 && ratio < 7) {
				cvRectangle (rectImg, cvPoint(xmin, ymin), cvPoint(xmax, ymax), BLUE);

				if (w > 80 && w < 250 && h > 25 && h < 150) {
					rectPlate = cvRect (xmin, ymin, w, h);

					cvRectangle (cloneImg, cvPoint(rectPlate.x, rectPlate.y),
						cvPoint(rectPlate.x + rectPlate.width, rectPlate.y + rectPlate.height), RED, 3);

					// crop the plate region
					plate = cvCreateImage(cvSize(rectPlate.width, rectPlate.height), IPL_DEPTH_8U, 3);
					cvSetImageROI(src, rectPlate);
					cvCopy(src, plate, NULL);
					cvResetImageROI(src);

					// store it in the plate array plateArr
					int cnt = CountCharacter(plate);
					if (cnt >= 5) {
						plateArr[j] = cvCloneImage(plate);
						j++;
					}
				}
			}
		}

		delete []PointArray;

		contours = contours->h_next;
	}

	// selection: prefer a narrower candidate over plateArr[0]
	if (plateArr[0])
	{
		int w = plateArr[0]->width;

		int flag = 0;	// default to the first candidate
		for (int i = 1; i < 4; i++)
		{
			if (plateArr[i] && plateArr[i]->width < w)
			{
				flag = i;
			}
		}

		plateArr[0] = plateArr[flag];
	}

	cvShowImage("cloneImg", cloneImg);
	//cvShowImage("rectImg", rectImg);
	//cvShowImage("plate", plateArr[0]);

	cvReleaseImage(&contourImg);
	cvReleaseImage(&rectImg);
	cvReleaseImage(&plate);

	return plateArr[0];
}
Beispiel #22
0
//--------THIS FUNCTION PERFORMS A TEMPLATE MATCH ON THE ORIGINAL IMAGE TO CROP THE IMAGE FROM BACKGROUND-----------
//-------IN ORDER TO IDENTIFY THE REGION WHERE THE NOTE IS PRESENT----------------
float NotetplMatch(IplImage *img1,int no)
{

	//------------NotetplProcessed STORES THE IMAGE EXTRACTED FROM BACKGROUND-------------
	NotetplProcessed[no]=cvCreateImage(cvGetSize(img1),8,3);

	//-------VARIABLES USED FOR TEMPLATE MATCHING----------
	IplImage *img;
	IplImage	*tpl;
	IplImage	*res;
	CvPoint		minloc, maxloc;
	double		minval, maxval;
	int			img_width, img_height;
	int			tpl_width, tpl_height;
	int			res_width, res_height;
	float maxi=-1.0;
	int index=0;
	//cvNamedWindow( "reference", CV_WINDOW_AUTOSIZE );
	//cvNamedWindow( "template", CV_WINDOW_AUTOSIZE );
	//cvNamedWindow("result",CV_WINDOW_AUTOSIZE);
	
	//-------TEMPORARY IMAGE-COLOR--------------
	img=cvCreateImage(cvGetSize(img1),8,3);

	//-----------THIS LOOP MATCHES THE IMAGE WITH 9 DIFFERENT TEMPLATES AS STORED IN THE GLOBAL 
	// (continued) VARIABLE Notetpl[]----------------
	for(int km=0;km<9;km++)
	{
		cvCopy(img1,img,NULL);
		tpl = cvLoadImage(Notetpl[km]);
		img_width  = img->width;
		img_height = img->height;
		tpl_width  = tpl->width;
		tpl_height = tpl->height;
		res_width  = img_width - tpl_width + 1;
		res_height = img_height - tpl_height + 1;


		/* create new image for template matching computation */
		res = cvCreateImage( cvSize( res_width, res_height ), IPL_DEPTH_32F, 1 );
		/* choose template matching method to be used */
		//cvMatchTemplate( img, tpl, res, CV_TM_SQDIFF );
		//cvMatchTemplate( img, tpl, res, CV_TM_SQDIFF_NORMED );
		//cvMatchTemplate( img, tpl, res, CV_TM_CCORR );
		//cvMatchTemplate( img, tpl, res, CV_TM_CCORR_NORMED );
		//cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF );
		cvMatchTemplate( img, tpl, res, CV_TM_CCOEFF_NORMED );
		cvMinMaxLoc( res, &minval, &maxval,&minloc,&maxloc, 0);
		/* draw red rectangle */
		cvRectangle( img, 
				 cvPoint( maxloc.x, maxloc.y ), 
				 cvPoint( maxloc.x + tpl_width, maxloc.y + tpl_height ),
				 cvScalar( 0, 0, 255, 0 ), 1, 0, 0 );	
		CvScalar s;
			
	//	printf("\nminval= %f  maxval= %f  ",minval,maxval);

		//------AS WE NEED ONLY HALF PART(RIGHT PART ) OF NOTE WHERE GANDHIJI IS PRESENT THIS LOOP ONLY BRINGS IN FOCUS THAT AREA 
		// (continued) OTHER PORTION IS BLACKENED--------
		for(int i=1;i<img_height;i++)
		{
			for(int j=1;j<img_width;j++)
			{

				if((j<maxloc.x+(tpl_width/2) || j>maxloc.x + tpl_width)  || (i<maxloc.y || i>maxloc.y + tpl_height))
				{	 
					s = cvGet2D(img, i, j);
					s.val[0]=0.0;
					s.val[1]=0.0;
					s.val[2]=0.0;
				
					cvSet2D(img, i, j, s );

				}
			

			}

		}

		//---------COMPARES AMONGST NINE TEMPLATES WHICH IS MAX MATCHED TEMPLATE--------
		if(maxval>maxi)
		{
				maxi=maxval;
				cvCopyImage(img,NotetplProcessed[no]);
				
		}
	

		//cvShowImage( "reference", img );
		//cvShowImage( "template", tpl );
		//cvShowImage("result",res);
		//cvWaitKey(100);	
	}
	//printf("\n\nWaiting for next image to load\n");
	//cvDestroyWindow( "reference" );
	//cvDestroyWindow( "template" );
	//cvDestroyWindow( "result" );


	cvReleaseImage(&img);
	return maxi;
}
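A stripped-down sketch of the cvMatchTemplate workflow used above, isolating the result-size rule res = (W - w + 1) x (H - h + 1) and the cvMinMaxLoc lookup; the file names are placeholders:

#include <stdio.h>
#include <cv.h>
#include <highgui.h>

int main(void)
{
    IplImage* img = cvLoadImage("scene.jpg", CV_LOAD_IMAGE_COLOR);
    IplImage* tpl = cvLoadImage("template.jpg", CV_LOAD_IMAGE_COLOR);
    if (!img || !tpl) return 1;

    /* one score per possible template placement */
    CvSize rsz = cvSize(img->width - tpl->width + 1,
                        img->height - tpl->height + 1);
    IplImage* res = cvCreateImage(rsz, IPL_DEPTH_32F, 1);

    cvMatchTemplate(img, tpl, res, CV_TM_CCOEFF_NORMED);

    double minval, maxval;
    CvPoint minloc, maxloc;
    cvMinMaxLoc(res, &minval, &maxval, &minloc, &maxloc, NULL);
    printf("best match %.3f at (%d,%d)\n", maxval, maxloc.x, maxloc.y);

    cvReleaseImage(&img); cvReleaseImage(&tpl); cvReleaseImage(&res);
    return 0;
}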
Beispiel #23
0
ImageRAII match( IplImage * image1, IplImage * image2, std::pair< CvMat *, CvMat * > image1_keys, std::pair< CvMat *, CvMat * > image2_keys )
{
	ImageRAII appended_images = appendimages( image1, image2 );
	ImageRAII rgb_appended_images( cvCreateImage( cvGetSize( appended_images.image ), appended_images.image->depth, 3 ) );
	cvCvtColor( appended_images.image, rgb_appended_images.image, CV_GRAY2RGB );
	CvScalar red = cvScalar( 0, 0, 255 );	// zero-initialize; val[2] carries the red channel
	std::vector< std::pair< int, int > > points;

	// check for matches with the vectors in image1 and image2
	for( int i = 0; i < image1_keys.first->height; i++ )
	{
		double magnitude1 = 0;
		MatrixRAII current_vector( cvCreateMat( 1, image1_keys.first->cols, CV_32FC1 ) );
		// keeps track of minimum row found b/t image1 and image2 vectors
		MatrixRAII min( cvCreateMat( 1, image2_keys.first->cols, CV_32FC1 ) );
		cvGetRow( image1_keys.first, current_vector.matrix, i );
		CvPoint point1 = cvPoint( ( int )cvmGet( current_vector.matrix, 0, 1 ), ( int )cvmGet( current_vector.matrix, 0, 0 ) );
		std::map< float, int > angles;

		for( int k = 0; k < image1_keys.second->width; k++ )
			magnitude1 += pow( cvmGet( image1_keys.second, i, k ), 2 );
		magnitude1 = cvSqrt( magnitude1 );

		// compare a vector in image1 to every vector in image2 by calculating the cosine similarity
		for( int j = 0; j < image2_keys.first->height; j++ )
		{
			MatrixRAII descriptor1( cvCreateMat( 1, image1_keys.second->cols, CV_32FC1 ) );
			MatrixRAII descriptor2( cvCreateMat( 1, image2_keys.second->cols, CV_32FC1 ) );

			cvGetRow( image1_keys.second, descriptor1.matrix, i );
			cvGetRow( image2_keys.second, descriptor2.matrix, j );

			double dot_product = cvDotProduct( descriptor1.matrix, descriptor2.matrix );
			double magnitude2 = 0;
			for( int k = 0; k < image2_keys.second->width; k++ )
				magnitude2 += pow( cvmGet( descriptor2.matrix, 0, k ), 2 );	// magnitude of the image2 descriptor
			magnitude2 = cvSqrt( magnitude2 );

			angles.insert( std::pair< float, int >( acos( dot_product / ( magnitude1 * magnitude2 ) ), j ) );
		}

		std::map< float, int >::iterator it =  angles.begin();
		int index = it->second;
		float angle = it->first;
		it++;
		if( angle < THRESHOLD * it->first )
		{
			points.push_back( std::make_pair( i, index ) );
		}
	}

	std::vector< std::pair< int, int > >::iterator it;
	for( it = points.begin(); it < points.end(); it++ )
	{
		CvPoint point1 = cvPoint( ( int )cvmGet( image1_keys.first,  it->first, 1 ), ( int )cvmGet( image1_keys.first, it->first, 0 ) );
		CvPoint point2 = cvPoint( ( int )cvmGet( image2_keys.first,  it->second, 1 ) + image1->width, ( int )cvmGet( image2_keys.first, it->second, 0 ) );
		cvLine( rgb_appended_images.image, point1, point2, red );
	}

	return rgb_appended_images;
}
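The per-pair angle computation above can be written more compactly with cvNorm; a sketch of the cosine-distance helper (illustrative, assuming 1xN CV_32FC1 row vectors). Note that the acceptance test angle < THRESHOLD * it->first is the usual ratio test: a match is kept only when the best angle is clearly smaller than the second best.

#include <math.h>
#include <cv.h>

static double cosine_angle(const CvMat* v1, const CvMat* v2)
{
    double dot = cvDotProduct(v1, v2);
    double n1  = cvNorm(v1, NULL, CV_L2, NULL);  /* replaces the manual magnitude loops */
    double n2  = cvNorm(v2, NULL, CV_L2, NULL);
    return acos(dot / (n1 * n2));                /* smaller angle = more similar */
}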
Beispiel #24
0
int CvArrTest::validate_test_results( int test_case_idx )
{
    static const char* arr_names[] = { "input", "input/output", "output",
                                       "ref input/output", "ref output",
                                       "temporary", "mask" };
    int i, j;
    prepare_to_validation( test_case_idx );

    for( i = 0; i < 2; i++ )
    {
        int i0 = i == 0 ? OUTPUT : INPUT_OUTPUT;
        int i1 = i == 0 ? REF_OUTPUT : REF_INPUT_OUTPUT;
        int count = test_array[i0].size();

        assert( count == test_array[i1].size() );
        for( j = 0; j < count; j++ )
        {
            double err_level;
            CvPoint idx = {0,0};
            double max_diff = 0;
            int code;
            char msg[100];

            if( !test_array[i1][j] )
                continue;

            err_level = get_success_error_level( test_case_idx, i0, j );
            code = cvTsCmpEps( &test_mat[i0][j], &test_mat[i1][j], &max_diff, err_level, &idx,
                               element_wise_relative_error );

            switch( code )
            {
            case -1:
                sprintf( msg, "Too big difference (=%g)", max_diff );
                code = CvTS::FAIL_BAD_ACCURACY;
                break;
            case -2:
                strcpy( msg, "Invalid output" );
                code = CvTS::FAIL_INVALID_OUTPUT;
                break;
            case -3:
                strcpy( msg, "Invalid output in the reference array" );
                code = CvTS::FAIL_INVALID_OUTPUT;
                break;
            default:
                continue;
            }
            ts->printf( CvTS::LOG, "%s in %s array %d at (%d,%d)\n", msg,
                        arr_names[i0], j, idx.x, idx.y );
            for( i0 = 0; i0 < max_arr; i0++ )
            {
                int count = test_array[i0].size();
                if( i0 == REF_INPUT_OUTPUT || i0 == OUTPUT || i0 == TEMP )
                    continue;
                for( i1 = 0; i1 < count; i1++ )
                {
                    CvArr* arr = test_array[i0][i1];
                    if( arr )
                    {
                        CvSize size = cvGetSize(arr);
                        int type = cvGetElemType(arr);
                        ts->printf( CvTS::LOG, "%s array %d type=%sC%d, size=(%d,%d)\n",
                                    arr_names[i0], i1, cvTsGetTypeName(type),
                                    CV_MAT_CN(type), size.width, size.height );
                    }
                }
            }
            ts->set_failed_test_info( code );
            return code;
        }
    }

    return 0;
}
Beispiel #25
0
bool CvCaptureCAM_DC1394_v2_CPP::grabFrame()
{
    dc1394capture_policy_t policy = DC1394_CAPTURE_POLICY_WAIT;
    bool code = false, isColor;
    dc1394video_frame_t *dcFrame = 0, *fs = 0;
    int i, nch;

    if (!dcCam || (!started && !startCapture()))
        return false;

    dc1394_capture_dequeue(dcCam, policy, &dcFrame);

    if (!dcFrame)
        return false;

    if (/*dcFrame->frames_behind > 1 ||*/ dc1394_capture_is_frame_corrupt(dcCam, dcFrame) == DC1394_TRUE)
    {
        goto _exit_;
    }

    isColor = dcFrame->color_coding != DC1394_COLOR_CODING_MONO8 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16 &&
              dcFrame->color_coding != DC1394_COLOR_CODING_MONO16S;

    if (nimages == 2)
    {
        fs = (dc1394video_frame_t*)calloc(1, sizeof(*fs));

        //dc1394_deinterlace_stereo_frames(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);
        dc1394_deinterlace_stereo_frames_fixed(dcFrame, fs, DC1394_STEREO_METHOD_INTERLACED);

        dc1394_capture_enqueue(dcCam, dcFrame); // release the captured frame as soon as possible
        dcFrame = 0;
        if (!fs->image)
            goto _exit_;
        isColor = colorStereo;
    }
    nch = isColor ? 3 : 1;

    for (i = 0; i < nimages; i++)
    {
        IplImage fhdr;
        dc1394video_frame_t f = fs ? *fs : *dcFrame, *fc = &f;
        f.size[1] /= nimages;
        f.image += f.size[0] * f.size[1] * i; // TODO: make it more universal
        if (isColor)
        {
            if (!frameC)
                frameC = (dc1394video_frame_t*)calloc(1, sizeof(*frameC));
            frameC->color_coding = nch == 3 ? DC1394_COLOR_CODING_RGB8 : DC1394_COLOR_CODING_MONO8;
            if (nimages == 1)
            {
                dc1394_convert_frames(&f, frameC);
                dc1394_capture_enqueue(dcCam, dcFrame);
                dcFrame = 0;
            }
            else
            {
                f.color_filter = bayerFilter;
                dc1394_debayer_frames(&f, frameC, bayer);
            }
            fc = frameC;
        }
        if (!img[i])
            img[i] = cvCreateImage(cvSize(fc->size[0], fc->size[1]), 8, nch);
        cvInitImageHeader(&fhdr, cvSize(fc->size[0], fc->size[1]), 8, nch);
        cvSetData(&fhdr, fc->image, fc->size[0]*nch);

    // Swap R&B channels:
    if (nch==3)
        cvConvertImage(&fhdr,&fhdr,CV_CVTIMG_SWAP_RB);

        if( rectify && cameraId == VIDERE && nimages == 2 )
        {
            if( !maps[0][0] || maps[0][0]->width != img[i]->width || maps[0][0]->height != img[i]->height )
            {
                CvSize size = cvGetSize(img[i]);
                cvReleaseImage(&maps[0][0]);
                cvReleaseImage(&maps[0][1]);
                cvReleaseImage(&maps[1][0]);
                cvReleaseImage(&maps[1][1]);
                maps[0][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
                maps[0][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
                maps[1][0] = cvCreateImage(size, IPL_DEPTH_16S, 2);
                maps[1][1] = cvCreateImage(size, IPL_DEPTH_16S, 1);
                char buf[4*4096];
                if( getVidereCalibrationInfo( buf, (int)sizeof(buf) ) &&
                    initVidereRectifyMaps( buf, maps[0], maps[1] ))
                    ;
                else
                    rectify = false;
            }
            cvRemap(&fhdr, img[i], maps[i][0], maps[i][1]);
        }
        else
            cvCopy(&fhdr, img[i]);
    }

    code = true;

_exit_:
    if (dcFrame)
        dc1394_capture_enqueue(dcCam, dcFrame);
    if (fs)
    {
        if (fs->image)
            free(fs->image);
        free(fs);
    }

    return code;
}
Beispiel #26
0
//I is just a sample image for allocation purposes
void AllocateImages(IplImage *I){ //I is passed in for sizing
    IavgF = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
    IdiffF = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
    IprevF = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
    IhiF = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
    IlowF = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
    Ilow1 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
    Ilow2 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
    Ilow3 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
    Ihi1 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
    Ihi2 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
    Ihi3 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
    cvZero(IavgF);
    cvZero(IdiffF);
    cvZero(IprevF);
    cvZero(IhiF);
    cvZero(IlowF);
    Icount = 0.00001; //Protect against divide by zero

    Iscratch = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
    Iscratch2 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 3 );
    Igray1 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
    Igray2 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
    Igray3 = cvCreateImage( cvGetSize(I), IPL_DEPTH_32F, 1 );
    Imaskt = cvCreateImage( cvGetSize(I), IPL_DEPTH_8U, 1 );
    cvZero(Iscratch);
    cvZero(Iscratch2);
}
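This allocator serves a frame-averaging background model; a hedged sketch of the companion accumulation step (the body is assumed, not quoted from the original source), reusing the globals allocated above to build the running sums that the IhiF/IlowF thresholds are later derived from:

void accumulateBackground(IplImage* I)
{
    static int first = 1;
    cvConvertScale(I, Iscratch, 1.0, 0);    /* 8U 3-channel -> 32F 3-channel */
    if (!first) {
        cvAcc(Iscratch, IavgF, NULL);       /* running sum of frames */
        cvAbsDiff(Iscratch, IprevF, Iscratch2);
        cvAcc(Iscratch2, IdiffF, NULL);     /* running sum of |frame-to-frame deltas| */
        Icount += 1.0;                      /* later divides both sums into means */
    }
    first = 0;
    cvCopy(Iscratch, IprevF, NULL);         /* remember this frame for the next delta */
}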
Beispiel #27
0
/*!
\fn CvGabor::get_image(int Type)
Get the specific type of Gabor image

Parameters:
Type		The Type of gabor kernel, e.g. REAL, IMAG, MAG, PHASE   

Returns:
Pointer to image structure, or NULL on failure	

Return an image (IplImage) with a specific Type: "REAL" "IMAG" "MAG" "PHASE"
*/
IplImage* CvGabor::get_image(int Type)
{

	if(IsKernelCreate() == false)
	{ 
		perror("Error: the Gabor kernel has not been created in get_image()!\n");
		return NULL;
	}
	else
	{  
		IplImage* pImage;
		IplImage *newimage;
		newimage = cvCreateImage(cvSize(Width,Width), IPL_DEPTH_8U, 1 );
		//printf("Width is %d.\n",(int)Width);
		//printf("Sigma is %f.\n", Sigma);
		//printf("F is %f.\n", F);
		//printf("Phi is %f.\n", Phi);

		//pImage = gan_image_alloc_gl_d(Width, Width);
		pImage = cvCreateImage( cvSize(Width,Width), IPL_DEPTH_32F, 1 );


		CvMat* kernel = cvCreateMat(Width, Width, CV_32FC1);
		CvMat* re = cvCreateMat(Width, Width, CV_32FC1);
		CvMat* im = cvCreateMat(Width, Width, CV_32FC1);
		double ve, ve1,ve2;
		CvScalar S;
		CvSize size = cvGetSize( kernel );
		int rows = size.height;
		int cols = size.width;
		switch(Type)
		{
		case 1:  //Real

			cvCopy( (CvMat*)Real, (CvMat*)kernel, NULL );
			//pImage = cvGetImage( (CvMat*)kernel, pImageGL );
			for (int i = 0; i < rows; i++)
			{
				for (int j = 0; j < cols; j++)
				{
					ve = cvGetReal2D((CvMat*)kernel, i, j);
					cvSetReal2D( (IplImage*)pImage, j, i, ve );
				}
			}
			break;
		case 2:  //Imag
			cvCopy( (CvMat*)Imag, (CvMat*)kernel, NULL );
			//pImage = cvGetImage( (CvMat*)kernel, pImageGL );
			for (int i = 0; i < rows; i++)
			{
				for (int j = 0; j < cols; j++)
				{
					ve = cvGetReal2D((CvMat*)kernel, i, j);
					cvSetReal2D( (IplImage*)pImage, j, i, ve );
				}
			}
			break; 
		case 3:  //Magnitude //add by yao

			cvCopy( (CvMat*)Real, (CvMat*)re, NULL );
			cvCopy( (CvMat*)Imag, (CvMat*)im, NULL );
			for (int i = 0; i < rows; i++)
			{
				for (int j = 0; j < cols; j++)
				{
					ve1 = cvGetReal2D((CvMat*)re, i, j);
					ve2 = cvGetReal2D((CvMat*)im, i, j);
					ve = cvSqrt(ve1*ve1+ve2*ve2);
					cvSetReal2D( (IplImage*)pImage, j, i, ve );
				}
			}
			break;
		case 4:  //Phase
			///@todo
			break;
		}

		cvNormalize((IplImage*)pImage, (IplImage*)pImage, 0, 255, CV_MINMAX, NULL );


		cvConvertScaleAbs( (IplImage*)pImage, (IplImage*)newimage, 1, 0 );

		cvReleaseMat(&kernel);

		cvReleaseImage(&pImage);

		return newimage;
	}
}
Beispiel #28
0
int MakeFeatureInMem(
		IplImage* 	RGBA,
		IplImage* 	depth,
		IplImage*	mask,
		FEATURE* 	feature){
	if (RGBA==NULL){
		fprintf(stderr, "image file is required to create feature set!");
		return 1;
	}

	IplImage *hsv_img = NULL, *h = NULL, *s = NULL, *v = NULL;
	if (HUELBP_ON){
		// convert to hsv image
		hsv_img = cvCreateImage( cvGetSize(RGBA), IPL_DEPTH_8U, 3);
		cvCvtColor(RGBA, hsv_img, CV_RGB2HSV);

		h = cvCreateImage( cvGetSize(hsv_img), IPL_DEPTH_8U, 1 );
		s = cvCreateImage( cvGetSize(hsv_img), IPL_DEPTH_8U, 1 );
		v = cvCreateImage( cvGetSize(hsv_img), IPL_DEPTH_8U, 1 );

		// Split image onto the color planes
		cvSplit( hsv_img, h, s, v, NULL );
	}
	// convert to grayscale-image
	IplImage* gray_img = RGBA;
	if (RGBA->nChannels > 1) {
		gray_img = cvCreateImage(cvGetSize(RGBA), IPL_DEPTH_8U, 1 );
		cvCvtColor( RGBA, gray_img, CV_RGB2GRAY );
	}
//	cvEqualizeHist(gray_img,gray_img);

	feature->grid_x 	= GRID_X;
	feature->grid_y 	= GRID_Y;
	feature->radius 	= RADIUS;
	feature->neighbors	= NEIGHBORS;

	int numPatterns = UNIFORM_ON? (NEIGHBORS+2) : pow(2.0, NEIGHBORS);
	//detect faces
	CvSeq* faces;
	int retCode = DetectFaces(gray_img, depth, &faces, FOREGRND_ON);
	if (retCode){ //no faces found; release the work images allocated above
		feature->histogram 		= NULL;
		feature->hue_histogram 	= NULL;
		feature->num_faces 		= 0;
		if (HUELBP_ON){
			cvReleaseImage(&hsv_img); cvReleaseImage(&h);
			cvReleaseImage(&s); cvReleaseImage(&v);
		}
		if (RGBA->nChannels > 1)
			cvReleaseImage(&gray_img);
		return 0;
	}else{
		//calculate features
		feature->num_faces 		= faces->total;
		feature->histogram 		= (CvMat**) malloc(faces->total*sizeof(CvMat*));
		feature->hue_histogram 	= (CvMat**) malloc(faces->total*sizeof(CvMat*));
		for(int i = 0; i < faces->total; i++ )
		{
			// Create a new rectangle for drawing the face
			CvRect* r = (CvRect*)cvGetSeqElem( faces, i ); // Find the dimensions of the face, and scale it if necessary
			IplImage* face_img = CreateSubImg(gray_img, *r);
			IplImage* lbp_img =  CalcLBP(face_img, RADIUS, NEIGHBORS, UNIFORM_ON);

			if (lbp_img==NULL){
				fprintf(stderr, "failed to create lbp image!\n");
				return 1;
			}
			feature->histogram[i] = CalcSpatialHistogram(lbp_img, numPatterns, GRID_X, GRID_Y);
			if (feature->histogram[i]==NULL){
				fprintf(stderr, "failed to create spatial histogram!\n");
				return 2;
			}
			cvReleaseImage(&face_img);
			cvReleaseImage(&lbp_img);

			if (HUELBP_ON){
				// Create a hue face image
				IplImage* hue_face_img = CreateSubImg(h, *r);

				//Create Hue LBP
				IplImage* hue_lbp = CalcLBP(hue_face_img, RADIUS, NEIGHBORS, UNIFORM_ON);
				if (hue_lbp==NULL){
					fprintf(stderr, "failed to create hue-lbp image!\n");
					return 1;
				}
				//Create Hue Spatial Histogram
				feature->hue_histogram[i] = CalcSpatialHistogram(hue_lbp, numPatterns, GRID_X, GRID_Y);
				if (feature->hue_histogram[i]==NULL){
					fprintf(stderr, "failed to create hue spatial histogram!\n");
					return 2;
				}

				cvReleaseImage(&hue_face_img);
				cvReleaseImage(&hue_lbp);
			}


		}
	}
	if (HUELBP_ON){
		cvReleaseImage(&hsv_img);
		cvReleaseImage(&h);
		cvReleaseImage(&s);
		cvReleaseImage(&v);
	}
	if (RGBA->nChannels > 1) {
		cvReleaseImage(&gray_img);
	}
	return 0;

}
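CalcLBP is project code, but the operator it generalizes is small; a sketch of the basic radius-1, 8-neighbor LBP code for one interior pixel (the helper is illustrative, and the caller must keep (y, x) at least one pixel away from the border):

#include <cv.h>

static unsigned char lbp8(IplImage* gray, int y, int x)
{
    /* the offsets enumerate the 8 neighbors clockwise from the top-left */
    static const int dy[8] = { -1, -1, -1,  0,  1,  1,  1,  0 };
    static const int dx[8] = { -1,  0,  1,  1,  1,  0, -1, -1 };
    unsigned char center = CV_IMAGE_ELEM(gray, uchar, y, x);
    unsigned char code = 0;
    int i;
    for (i = 0; i < 8; i++)
        if (CV_IMAGE_ELEM(gray, uchar, y + dy[i], x + dx[i]) >= center)
            code |= (unsigned char)(1 << i);  /* one bit per neighbor comparison */
    return code;
}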
Beispiel #29
0
size_t OpenCVImage::getHeight() const
{
    return m_img ? cvGetSize(m_img).height : 0;
}
Beispiel #30
0
/*
// Getting feature pyramid
//
// API
// int getFeaturePyramid(IplImage * image,
//                       const int lambda, const int k,
//                       const int startX, const int startY,
//                       const int W, const int H, CvLSVMFeaturePyramid **maps);
// INPUT
// image             - image
// lambda            - resize scale
// k                 - size of cells
// startX            - X coordinate of the image rectangle to search
// startY            - Y coordinate of the image rectangle to search
// W                 - width of the image rectangle to search
// H                 - height of the image rectangle to search
// OUTPUT
// maps              - feature maps for all levels
// RESULT
// Error status
*/
int getFeaturePyramid(IplImage * image,
                      const int lambda, const int k, 
                      const int startX, const int startY, 
                      const int W, const int H, CvLSVMFeaturePyramid **maps)
{
    IplImage *img2, *imgTmp, *imgResize;
    float   step, tmp;
    int      cntStep;
    int      maxcall;
    int i;
    int err;
    CvLSVMFeatureMap *map;
    
    //getting the subimage
    cvSetImageROI(image, cvRect(startX, startY, W, H));
    img2 = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
    cvCopy(image, img2, NULL);
    cvResetImageROI(image);

    if(img2->depth != IPL_DEPTH_32F)
    {
        imgResize = cvCreateImage(cvSize(img2->width , img2->height) , IPL_DEPTH_32F , 3);
        cvConvert(img2, imgResize);
    }
    else
    {
        imgResize = img2;
    }
    
    step = powf(2.0f, 1.0f/ ((float)lambda));
    maxcall = W/k;
    if( maxcall > H/k )
    {
        maxcall = H/k;
    }
    cntStep = (int)(logf((float)maxcall/(5.0f))/logf(step)) + 1;
    //printf("Count step: %f %d\n", step, cntStep);

    allocFeaturePyramidObject(maps, lambda, cntStep + lambda);

    for(i = 0; i < lambda; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(img2, tmp, 4);
        err = getFeatureMaps_dp(imgTmp, 4, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
        cvReleaseImage(&imgTmp);
    }

    /**********************************one**************/
    for(i = 0; i <  cntStep; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(imgResize, tmp, 8);
	    err = getFeatureMaps_dp(imgTmp, 8, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i + lambda] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
		cvReleaseImage(&imgTmp);
    }/*for(i = 0; i < cntStep; i++)*/

    if(img2->depth != IPL_DEPTH_32F)
    {
        cvReleaseImage(&imgResize);
    }

    cvReleaseImage(&img2);
    return LATENT_SVM_OK;
}
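The level arithmetic above in isolation: step = 2^(1/lambda) gives lambda levels per octave, and levels stop once the searched rectangle is about 5 cells across on its short side; a small sketch with illustrative values:

#include <math.h>
#include <stdio.h>

int main(void)
{
    int lambda = 5, k = 8, W = 640, H = 480;                /* illustrative values */
    float step = powf(2.0f, 1.0f / (float)lambda);
    int maxcall = (W / k < H / k) ? W / k : H / k;          /* cells on the short side */
    int cntStep = (int)(logf((float)maxcall / 5.0f) / logf(step)) + 1;
    printf("step=%.4f levels=%d (+%d fine-cell levels)\n", step, cntStep, lambda);
    return 0;
}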