Example #1
int main (int argc, const char * argv[]) {
	char *imagefilename=(char*)malloc(sizeof(char)*256);	/* large enough for typical path arguments */
	char *dscfilename=(char*)malloc(sizeof(char)*256);
	if (argc<3) {
		printf("Usage: ./dump-descr image-file-name descriptor-file-name");
		strcpy(imagefilename, "savekkkk.jpg");
		strcpy(dscfilename, "saveD.jpg.dsc");
	}
	else {
		strcpy(imagefilename,argv[1]);
		strcpy(dscfilename,argv[2]);
	}
	
	FILE* dscfile;
	int w=1280,h=720;
	int i=0;
	int nkeypoints=0;
	vl_bool render=1;
	vl_bool first=1;
	VlSiftFilt * myFilter=0;
	VlSiftKeypoint const* keys;
	char img2_file[] = "/Users/quake0day/ana2/MVI_0124.MOV";
	
	//printf("sizeof(VlSiftKeypoint)=%d, filt=%d, pix=%d\n", sizeof(VlSiftKeypoint), sizeof(VlSiftFilt),sizeof(vl_sift_pix));
	
	dscfile=fopen(dscfilename, "wb");
	if (!dscfile) {
		printf("Could not open %s for writing\n", dscfilename);
		return 1;
	}
	vl_sift_pix* fim;
	int err=0;
	int octave, nlevels, o_min;
	
	//vl_sift_pix descr[128];
	
	
	//CvCapture * camera = cvCreateCameraCapture (CV_CAP_ANY);
	CvCapture * camera = cvCreateFileCapture(img2_file);

	cvNamedWindow("Hello", 1);
	
	IplImage *myCVImage=cvQueryFrame(camera);//cvLoadImage(imagefilename, 0);
	
	IplImage *afterCVImage=cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
	IplImage *resizingImg=cvCreateImage(cvSize(w, h), myCVImage->depth, myCVImage->nChannels);
	octave=3;
	nlevels=10;
	o_min=1;
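	/* vl_sift_new(width, height, noctaves, nlevels, o_min): here 3 octaves,
	   10 levels per octave, starting at octave 1 (i.e. half resolution). */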
	myFilter=vl_sift_new(w, h, octave, nlevels, o_min);
	vl_sift_set_peak_thresh(myFilter, 0.5);
	fim=malloc(sizeof(vl_sift_pix)*w*h);
	int press=0;
	
	while (myCVImage) {
		
		dprintf("%d*%d\n",myCVImage->width,myCVImage->height);
		//w=myCVImage->width;
		//h=myCVImage->height;
		
		cvResize(myCVImage, resizingImg, CV_INTER_AREA);
		dprintf("resized scale:%d*%d\n",myCVImage->width,myCVImage->height);
		cvConvertImage(resizingImg, afterCVImage, 0);
		
		
		for (i=0; i<h; i++) {
			for (int j=0; j<w; j++) {
				fim[i*w+j]=CV_IMAGE_ELEM(afterCVImage,uchar,i,j);
				//printf("%f ", fim[i*w+j]);
			}
		}
		
		
		//vl_sift_set_peak_thresh(myFilter, 0.5);
		//vl_sift_set_edge_thresh(myFilter, 10.0);
		first=1;
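		/* VLFeat octave protocol: call vl_sift_process_first_octave() once, then
		   vl_sift_process_next_octave() until it returns an error (end of scale space). */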
		while (1) {
			if (first) {
				first=0;
				err=vl_sift_process_first_octave(myFilter, fim);
			}
			else {
				err=vl_sift_process_next_octave(myFilter);
			}
			if (err) {
				err=VL_ERR_OK;
				break;
			}
			
			vl_sift_detect(myFilter);
			nkeypoints=vl_sift_get_nkeypoints(myFilter);
			dprintf("insider numkey:%d\n",nkeypoints);
			keys=vl_sift_get_keypoints(myFilter);
			dprintf("final numkey:%d\n",nkeypoints);
			
			
			if (render) {
				for (i=0; i<nkeypoints; i++) {
					cvCircle(resizingImg, cvPoint(keys->x, keys->y), keys->sigma, cvScalar(100, 255, 50, 0), 1, CV_AA, 0);
					//printf("x:%f,y:%f,s:%f,sigma:%f,\n",keys->x,keys->y,keys->s,keys->sigma);
					if (press=='d') {
						
						double angles [4] ;
						int nangles ;
						
						/* obtain keypoint orientations ........................... */
						nangles=vl_sift_calc_keypoint_orientations(myFilter, angles, keys);
						
						/* for each orientation ................................... */
						for (int q = 0 ; q < (unsigned) nangles ; ++q) {
							vl_sift_pix descr [128] ;
							
							
							//printf("\n");
							/* compute descriptor (if necessary) */
							vl_sift_calc_keypoint_descriptor(myFilter, descr, keys, angles[q]);
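							/* VLFeat descriptors are normalized floats; scaling by 512 and
							   clamping at 255 matches the usual byte-quantized SIFT convention. */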
							for (int j=0; j<128; j++) {
								descr[j]*=512.0;
								descr[j]=(descr[j]<255.0)?descr[j]:255.0;
								printf("%f ", descr[j]);
							}
							fwrite(descr, sizeof(vl_sift_pix), 128, dscfile);
						}
					}
					keys++;
				}
			}
			
		}
		
		cvShowImage("Hello", resizingImg);
		
		myCVImage = cvQueryFrame(camera);
		
		press=cvWaitKey(1);
		if( press=='q' )
			break; /* leave the loop so the cleanup below runs */
		else if( press=='r' )
			render=1-render;
	}
	free(fim);
	vl_sift_delete(myFilter);
	fclose(dscfile);
	free(imagefilename);
	free(dscfilename);
	cvReleaseImage(&afterCVImage);
	cvReleaseImage(&resizingImg);
	/* do not release myCVImage: frames from cvQueryFrame are owned by the capture */
	cvReleaseCapture(&camera);
	
	return 0;
}
Example #2
bool AirCursor::init(bool makeDebugImage)
{
    if (m_init) return true;

    m_debugImageEnabled = makeDebugImage;

    XnStatus rc = XN_STATUS_OK;

    // init OpenNI context (check the status before touching the context)
    rc = m_context.Init();
    if (rc != XN_STATUS_OK)
    {
        std::cout << "ERROR: init failed: " << xnGetStatusString(rc) << std::endl;
        return false;
    }
    m_context.SetGlobalMirror(true);

    // create a DepthGenerator node
    rc = m_depthGenerator.Create(m_context);
    if (rc != XN_STATUS_OK)
    {
        std::cout << "node creation failed: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    // create the gesture and hands generators
    rc = m_gestureGenerator.Create(m_context);
    if (rc != XN_STATUS_OK)
    {
        std::cout << "gesture generator creation failed: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    rc = m_handsGenerator.Create(m_context);
    if (rc != XN_STATUS_OK)
    {
        std::cout << "hands generator creation failed: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    // register to callbacks
    XnCallbackHandle h1, h2;
    m_gestureGenerator.RegisterGestureCallbacks(gestureRecognizedCB, gestureProcessCB, this, h1);
    m_handsGenerator.RegisterHandCallbacks(handCreateCB, handUpdateCB, handDestroyCB, this, h2);
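    // (h1/h2 could be kept as members if these callbacks ever need to be unregistered)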

    // init session manager
    rc = m_sessionManager.Initialize(&m_context, "Wave,Click", NULL);
    if (rc != XN_STATUS_OK)
    {
        std::cout << "session manager init failed: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    // register to session callbacks
    m_sessionManager.RegisterSession(this, &sessionStartCB, &sessionEndCB);

    // start generating data
    rc = m_context.StartGeneratingAll();
    if (rc != XN_STATUS_OK)
    {
        std::cout << "data generating start failed: " << xnGetStatusString(rc) << std::endl;
        return false;
    }

    m_pushDetector.RegisterPush(this, pushCB);
    m_sessionManager.AddListener(&m_pushDetector);

    m_swipeDetector.RegisterSwipeUp(this, &swipeUpCB);
    m_swipeDetector.RegisterSwipeDown(this, &swipeDownCB);
    m_swipeDetector.RegisterSwipeLeft(this, &swipeLeftCB);
    m_swipeDetector.RegisterSwipeRight(this, &swipeRightCB);
    m_sessionManager.AddListener(&m_swipeDetector);

    // 8bit depth map
    m_iplDepthMap = cvCreateImage(cvSize(DEPTH_MAP_SIZE_X, DEPTH_MAP_SIZE_Y), IPL_DEPTH_8U, 1);

    // opencv mem storage
    m_cvMemStorage = cvCreateMemStorage(0);

    if (m_debugImageEnabled)
    {
        // 24bit rgb888 debug image
        m_iplDebugImage = cvCreateImage(cvSize(DEPTH_MAP_SIZE_X, DEPTH_MAP_SIZE_Y), IPL_DEPTH_8U, 3);

        // Same debug image as a QImage
        m_debugImage = new QImage(DEPTH_MAP_SIZE_X, DEPTH_MAP_SIZE_Y, QImage::Format_RGB888);
    }

    m_init = true;
    return true;
}
Example #3
 int main() {
  CvPoint pt1,pt2;
  CvRect regt;
   CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,144);
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,216);	 
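  // Note: capture drivers are free to ignore these size requests; frames may still
  // arrive at the camera's native resolution.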
  // Create a window in which the captured images will be presented
   cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   // Show the image captured from the camera in the window and repeat
   while ( 1 ) {
     // Get one frame
     IplImage* frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }
     int modfheight, modfwidth;

     modfheight = frame->height;
     modfwidth = frame->width;
     // create modified frame with 1/4th the original size
     IplImage* modframe = cvCreateImage(cvSize((int)(modfwidth/4),(int)(modfheight/4)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
     IplImage* hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
     // create a binary mask of the pixels that fall inside the HSV threshold range
     IplImage* threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
     cvInRangeS(hsvframe,cvScalar(30, 25, 150),cvScalar(60, 60, 220),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     // created dilated image
     IplImage* dilframe = cvCreateImage(cvGetSize(threshframe),8,1);
     cvDilate(threshframe,dilframe,NULL,2); //cvDilate(input frame, output frame, mask, number of times to dilate)

     CBlobResult blobs; 
     blobs = CBlobResult(dilframe,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob;
     blobs.GetNthBlob(CBlobGetArea(),0,biggestblob); //GetNthBlob(criteria, number, output) Get only  the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1.x = biggestblob.MinX()*4;
     pt1.y = biggestblob.MinY()*4;
     pt2.x = biggestblob.MaxX()*4;
     pt2.y = biggestblob.MaxY()*4;
     cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob

     cvShowImage( "mywindow", frame); // show output image
     // Do not release the frame!
     //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27 ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   cvDestroyWindow( "mywindow" );
   return 0;
 }
Example #4
/**
 * main
 */
int main(int argc, const char **argv)
{
	// Our main data storage vessel..
	RASPIVID_STATE state;
	
	MMAL_STATUS_T status = MMAL_EINVAL; // assume failure until a stage succeeds
	MMAL_PORT_T *camera_video_port = NULL;
	MMAL_PORT_T *camera_still_port = NULL;
	MMAL_PORT_T *preview_input_port = NULL;
	MMAL_PORT_T *encoder_input_port = NULL;
	MMAL_PORT_T *encoder_output_port = NULL;
	
	time_t timer_begin,timer_end;
	double secondsElapsed;
	
	bcm_host_init();
	signal(SIGINT, signal_handler);

	// read default status
	default_status(&state);

	// init windows and OpenCV Stuff
	cvNamedWindow("camcvWin", CV_WINDOW_AUTOSIZE); 
	int w=state.width;
	int h=state.height;
	dstImage = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);
	py = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);		// Y component of YUV I420 frame
	pu = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);	// U component of YUV I420 frame
	pv = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);	// V component of YUV I420 frame
	pu_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
	pv_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
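	// pu_big/pv_big presumably hold U and V upsampled to full frame size before merging into 'image'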
	image = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);	// final picture to display

   
	// create camera
	if (!create_camera_component(&state))
	{
	   vcos_log_error("%s: Failed to create camera component", __func__);
	}
	else if ( (status = raspipreview_create(&state.preview_parameters)) != MMAL_SUCCESS)
	{
	   vcos_log_error("%s: Failed to create preview component", __func__);
	   destroy_camera_component(&state);
	}
	else
	{
		PORT_USERDATA callback_data;
		
		camera_video_port   = state.camera_component->output[MMAL_CAMERA_VIDEO_PORT];
		camera_still_port   = state.camera_component->output[MMAL_CAMERA_CAPTURE_PORT];
	   
		VCOS_STATUS_T vcos_status;
		
		callback_data.pstate = &state;
		
		vcos_status = vcos_semaphore_create(&callback_data.complete_semaphore, "RaspiStill-sem", 0);
		vcos_assert(vcos_status == VCOS_SUCCESS);
		
		// assign data to use for callback
		camera_video_port->userdata = (struct MMAL_PORT_USERDATA_T *)&callback_data;
        
        // init timer
  		time(&timer_begin); 

       
       // start capture
		if (mmal_port_parameter_set_boolean(camera_video_port, MMAL_PARAMETER_CAPTURE, 1) != MMAL_SUCCESS)
		{
		   goto error;
		}
		
		// Send all the buffers to the video port
		
		int num = mmal_queue_length(state.video_pool->queue);
		int q;
		for (q=0;q<num;q++)
		{
		   MMAL_BUFFER_HEADER_T *buffer = mmal_queue_get(state.video_pool->queue);
		
		   if (!buffer)
		   		vcos_log_error("Unable to get a required buffer %d from pool queue", q);
		
			if (mmal_port_send_buffer(camera_video_port, buffer)!= MMAL_SUCCESS)
		    	vcos_log_error("Unable to send a buffer to encoder output port (%d)", q);
		}
		
		
		// Now wait until we need to stop
		vcos_sleep(state.timeout);
  
error:

		mmal_status_to_int(status);
		
		
		// Disable all our ports that are not handled by connections
		check_disable_port(camera_still_port);
		
		if (state.camera_component)
		   mmal_component_disable(state.camera_component);
		
		//destroy_encoder_component(&state);
		raspipreview_destroy(&state.preview_parameters);
		destroy_camera_component(&state);
		
	}

	if (status != 0)
		raspicamcontrol_check_configuration(128);
		
		time(&timer_end);  /* get current time; same as: timer = time(NULL)  */
		cvReleaseImage(&dstImage);
		cvReleaseImage(&pu);
		cvReleaseImage(&pv);
		cvReleaseImage(&py);
		cvReleaseImage(&pu_big);
		cvReleaseImage(&pv_big);
		
		secondsElapsed = difftime(timer_end,timer_begin);
		
		printf ("%.f seconds for %d frames : FPS = %f\n", secondsElapsed,nCount,(float)((float)(nCount)/secondsElapsed));
		
   return 0;
}
Example #5
EyeExtractor::EyeExtractor(const PointTracker &tracker):
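    // 'eyesize' is assumed to be a file-scope CvSize constant shared by all buffers below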
    tracker(tracker), 
    eyefloat2(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
    eyegrey(cvCreateImage( eyesize, 8, 1 )),
    eyefloat(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
    eyeimage(cvCreateImage( eyesize, 8, 3 )),
    histogram_horizontal(cvCreateImage( eyesize, 8, 3 )),
    histogram_vertical(cvCreateImage( cvSize(eyesize.height, eyesize.width), 8, 3 )),
    vector_horizontal(new vector<int> (eyesize.width,0)),
    vector_vertical(new vector<int> (eyesize.height,0)),
    eyeGraySegmented(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
    // ONUR DUPLICATED CODE FOR LEFT EYE
    eyefloat2_left(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
    eyegrey_left(cvCreateImage( eyesize, 8, 1 )),
    eyefloat_left(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
    eyeimage_left(cvCreateImage( eyesize, 8, 3 )),
    histogram_horizontal_left(cvCreateImage( eyesize, 8, 3 )),
    histogram_vertical_left(cvCreateImage( cvSize(eyesize.height, eyesize.width), 8, 3 )),
    vector_horizontal_left(new vector<int> (eyesize.width,0)),
    vector_vertical_left(new vector<int> (eyesize.height,0)),
    eyeGraySegmented_left(cvCreateImage( eyesize, IPL_DEPTH_32F, 1 )),
    blink(false),

    histPositionSegmentedPixels (new vector<vector<int> >),
    histPositionSegmentedPixels_left (new vector<vector<int> >),

    extractFeatures(eyesize)
{
}
Example #6
void aplicar_umbralizar (int tiempo, int cant_iteraciones, const char *nomb_impl, const char *nomb_arch_entrada, unsigned char min, unsigned char max, unsigned char q) {
	IplImage *src = 0;
	IplImage *dst = 0;
	CvSize dst_size;

	// Load the image
	if( (src = cvLoadImage (nomb_arch_entrada, CV_LOAD_IMAGE_GRAYSCALE)) == 0 )
		exit(EXIT_FAILURE);

	dst_size.width = src->width;
	dst_size.height = src->height;

	// Create an IplImage for each expected output
	if( (dst = cvCreateImage (dst_size, IPL_DEPTH_8U, 1) ) == 0 )
		exit(EXIT_FAILURE);

	// Parameter check: min, max and q are unsigned char, so they are already in
	// [0, 255]; only the ordering can actually fail
	if (min > max) {
		imprimir_ayuda();

		cvReleaseImage(&src);
		cvReleaseImage(&dst);

		exit ( EXIT_SUCCESS );
	}

	typedef void (umbralizar_fn_t) (unsigned char*, unsigned char*, int, int, int, unsigned char, unsigned char, unsigned char);
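	// signature: (src pixels, dst pixels, height, width, row stride in bytes, min, max, q)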

	umbralizar_fn_t *proceso;

	if (strcmp(nomb_impl, "c") == 0) {
		proceso = umbralizar_c;
	} else {
		proceso = umbralizar_asm;
	}

	if (tiempo) {
		unsigned long long int start, end;

		MEDIR_TIEMPO_START(start);

		for(int i=0; i<cant_iteraciones; i++) {
			proceso((unsigned char*)src->imageData, (unsigned char*)dst->imageData, src->height, src->width, src->widthStep, min, max, q);
		}

		MEDIR_TIEMPO_STOP(end);

		imprimir_tiempos_ejecucion(start, end, cant_iteraciones);
	} else {
		proceso((unsigned char*)src->imageData, (unsigned char*)dst->imageData, src->height, src->width, src->widthStep, min, max, q);
	}

	// Save the image and release both images
	char nomb_arch_salida[256];

	memset(nomb_arch_salida, 0, 256);

	sprintf(nomb_arch_salida, "%s.umbralizar.min-%d.max-%d.q-%d.%s.bmp", nomb_arch_entrada, min, max, q, nomb_impl);

	cvSaveImage(nomb_arch_salida, dst, NULL);

	cvReleaseImage(&src);
	cvReleaseImage(&dst);
}
Example #7
void identifyClip(QString activeFolder, int clipNum){
    qDebug()<<"Clip"<<clipNum<<":"<<activeFolder;

    qDebug()<<"===========KNN Ant Identification Program===========\n( Starting with k ="<<K<<"bgSimilarity ="<<bgSimilarity<<"binSize ="<<binSize<<")";

    qDebug()<<"Counting ants...";
    QString header = "../data/KNNtraining/";
    QDir preDir(header);
    QStringList fileList = preDir.entryList(QStringList("????"));
    int numKnownAnts = fileList.length();
    int maxSamplesPerAnt = 0;
    QStringList::const_iterator iterator;
    for (iterator = fileList.constBegin(); iterator != fileList.constEnd(); ++iterator){
        QDir innerDir(header+(*iterator)+"/");
        QStringList innerFileList = innerDir.entryList(QStringList("*.png"));
        if(innerFileList.length()>maxSamplesPerAnt) maxSamplesPerAnt = innerFileList.length();
    }

    qDebug()<<"Initializing data structures...";
    RgbImage **RGBsamples = new RgbImage*[numKnownAnts];
    for(int i=0 ; i<numKnownAnts ; i++){ RGBsamples[i] = new RgbImage[maxSamplesPerAnt]; }
    const int numUnknownImages = maxFrames * numParkingSpots;
    bool *nullPtr = new bool[numUnknownImages];
    UVHistogram *H = new UVHistogram[numKnownAnts*maxSamplesPerAnt];
    UVHistogram *unknownH = new UVHistogram[numUnknownImages];
    YuvPixel ****YUVsamples = new YuvPixel***[numKnownAnts];
    for(int i=0 ; i<numKnownAnts ; i++){
    	YUVsamples[i] = new YuvPixel**[maxSamplesPerAnt];
        for(int j=0 ; j<maxSamplesPerAnt ; j++){
            YUVsamples[i][j] = new YuvPixel*[maxImageRows];
            for(int k=0; k<maxImageRows; k++){
                YUVsamples[i][j][k] = new YuvPixel[maxImageCols];
            }
        }
    }
    int *samplesPerAnt = new int[numKnownAnts];
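    // (RgbImage, BwImage, YuvPixel and UVHistogram are project-local helpers; K, bgSimilarity,
    // binSize, maxFrames, numParkingSpots, maxImageRows and maxImageCols are globals defined elsewhere)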

    qDebug()<<"Reading training images...";
    header = "../data/KNNtraining/";
    QDir trainingDir(header);
    fileList = trainingDir.entryList(QStringList("????"));
    qDebug()<<fileList;
    QStringList::const_iterator tIterator;
    int antIndex = 0;
    for (tIterator = fileList.constBegin(); tIterator != fileList.constEnd(); ++tIterator){
        QDir innerDir(header+(*tIterator)+"/");
        QStringList innerFileList = innerDir.entryList(QStringList("*.png"));
        QStringList::const_iterator innerIterator;
        int imageIndex = 0;
        for (innerIterator = innerFileList.constBegin(); innerIterator != innerFileList.constEnd(); ++innerIterator){
            IplImage* img = 0;
            img = cvLoadImage((header+(*tIterator)+"/"+(*innerIterator)).toUtf8().constData());
            if(!img){
                qDebug()<<"Could not load image file"<<(header+(*tIterator)+"/"+(*innerIterator));
            }
            else{
                RgbImage rgbImg(img);
                RGBsamples[antIndex][imageIndex] = rgbImg;
                samplesPerAnt[antIndex] = imageIndex+1;
            }
            imageIndex++;
        }
        antIndex++;
    }

    qDebug()<<"Converting to YUV...";
    for(int i=1; i<=numKnownAnts; i++){
        for(int j=1; j<=samplesPerAnt[i-1]; j++){
            for(int r=0; r<RGBsamples[i-1][j-1].height(); r++){
                for(int c=0; c<RGBsamples[i-1][j-1].width(); c++){
                    double Y = 0.299*RGBsamples[i-1][j-1][r][c].r + 0.587*RGBsamples[i-1][j-1][r][c].g + 0.114*RGBsamples[i-1][j-1][r][c].b;
                    double U = (RGBsamples[i-1][j-1][r][c].b - Y)*0.565;
                    double V = (RGBsamples[i-1][j-1][r][c].r - Y)*0.713;
                    YUVsamples[i-1][j-1][r][c].y = Y;
                    YUVsamples[i-1][j-1][r][c].u = U;
                    YUVsamples[i-1][j-1][r][c].v = V;
                }
            }
        }
    }

    qDebug()<<"Building histograms...";
    for(int i=1; i<=numKnownAnts; i++){
        for(int j=1; j<=samplesPerAnt[i-1]; j++){
            H[(i-1)*maxSamplesPerAnt+j-1].agentId = i;
            for(int x=0; x<256; x++){
                H[(i-1)*maxSamplesPerAnt+j-1].UValues[x] = 0;
                H[(i-1)*maxSamplesPerAnt+j-1].VValues[x] = 0;
            }
            for(int r=0; r<RGBsamples[i-1][j-1].height(); r++){
                for(int c=0; c<RGBsamples[i-1][j-1].width(); c++){
                    if(!(similar(0, YUVsamples[i-1][j-1][r][c].u, bgSimilarity) && similar(0, YUVsamples[i-1][j-1][r][c].v, bgSimilarity))){
                        H[(i-1)*maxSamplesPerAnt+j-1].UValues[(YUVsamples[i-1][j-1][r][c].u + 128)/binSize]++;
                        H[(i-1)*maxSamplesPerAnt+j-1].VValues[(YUVsamples[i-1][j-1][r][c].v + 128)/binSize]++;
                    }
                }
            }
            H[(i-1)*maxSamplesPerAnt+j-1].normalize();
        }
        for(int j=samplesPerAnt[i-1]+1; j<=maxSamplesPerAnt; j++){
            for(int x=0; x<256; x++){
                H[(i-1)*maxSamplesPerAnt+j-1].UValues[x] = 0;
                H[(i-1)*maxSamplesPerAnt+j-1].VValues[x] = 0;
            }
        }
    }
    // free the nested sample arrays (deleting only the outer arrays would leak every row)
    for(int i=0 ; i<numKnownAnts ; i++){
        for(int j=0 ; j<maxSamplesPerAnt ; j++){
            for(int k=0; k<maxImageRows; k++){ delete [] YUVsamples[i][j][k]; }
            delete [] YUVsamples[i][j];
        }
        delete [] YUVsamples[i];
        delete [] RGBsamples[i];
    }
    delete [] RGBsamples;
    delete [] YUVsamples;

    qDebug()<<"Processing unidentified images...";
    header = "/media/8865399a-a349-43cd-8cc0-2b719505efaf/"+activeFolder;

    for(int i=0; i<maxFrames; i++){
        for(int j=0; j<numParkingSpots; j++){
            nullPtr[(i)*numParkingSpots + j ] = true;
            unknownH[(i)*numParkingSpots + j ].agentId = -1;
            for(int x=0; x<256; x++){
                unknownH[(i)*numParkingSpots + j ].UValues[x] = 0;
                unknownH[(i)*numParkingSpots + j ].VValues[x] = 0;
            }
        }
    }
    QDir unknownDir(header);
    fileList = unknownDir.entryList(QStringList("pSpot*"));
    QStringList::const_iterator uIterator;
    for (uIterator = fileList.constBegin(); uIterator != fileList.constEnd(); ++uIterator){
        qDebug()<<"  Beginning images in"<<(*uIterator);
        QDir innerDir(header+(*uIterator)+"/");
        QStringList innerFileList = innerDir.entryList(QStringList("*.png"));
        QStringList::const_iterator innerIterator;
        for (innerIterator = innerFileList.constBegin(); innerIterator != innerFileList.constEnd(); ++innerIterator){
            IplImage* img = 0;
            img = cvLoadImage((header+(*uIterator)+"/"+(*innerIterator)).toUtf8().constData());
            if(!img){
                 qDebug()<<"Could not load image file"<<(header+(*uIterator)+"/"+(*innerIterator));
            }
            else{
                RgbImage rgbImg(img);
                QString name = (*innerIterator);
                name.remove(QString("framenum_"));
                name.remove(QString(".png"));
                //QStringList parts = name.split("_");
                //int i = parts[3].toInt();//frame
                //int j = parts[0].toInt();//spot
                int i = name.toInt();
                QString spotName = (*uIterator);
                int j = spotName.remove("pSpot").toInt();
                nullPtr[(i)*numParkingSpots + j ] = false;
                for(int r=0; r<rgbImg.height(); r++){
                    for(int c=0; c<rgbImg.width(); c++){
                        double Y = 0.299*rgbImg[r][c].r + 0.587*rgbImg[r][c].g + 0.114*rgbImg[r][c].b;
                        double U = (rgbImg[r][c].b - Y)*0.565;
                        double V = (rgbImg[r][c].r - Y)*0.713;
                        if(!(similar(0, ((int)U), bgSimilarity) && similar(0, ((int)V), bgSimilarity))){
                            unknownH[(i)*numParkingSpots + j ].UValues[(((int)U) + 128)/binSize]++;
                            unknownH[(i)*numParkingSpots + j ].VValues[(((int)V) + 128)/binSize]++;
                        }
                    }
                }
                unknownH[(i)*numParkingSpots + j ].normalize();
            }
            cvReleaseImage(&img);
        }
    }

//	for(int i=1; i<=maxFrames; i++){
//		for(int j=1; j<=numParkingSpots; j++){
//			QString name, fileName;
//			IplImage* img=0;
//			//name = "clipnum_"+QString::number(3)+"_framenum_"+QString::number(i)+"_spotnum_"+QString::number(j)+".png";
//			name = parkingSpotNames[j-1]+QString::number(i)+".png";
//			fileName = header+name;
//			img=cvLoadImage(fileName.toUtf8().constData());
//			if(!img){
//				nullPtr[(i-1)*numParkingSpots + j - 1] = true;
//			}
//			else{
//				RgbImage rgbImg(img);
//				unknowns[(i-1)*numParkingSpots + j - 1] = rgbImg;
//				nullPtr[(i-1)*numParkingSpots + j - 1] = false;
//			}
//			unknownH[(i-1)*numParkingSpots + j - 1].agentId = -1;
//			for(int x=0; x<256; x++){
//				unknownH[(i-1)*numParkingSpots + j - 1].UValues[x] = 0;
//				unknownH[(i-1)*numParkingSpots + j - 1].VValues[x] = 0;
//			}
//			if(nullPtr[(i-1)*numParkingSpots + j - 1]){
//				continue;
//			}
//			for(int r=0; r<unknowns[(i-1)*numParkingSpots + j - 1].height(); r++){
//				for(int c=0; c<unknowns[(i-1)*numParkingSpots + j - 1].width(); c++){
//					double Y = 0.299*unknowns[(i-1)*numParkingSpots + j - 1][r][c].r + 0.587*unknowns[(i-1)*numParkingSpots + j - 1][r][c].g + 0.114*unknowns[(i-1)*numParkingSpots + j - 1][r][c].b;
//					double U = (unknowns[(i-1)*numParkingSpots + j - 1][r][c].b - Y)*0.565;
//					double V = (unknowns[(i-1)*numParkingSpots + j - 1][r][c].r - Y)*0.713;
//					if(!(similar(0, ((int)U), bgSimilarity) && similar(0, ((int)V), bgSimilarity))){
//						unknownH[(i-1)*numParkingSpots + j - 1].UValues[(((int)U) + 128)/binSize]++;
//						unknownH[(i-1)*numParkingSpots + j - 1].VValues[(((int)V) + 128)/binSize]++;
//					}
//				}
//			}
//			unknownH[(i-1)*numParkingSpots + j - 1].normalize();
//
//			cvReleaseImage(&img);
//
//		}
//		if(i%1000==0)
//			qDebug()<<"( Frame"<<i<<")";
//	}
//	delete [] unknowns;

    header = "../data/"+activeFolder;
    QDir dir(header); if (!dir.exists()) dir.mkpath(".");

    qDebug()<<"Computing confusion matrix...";
    int confHeight = 480, confWidth = 2*confHeight;//, buffer = 2, unknownWidth = (double)(confWidth/(numKnownAnts*maxSamplesPerAnt))*numUnknownImages;
    QString name = header+"confusionmat"+QString::number(clipNum)+".png";
    //cvNamedWindow("ConfusionMatrix", CV_WINDOW_AUTOSIZE);
    IplImage* confImg = cvCreateImage(cvSize(confWidth,confHeight), IPL_DEPTH_8U, 1);
    BwImage confMat(confImg);
    int totalUnknownSamples = 0;
    for(int i=1; i<=numUnknownImages; i++){
        if(nullPtr[i-1]){
            continue;
        }
        totalUnknownSamples++;
    }
    int totalKnownSamples = 0;
    for(int i=0; i<numKnownAnts;i++)
        totalKnownSamples += samplesPerAnt[i];
    int vertStep = max(confHeight/totalKnownSamples, 1);
    int horzStep = max((confWidth/2)/totalKnownSamples, 1);
    int stepRow = 0;
    for(int i=1; i<=numKnownAnts; i++){
        for(int j=1; j<=samplesPerAnt[i-1]; j++){
            int rowIndex = (i-1)*maxSamplesPerAnt+j-1;
            int stepCol = 0;
            for(int ii=1; ii<=numKnownAnts; ii++){
                for(int jj=1; jj<=samplesPerAnt[ii-1]; jj++){
                    int colIndex = (ii-1)*maxSamplesPerAnt+jj-1;
                    for(int k=0; k<=vertStep; k++){
                        for(int kk=0; kk<=horzStep; kk++){
                            confMat[min(confHeight,(int)(((double)stepRow/totalKnownSamples)*confHeight+k))]
                                   [min(confWidth/2, (int)(((double)stepCol/totalKnownSamples)*(confWidth/2)+kk))] = 255 * H[rowIndex].intersectionWith(H[colIndex]);
                        }
                    }
                    stepCol++;
                }
            }
            stepCol = 0;
            for(int ii=1; ii<=maxFrames; ii++){
                for(int jj=1; jj<=numParkingSpots; jj++){
                    int colIndex = (ii-1)*numParkingSpots + jj - 1;
                    if(!nullPtr[colIndex]){
                        for(int k=0; k<=vertStep; k++)
                            confMat[min(confHeight,(int)(((double)stepRow/totalKnownSamples)*confHeight+k))]
                                   [confWidth/2+(int)(((double)stepCol/totalUnknownSamples)*(confWidth/2))] = 255 * H[rowIndex].intersectionWith(unknownH[colIndex]);
                        stepCol++;
                    }
                }
            }
            stepRow++;
        }
    }
    //cvShowImage("ConfusionMatrix", confImg);
    cvSaveImage(name.toUtf8().constData(),confImg);

    qDebug()<<"Assigning IDs...";
    double **hypotheses = new double*[numUnknownImages]; //id and confidence
    for(int i=0; i<numUnknownImages; i++) hypotheses[i] = new double[2];
    double *averageDistance = new double[numUnknownImages];
    for(int i=0; i<numUnknownImages; i++){
        if(nullPtr[i]){
            continue;
        }
        //find k nearest neighbors
        double nearestK[K][2];//id and confidence
        for(int k=0; k<K; k++){
            nearestK[k][0] = -1;
            nearestK[k][1] = 0;
        }
        for(int j=0; j<numKnownAnts*maxSamplesPerAnt; j++){
            double similarity = unknownH[i].intersectionWith(H[j]);
            int furthestNeighbor = 0;
            for(int k=1; k<K; k++){
                if(nearestK[k][1]<nearestK[furthestNeighbor][1])
                    furthestNeighbor = k;
            }
            if(similarity > nearestK[furthestNeighbor][1]){
                nearestK[furthestNeighbor][1] = similarity;
                nearestK[furthestNeighbor][0] = H[j].agentId;
            }
        }
        //poll the neighbors
        int agentVotes[numKnownAnts];
        for(int j=0; j<numKnownAnts; j++){agentVotes[j] = 0;}
        for(int k=0; k<K; k++){agentVotes[(int)(nearestK[k][0]-1)]++;}
        int majorityVote = 0;
        //qDebug()<<agentVotes[0];
        for(int j=1; j<numKnownAnts; j++){
            if(agentVotes[j]>agentVotes[majorityVote])
                majorityVote = j;
            //qDebug()<<agentVotes[j];
        }
        //qDebug()<<"--";
        hypotheses[i][0] = majorityVote+1;//this 'sometimes zero-indexed, sometimes one-indexed' business is going to bite us later
        hypotheses[i][1] = ((double)agentVotes[majorityVote])/K;
        averageDistance[i] = 0;
        for(int k=0; k<K; k++){
            //if((int)(nearestK[k][0]) == majorityVote)
                averageDistance[i]+=nearestK[k][1];
        }
        averageDistance[i] /= K;//((double)agentVotes[majorityVote]);
    }
    ofstream myFile;
    name = header+"results"+QString::number(clipNum)+".csv";
    myFile.open(name.toUtf8().constData());
    myFile << "Frame Number, Spot Number, ID, Confidence, Similarity, \n";
    for(int i=0; i<numUnknownImages; i++){
        if(nullPtr[i]){
            continue;
        }
        //qDebug()<<"Image"<<i+1<<"is of agent"<<hypotheses[i][0]<<"("<<hypotheses[i][1]*100<<"% agree at"<<averageDistance[i]<<")";
        //if(averageDistance[i]>=0.9){
            myFile << ((i/numParkingSpots) + 1) << "," << ((i%numParkingSpots) + 1) << "," << hypotheses[i][0] << "," << hypotheses[i][1] << "," << averageDistance[i] << ", \n";
        //}
    }
    myFile.close();
    qDebug()<<"Output saved to"<<name;

    delete [] averageDistance;
    delete [] samplesPerAnt;
    for(int i=0; i<numUnknownImages; i++){ delete [] hypotheses[i]; }
    delete [] hypotheses;
    delete [] unknownH;
    delete [] nullPtr;
    delete [] H;
    cvReleaseImage(&confImg);

    qDebug()<<"=====================Clean Exit=====================";
}
Example #8
int main( int argc, char** argv )
{
    forceUSLocaleToKeepOurSanity();

    CvSize board_size = {0,0};
    float square_size = 1.f, aspect_ratio = 1.f;
    const char* out_filename = "out_camera_data.yml";
    const char* input_filename = 0;
    int i, image_count = 10;
    int write_extrinsics = 0, write_points = 0;
    int flags = 0;
    CvCapture* capture = 0;
    FILE* f = 0;
    char imagename[1024];
    CvMemStorage* storage;
    CvSeq* image_points_seq = 0;
    int elem_size, flip_vertical = 0;
    int delay = 1000;
    clock_t prev_timestamp = 0;
    CvPoint2D32f* image_points_buf = 0;
    CvFont font = cvFont( 1, 1 );
    double _camera[9], _dist_coeffs[4];
    CvMat camera = cvMat( 3, 3, CV_64F, _camera );
    CvMat dist_coeffs = cvMat( 1, 4, CV_64F, _dist_coeffs );
    CvMat *extr_params = 0, *reproj_errs = 0;
    double avg_reproj_err = 0;
    int mode = DETECTION;
    int undistort_image = 0;
    CvSize img_size = {0,0};
    const char* live_capture_help =
        "When the live video from camera is used as input, the following hot-keys may be used:\n"
            "  <ESC>, 'q' - quit the program\n"
            "  'g' - start capturing images\n"
            "  'u' - switch undistortion on/off\n";

    if( argc < 2 )
    {
        printf( "This is a camera calibration sample.\n"
            "Usage: calibration\n"
            "     -w <board_width>         # the number of inner corners per one of board dimension\n"
            "     -h <board_height>        # the number of inner corners per another board dimension\n"
            "     [-n <number_of_frames>]  # the number of frames to use for calibration\n"
            "                              # (if not specified, it will be set to the number\n"
            "                              #  of board views actually available)\n"
            "     [-d <delay>]             # a minimum delay in ms between subsequent attempts to capture a next view\n"
            "                              # (used only for video capturing)\n"
            "     [-s <square_size>]       # square size in some user-defined units (1 by default)\n"
            "     [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
            "     [-op]                    # write detected feature points\n"
            "     [-oe]                    # write extrinsic parameters\n"
            "     [-zt]                    # assume zero tangential distortion\n"
            "     [-a <aspect_ratio>]      # fix aspect ratio (fx/fy)\n"
            "     [-p]                     # fix the principal point at the center\n"
            "     [-v]                     # flip the captured images around the horizontal axis\n"
            "     [input_data]             # input data, one of the following:\n"
            "                              #  - text file with a list of the images of the board\n"
            "                              #  - name of video file with a video of the board\n"
            "                              # if input_data not specified, a live view from the camera is used\n"
            "\n" );
        printf( "%s", live_capture_help );
        return 0;
    }

    for( i = 1; i < argc; i++ )
    {
        const char* s = argv[i];
        if( strcmp( s, "-w" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.width ) != 1 || board_size.width <= 0 )
                return fprintf( stderr, "Invalid board width\n" ), -1;
        }
        else if( strcmp( s, "-h" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &board_size.height ) != 1 || board_size.height <= 0 )
                return fprintf( stderr, "Invalid board height\n" ), -1;
        }
        else if( strcmp( s, "-s" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &square_size ) != 1 || square_size <= 0 )
                return fprintf( stderr, "Invalid board square width\n" ), -1;
        }
        else if( strcmp( s, "-n" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &image_count ) != 1 || image_count <= 3 )
                return printf("Invalid number of images\n" ), -1;
        }
        else if( strcmp( s, "-a" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &aspect_ratio ) != 1 || aspect_ratio <= 0 )
                return printf("Invalid aspect ratio\n" ), -1;
        }
        else if( strcmp( s, "-d" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
                return printf("Invalid delay\n" ), -1;
        }
        else if( strcmp( s, "-op" ) == 0 )
        {
            write_points = 1;
        }
        else if( strcmp( s, "-oe" ) == 0 )
        {
            write_extrinsics = 1;
        }
        else if( strcmp( s, "-zt" ) == 0 )
        {
            flags |= CV_CALIB_ZERO_TANGENT_DIST;
        }
        else if( strcmp( s, "-p" ) == 0 )
        {
            flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
        }
        else if( strcmp( s, "-v" ) == 0 )
        {
            flip_vertical = 1;
        }
        else if( strcmp( s, "-o" ) == 0 )
        {
            out_filename = argv[++i];
        }
        else if( s[0] != '-' )
            input_filename = s;
        else
            return fprintf( stderr, "Unknown option %s", s ), -1;
    }

    if( input_filename )
    {
        fprintf( stderr, "Trying to open %s \n" , input_filename );

        capture = cvCreateFileCapture( input_filename );
        if( !capture )
        {
            fprintf(stderr,"Warning , cvCreateFileCapture failed to open %s \n",input_filename);
            f = fopen( input_filename, "rt" );
            if( !f )
                return fprintf( stderr, "The input file could not be opened\n" ), -1;
            image_count = -1;
        }
        mode = CAPTURING;
    }
    else
        capture = cvCreateCameraCapture(0);

    if( !capture && !f )
        return fprintf( stderr, "Could not initialize video capture\n" ), -2;

    if( capture )
        printf( "%s", live_capture_help );

    elem_size = board_size.width*board_size.height*sizeof(image_points_buf[0]);
    storage = cvCreateMemStorage( MAX( elem_size*4, 1 << 16 ));
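    // storage block size: four boards' worth of corner points, with a 64 KiB floor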
    image_points_buf = (CvPoint2D32f*)cvAlloc( elem_size );
    image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );

    cvNamedWindow( "Image View", 1 );

    for(;;)
    {
        IplImage *view = 0, *view_gray = 0;
        int count = 0, found, blink = 0;
        CvPoint text_origin;
        CvSize text_size = {0,0};
        int base_line = 0;
        char s[100];
        int key;

        if( f && fgets( imagename, sizeof(imagename)-2, f ))
        {
            int l = strlen(imagename);
            if( l > 0 && imagename[l-1] == '\n' )
                imagename[--l] = '\0';
            if( l > 0 )
            {
                if( imagename[0] == '#' )
                    continue;
                view = cvLoadImage( imagename, 1 );
            }
        }
        else if( capture )
        {
            IplImage* view0 = cvQueryFrame( capture );
            if( view0 )
            {
                view = cvCreateImage( cvGetSize(view0), IPL_DEPTH_8U, view0->nChannels );
                if( view0->origin == IPL_ORIGIN_BL )
                    cvFlip( view0, view, 0 );
                else
                    cvCopy( view0, view );
            }
        }

        if( !view )
        {
            if( image_points_seq->total > 0 )
            {
                image_count = image_points_seq->total;
                goto calibrate;
            }
            break;
        }

        if( flip_vertical )
            cvFlip( view, view, 0 );

        img_size = cvGetSize(view);
        found = cvFindChessboardCorners( view, board_size,
            image_points_buf, &count, CV_CALIB_CB_ADAPTIVE_THRESH );

#if 1
        // improve the found corners' coordinate accuracy
        view_gray = cvCreateImage( cvGetSize(view), 8, 1 );
        cvCvtColor( view, view_gray, CV_BGR2GRAY );
        cvFindCornerSubPix( view_gray, image_points_buf, count, cvSize(11,11),
            cvSize(-1,-1), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
        cvReleaseImage( &view_gray );
#endif

        if( mode == CAPTURING && found && (f || clock() - prev_timestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            cvSeqPush( image_points_seq, image_points_buf );
            prev_timestamp = clock();
            blink = !f;
#if 1
            if( capture )
            {
                sprintf( imagename, "view%05d.png", image_points_seq->total - 1 );
                cvSaveImage( imagename, view );
            }
#endif
        }

        cvDrawChessboardCorners( view, board_size, image_points_buf, count, found );

        cvGetTextSize( "100/100", &font, &text_size, &base_line );
        text_origin.x = view->width - text_size.width - 10;
        text_origin.y = view->height - base_line - 10;

        if( mode == CAPTURING )
        {
            if( image_count > 0 )
                sprintf( s, "%d/%d", image_points_seq ? image_points_seq->total : 0, image_count );
            else
                sprintf( s, "%d/?", image_points_seq ? image_points_seq->total : 0 );
        }
        else if( mode == CALIBRATED )
            sprintf( s, "Calibrated" );
        else
            sprintf( s, "Press 'g' to start" );

        cvPutText( view, s, text_origin, &font, mode != CALIBRATED ?
                                   CV_RGB(255,0,0) : CV_RGB(0,255,0));

        if( blink )
            cvNot( view, view );

        if( mode == CALIBRATED && undistort_image )
        {
            IplImage* t = cvCloneImage( view );
            cvUndistort2( t, view, &camera, &dist_coeffs );
            cvReleaseImage( &t );
        }

        cvShowImage( "Image View", view );
        key = cvWaitKey(capture ? 50 : 500);

        if( key == 27 )
            break;

        if( key == 'u' && mode == CALIBRATED )
            undistort_image = !undistort_image;

        if( capture && key == 'g' )
        {
            mode = CAPTURING;
            cvClearMemStorage( storage );
            image_points_seq = cvCreateSeq( 0, sizeof(CvSeq), elem_size, storage );
        }

        if( mode == CAPTURING && (unsigned)image_points_seq->total >= (unsigned)image_count )
        {
calibrate:
            cvReleaseMat( &extr_params );
            cvReleaseMat( &reproj_errs );
            int code = run_calibration( image_points_seq, img_size, board_size,
                square_size, aspect_ratio, flags, &camera, &dist_coeffs, &extr_params,
                &reproj_errs, &avg_reproj_err );
            // save camera parameters in any case, to catch Inf's/NaN's
            save_camera_params( out_filename, image_count, img_size,
                board_size, square_size, aspect_ratio, flags,
                &camera, &dist_coeffs, write_extrinsics ? extr_params : 0,
                write_points ? image_points_seq : 0, reproj_errs, avg_reproj_err );
            if( code )
                mode = CALIBRATED;
            else
                mode = DETECTION;
        }

        if( !view )
            break;
        cvReleaseImage( &view );
    }

    if( capture )
        cvReleaseCapture( &capture );
    if( storage )
        cvReleaseMemStorage( &storage );
    return 0;
}
Example #9
static void show()
{
    if(!exist_image || !exist_scan){
        return;
    }

    IplImage* image_view = cvCreateImage(cvGetSize(&image), image.depth, image.nChannels);
    cvCopy(&image, image_view);

    float min_d, max_d;
    min_d = max_d = scan_image.distance.at(0);
    for(int i = 1; i < IMAGE_WIDTH * IMAGE_HEIGHT; i++){
        float di = scan_image.distance.at(i);
        max_d = di > max_d ? di : max_d;
        min_d = di < min_d ? di : min_d;
    }
    float wid_d = max_d - min_d;

    /*
     * Plot depth points on an image
     */
    CvPoint pt;
    int height, width;
    for(int i = 0; i < (int)scan_image.distance.size(); i++) {
        height = (int)(i % IMAGE_HEIGHT);
        width = (int)(i / IMAGE_HEIGHT);
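        // indices run column-major: consecutive i values walk down one image column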
        if(scan_image.distance.at(i) != 0.0) {
            pt.x = width;
            pt.y = height;
            int colorid = wid_d ? ( (scan_image.distance.at(i) - min_d) * 255 / wid_d ) : 128;
            // NOTE: cv::Vec3b read from a cv::Mat is normally ordered B,G,R; the mapping
            // below assumes 'colormap' was built with R in channel 0.
            cv::Vec3b color = colormap.at<cv::Vec3b>(colorid);
            int r = color[0];
            int g = color[1];
            int b = color[2];
            cvCircle(image_view, pt, 2, CV_RGB (r, g, b), CV_FILLED, 8, 0);
        }
    }

    drawRects(image_view,
              car_fused_objects.obj,
              cvScalar(255.0, 255.0, 0, 0),
              (image_view->height) * .3);

    drawRects(image_view,
              pedestrian_fused_objects.obj,
              cvScalar(0.0, 255.0, 0, 0),
              (image_view->height) * .3);

    /* Put distance text on the image */
    putDistance(image_view,
                car_fused_objects.obj,
                (image_view->height) * .3,
                car_fused_objects.type.c_str());
    putDistance(image_view,
                pedestrian_fused_objects.obj,
                (image_view->height) * .3,
                pedestrian_fused_objects.type.c_str());

    /*
     * Show image
     */
    cvShowImage(window_name, image_view);
    cvWaitKey(2);
    cvReleaseImage(&image_view);
}
Example #10
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{

	double nan = std::numeric_limits<double>::quiet_NaN();
	double inf = std::numeric_limits<double>::infinity();

	if (nrhs == 0) 
  {
		mexPrintf("Lucas-Kanade\n");
		return;
	}

	IplImage **IMG = 0;
	IplImage **PYR = 0;

	IMG = (IplImage**) calloc(MAX_IMG,sizeof(IplImage*));
	PYR = (IplImage**) calloc(MAX_IMG,sizeof(IplImage*));

	if (IMG == 0 || (nrhs != 5 && nrhs != 6)) 
  {
		mexPrintf("lk(2,imgI,imgJ,ptsI,ptsJ,Level)\n");
		//            0 1    2    3    4   
		return;
	}

	int Level;
	if (nrhs == 6) 
		Level = (int) *mxGetPr(prhs[5]);
  else
		Level = 5;

	int I = 0;
	int J = 1;
	int Winsize = 10;

	// Images
	CvSize imageSize = cvSize(mxGetN(prhs[1]),mxGetM(prhs[1]));
	IMG[I] = cvCreateImage( imageSize, 8, 1 );
	PYR[I] = cvCreateImage( imageSize, 8, 1 );
	loadImageFromMatlab(prhs[1], IMG[I]);

	imageSize = cvSize(mxGetN(prhs[2]),mxGetM(prhs[2]));
	IMG[J] = cvCreateImage( imageSize, 8, 1 );
	PYR[J] = cvCreateImage( imageSize, 8, 1 );
	loadImageFromMatlab(prhs[2], IMG[J]);

	// Points
	double *ptsI = mxGetPr(prhs[3]); int nPts = mxGetN(prhs[3]);
	double *ptsJ = mxGetPr(prhs[4]); 

	if (nPts != mxGetN(prhs[4]))
  {
		mexPrintf("Inconsistent input!\n");
		return;
	}

	points[0] = (CvPoint2D32f*)cvAlloc(nPts*sizeof(CvPoint2D32f)); // template
	points[1] = (CvPoint2D32f*)cvAlloc(nPts*sizeof(CvPoint2D32f)); // target
	points[2] = (CvPoint2D32f*)cvAlloc(nPts*sizeof(CvPoint2D32f)); // forward-backward

	for (int i = 0; i < nPts; i++) 
  {
		points[0][i].x = ptsI[2*i]; points[0][i].y = ptsI[2*i+1];
		points[1][i].x = ptsJ[2*i]; points[1][i].y = ptsJ[2*i+1];
		points[2][i].x = ptsI[2*i]; points[2][i].y = ptsI[2*i+1];
	}

	float *ncc    = (float*) cvAlloc(nPts*sizeof(float));
	float *ssd    = (float*) cvAlloc(nPts*sizeof(float));
	float *fb     = (float*) cvAlloc(nPts*sizeof(float));
	char  *status = (char*)  cvAlloc(nPts);

	cvCalcOpticalFlowPyrLK( IMG[I], IMG[J], PYR[I], PYR[J], points[0], points[1], nPts, cvSize(win_size,win_size), Level, status, 0, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), CV_LKFLOW_INITIAL_GUESSES);
	cvCalcOpticalFlowPyrLK( IMG[J], IMG[I], PYR[J], PYR[I], points[1], points[2], nPts, cvSize(win_size,win_size), Level, status, 0, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), CV_LKFLOW_INITIAL_GUESSES | CV_LKFLOW_PYR_A_READY | CV_LKFLOW_PYR_B_READY );
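	// Forward-backward check: track I->J, then back J->I; 'fb' below measures how far
	// each back-tracked point drifts from its start (the Median-Flow/TLD FB error).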
			
	normCrossCorrelation(IMG[I],IMG[J],points[0],points[1],nPts, status, ncc, Winsize,CV_TM_CCOEFF_NORMED);
	//normCrossCorrelation(IMG[I],IMG[J],points[0],points[1],nPts, status, ssd, Winsize,CV_TM_SQDIFF);
	euclideanDistance( points[0],points[2],fb,nPts);

	// Output
	int M = 4;
	plhs[0] = mxCreateDoubleMatrix(M, nPts, mxREAL);
	double *output = mxGetPr(plhs[0]);
	for (int i = 0; i < nPts; i++) 
  {
		if (status[i] == 1) 
    {
			output[M*i]   = (double) points[1][i].x;
			output[M*i+1] = (double) points[1][i].y;
			output[M*i+2] = (double) fb[i];
			output[M*i+3] = (double) ncc[i];
		}
    else
    {
			output[M*i]   = nan;
			output[M*i+1] = nan;
			output[M*i+2] = nan;
			output[M*i+3] = nan;
		}
	}

  // clean up (buffers from cvAlloc must be released with cvFree, or the MEX file leaks on every call)
  for (int i = 0; i < MAX_IMG; i++) 
  {
		cvReleaseImage(&(IMG[i]));
		cvReleaseImage(&(PYR[i]));
	}
	free(IMG);
	free(PYR);
	cvFree(&points[0]);
	cvFree(&points[1]);
	cvFree(&points[2]);
	cvFree(&ncc);
	cvFree(&ssd);
	cvFree(&fb);
	cvFree(&status);
}
Example #11
/** Grab a Frame from either camera or video source
 *
 */
int GrabFrame(Experiment* exp) {

	if (!(exp->VidFromFile)) {
		/** Acquire from Physical Camera **/
		if (exp->UseFrameGrabber) {
			/** Use BitFlow SDK to acquire from Frame Grabber **/
			if (AcquireFrame(exp->fg)==T2FG_ERROR){
				return EXP_ERROR;
			}

			/** Check to see if file sizes match **/

			LoadFrameWithBin(exp->fg->HostBuf, exp->fromCCD);

		} else {

			/** Acquire from ImagingSource USB Cam **/

			exp->lastFrameSeenOutside = exp->MyCamera->iFrameNumber;
			/*** Create a local copy of the image***/
			LoadFrameWithBin(exp->MyCamera->iImageData, exp->fromCCD);

		}

	} else {

		/** Acquire  from file **/

		IplImage* tempImg;
		/** Grab the frame from the video **/
		tempImg = cvQueryFrame(exp->capture);

		/** Stall for a little bit **/
		//Sleep(50);


		if (tempImg == NULL) {
			printf("There was an error querying the frame from video!\n");
			return EXP_VIDEO_RAN_OUT;
		}

		/** Create a new temp image that is grayscale and of the same size **/
		IplImage* tempImgGray = cvCreateImage(cvGetSize(tempImg), IPL_DEPTH_8U,
				1);

		/** Convert color to grayscale (cvQueryFrame returns BGR-ordered frames) **/
		cvCvtColor(tempImg, tempImgGray, CV_BGR2GRAY);

		/** Load the frame into the fromCCD frame object **/
		/*** ANDY! THIS WILL FAIL BECAUSE THE SIZING ISN'T RIGHT **/
		LoadFrameWithImage(tempImgGray, exp->fromCCD);
		cvReleaseImage(&tempImgGray);
		/*
		 * Note: tempImg is deliberately not released. cvQueryFrame returns a pointer
		 * to an internal buffer owned by the capture, so releasing it here crashes;
		 * it is freed when the capture itself is released.
		 */
	}

	exp->Worm->frameNum++;
	return EXP_SUCCESS;
}
Example #12
int TextRecognizer::recognize(IplImage *input,
		const struct TextDetectionParams &params, std::string svmModel,
		std::vector<Chain> &chains,
		std::vector<std::pair<Point2d, Point2d> > &compBB,
		std::vector<std::pair<CvPoint, CvPoint> > &chainBB,
		std::vector<std::string>& text) {

	// Convert to grayscale
	IplImage * grayImage = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
	cvCvtColor(input, grayImage, CV_RGB2GRAY);

	for (unsigned int i = 0; i < chainBB.size(); i++) {
		cv::Point center = cv::Point(
				(chainBB[i].first.x + chainBB[i].second.x) / 2,
				(chainBB[i].first.y + chainBB[i].second.y) / 2);

		/* work out if total width of chain is large enough */
		if (chainBB[i].second.x - chainBB[i].first.x
				< input->width / params.maxImgWidthToTextRatio) {
			LOGL(LOG_TXT_ORIENT,
					"Reject chain #" << i << " width=" << (chainBB[i].second.x - chainBB[i].first.x) << "<" << (input->width / params.maxImgWidthToTextRatio));
			continue;
		}

		/* eliminate chains with components of lower height than required minimum */
		int minHeight = chainBB[i].second.y - chainBB[i].first.y;
		for (unsigned j = 0; j < chains[i].components.size(); j++) {
			minHeight = std::min(minHeight,
					compBB[chains[i].components[j]].second.y
							- compBB[chains[i].components[j]].first.y);
		}
		if (minHeight < params.minCharacterheight) {
			LOGL(LOG_CHAINS,
					"Reject chain # " << i << " minHeight=" << minHeight << "<" << params.minCharacterheight);
			continue;
		}

		/* invert direction if angle is in 3rd/4th quadrants */
		if (chains[i].direction.x < 0) {
			chains[i].direction.x = -chains[i].direction.x;
			chains[i].direction.y = -chains[i].direction.y;
		}
		/* work out chain angle */
		double theta_deg = 180
				* atan2(chains[i].direction.y, chains[i].direction.x) / PI;

		if (absd(theta_deg) > params.maxAngle) {
			LOGL(LOG_TXT_ORIENT,
					"Chain angle " << theta_deg << " exceeds max " << params.maxAngle);
			continue;
		}
		if ((chainBB.size() == 2) && (absd(theta_deg) > 5))
			continue;
		LOGL(LOG_TXT_ORIENT,
				"Chain #" << i << " Angle: " << theta_deg << " degrees");

		/* create copy of input image including only the selected components */
		cv::Mat inputMat = cv::Mat(input);
		cv::Mat grayMat = cv::Mat(grayImage);
		cv::Mat componentsImg = cv::Mat::zeros(grayMat.rows, grayMat.cols,
				grayMat.type());

		std::vector<cv::Point> compCoords;

		for (unsigned int j = 0; j < chains[i].components.size(); j++) {
			int component_id = chains[i].components[j];
			cv::Rect roi = cv::Rect(compBB[component_id].first.x,
					compBB[component_id].first.y,
					compBB[component_id].second.x
							- compBB[component_id].first.x,
					compBB[component_id].second.y
							- compBB[component_id].first.y);
			cv::Mat componentRoi = grayMat(roi);

			compCoords.push_back(
					cv::Point(compBB[component_id].first.x,
							compBB[component_id].first.y));
			compCoords.push_back(
					cv::Point(compBB[component_id].second.x,
							compBB[component_id].second.y));
			compCoords.push_back(
					cv::Point(compBB[component_id].first.x,
							compBB[component_id].second.y));
			compCoords.push_back(
					cv::Point(compBB[component_id].second.x,
							compBB[component_id].first.y));

			cv::Mat thresholded;
			cv::threshold(componentRoi, thresholded, 0 // the value doesn't matter for Otsu thresholding
					, 255 // we could choose any non-zero value. 255 (white) makes it easy to see the binary image
					, cv::THRESH_OTSU | cv::THRESH_BINARY_INV);

#if 0
			cv::Moments mu = cv::moments(thresholded, true);
			std::cout << "mu02=" << mu.mu02 << " mu11=" << mu.mu11 << " skew="
			<< mu.mu11 / mu.mu02 << std::endl;
#endif
			cv::imwrite("thresholded.png", thresholded);

			cv::threshold(componentRoi, componentsImg(roi), 0 // the value doesn't matter for Otsu thresholding
					, 255 // we could choose any non-zero value. 255 (white) makes it easy to see the binary image
					, cv::THRESH_OTSU | cv::THRESH_BINARY_INV);
		}
		cv::imwrite("bib-components.png", componentsImg);

		cv::Mat rotMatrix = cv::getRotationMatrix2D(center, theta_deg, 1.0);

		cv::Mat rotatedMat = cv::Mat::zeros(grayMat.rows, grayMat.cols,
				grayMat.type());
		cv::warpAffine(componentsImg, rotatedMat, rotMatrix, rotatedMat.size());
		cv::imwrite("bib-rotated.png", rotatedMat);

		/* rotate each component coordinates */
		const int border = 3;
		cv::transform(compCoords, compCoords, rotMatrix);
		/* find bounding box of rotated components */
		cv::Rect roi = getBoundingBox(compCoords,
				cv::Size(input->width, input->height));
		/* ROI area can be null if outside of clipping area */
		if ((roi.width == 0) || (roi.height == 0))
			continue;
		LOGL(LOG_TEXTREC, "ROI = " << roi);
		cv::Mat mat = cv::Mat::zeros(roi.height + 2 * border,
				roi.width + 2 * border, grayMat.type());
		cv::Mat tmp = rotatedMat(roi);
#if 0
        cv::Mat roiMat = inputMat(roi);
        char *filename_roi;
        asprintf(&filename_roi, "bib-%05d-%d.png", this->bsid+1, i);
        cv::imwrite(filename_roi, roiMat);
        free(filename_roi);
#endif
		/* copy bounded box from rotated mat to new mat with borders - borders are needed
		 * to improve OCR success rate
		 */
		tmp.copyTo(
				mat(
						cv::Rect(cv::Point(border, border),
								cv::Point(roi.width + border,
										roi.height + border))));

		/* resize image to improve OCR success rate */
		float upscale = 3.0;
		cv::resize(mat, mat, cvSize(0, 0), upscale, upscale);
		/* erode text to get rid of thin joints */
		int s = (int) (0.05 * mat.rows); /* 5% of up-scaled size) */
		cv::Mat elem = cv::getStructuringElement(cv::MORPH_ELLIPSE,
				cv::Size(2 * s + 1, 2 * s + 1), cv::Point(s, s));
		cv::erode(mat, mat, elem);
		cv::imwrite("bib-tess-input.png", mat);

		// Pass it to Tesseract API
		tess.SetImage((uchar*) mat.data, mat.cols, mat.rows, 1, mat.step1());
		// Get the text
		char* out = tess.GetUTF8Text();
		do {
			if (strlen(out) == 0) {
				break;
			}
			std::string s_out(out);
			boost::algorithm::trim(s_out);

			if (s_out.size() != chains[i].components.size()) {
				LOGL(LOG_TEXTREC,
						"Text size mismatch: expected " << chains[i].components.size() << " digits, got '" << s_out << "' (" << s_out.size() << " digits)");
				break;
			}
			/* if first character is a '0' we have a partially occluded number */
			if (s_out[0] == '0') {
				LOGL(LOG_TEXTREC, "Text begins with '0' (partially occluded)");
				break;
			}
			if (!is_number(s_out)) {
				LOGL(LOG_TEXTREC, "Text is not a number ('" << s_out << "')");
				//break;
			}

			/* adjust width to size of 6 digits */
			int charWidth = (chainBB[i].second.x - chainBB[i].first.x)
					/ s_out.size();
			int width = 6 * charWidth;
			/* adjust to 2 width/height aspect ratio */
			int height = width / 2;
			int midx = center.x;
			int midy = center.y;

			cv::Rect roi = cv::Rect(midx - width / 2, midy - height / 2, width,
					height);
			if ((roi.x >= 0) && (roi.y >= 0)
					&& (roi.x + roi.width < inputMat.cols)
					&& (roi.y + roi.height < inputMat.rows)) {
				cv::Mat bibMat = inputMat(roi);

				if (s_out.size() <= (unsigned) params.modelVerifLenCrit) {

					if (svmModel.empty()) {
						LOGL(LOG_TEXTREC, "Reject " << s_out << " on no model");
						break;
					}

					if (minHeight < params.modelVerifMinHeight) {
						LOGL(LOG_TEXTREC,
								"Reject " << s_out << " on small height");
						break;
					}

					/* if we have an SVM Model, predict */

					CvSVM svm;
					cv::HOGDescriptor hog(cv::Size(128, 64), /* windows size */
					cv::Size(16, 16), /* block size */
					cv::Size(8, 8), /* block stride */
					cv::Size(8, 8), /* cell size */
					9 /* nbins */
					);
					std::vector<float> descriptor;

					/* resize to HOGDescriptor dimensions */
					cv::Mat resizedMat;
					cv::resize(bibMat, resizedMat, hog.winSize, 0, 0);
					hog.compute(resizedMat, descriptor);

					/* load SVM model */
					svm.load(svmModel.c_str());
					float prediction = svm.predict(cv::Mat(descriptor).t());
					LOGL(LOG_SVM, "Prediction=" << prediction);
					if (prediction < 0.5) {
						LOGL(LOG_TEXTREC,
								"Reject " << s_out << " on low SVM prediction");
						break;
					}
				}

				/* symmetry check */
				/* symmetry check (previously gated on a specific chain index) */
				if (1) {
					cv::Mat inputRotated = cv::Mat::zeros(inputMat.rows,
							inputMat.cols, inputMat.type());
					cv::warpAffine(inputMat, inputRotated, rotMatrix,
							inputRotated.size());

					int minOffset = 0;
					double min = 1e6;
					//width = 12 * charWidth;
					for (int offset = -50; offset < 30; offset += 2) {

						/* resize to HOGDescriptor dimensions */
						cv::Mat straightMat;
						cv::Mat flippedMat;

						/* extract shifted ROI */
						cv::Rect roi = cv::Rect(midx - width / 2 + offset,
								midy - height / 2, width, height);

						if ((roi.x >= 0) && (roi.y >= 0)
								&& (roi.x + roi.width < inputMat.cols)
								&& (roi.y + roi.height < inputMat.rows)) {
							straightMat = inputRotated(roi);
							cv::flip(straightMat, flippedMat, 1);
							cv::Scalar mssimV = getMSSIM(straightMat,
									flippedMat);
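							/* average the per-channel MSSIM and map similarity to a
							   distance: a left-right symmetric crop yields a high
							   MSSIM and therefore a small dist */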
							double avgMssim = (mssimV.val[0] + mssimV.val[1]
									+ mssimV.val[2]) * 100 / 3;
							double dist = 1 / (avgMssim + 1);
							LOGL(LOG_SYMM_CHECK, "offset=" << offset << " dist=" << dist);
							if (dist < min) {
								min = dist;
								minOffset = offset;
								cv::imwrite("symm-max.png", straightMat);
								cv::Mat visualImage;
							}
						}
					}

					LOGL(LOG_SYMM_CHECK, "MinOffset = " << minOffset
							<< " charWidth=" << charWidth);

					if (absd(minOffset) > charWidth / 3) {
						LOGL(LOG_TEXTREC,
								"Reject " << s_out << " on asymmetry");
						std::cout << "Reject " << s_out << " on asymmetry"
								<< std::endl;
						break;
					}
				}

				/* save for training only if orientation is ~horizontal */
		if (fabs(theta_deg) < 7) {
			char *filename;
			std::cout << " ------ " << s_out << std::endl;
					asprintf(&filename, "bib-%05d-%s.png", this->bsid++, s_out.c_str());
					cv::imwrite(filename, bibMat);
					free(filename);
				}

			} else {
				LOGL(LOG_TEXTREC, "Reject as ROI outside boundaries");
				break;
			}

			/* all fine, add this bib number */
			text.push_back(s_out);
			LOGL(LOG_TEXTREC, "Bib number: '" << s_out << "'");

		} while (0);
		free(out);
	}

	cvReleaseImage(&grayImage);

	return 0;

}
Example #13
int main(int argc, char* argv[])
{
	IplImage* color = cvLoadImage("E:\\pic_skindetect\\clothtest\\2.jpg", 1);
	IplImage* gray = cvCreateImage(cvGetSize(color), 8, 1);
	IplImage* show = cvCreateImage(cvGetSize(color), 8, 1);
	cvZero(show);
	int i = 0;

	cvCvtColor(color, gray, CV_RGB2GRAY);
	//cvThreshold(gray, gray, 100, 255, CV_THRESH_BINARY_INV);
	cvCanny(gray, gray, 50, 150, 3); 
	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSeq* contours;
	CvSeq* seq_fourier = cvCreateSeq(CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvContour),sizeof(CvPoint2D32f), storage);
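	// seq_fourier holds the contour's Fourier coefficients as CvPoint2D32f;
	// CalcFourierDescriptorCoeff() fills it and CalcBoundary() reconstructs
	// the boundary points from the truncated coefficient set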
	cvFindContours(gray, storage, &contours, sizeof(CvContour), CV_RETR_TREE);

	CvSeq* mostContours = contours;
	/*for(; contours; contours = contours->h_next)
	{
		if (mostContours->total < contours->total)
		{
			mostContours = contours;
		}
	}*/

	int t = 0;
	for(; contours; contours = contours->h_next)
	{
	//contours = mostContours;
		++t;
		printf("%d\n", contours->total);
		cvDrawContours(color, contours, CV_RGB(255,0,0), CV_RGB(255,0,0), 1, 3);
		CalcFourierDescriptorCoeff(contours, 2000, seq_fourier);
		CalcBoundary(seq_fourier, contours->total, contours);

		for(int i = 0; i < contours->total; i++)
		{
			CvPoint* pt=(CvPoint*)cvGetSeqElem(contours, i);
			if(pt->x >= 0 && pt->x < show->width && pt->y >= 0 && pt->y < show->height)
			{
				((uchar*)(show->imageData+pt->y*show->widthStep))[pt->x] = 255;
			}
		}

		/*for(i = 0; i < contours->total; i++)
		{
			CvPoint* pt=(CvPoint*)cvGetSeqElem(contours, i);
			printf("%d, %d, %d\n", pt->x, pt->y, i);
		}*/
/*
		for(i = 0; i < seq_fourier->total; i++)
		{
			CvPoint2D32f* pt=(CvPoint2D32f*)cvGetSeqElem(seq_fourier, i);
			printf("%f, %f, %d\n", pt->x, pt->y, i);
		}*/
	}
	printf("t=%d\n", t);

	cvNamedWindow("color", 0);
	cvShowImage("color",color);
	//cvWaitKey(0);

	cvNamedWindow("gray", 0);
	cvShowImage("gray", gray);
	//cvWaitKey(0);

	cvNamedWindow("reconstructed", 0);
	cvShowImage("reconstructed", show);
	cvWaitKey(0);
	cvReleaseMemStorage(&storage);
	cvReleaseImage(&color);
	cvReleaseImage(&gray);
	cvReleaseImage(&show);
	cvDestroyAllWindows();
	return 0;
}
Example #14
std::list<vision::Garbage*> GarbageRecognition::garbageList(IplImage * src, IplImage * model){
	std::list<vision::Garbage*>::iterator it;
	for ( it=garbages.begin() ; it != garbages.end() ; it++ )
		delete *it;
	garbages.clear();
  

	//cvNamedWindow("output",CV_WINDOW_AUTOSIZE);
	//object model

	//image for the histogram-based filter
	//could be a parameter

	vision::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
	CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

	//~ int frameWidth=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);
	//~ int frameHeight=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);



	//gets a frame for setting  image size
	//CvSize srcSize = cvSize(frameWidth,frameHeight);
	CvSize srcSize = cvGetSize(src);

	//images for HSV conversion
	IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
	IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );



	//Image for thresholding
	IplImage * threshImage=cvCreateImage(srcSize,8,1);

	//image for equalization
	IplImage * equalizedImage=cvCreateImage(srcSize,8,1);

	//image for Morphing operations(Dilate-erode)
	IplImage * morphImage=cvCreateImage(srcSize,8,1);

	//image for image smoothing
	IplImage * smoothImage=cvCreateImage(srcSize,8,1);

	//image for contour-finding operations
	IplImage * contourImage=cvCreateImage(srcSize,8,3);

	int frameCounter=1;
	int cont_index=0;

	//convolution kernel for morph operations
	IplConvKernel* element;

	CvRect boundingRect;

	//contours
	CvSeq * contours;
  CvSeq * contoursCopy;

	//Main loop


	frameCounter++;

	//convert image to hsv
	cvCvtColor( src, hsv, CV_BGR2HSV );
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );

	//equalize Saturation Channel image
	cvEqualizeHist(s_plane,equalizedImage);

	//threshold the equalized Saturation channel image
	cvThreshold(equalizedImage,threshImage,THRESHOLD_VALUE,255,
	CV_THRESH_BINARY);

	//apply morphologic operations
	element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
		MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
		CV_SHAPE_RECT, NULL);

	cvDilate(threshImage,morphImage,element,MORPH_DILATE_ITER);
	cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);

	//apply smooth gaussian-filter
	cvSmooth(morphImage,smoothImage,CV_GAUSSIAN,3,0,0,0);

	//get all contours
	contours = myFindContours(smoothImage);
  contoursCopy=contours;
	
  cont_index=0;
	cvCopy(src,contourImage,0);
	


	while(contours!=NULL){
		CvSeq * aContour=getPolygon(contours);
		vision::Contours * ct = new Contours(aContour);

	
	    //int	pf = ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER);

		//int raf = ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO);

		// int af = ct->areaFilter(MINCONTOUR_AREA,MAXCONTOUR_AREA);
		//int baf = ct->boxAreaFilter(BOXFILTER_TOLERANCE);

        //int hmf = ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN);


		// apply filters: && short-circuits, so the cheap geometric tests run
		// before the more expensive histogram matching

		if (ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER) &&
        ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO) &&
        ct->boxAreaFilter(BOXFILTER_TOLERANCE) &&
        ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN) &&
       1){

				
				
				//get contour bounding box
				boundingRect=cvBoundingRect(ct->getContour(),0);
				
				//build garbage List
			
				//printf(" c %d,%d\n",boundingRect.x,boundingRect.y);

				vision::MinimalBoundingRectangle * r = new vision::MinimalBoundingRectangle(boundingRect.x,
					boundingRect.y,boundingRect.width,boundingRect.height);



				vision::Garbage * aGarbage = new vision::Garbage(r);
//				printf("%d , %d - %d , %d\n",boundingRect.x,boundingRect.y,boundingRect.width,boundingRect.height);

				garbages.push_back(aGarbage);


			}

		delete ct;
		cvReleaseMemStorage( &aContour->storage );
		contours=contours->h_next;
		cont_index++;
	}

   // cvShowImage("output",contourImage);
   // cvWaitKey(0);
	delete h;
  
  if(contoursCopy!=NULL)
    cvReleaseMemStorage(&contoursCopy->storage);

	cvReleaseHist(&testImageHistogram);
	//Image for thresholding
	//cvReleaseMemStorage( &contours->storage );
	cvReleaseImage(&threshImage);
	cvReleaseImage(&equalizedImage);
	cvReleaseImage(&morphImage);
	cvReleaseImage(&smoothImage);
	cvReleaseImage(&contourImage);
	
	cvReleaseImage(&hsv);
	cvReleaseImage(&h_plane);
	cvReleaseImage(&s_plane);
	cvReleaseImage(&v_plane);

	return garbages;
}
Example #15
  void VideoCapture::start()
  {
    loadConfig();

    if (useCamera) setUpCamera();
    if (useVideo)  setUpVideo();
    if (!capture)  std::cerr << "Capture error..." << std::endl;

    int input_fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    std::cout << "input->fps:" << input_fps << std::endl;

    IplImage* frame1 = cvQueryFrame(capture);
    frame = cvCreateImage(cvSize((int)((frame1->width*input_resize_percent) / 100), (int)((frame1->height*input_resize_percent) / 100)), frame1->depth, frame1->nChannels);
    //cvCreateImage(cvSize(frame1->width/input_resize_factor, frame1->height/input_resize_factor), frame1->depth, frame1->nChannels);
    std::cout << "input->resize_percent:" << input_resize_percent << std::endl;
    std::cout << "input->width:" << frame->width << std::endl;
    std::cout << "input->height:" << frame->height << std::endl;

    /* derive the per-frame delay (ms) from the reported FPS; fall back to ~30 fps */
    double loopDelay = 33.333;
    if (input_fps > 0)
      loopDelay = (1. / input_fps) * 1000.;
    std::cout << "loopDelay:" << loopDelay << std::endl;

    std::cout << "Press 'ESC' to stop..." << std::endl;
    bool firstTime = true;
    do
    {
      frameNumber++;

      frame1 = cvQueryFrame(capture);
      if (!frame1) break;

      cvResize(frame1, frame);

      if (enableFlip)
        cvFlip(frame, frame, 0);

      if (VC_ROI::use_roi == true && VC_ROI::roi_defined == false && firstTime == true)
      {
        VC_ROI::reset();

        do
        {
          cv::Mat img_input(frame);

          if (showOutput)
          {
            cv::imshow("Input", img_input);

            std::cout << "Set ROI (press ESC to skip)" << std::endl;
            VC_ROI::img_input1 = new IplImage(img_input);
            cvSetMouseCallback("Input", VC_ROI::VideoCapture_on_mouse, NULL);
            key = cvWaitKey(0);
            delete VC_ROI::img_input1;
          }
          else
            key = KEY_ESC;

          if (key == KEY_ESC)
          {
            std::cout << "ROI disabled" << std::endl;
            VC_ROI::reset();
            VC_ROI::use_roi = false;
            break;
          }

          if (VC_ROI::roi_defined)
          {
            std::cout << "ROI defined (" << VC_ROI::roi_x0 << "," << VC_ROI::roi_y0 << "," << VC_ROI::roi_x1 << "," << VC_ROI::roi_y1 << ")" << std::endl;
            break;
          }
          else
            std::cout << "ROI undefined" << std::endl;

        } while (1);
      }

      if (VC_ROI::use_roi == true && VC_ROI::roi_defined == true)
      {
        CvRect rect = cvRect(VC_ROI::roi_x0, VC_ROI::roi_y0, VC_ROI::roi_x1 - VC_ROI::roi_x0, VC_ROI::roi_y1 - VC_ROI::roi_y0);
        cvSetImageROI(frame, rect);
      }

      cv::Mat img_input(frame);

      if (showOutput)
        cv::imshow("Input", img_input);

      if (firstTime)
        saveConfig();

      start_time = cv::getTickCount();
      frameProcessor->process(img_input);
      int64 delta_time = cv::getTickCount() - start_time;
      freq = cv::getTickFrequency();
      fps = freq / delta_time;
      //std::cout << "FPS: " << fps << std::endl;

      cvResetImageROI(frame);

      key = cvWaitKey(loopDelay);
      //std::cout << "key: " << key << std::endl;

      if (key == KEY_SPACE)
        key = cvWaitKey(0);

      if (key == KEY_ESC)
        break;

      if (stopAt > 0 && stopAt == frameNumber)
        key = cvWaitKey(0);

      firstTime = false;
    } while (1);

    cvReleaseCapture(&capture);
  }
Example #16
void MBLBPDetectSingleScale( const IplImage* img,
                             MBLBPCascade * pCascade,
                             CvSeq * positions, 
                             CvSize winStride)
{
    IplImage * sum = 0;
    int ystep, xstep, ymax, xmax;
    
    CV_FUNCNAME( "MBLBPDetectSingleScale" );

    __BEGIN__;


    if( !img )
        CV_ERROR( CV_StsNullPtr, "Null image pointer" );

    if( ! pCascade) 
        CV_ERROR( CV_StsNullPtr, "Invalid classifier cascade" );

    if( !positions )
        CV_ERROR( CV_StsNullPtr, "Null CvSeq pointer" );

    if(pCascade->win_width > img->width || 
       pCascade->win_height > img->height)
        return ;



    CV_CALL( sum = cvCreateImage(cvSize(img->width, img->height), IPL_DEPTH_32S, 1));
    myIntegral(img, sum);
    //cvIntegral(img, sum);
    UpdateCascade(pCascade, sum);

    ystep = winStride.height;
    xstep = winStride.width;
    ymax = img->height - pCascade->win_height - 1;
    xmax = img->width  - pCascade->win_width  - 1;

#ifdef _OPENMP
    #pragma omp parallel for
#endif

	for(int iy = 0; iy < ymax; iy+=ystep)
    {
       for(int ix = 0; ix < xmax; ix+=xstep)
        {
            int w_offset = iy * sum->widthStep / sizeof(int) + ix;
			int result = DetectAt(pCascade, w_offset);
            if( result > 0)
            {
                //since the integral image is different with that of OpenCV,
                //update the position to OpenCV's by adding 1.
                CvPoint pt = cvPoint(ix+1, iy+1);
#ifdef _OPENMP
omp_set_lock(&lock); 
#endif
                cvSeqPush(positions, &pt);
#ifdef _OPENMP
omp_unset_lock(&lock);
#endif
			}
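			/* a zero score presumably indicates an early cascade rejection,
			   so skip one extra stride to speed up the scan */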
			if(result == 0)
			{
				ix += xstep;
			}
        }
    }

    __END__;

    cvReleaseImage(&sum);
    return ;
}
Example #17
// update Gaussian Mixture Model
void GMM::update(IplImage* curr){

	if(bgModel == NULL){
	
		bgModel = cvCreateGaussianBGModel(curr, NULL);

	}else{
	
		// check channels
		IplImage* curr_gray;
		if(curr->nChannels == 3){
		
			curr_gray = cvCreateImage(cvGetSize(curr), IPL_DEPTH_8U, 1);
			cvCvtColor(curr, curr_gray, CV_RGB2GRAY);
			cvEqualizeHist(curr_gray, curr_gray);
		
		}else if(curr->nChannels == 1){
		
			curr_gray = curr;
			cvEqualizeHist(curr_gray, curr_gray);
		
		}else{
			// unsupported channel count: return instead of using an
			// uninitialized curr_gray below
			return;
		}

		// learning rate: -1 uses the model's default adaptation rate while
		// learning; 0 freezes the background model afterwards
		if(frameCnt++ <= learnCnt){
		
			cvUpdateBGStatModel(curr_gray, bgModel, -1); // learn
		
		}else{
		
			cvUpdateBGStatModel(curr_gray, bgModel, 0);
		
		}

		if(curr->nChannels == 3){
		
			cvReleaseImage(&curr_gray);
		
		}
	
	}

	if(fgclone)
		cvCopy(bgModel->foreground, fgclone, NULL);
	else
		fgclone = cvCloneImage(bgModel->foreground);

	// remove salt & pepper noise
	cvSmooth(fgclone, fgclone, CV_MEDIAN, 5);

}
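
A minimal usage sketch for the GMM class above — hypothetical: it assumes GMM is default-constructible and that the fgclone foreground image is accessible as shown; the camera index and window name are placeholders.

// hypothetical driver for GMM::update() above
CvCapture* cap = cvCaptureFromCAM(0);           // placeholder capture source
GMM gmm;                                        // assumes a default constructor
IplImage* frame = 0;
while ((frame = cvQueryFrame(cap)) != NULL) {
	gmm.update(frame);                          // learn, then classify foreground
	if (gmm.fgclone)                            // median-filtered foreground mask
		cvShowImage("foreground", gmm.fgclone); // placeholder window name
	if (cvWaitKey(10) == 27)                    // ESC to quit
		break;
}
cvReleaseCapture(&cap);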
Example #18
CvSeq * MBLBPDetectMultiScale( const IplImage* img,
                               MBLBPCascade * pCascade,
                               CvMemStorage* storage, 
                               int scale_factor1024x,
                               int min_neighbors, 
                               int min_size,
							   int max_size)
{
    IplImage stub;
    CvMat mat, *pmat;
    CvSeq* seq = 0;
    CvSeq* seq2 = 0;
    CvSeq* idx_seq = 0;
    CvSeq* result_seq = 0;
    CvSeq* positions = 0;
    CvMemStorage* temp_storage = 0;
    CvAvgComp* comps = 0;
    
    CV_FUNCNAME( "MBLBPDetectMultiScale" );

    __BEGIN__;

    int factor1024x;
    int factor1024x_max;
    int coi;

    if( ! pCascade) 
        CV_ERROR( CV_StsNullPtr, "Invalid classifier cascade" );

    if( !storage )
        CV_ERROR( CV_StsNullPtr, "Null storage pointer" );

    CV_CALL( img = cvGetImage( img, &stub));
    CV_CALL( pmat = cvGetMat( img, &mat, &coi));

    if( coi )
        CV_ERROR( CV_BadCOI, "COI is not supported" );

    if( CV_MAT_DEPTH(pmat->type) != CV_8U )
        CV_ERROR( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );

    if( CV_MAT_CN(pmat->type) > 1 )
    	CV_ERROR( CV_StsUnsupportedFormat, "Only single-channel images are supported" );

    min_size = MAX(pCascade->win_width, min_size);
    if( max_size <= 0 )
        max_size = MIN(img->width, img->height);
    if( max_size < min_size )
        return NULL;

	CV_CALL( temp_storage = cvCreateChildMemStorage( storage ));
    seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvRect), temp_storage );
    seq2 = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvAvgComp), temp_storage );
    result_seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvAvgComp), storage );
    positions = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), temp_storage );

    if( min_neighbors == 0 )
        seq = result_seq;

    factor1024x = ((min_size<<10)+(pCascade->win_width/2)) / pCascade->win_width;
	factor1024x_max = (max_size<<10) / pCascade->win_width; // do not round, so the scan window cannot go out of range
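	// scale factors are 10-bit fixed point (1024 == 1.0); the +512 terms in
	// the loop below implement round-to-nearest on the >>10 divisions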

#ifdef _OPENMP
	omp_init_lock(&lock); 
#endif
    for( ; factor1024x <= factor1024x_max;
         factor1024x = ((factor1024x*scale_factor1024x+512)>>10) )
    {
        IplImage * pSmallImage = cvCreateImage( cvSize( ((img->width<<10)+factor1024x/2)/factor1024x, ((img->height<<10)+factor1024x/2)/factor1024x),
                                                IPL_DEPTH_8U, 1);
        try{
			cvResize(img, pSmallImage);
		}
		catch(...)
		{
			cvReleaseImage(&pSmallImage);
			return NULL;
		}
		
		
        CvSize winStride = cvSize( (factor1024x<=2048)+1,  (factor1024x<=2048)+1 );

		cvClearSeq(positions);

        MBLBPDetectSingleScale( pSmallImage, pCascade, positions, winStride);

        for(int i=0; i < (positions ? positions->total : 0); i++)
        {
            CvPoint pt = *(CvPoint*)cvGetSeqElem( positions, i );
            CvRect r = cvRect( (pt.x * factor1024x + 512)>>10,
                               (pt.y * factor1024x + 512)>>10,
                               (pCascade->win_width * factor1024x + 512)>>10,
                               (pCascade->win_height * factor1024x + 512)>>10);

            cvSeqPush(seq, &r);
        }

        cvReleaseImage(&pSmallImage);
    }
#ifdef _OPENMP
	omp_destroy_lock(&lock); 
#endif
  
    if( min_neighbors != 0 )
    {
        // group retrieved rectangles in order to filter out noise 
        int ncomp = cvSeqPartition( seq, 0, &idx_seq, (CvCmpFunc)is_equal, 0 );
        CV_CALL( comps = (CvAvgComp*)cvAlloc( (ncomp+1)*sizeof(comps[0])));
        memset( comps, 0, (ncomp+1)*sizeof(comps[0]));

        // count number of neighbors
        for(int i = 0; i < seq->total; i++ )
        {
            CvRect r1 = *(CvRect*)cvGetSeqElem( seq, i );
            int idx = *(int*)cvGetSeqElem( idx_seq, i );
            assert( (unsigned)idx < (unsigned)ncomp );

            comps[idx].neighbors++;
             
            comps[idx].rect.x += r1.x;
            comps[idx].rect.y += r1.y;
            comps[idx].rect.width += r1.width;
            comps[idx].rect.height += r1.height;
        }

        // calculate average bounding box
        for(int i = 0; i < ncomp; i++ )
        {
            int n = comps[i].neighbors;
            if( n >= min_neighbors )
            {
                CvAvgComp comp;
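                // (sum*2 + n) / (2*n) is the rounded integer mean of the n
                // accumulated rectangle coordinates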
                comp.rect.x = (comps[i].rect.x*2 + n)/(2*n);
                comp.rect.y = (comps[i].rect.y*2 + n)/(2*n);
                comp.rect.width = (comps[i].rect.width*2 + n)/(2*n);
                comp.rect.height = (comps[i].rect.height*2 + n)/(2*n);
                comp.neighbors = comps[i].neighbors;

                cvSeqPush( seq2, &comp );
            }
        }

        // filter out small face rectangles inside large face rectangles
        for(int i = 0; i < seq2->total; i++ )
        {
            CvAvgComp r1 = *(CvAvgComp*)cvGetSeqElem( seq2, i );
            int j, flag = 1;

            for( j = 0; j < seq2->total; j++ )
            {
                CvAvgComp r2 = *(CvAvgComp*)cvGetSeqElem( seq2, j );
                int distance = (r2.rect.width *2+5)/10;//cvRound( r2.rect.width * 0.2 );
            
                if( i != j &&
                    r1.rect.x >= r2.rect.x - distance &&
                    r1.rect.y >= r2.rect.y - distance &&
                    r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance &&
                    r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance &&
                    (r2.neighbors > MAX( 3, r1.neighbors ) || r1.neighbors < 3) )
                {
                    flag = 0;
                    break;
                }
            }

            if( flag )
            {
                cvSeqPush( result_seq, &r1 );
                /* cvSeqPush( result_seq, &r1.rect ); */
            }
        }
    }   


    __END__;

    cvReleaseMemStorage( &temp_storage );
    cvFree( &comps );

    return result_seq;
}
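
A hypothetical call site for MBLBPDetectMultiScale() above — the cascade loader, file names and parameter values are assumptions; scale_factor1024x is 10-bit fixed point, so 1229 means roughly a 1.2x step per scale.

// hypothetical usage of MBLBPDetectMultiScale() above
IplImage* grayImg = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE); // 8-bit, 1 channel
CvMemStorage* storage = cvCreateMemStorage(0);
MBLBPCascade* cascade = LoadMBLBPCascade("cascade.bin");  // hypothetical loader
CvSeq* faces = MBLBPDetectMultiScale(grayImg, cascade, storage,
                                     1229,  // ~1.2x per scale step (1229/1024)
                                     3,     // min_neighbors
                                     24,    // min_size in pixels
                                     0);    // max_size <= 0: derived from the image
for (int i = 0; i < (faces ? faces->total : 0); i++) {
    CvAvgComp c = *(CvAvgComp*)cvGetSeqElem(faces, i);
    cvRectangle(grayImg, cvPoint(c.rect.x, c.rect.y),
                cvPoint(c.rect.x + c.rect.width, c.rect.y + c.rect.height),
                cvScalarAll(255), 2, 8, 0);
}
cvReleaseMemStorage(&storage);
cvReleaseImage(&grayImg);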
Example #19
void aplicar_waves (int tiempo, int cant_iteraciones, const char *nomb_impl, const char *nomb_arch_entrada, float x_scale, float y_scale, float g_scale) {
	IplImage *src = 0;
	IplImage *dst = 0;
	CvSize dst_size;

	// Load the image
	if( (src = cvLoadImage (nomb_arch_entrada, CV_LOAD_IMAGE_GRAYSCALE)) == 0 )
		exit(EXIT_FAILURE);

	dst_size.width = src->width;
	dst_size.height = src->height;

	// Create an IplImage for each expected output
	if( (dst = cvCreateImage (dst_size, IPL_DEPTH_8U, 1) ) == 0 )
		exit(EXIT_FAILURE);

	// Check the parameters
	if (!(x_scale <=  32.0 && x_scale >= 0.0 &&
		  y_scale <=  32.0 && y_scale >= 0.0 &&
		  g_scale <= 255.0 && g_scale >= 0.0)) {
		imprimir_ayuda();

		cvReleaseImage(&src);
		cvReleaseImage(&dst);

		exit ( EXIT_SUCCESS );
	}

	typedef void (waves_fn_t) (unsigned char*, unsigned char*, int, int, int, float, float, float);

	waves_fn_t *proceso;

	if (strcmp(nomb_impl, "c") == 0) {
		proceso = waves_c;
	} else {
		proceso = waves_asm;
	}

	if (tiempo) {
		unsigned long long int start, end;

		MEDIR_TIEMPO_START(start);

		for(int i=0; i<cant_iteraciones; i++) {
			proceso((unsigned char*)src->imageData, (unsigned char*)dst->imageData, src->height, src->width, src->widthStep, x_scale, y_scale, g_scale);
		}

		MEDIR_TIEMPO_STOP(end);

		imprimir_tiempos_ejecucion(start, end, cant_iteraciones);
	} else {
		proceso((unsigned char*)src->imageData, (unsigned char*)dst->imageData, src->height, src->width, src->widthStep, x_scale, y_scale, g_scale);
	}

	// Save the image and release the images
	char nomb_arch_salida[256];

	memset(nomb_arch_salida, 0, 256);

	sprintf(nomb_arch_salida, "%s.waves.x_scale-%3.2f.y_scale-%3.2f.g_scale-%3.2f.%s.bmp", nomb_arch_entrada, x_scale, y_scale, g_scale, nomb_impl);

	cvSaveImage(nomb_arch_salida, dst, NULL);

	cvReleaseImage(&src);
	cvReleaseImage(&dst);
}
Example #20
int main(int argc, char* argv[]) {
    IplImage* img_8uc1 = NULL;


#ifdef IMAGE
    if( argc != 2 || !(img_8uc1 = cvLoadImage( argv[1], CV_LOAD_IMAGE_GRAYSCALE )) ){
        printf("%s image\n",argv[0]);
        return -1;
    }
#else
    CvCapture* capture = cvCreateFileCapture( argv[1] );
    IplImage* frame;
    if( argc != 2 || !(frame = cvQueryFrame( capture )) ){
        printf("%s image\n",argv[0]);
        return -1;
    }
#endif
  
    const char* name = "Edge Detection Window";
    cvNamedWindow( name, 0 );
    cvCreateTrackbar( "Contour perimeter", name, &high_switch_value, 100, switch_callback_h );
    cvCreateTrackbar( "Min area", name, &minimum_area, 100000, NULL);
    cvCreateTrackbar( "Max area", name, &maximum_area, 100000, NULL);

    while(1) {
#ifndef IMAGE
        frame = cvQueryFrame( capture );
        img_8uc1=cvCreateImage( cvGetSize(frame), 8, 1 );
        cvCvtColor(frame,img_8uc1,CV_BGR2GRAY);
#endif
        IplImage* img_edge = cvCreateImage( cvGetSize(img_8uc1), 8, 1 );
        IplImage* img_8uc3 = cvCreateImage( cvGetSize(img_8uc1), 8, 3 );
        cvThreshold( img_8uc1, img_edge, 128, 255, CV_THRESH_BINARY );
        CvMemStorage* storage = cvCreateMemStorage();
        CvSeq* first_contour = NULL;
        cvFindContours(
           img_edge,
           storage,
           &first_contour,
           sizeof(CvContour),
           CV_RETR_CCOMP
       );

       int n=0;
       cvCvtColor( img_8uc1, img_8uc3, CV_GRAY2BGR );
       CvSeq* contours=first_contour;
       while (contours) {
           double area=fabs(cvContourArea(contours, CV_WHOLE_SEQ));
           if(area < minimum_area || area > maximum_area) {
               contours = contours->h_next;
               continue;
           }
           CvSeq *result;
           double s,t;

           // Douglas-Peucker approximation; epsilon scales with the contour
           // perimeter, so the tolerance is resolution-independent
           result = cvApproxPoly(contours, sizeof(CvContour), storage,
               CV_POLY_APPROX_DP, cvContourPerimeter(contours) * perimeter_constant, 0);

           if (result->total == 4 && cvCheckContourConvexity(result)) {
               s = 0;
               int i;
               for (i = 0; i < 5; i++) {
                   // find minimum angle between joint
                   // edges (maximum of cosine)
                   if (i >= 2) {
                       t = fabs(angle(
                           (CvPoint *) cvGetSeqElem(result, i),
                           (CvPoint *) cvGetSeqElem(result, i - 2),
                           (CvPoint *) cvGetSeqElem(result, i - 1)));
                       s = s > t ? s : t;
                   }
                }
                cvDrawContours(img_8uc3, contours, RED, BLUE, 0, 2, 8);
                // if cosines of all angles are small
                // (all angles are ~90 degree) then write quandrange
                // vertices to resultant sequence 
                // printf("s=%f\n",s);
                if (s > 0.3) {
                    /*for (i = 0; i < 4; i++) {
                        cvSeqPush(squares, (CvPoint *) cvGetSeqElem(result, i));
                    }*/
                }
            }
            contours = contours->h_next;
            n++;
        }
        cvShowImage( name, img_8uc3 );
        if( cvWaitKey(200) == 27 ) // ESC exits the loop
            break;
        cvReleaseMemStorage( &storage ); // storage is re-created every iteration
        cvReleaseImage( &img_8uc3 );
        cvReleaseImage( &img_edge );
#ifndef IMAGE
        cvReleaseImage( &img_8uc1 );
#endif
    }
    cvDestroyWindow( name );
    return 0;
}
Example #21
static void ocvThread(void){

	//if(cvDefined==FALSE){
		ttModels theModels;
		ttInit(&theModels);
		
	static GdkPixbuf *pixbuf;
	
	IplImage *theFrame, *segmented;
	static char theStr[12];
  thePixel=cvPoint(0,0);
  //globalFrame=cvCreateImage(size,IPL_DEPTH_8U,3);
	//char theChar;
	
	#if use_webcam==1
	CvCapture* theCamera;
	CvSize size=cvSize(justWidth,justHeight);
	theCamera=cvCaptureFromCAM(-1);
	cvSetCaptureProperty( theCamera,CV_CAP_PROP_FRAME_WIDTH,justWidth );
	cvSetCaptureProperty( theCamera,CV_CAP_PROP_FRAME_HEIGHT,justHeight );
  theFrame=cvCreateImage(size,IPL_DEPTH_8U,3);
	#else
	theFrame=cvLoadImage("images/image02.jpg",1);
	assert(theFrame!=NULL);
	justWidth=theFrame->width;
	justHeight=theFrame->height;
	CvSize size=cvSize(justWidth,justHeight);
		cvConvertImage(theFrame,theFrame,CV_CVTIMG_SWAP_RB);
	#endif
  segmented=cvCreateImage(size,IPL_DEPTH_8U,3);
		
	while (1){
	
	#if use_webcam==1
		theFrame=cvQueryFrame(theCamera);
		
		assert(theFrame!=NULL);
		cvConvertImage(theFrame,theFrame,CV_CVTIMG_SWAP_RB);
	#endif
	
		if(changeFlag==1){
			theRanger.hue=-1;
			theH=ttCalibration(theFrame,&thePixel,&theRanger,NULL);
			theRanger.hue=theH;
			changeFlag=0;
			//getIndex();
			//printf("%d\n",theIndex);
			//updateLimits();
		}
		ttCalibration(theFrame,&thePixel,&theRanger,segmented);
		sprintf(theStr,"Hue=%d",theH);
		getIndex();
		//cvShowImage("window",theImage);
		//theFrame=theImage;
		
		//cvWaitKey(5000);
		
 		gdk_threads_enter();
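		// gdk_pixbuf_new_from_data() wraps the frame's pixel buffer without
		// copying it, so theFrame must stay alive while the pixbuf is shown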
		pixbuf = gdk_pixbuf_new_from_data ((guchar*) theFrame->imageData,
																		GDK_COLORSPACE_RGB,
																		FALSE,
																		theFrame->depth,
																		theFrame->width,
																		theFrame->height,
																		(theFrame->widthStep),
																		NULL,
																		NULL); 
	
		                 
    gtk_image_set_from_pixbuf(GTK_IMAGE(image), pixbuf);
    
    pixbuf = gdk_pixbuf_new_from_data ((guchar*) segmented->imageData,
																		GDK_COLORSPACE_RGB,
																		FALSE,
																		theFrame->depth,
																		theFrame->width,
																		theFrame->height,
																		(theFrame->widthStep),
																		NULL,
																		NULL); 
																		
    gtk_image_set_from_pixbuf(GTK_IMAGE(gseg), pixbuf);
    gtk_label_set_text((GtkLabel *)hval,theStr);
  	gdk_threads_leave();
    //cvWaitKey();
    #if use_webcam==0
	  g_usleep(50000);
	  #endif
	}
}
Example #22
void ImageViewer::displayMatches(QPainter& painter)const
{
	QPoint pt1, pt2;

	if (siftObj1.keypoints==NULL){
		printf("ERROR : Keypoints NULL\n");
		exit(-1);
	}
		
	if (dispMatch && lastComparison.tab_match!=NULL && !siftObj1.IsEmpty() ){
		// Display matches
		for (int i=0;i<lastComparison.nb_match;i++) {
			pt1.setX(ROUND(lastComparison.tab_match[i].x1));
			pt1.setY(ROUND(lastComparison.tab_match[i].y1));
			pt2.setX(ROUND(lastComparison.tab_match[i].x2));
			pt2.setY(ROUND(lastComparison.tab_match[i].y2 + siftObj1.im->height));
			
			painter.setBrush(Qt::white);
			if (lastComparison.tab_match[i].id==0)
				painter.setPen(Qt::red); //red for discarded matches
			else painter.setPen(Qt::green); //green
			
			painter.drawLine(pt1, pt2);
			painter.drawEllipse(pt1, 3, 3);
			painter.drawEllipse(pt2, 3, 3);
		}
	}
	
	#ifdef AAA
		
	//IplImage * im,* imcol;
	QSize s;
	//QPoint pt1, pt2;
	//CvScalar color;
	int i,j,im2null=0;
	Keypoint *k1=siftObj1->keypoints;
	Keypoint *k2=siftObj2->keypoints;
		/*Affine transform of the image border*/
		
		if (param.size_m()>0) {
			Matrice p1(2,1), p2(2,1), p3(2,1), p4(2,1), transl(2,1);
			transl.set_val(0,0,0);
			transl.set_val(1,0,im1->height);
			p1.set_val(0,0,0);
			p1.set_val(1,0,0);
			p2.set_val(0,0,im1->width);
			p2.set_val(1,0,0);
			p3.set_val(0,0,im1->width);
			p3.set_val(1,0,im1->height);
			p4.set_val(0,0,0);
			p4.set_val(1,0,im1->height);
			
			p1=Transform(p1,param)+transl;
			p2=Transform(p2,param)+transl;
			p3=Transform(p3,param)+transl;
			p4=Transform(p4,param)+transl;
			
			color=CV_RGB(0,128,255); //light blue
			pt1.x=ROUND(p1.get_val(0,0));
			pt1.y=ROUND(p1.get_val(1,0));
			pt2.x=ROUND(p2.get_val(0,0));
			pt2.y=ROUND(p2.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			pt1.x=ROUND(p2.get_val(0,0));
			pt1.y=ROUND(p2.get_val(1,0));
			pt2.x=ROUND(p3.get_val(0,0));
			pt2.y=ROUND(p3.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			pt1.x=ROUND(p3.get_val(0,0));
			pt1.y=ROUND(p3.get_val(1,0));
			pt2.x=ROUND(p4.get_val(0,0));
			pt2.y=ROUND(p4.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			pt1.x=ROUND(p4.get_val(0,0));
			pt1.y=ROUND(p4.get_val(1,0));
			pt2.x=ROUND(p1.get_val(0,0));
			pt2.y=ROUND(p1.get_val(1,0));
			cvLine(imcol, pt1, pt2, color, 1);
			
			/* Draw the border of the object */
			CvMemStorage *storage= cvCreateMemStorage (0); /* Memory used by openCV */
			int header_size = sizeof( CvContour );
			CvSeq *contours;
			
			IplImage* imthres = cvCreateImage(cvSize(im1->width,im1->height),IPL_DEPTH_8U, 1 );
			cvCopy( im1, imthres, 0 );
			
			/* First find the contour of a thresholded image*/
			
			cvThreshold(imthres, imthres, border_threshold, 255, CV_THRESH_BINARY );
			cvFindContours ( imthres, storage, &contours, header_size, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
			
			/* For each contour found*/
			
			while ( contours != NULL) {
				double area=fabs(cvContourArea(contours,CV_WHOLE_SEQ)); // compute area
				if ( area > 20) {
					for (int i=0;i<contours->total;i++) {
						
						/* Compute transform of contour*/
						
						CvPoint* cvpt=(CvPoint*)cvGetSeqElem( contours, i);
						p1.set_val(0,0,cvpt->x);
						p1.set_val(1,0,cvpt->y);
						p1= Transform(p1,param) + transl;
						cvpt->x=ROUND(p1.get_val(0,0));
						cvpt->y=ROUND(p1.get_val(1,0));
					}
					//                cvDrawContours( imcol, contours, CV_RGB(0,0,255),CV_RGB(255,0,0),0,2,8);
					cvDrawContours( imcol, contours, CV_RGB(0,0,255),CV_RGB(0,0,255),0,2,8);
				}
				contours = contours->h_next; // advance to the next contour
			}
			/* the contour sequences live in `storage`, so releasing the
			   storage is the only cleanup needed (no separate free) */
			cvReleaseMemStorage( &storage );
			
	}
	#endif
}
Example #23
int main(int argc, char* argv[])
{
	//--------------------- Server side ---------------------
	
	WSADATA wsadata;
	
	// startup call for WSA (Windows Sockets Asynchronous)
	WSAStartup(MAKEWORD(2,2), &wsadata);

	// create the socket
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	
	sockaddr_in myaddr;
	myaddr.sin_addr.s_addr = INADDR_ANY;
	myaddr.sin_family = AF_INET;
	myaddr.sin_port = htons(3001);

	// bind to the local address and port
	int e = bind(fd, (sockaddr*)&myaddr, sizeof(myaddr));
	
	if (e < 0)
	{
		printf("bind port 3001 error\n");
		closesocket(fd);
		return 1;
	}
	
	// listen, ready to accept client requests
	listen(fd, 5);

	printf("recv data for show\n");

	cvWaitKey(1);
	
	while (true)
	{
		printf("wait for connecting\n");
		
		sockaddr_in faraddr;
		int size = sizeof(faraddr);
		
		int newfd = accept(fd, (sockaddr*)&faraddr, &size);
		
		if (newfd < 1)
		{
			printf("accept error %d\n", GetLastError());
			closesocket(fd);
			return 1;
		}
		printf("connected on\n");

		IplImage* imgs[10] = {0};
		bool windows[10] = {false};
		
		while (true)
		{
			bool hasError = false;
			
			HEAD head;
			
			for (int i=0; i<4; ++i)
			{
				int n = recv(newfd, ((char*)&head)+i, 1, 0);
				
				if (n != 1)
				{
					hasError = true;
					printf("recv error %d\n", GetLastError());
					break;
				}
			}
			
			if (hasError)
			{
				break;
			}
			
			head.headlen = ntohl(head.headlen);
			
			if (head.headlen > 1024 || head.headlen < 8)
			{
				printf("head.headlen error %d\n", head.headlen);
				break;
			}

			for (int i=0; i<head.headlen - 4; ++i)
			{
				int n = recv(newfd, ((char*)&head)+4+i, 1, 0);
				if (n != 1)
				{
					hasError = true;
					printf("recv error %d\n", GetLastError());
					break;
				}
			}
			if (hasError)
			{
				break;
			}
			
			head.h = ntohl(head.h);
			head.id = ntohl(head.id);
			head.w = ntohl(head.w);
			head.taillen = ntohl(head.taillen);
			
			if (head.taillen > 10*1024*1024 || head.taillen < 1)
			{
				printf("head.taillen error %d\n", head.taillen);
				break;
			}

			if (head.id >= sizeof(imgs)/sizeof(imgs[0]) || head.id < 0)
			{
				printf("id error %d\n", head.id);
				break;
			}
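			// IplImage rows are padded to 4-byte boundaries, so a valid payload
			// is ((w*channels + 3) & ~3) * h for 3-channel or 1-channel frames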
			if (head.taillen !=  (((head.w*3+3) & (~3)) * head.h) && head.taillen !=  (((head.w+3) & (~3)) * head.h) )
			{
				printf("w h tail error %d %d %d\n", head.w, head.h, head.taillen);
				break;
			}
				
			IplImage* img = imgs[head.id];
			
			int nChannels = (head.taillen / head.w / head.h) >= 3 ? 3 : 1;
			
			bool needCreate = false;
			needCreate = (NULL == img);
			
			if (!needCreate && (img->height != head.h || img->width != head.w || img->nChannels != nChannels))
			{
				needCreate = true;
				cvReleaseImage(&(imgs[head.id]));
				imgs[head.id] = img = NULL;
			}
			
			if (needCreate)
			{
				imgs[head.id] = img = cvCreateImage(cvSize(head.w, head.h), IPL_DEPTH_8U, nChannels);
			}

			int receivedLen = 0;
			int needLen = head.taillen;

			while (needLen > 0)
			{
				int n = recv(newfd, img->imageData + receivedLen, needLen, 0);
				if (n < 1)
				{
					printf("recv error line:%d error:%d\n", __LINE__, GetLastError());
					hasError = true;
					break;
				}
				receivedLen += n;
				needLen -= n;
			}
			if (hasError)
			{
				break; // the socket is shut down and closed right after the loop
			}

			char windowName[10] = {0};
			
			windowName[0] = '0' + head.id;
			
			if (!windows[head.id])
			{
				windows[head.id] = true;
				
				// create a window to display the image
				cvNamedWindow(windowName);
			}
			
			// show the image in the named window (arg 1: window name, arg 2: image)
			cvShowImage(windowName, img);
			
			cvWaitKey(1);
		}
		shutdown(newfd, 2);
		closesocket(newfd);
		
		for (int i=0; i<sizeof(windows)/sizeof(windows[0]); ++i)
		{
			if (windows[i])
			{
				windows[i] = false;
				char buf[10] = {0};
				buf[0] = i+'0';
				cvDestroyWindow(buf);
			}
		}
	}
	
	return 0;
}
Example #24
int main(int argc, char *argv[]) {
	/* initialisation of the parameters  */
    LOOV_params *param=alloc_init_LOOV_params();
    param = parse_arg(param, argc, argv);
	/* initialisation of the boxes sequences */
    CvMemStorage* storage_box = cvCreateMemStorage(0);
    CvSeq* seq_box = cvCreateSeq( 0, sizeof(CvSeq), sizeof(box*), storage_box);							// list of boxes that are shown in the current frame
    CvMemStorage* storage_box_final = cvCreateMemStorage(0);
    CvSeq* seq_box_final = cvCreateSeq( 0, sizeof(CvSeq), sizeof(box*), storage_box_final);				// boxes list  that no longer appear

    if (param->videoName==NULL) { fprintf(stderr,"enter video name after parameter -v\n"); exit(0); }
    CvCapture* capture = cvCaptureFromFile(param->videoName);    										// read video
    if (!capture)        { printf("error on video %s\n",param->videoName); exit(1); }
    cvSetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES, param->startFrame);  							// get video property
    IplImage* frame_temp = cvQueryFrame( capture );   													// get the first frame    

	/* computed parameters depending on the image size */
    int video_depth=1;    																				
    for (int i=0;i<frame_temp->depth;i++) video_depth=video_depth*2;									// find the max threshold
    param->max_thr = video_depth-1;
    param->it_connected_caractere = round_me((float)frame_temp->width*param->aspect_ratio*param->it_connected_caractere);       
    param->y_min_size_text = round_me((float)frame_temp->height*param->y_min_size_text);
    param->x_min_size_text = round_me((float)frame_temp->width*param->aspect_ratio*param->x_min_size_text);
	
	/* read mask image, to process only a part of the images */
    IplImage* frame=cvCreateImage(cvSize(frame_temp->width*param->aspect_ratio, frame_temp->height), frame_temp->depth, frame_temp->nChannels);
    cvResize(frame_temp, frame, CV_INTER_CUBIC);
    IplImage* im_mask=0;    
    if (param->path_im_mask!=NULL) {
        im_mask=cvLoadImage(param->path_im_mask, CV_LOAD_IMAGE_GRAYSCALE);
        if ((frame->width!=im_mask->width) || (frame->height!=im_mask->height)){
            IplImage* im_mask_resize = cvCreateImage(cvSize(frame->width, frame->height),im_mask->depth, 1);  // resize mask to the images video size
            cvResize(im_mask, im_mask_resize, CV_INTER_CUBIC);
            cvReleaseImage(&im_mask);
            im_mask = cvCloneImage(im_mask_resize);
            cvReleaseImage(&im_mask_resize);
        }
    }   
    
    printf("processing of frames from %d to %d\n", param->startFrame, param->startFrame+param->nbFrame);
    
    IplImage* frame_BW=cvCreateImage(cvSize(frame_temp->width*param->aspect_ratio, frame_temp->height), frame_temp->depth, 1);
    IplImage* frame_BW_temp=cvCreateImage(cvSize(frame_temp->width, frame_temp->height), frame_temp->depth, 1);   
    int frameNum=param->startFrame;
    while((frameNum<param->startFrame+param->nbFrame) && (frame_temp = cvQueryFrame( capture ))) {  // capture the current frame and put it in frame_temp
        frameNum++;
        if( frame_temp ) {	
            cvCvtColor(frame_temp, frame_BW_temp, CV_RGB2GRAY);			                            // convert frame from color to gray
            cvResize(frame_temp, frame, CV_INTER_CUBIC);                                            // resize for aspect ratio
            cvResize(frame_BW_temp, frame_BW, CV_INTER_CUBIC);
            cvCvtColor(frame, frame_BW, CV_RGB2GRAY);
			IplImage* im = cvCloneImage(frame_BW);			
            im = sobel_double_H(im, param);															// find edge of characters		
            if (param->path_im_mask!=NULL) cvAnd(im,im_mask,im, NULL);								// apply mask if it exists
            im = connected_caractere(im, param);													// connect edges of a same line
            im = delete_horizontal_bar(im, param);													// filter noise on the resulting image
            im = delete_vertical_bar(im, param);													// filter noise on the resulting image
            if (param->path_im_mask!=NULL) cvAnd(im,im_mask,im, NULL);								// apply mask if it exists
            spatial_detection_box(im, seq_box, frameNum, frame_BW, frame, frame, im_mask, param); 	// Detect boxes spatial position
            temporal_detection_box(seq_box, seq_box_final, frameNum, frame_BW, im_mask, param);     // Temporal tracking of the boxes
            cvReleaseImage(&im);
        }
    }     
    cvReleaseImage(&frame_BW);
    cvReleaseImage(&im_mask);

    /* finish the transcription of the boxes in seq_box */
    for (int i=0;i<seq_box->total;i++){
        box* pt_search_box = *(box**)cvGetSeqElem(seq_box, i);
        if (pt_search_box->stop_frame - pt_search_box->start_frame > param->min_duration_box) {         
            cvSeqPush(seq_box_final, &pt_search_box);                                               // copy boxes in seq_box_final
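            /* median threshold: sort the per-frame thresholds and take the
               middle element to binarize the averaged box image before OCR */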
            cvSeqSort(pt_search_box->seq_thr_t, cmp_thr, 0);
            int* thr_med = (int*)cvGetSeqElem( pt_search_box->seq_thr_t, (int)(pt_search_box->nb_img_detect_avg_t/2) );   
            set_threshold_OCR_Image(pt_search_box->im_average_mask_t,*thr_med);                
            transcription_box(pt_search_box, param);                                                // process transcription of the boxes
            if (param->print_text == 1){                                                            // print transcription
                printf("box_%d img_avg ymin=%d ymax=%d xmin=%d xmax=%d " ,pt_search_box->num ,round_me(pt_search_box->ymin_avg), round_me(pt_search_box->xmin_avg), round_me(pt_search_box->ymax_avg), round_me(pt_search_box->xmax_avg));
                print_transcription_image(get_img_OCR_Image(pt_search_box->im_average_mask_t), round_me(pt_search_box->thr_med), param);
            }
        }
        else free_box(pt_search_box);
    }
            
    /* Write transcription in output_path+".OCR" file */
    char * file_txt_temp=sprintf_alloc("%s.OCR", param->output_path);
    FILE * file_txt = fopen(file_txt_temp, "w");
    free(file_txt_temp); 
    cvSeqSort( seq_box_final, cmp_box_by_frame, 0);
    for (int i=0;i<seq_box_final->total;i++){
        file_print_box(file_txt, *(box**)cvGetSeqElem(seq_box_final, i), param);
    }    
    fclose(file_txt);

    /* free memory */     
    for (int i=0;i<seq_box_final->total;i++){
        free_box(*(box**)cvGetSeqElem(seq_box_final, i));
    }    
    cvClearSeq(seq_box);
    cvReleaseMemStorage( &storage_box );
    cvReleaseImage(&im_mask);
    cvClearSeq(seq_box_final);
    cvReleaseMemStorage( &storage_box_final );
    cvReleaseCapture( &capture ); 

    return 0;
}
Example #25
/**
   Initializes all variables that don't need to get updated for each flow calculation.
   Note: Not much error checking is done, all inputs should be > 0

   @param[in] width_in   Width of images that will be used for calculation
   @param[in] height_in   Height of images that will be used for calculation
   @param[in] max_level_in   The maximum level that will be reached in the multigrid algorithm, higher maximum level = coarser level reached
   @param[in] start_level_in   The starting level used as the base in the multigrid algorithm, higher start level = coarser starting level
   @param[in] n1_in   Number of pre-smoothing steps in the multigrid cycle
   @param[in] n2_in   Number of post-smoothing steps in the multigrid cycle
   @param[in] rho_in   Gaussian smoothing parameter
   @param[in] alpha_in   Regularisation parameter in the energy functional
   @param[in] sigma_in   Gaussian smoothing parameter

*/
VarFlow::VarFlow(int width_in, int height_in, int max_level_in, int start_level_in, int n1_in, int n2_in,
                float rho_in, float alpha_in, float sigma_in){
					
	max_level = max_level_in;
    start_level = start_level_in;
    
    if(max_level < start_level)
    {
        max_level = start_level;
        std::cout << "Warning: input max_level < start_level, correcting (new value = " << max_level << ")" << std::endl;
    }
	
	//Width and height of the largest image in the multigrid cycle, based on external input image dimensions
	//and the desired starting level
	int width = (int)floor(width_in/pow((float)2.0,(float)(start_level)));
    int height = (int)floor(height_in/pow((float)2.0,(float)(start_level)));
    
    // start_level too large, correct it
    if(width < 1 || height < 1)
    {
        if(width < 1)
        {
              start_level	= (int)floor(log(static_cast<float>(width_in)) / log(2.0));
              width			= (int)floor(width_in / pow((float)2.0,(float)(start_level)));
              height		= (int)floor(height_in / pow((float)2.0,(float)(start_level)));
        }
        
        if(height < 1)
        {
              start_level   = (int)floor(log(static_cast<float>(height_in)) / log(2.0));
              width			= (int)floor(width_in/pow((float)2.0,(float)(start_level)));
              height		= (int)floor(height_in/pow((float)2.0,(float)(start_level)));
        }
    
        // Correct max_level as well
        max_level = start_level;
        std::cout << "Warning: start_level too large, correcting start_level and max_level (new value = " << start_level << ")" << std::endl;
        
    }
    
    int width_end = (int)floor(width_in/pow((float)2.0,(float)(max_level)));
    int height_end = (int)floor(height_in/pow((float)2.0,(float)(max_level)));
    
    // max_level too large, correct it
    if(width_end < 1 || height_end < 1)
    {
        if(width_end < 1)
        {
              max_level = (int)floor(log(static_cast<float>(width_in)) / log(2.0));
              height_end = (int)floor(height_in/pow((float)2.0,(float)(max_level)));
        }
        
        if(height_end < 1)
        {
              max_level = (int)floor(log(static_cast<float>(height_in)) / log(2.0));
        }
        
	std::cout<<"Warning: max_level too large, correcting (new value = "<<max_level<<")"<< std::endl;
        
    }
          
             
    n1 = n1_in;
    n2 = n2_in;
    
    rho = rho_in;
    alpha = alpha_in;
    sigma = sigma_in;
    
    // Spatial derivative masks: fourth-order central-difference coefficients
    // (1/12, -2/3, 0, 2/3, -1/12)
    mask_x[0] = 0.08333;
    mask_x[1] = -0.66666;
    mask_x[2] = 0;
    mask_x[3] = 0.66666;
    mask_x[4] = -0.08333;
    
    mask_y[0] = -0.08333;
    mask_y[1] = 0.66666;
    mask_y[2] = 0;
    mask_y[3] = -0.66666;
    mask_y[4] = 0.08333;
    
    fx_mask = cvMat(1, 5, CV_32F, mask_x);
    fy_mask = cvMat(5, 1, CV_32F, mask_y);
    
    //Resized input images will be stored in these variables
    imgAsmall = cvCreateImage(cvSize(width, height), 8, 1);
    imgBsmall = cvCreateImage(cvSize(width, height), 8, 1);
    
    //Float representations of resized input images
    imgAfloat = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    imgBfloat = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    
    //Spatial and temporal derivatives of input image A
    imgAfx = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    imgAfy = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    imgAft = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
    
    //Arrays to hold images of various sizes used in the multigrid cycle
    imgAfxfx_array = new IplImage*[max_level-start_level+1];  
    imgAfxfy_array = new IplImage*[max_level-start_level+1];  
    imgAfxft_array = new IplImage*[max_level-start_level+1];  
    imgAfyfy_array = new IplImage*[max_level-start_level+1];  
    imgAfyft_array = new IplImage*[max_level-start_level+1];  
    
    imgU_array = new IplImage*[max_level-start_level+1];  
    imgV_array = new IplImage*[max_level-start_level+1];  
    imgU_res_err_array = new IplImage*[max_level-start_level+1];  
    imgV_res_err_array = new IplImage*[max_level-start_level+1];  

    int i;
    
    //Allocate memory for image arrays
    for(i = 0; i < (max_level-start_level+1); i++){
        
        imgAfxfx_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgAfxfy_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgAfxft_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgAfyfy_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgAfyft_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
    
        imgU_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgV_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgU_res_err_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
        imgV_res_err_array[i] = cvCreateImage(cvSize((int)floor(width/pow((float)2.0,(float)(i))),(int)floor(height/pow((float)2.0,(float)(i)))), IPL_DEPTH_32F, 1);
    
        cvZero(imgU_array[i]);
        cvZero(imgV_array[i]);
        cvZero(imgU_res_err_array[i]);
        cvZero(imgV_res_err_array[i]);
       
    }
    
    initialized = 1;
    
}
Example #26
/* ///////////////////// chess_corner_test ///////////////////////// */
void CV_ChessboardDetectorTimingTest::run( int start_from )
{
    int code = CvTS::OK;

    /* test parameters */
    char   filepath[1000];
    char   filename[1000];

    CvMat*  _v = 0;
    CvPoint2D32f* v;

    IplImage* img = 0;
    IplImage* gray = 0;
    IplImage* thresh = 0;

    int  idx, max_idx;
    int  progress = 0;

    sprintf( filepath, "%scameracalibration/", ts->get_data_path() );
    sprintf( filename, "%schessboard_timing_list.dat", filepath );
    printf("Reading file %s\n", filename);
    CvFileStorage* fs = cvOpenFileStorage( filename, 0, CV_STORAGE_READ );
    CvFileNode* board_list = fs ? cvGetFileNodeByName( fs, 0, "boards" ) : 0;

    if( !fs || !board_list || !CV_NODE_IS_SEQ(board_list->tag) ||
        board_list->data.seq->total % 4 != 0 )
    {
        ts->printf( CvTS::LOG, "chessboard_timing_list.dat can not be readed or is not valid" );
        code = CvTS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    max_idx = board_list->data.seq->total/4;

    for( idx = start_from; idx < max_idx; idx++ )
    {
        int count0 = -1;
        int count = 0;
        CvSize pattern_size;
        int result, result1 = 0;

        const char* imgname = cvReadString((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4), "dummy.txt");
        int is_chessboard = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4+1), 0);
        pattern_size.width = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4 + 2), -1);
        pattern_size.height = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4 + 3), -1);
        
        ts->update_context( this, idx-1, true );

        /* read the image */
        sprintf( filename, "%s%s", filepath, imgname );
    
        img = cvLoadImage( filename );
        
        if( !img )
        {
            ts->printf( CvTS::LOG, "one of chessboard images can't be read: %s\n", filename );
            if( max_idx == 1 )
            {
                code = CvTS::FAIL_MISSING_TEST_DATA;
                goto _exit_;
            }
            continue;
        }

        ts->printf(CvTS::LOG, "%s: chessboard %d:\n", imgname, is_chessboard);

        gray = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        thresh = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );
        cvCvtColor( img, gray, CV_BGR2GRAY );
 

        count0 = pattern_size.width*pattern_size.height;

        /* allocate additional buffers */
        _v = cvCreateMat(1, count0, CV_32FC2);
        count = count0;

        v = (CvPoint2D32f*)_v->data.fl;

        int64 _time0 = cvGetTickCount();
        result = cvCheckChessboard(gray, pattern_size);
        int64 _time01 = cvGetTickCount();
        
        OPENCV_CALL( result1 = cvFindChessboardCorners(
                 gray, pattern_size, v, &count, 15 ));
        int64 _time1 = cvGetTickCount();

        if( result != is_chessboard )
        {
            ts->printf( CvTS::LOG, "Error: chessboard was %sdetected in the image %s\n", 
                       result ? "" : "not ", imgname );
            code = CvTS::FAIL_INVALID_OUTPUT;
            goto _exit_;
        }
        if(result != result1)
        {
            ts->printf( CvTS::LOG, "Warning: results differ cvCheckChessboard %d, cvFindChessboardCorners %d\n", 
                       result, result1);
        }
                
        int num_pixels = gray->width*gray->height;
        float check_chessboard_time = float(_time01 - _time0)/(float)cvGetTickFrequency(); // in us
        ts->printf(CvTS::LOG, "    cvCheckChessboard time s: %f, us per pixel: %f\n", 
                   check_chessboard_time*1e-6, check_chessboard_time/num_pixels);
        
        float find_chessboard_time = float(_time1 - _time01)/(float)cvGetTickFrequency();
        ts->printf(CvTS::LOG, "    cvFindChessboard time s: %f, us per pixel: %f\n",
                   find_chessboard_time*1e-6, find_chessboard_time/num_pixels);

        cvReleaseMat( &_v );
        cvReleaseImage( &img );
        cvReleaseImage( &gray );
        cvReleaseImage( &thresh );
        progress = update_progress( progress, idx-1, max_idx, 0 );
    }

_exit_:

    /* release occupied memory */
    cvReleaseMat( &_v );
    cvReleaseFileStorage( &fs );
    cvReleaseImage( &img );
    cvReleaseImage( &gray );
    cvReleaseImage( &thresh );

    if( code < 0 )
        ts->set_failed_test_info( code );
}
Example #27
IplImage *detect_sunspots(IplImage *img)
{
    int se_size = INIT_SE_SIZE;
    int n_prev;
    int n = 0;

    IplImage *src;

    if (IPL_DEPTH_8U != img->depth || 1 != img->nChannels) {
        src = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
        cvConvertImage(img, src);
    } else {
        src = cvCloneImage(img);
    }

    IplImage *dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage *prev = 0;
    //cvSmooth(src, src, CV_GAUSSIAN, 3);
    //IplConvKernel *k = create_se(3);
    //cvErode(src, src, k);
    //cvEqualizeHist(src, src);
    //cvThreshold(src, src, 100, 255, CV_THRESH_TRUNC);
    //cut<pixel_type>(src, 120);
    //cvShowImage("Original", src);

    /* grow the structuring element until the black-hat transform stops
       revealing new foreground pixels; the previous iteration is the result */
    do {
        if (prev)
            cvReleaseImage(&prev);

        prev = cvCloneImage(dst);
        n_prev = n;

        IplConvKernel *se = create_se(se_size);
        cvMorphologyEx(src, dst, NULL, se, CV_MOP_BLACKHAT);
        //printf("SE_SIZE = %d\n", se_size);
        cvReleaseStructuringElement(&se);

        IplImage *tmp = threshold(dst);
        cvReleaseImage(&dst);
        dst = tmp;

        n = count_pixels<pixel_type>(dst, 255);
        se_size += 2;
    } while (n > n_prev);

    cvReleaseImage(&dst);
    dst = prev;

#ifdef APPLY_POST_PROCESSING
    IplConvKernel *kernel = create_se(2);
#ifdef POST_USE_EROSION
    cvErode(dst, dst, kernel);
#else
    cvMorphologyEx(dst, dst, NULL, kernel, CV_MOP_OPEN, 1);
    //cvErode(dst, dst, kernel2);
#endif
    cvReleaseStructuringElement(&kernel);
#endif

    cvReleaseImage(&src);

    return dst;
}
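create_se, threshold and count_pixels are project helpers that are not shown here. A minimal sketch of plausible implementations, assuming an elliptical structuring element, a fixed binary threshold (the 30/255 cutoff is a guess), and a pixel counter matching the count_pixels<pixel_type>(dst, 255) call above:

// Hypothetical stand-ins -- the project's real helpers may differ.
#include <opencv/cv.h>

static IplConvKernel *create_se(int size)
{
    // elliptical structuring element, (2*size+1) x (2*size+1)
    return cvCreateStructuringElementEx(size * 2 + 1, size * 2 + 1,
                                        size, size, CV_SHAPE_ELLIPSE, NULL);
}

static IplImage *threshold(IplImage *src)
{
    IplImage *dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    cvThreshold(src, dst, 30, 255, CV_THRESH_BINARY);  // cutoff is a guess
    return dst;
}

template <typename T>
static int count_pixels(IplImage *img, int value)
{
    int n = 0;
    for (int y = 0; y < img->height; y++) {
        const T *row = (const T *)(img->imageData + y * img->widthStep);
        for (int x = 0; x < img->width; x++)
            if (row[x] == (T)value)
                n++;
    }
    return n;
}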
Example #28
IplImage * detect_and_draw( IplImage* img )
{
    static CvScalar colors[] =
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}}
    };

    IplImage *gray, *small_img;
    int i, j;

    gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
    small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
                                       cvRound (img->height/scale)), 8, 1 );

    cvCvtColor( img, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );
    cvClearMemStorage( storage );

    if( cascade )
    {
        double t = (double)cvGetTickCount();
        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            1.1, 2, 0
                                            //|CV_HAAR_FIND_BIGGEST_OBJECT
                                            //|CV_HAAR_DO_ROUGH_SEARCH
                                            |CV_HAAR_DO_CANNY_PRUNING
                                            //|CV_HAAR_SCALE_IMAGE
                                            ,
                                            cvSize(30, 30) );
        t = (double)cvGetTickCount() - t;
        num = faces->total;
        //printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
        for( i = 0; i < (faces ? faces->total : 0); i++ )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
            CvMat small_img_roi;
            CvSeq* nested_objects;
            CvPoint center;
            CvScalar color = colors[i%8];
            int radius;
            center.x = cvRound((r->x + r->width*0.5)*scale);
            center.y = cvRound((r->y + r->height*0.5)*scale);
            radius = cvRound((r->width + r->height)*0.25*scale);
            Cx = center.x;
            Cy = center.y;
            R = radius;
            cvCircle( img, center, radius, color, 3, 8, 0 );
            if( !nested_cascade )
                continue;
            cvGetSubRect( small_img, &small_img_roi, *r );
            nested_objects = cvHaarDetectObjects( &small_img_roi, nested_cascade, storage,
                                                  1.1, 2, 0
                                                  //|CV_HAAR_FIND_BIGGEST_OBJECT
                                                  //|CV_HAAR_DO_ROUGH_SEARCH
                                                  //|CV_HAAR_DO_CANNY_PRUNING
                                                  //|CV_HAAR_SCALE_IMAGE
                                                  ,
                                                  cvSize(0, 0) );
            for( j = 0; j < (nested_objects ? nested_objects->total : 0); j++ )
            {
                CvRect* nr = (CvRect*)cvGetSeqElem( nested_objects, j );
                center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
                center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
                radius = cvRound((nr->width + nr->height)*0.25*scale);
                cvCircle( img, center, radius, color, 3, 8, 0 );
            }
        }
    }
    cvReleaseImage( &gray );
    cvReleaseImage( &small_img );
    //fp = fopen("/sdcard/test.jpg","w+");
    return img;
}
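The function reads several file-scope globals (scale, storage, cascade, nested_cascade, num, Cx, Cy, R) that are defined elsewhere. A hedged setup sketch showing how such state is typically initialized before the first call; the cascade file name passed in is a placeholder:

// Hedged sketch: file-scope state assumed by detect_and_draw().
#include <opencv/cv.h>
#include <opencv/highgui.h>

CvHaarClassifierCascade *cascade = 0, *nested_cascade = 0;
CvMemStorage *storage = 0;
double scale = 1.3;                  // downscale factor used above
int num = 0, Cx = 0, Cy = 0, R = 0;  // last detection results

bool init_detector(const char *cascade_path)
{
    // cascade_path is a placeholder, e.g. an XML cascade shipped with OpenCV
    cascade = (CvHaarClassifierCascade *)cvLoad(cascade_path, 0, 0, 0);
    storage = cvCreateMemStorage(0);
    return cascade != 0 && storage != 0;
}

After init_detector() succeeds, each captured frame can be passed straight to detect_and_draw().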
Example #29
std::list<Garbage*>
GarbageRecognition::garbageList(IplImage * src, IplImage * model) {

    std::list<Garbage*> garbageList;

    //cvNamedWindow("output",CV_WINDOW_AUTOSIZE);
    //object model

    //image for the histogram-based filter
    //could be a parameter

    //~ cvNamedWindow("andImage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("andSimage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("andSIImage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("drawContours",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("andSThreshImage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("threshImage",CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("andSequalizedImage",CV_WINDOW_AUTOSIZE);
    //~ cvNamedWindow("morphImage",CV_WINDOW_AUTOSIZE);

    utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
    CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

    //~ int frameWidth=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);
    //~ int frameHeight=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);

    //gets a frame for setting image size
    //CvSize srcSize = cvSize(frameWidth,frameHeight);
    CvSize srcSize = cvGetSize(src);

    //images for HSV conversion
    IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
    IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
    IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
    IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );


    //images for combining the hue plane with the other planes
    IplImage * andImage=cvCreateImage(srcSize,8,1);
    IplImage * andSimage=cvCreateImage(srcSize,8,1);
    IplImage * andSThreshImage=cvCreateImage(srcSize,8,1);
    IplImage * andSequalizedImage=cvCreateImage(srcSize,8,1);
    IplImage * andSIImage=cvCreateImage(srcSize,8,1);

    //Image for thresholding
    IplImage * threshImage=cvCreateImage(srcSize,8,1);

    //image for equalization
    IplImage * equalizedImage=cvCreateImage(srcSize,8,1);

    //image for Morphing operations(Dilate-erode)
    IplImage * morphImage=cvCreateImage(srcSize,8,1);

    //image for image smoothing
    IplImage * smoothImage=cvCreateImage(srcSize,8,1);

    //image for contour-finding operations
    IplImage * contourImage=cvCreateImage(srcSize,8,3);


    int frameCounter=1;
    int cont_index=0;

    //convolution kernel for morph operations
    IplConvKernel* element;

    CvRect boundingRect;

    //contours
    CvSeq * contours;

    //process the single input frame

    //convert image to hsv
    cvCvtColor( src, hsv, CV_BGR2HSV );
    cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );


    /*I(x,y)blue ~ ((uchar*)(img->imageData + img->widthStep*y))[x*3]
    I(x,y)green ~ ((uchar*)(img->imageData + img->widthStep*y))[x*3+1]
    I(x,y)red ~ ((uchar*)(img->imageData + img->widthStep*y))[x*3+2]*/

    //binarize the hue plane: keep only pixels in the yellow-ish band
    for(int x=0; x<srcSize.width; x++) {
        for(int y=0; y<srcSize.height; y++) {
            uchar * hue=&((uchar*) (h_plane->imageData+h_plane->widthStep*y))[x];
            uchar * sat=&((uchar*) (s_plane->imageData+s_plane->widthStep*y))[x];
            if((*hue>20 && *hue<40 && *sat>60))
                *hue=255;
            else
                *hue=0;
        }
    }
    cvAnd(h_plane, v_plane, andImage);
    cvAnd(h_plane, s_plane, andSimage);


    //apply morphologic operations
    element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
                                            MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
                                            CV_SHAPE_RECT, NULL);


    cvDilate(andImage,morphImage,element,MORPH_DILATE_ITER);
    cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);


    cvThreshold(morphImage,threshImage,120,255,CV_THRESH_BINARY);

    //get all contours
    contours=myFindContours(threshImage);
    //contours=myFindContours(smoothImage);


    cont_index=0;

    cvCopy(src,contourImage,0);



    while(contours!=NULL) {

        CvSeq * aContour=getPolygon(contours);
        utils::Contours * ct = new Contours(aContour);

        //apply filters
        if( ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER) &&
                ct->areaFilter(MINCONTOUR_AREA,MAXCONTOUR_AREA) &&
                //ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO) &&
                ct->boxAreaFilter(BOXFILTER_TOLERANCE) &&
                //ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN)&&
                1) {

            //get contour bounding box
            boundingRect=cvBoundingRect(ct->getContour(),0);
            cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
                        cvPoint(boundingRect.x+boundingRect.width,
                                boundingRect.y+boundingRect.height),
                        _GREEN,1,8,0);

            //draw the contour that passed the filters
            ct->printContour(3,cvScalar(127,127,0,0),
                             contourImage);

            std::vector<int> centroid(2);
            centroid=ct->getCentroid();

            //build garbage list
            utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
                    boundingRect.y,boundingRect.width,boundingRect.height);

            utils::Garbage * aGarbage = new utils::Garbage(r,centroid);

            garbageList.push_back(aGarbage);
        }

        delete ct;
        cvReleaseMemStorage( &aContour->storage );
        contours=contours->h_next;
        cont_index++;
    }

    cvShowImage("drawContours",contourImage);
    // cvWaitKey(0);
    delete h;


    cvReleaseHist(&testImageHistogram);
    //cvReleaseMemStorage( &contours->storage );
    cvReleaseImage(&threshImage);
    cvReleaseImage(&equalizedImage);
    cvReleaseImage(&morphImage);
    cvReleaseImage(&smoothImage);
    cvReleaseImage(&contourImage);

    cvReleaseImage(&hsv);
    cvReleaseImage(&h_plane);
    cvReleaseImage(&s_plane);
    cvReleaseImage(&v_plane);
    cvReleaseImage(&andImage);
    cvReleaseImage(&andSimage);
    cvReleaseImage(&andSThreshImage);
    cvReleaseImage(&andSequalizedImage);
    cvReleaseImage(&andSIImage);  //was created above but never released (leak in the original)

    return garbageList;
}
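myFindContours and getPolygon are utils helpers not shown here. A plausible minimal sketch: cvFindContours run on a scratch copy (it modifies its input), and a Douglas-Peucker approximation via cvApproxPoly, with per-polygon storage matching the cvReleaseMemStorage(&aContour->storage) call in the loop above. The 2% perimeter epsilon is an assumption:

// Hypothetical stand-ins for the project's helpers.
#include <opencv/cv.h>

static CvSeq *myFindContours(IplImage *binary)
{
    CvMemStorage *storage = cvCreateMemStorage(0);
    CvSeq *contours = 0;
    // cvFindContours modifies its input, so work on a copy
    IplImage *tmp = cvCloneImage(binary);
    cvFindContours(tmp, storage, &contours, sizeof(CvContour),
                   CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
    cvReleaseImage(&tmp);
    return contours;  // the caller can free it through contours->storage
}

static CvSeq *getPolygon(CvSeq *contour)
{
    // each polygon gets its own storage, matching the
    // cvReleaseMemStorage(&aContour->storage) call in the loop above
    CvMemStorage *storage = cvCreateMemStorage(0);
    return cvApproxPoly(contour, sizeof(CvContour), storage,
                        CV_POLY_APPROX_DP,
                        cvContourPerimeter(contour) * 0.02, 0);
}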
Example #30
void EyeManager::update_image(IplImage *img,double t)
{
	int i, j, k;
	IplImage *color = cvCreateImage(cvGetSize(img),8, 3);
	cvSet(color, cvScalar(0, 255, 0));
//	cvCopy(img, color, eye);

	int height = cvGetSize(img).height;
	int width = cvGetSize(img).width;
	/*
	for(i=0;i<src.size();i++)
	{
		vector2f tv = d[i]*t + src[i];
		int i0, j0, i1, j1;
		i0 = (int)tv.x;	i1 = i0 + 1;
		j0 = (int)tv.y;	j1 = j0 + 1;
		if(i0<0)i0=0;if(i0>=height)i0=height-1;
		if(i1<0)i1=0;if(i1>=height)i1=height-1;
		if(j0<0)j0=0;if(j0>=width)j0=width-1;
		if(j1<0)j1=0;if(j1>=width)j1=width-1;

		double pp,pq,qp,qq;
		pp = (tv.x - i0);
		pq = -(tv.x - i1);
		qp = (tv.y - j0);
		qq = -(tv.y - j1);

		vector3f col = vector3f(src_color[i].val[0],src_color[i].val[1],src_color[i].val[2]);
		a[i0][j0] += pq*qq*col;
		a[i0][j1] += pq*qp*col;
		a[i1][j0] += pp*qq*col;
		a[i1][j1] += pp*qp*col;
		b[i0][j0] += 1;
		b[i0][j1] += 1;
		b[i1][j0] += 1;
		b[i1][j1] += 1;
	}
	int cnt=0;
	for(i=0;i<height;i++)
		for(j=0;j<width;j++)
		{
			if(b[i][j]>0.5)
				cnt++;
		}
	for(i=0;i<height;i++)
		for(j=0;j<width;j++)
		{
			a[i][j] *= (double)cnt/src.size();
			if(b[i][j]>0.5)
			cvSet2D(color,i, j, cvScalar(a[i][j].x, a[i][j].y, a[i][j].z));
		}
	*/
	if(!ijk_init)
	{
		// for every eye pixel, cache the indices of the source points whose
		// motion ray passes close to it (angular tolerance of roughly 0.1)
		ijk = new vector<int>[eye_vec.size()];
		for(int ii=0;ii<(int)eye_vec.size();ii++)
		{
			i = eye_vec[ii].x;
			j = eye_vec[ii].y;
			for(k=0; k<(int)src.size();k++)
			{
				vector2f e = vector2f(i - src[k].x, j - src[k].y);
				// fabs: the original used abs(), which truncates doubles to int
				if(fabs(e.x*d[k].y - e.y*d[k].x)/(d[k].dist()+0.001) < 0.1 * (e.dist() + 0.001))
					ijk[ii].push_back(k);
			}
		}
		ijk_init = true;
	}

	for(int ii=0;ii<(int)eye_vec.size();ii++)
	{
		i = eye_vec[ii].x;
		j = eye_vec[ii].y;
		bool flag = false;
		double b = 0;
		vector3f a;
		for(int kk=0; kk<(int)ijk[ii].size();kk++)
		{
			k = ijk[ii][kk];
			vector2f e = vector2f(i - src[k].x, j - src[k].y);
			double dt = e*d[k]/d[k].dist();
			if(dt >= t * d[k].dist())
			{
				flag = true;
				// pixels just ahead of the moving front get a distance-weighted blend
				if(dt < t*d[k].dist() + 1.5)
				{
					vector2f tv = d[k]*t + src[k];
					vector3f col = vector3f(src_color[k].val[0],src_color[k].val[1],src_color[k].val[2]);
					//printf("%d %d %d  %lf %lf %lf\n",i,j,k, col.x, col.y, col.z);
					tv.x -= i;
					tv.y -= j;
					a += col / (tv.dist() + 0.001);
					b += 1/(tv.dist()+0.001);
				}
			}
		}
		if(flag)
		{
			cvSet2D(color, i, j, cvGet2D(img,i,j));
		}
		else	// not yet reached by any front: fill with the background color
		{
			//printf("%lf %lf %lf\n",sal_col.x, sal_col.y, sal_col.z);
			cvSet2D(color, i, j, cvScalar(sal_col.x,sal_col.y,sal_col.z));
		}
		if(b > E)	// on the front itself: use the blended color
		{
			a /= b;
			cvSet2D(color, i, j, cvScalar(a.x,a.y,a.z));
		}
	}

	cvCopy(color, img, eye);
	cvReleaseImage(&color);
}
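vector2f and vector3f are small math types from this project and are not shown; the code above only assumes roughly the following interface (length via dist(), dot product via operator*, scaling, accumulation). A minimal sketch, purely to document the assumed operations:

// Hypothetical minimal versions of the math types assumed above.
#include <cmath>

struct vector2f {
    double x, y;
    vector2f() : x(0), y(0) {}
    vector2f(double x_, double y_) : x(x_), y(y_) {}
    double dist() const { return std::sqrt(x*x + y*y); }                 // Euclidean length
    double operator*(const vector2f &o) const { return x*o.x + y*o.y; }  // dot product
    vector2f operator*(double s) const { return vector2f(x*s, y*s); }    // scaling
    vector2f operator+(const vector2f &o) const { return vector2f(x+o.x, y+o.y); }
};

struct vector3f {
    double x, y, z;
    vector3f() : x(0), y(0), z(0) {}
    vector3f(double x_, double y_, double z_) : x(x_), y(y_), z(z_) {}
    double dist() const { return std::sqrt(x*x + y*y + z*z); }
    vector3f operator/(double s) const { return vector3f(x/s, y/s, z/s); }
    vector3f &operator+=(const vector3f &o) { x+=o.x; y+=o.y; z+=o.z; return *this; }
    vector3f &operator/=(double s) { x/=s; y/=s; z/=s; return *this; }
};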