Example #1
0
int main(){
    /*
     * Robot steering demo: tracks three colour markers (robot pink/yellow
     * and the ball, via the external corp/cory/corr locators), computes the
     * robot heading and ball bearing, and sends single-character steering
     * commands over serial ('r' right, 'l' left, 'f' forward).
     * Uses the file-scope images img / img0 declared elsewhere in this file.
     */
    Tserial *com;                       /* serial link to the robot controller */
    int key = 0;                        /* last key pressed in the OpenCV window */
    /* marker coordinates: robot pink (rp), robot yellow (ry), ball (b) */
    int rpx, rpy, ryx, ryy, bx, by;
    int rpx1, rpy1, bx1, by1;           /* vectors relative to the yellow marker */
    double theta1 = 0.1, theta2 = 0.1;  /* robot heading / ball bearing (radians) */

    com = new Tserial();
    com->connect("COM3", 4800, spNONE);
    com->sendChar('a');                 /* start/handshake command */

    /* initialize camera.
       BUG FIX: the NULL check must come BEFORE cvSetCaptureProperty,
       otherwise a failed open dereferences a NULL capture. */
    CvCapture *capture = cvCaptureFromCAM(0);
    if ( !capture ) {
        fprintf( stderr, "Cannot open initialize webcam!\n" );
        return 1;
    }
    cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 1024 );
    cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 720 );

    /* create a window for the video */
    cvNamedWindow( "image", CV_WINDOW_AUTOSIZE );

    while( key != 'q' ) {
        /* get a frame (owned by the capture -- never released here) */
        img = cvQueryFrame( capture );
        if( !img ) break;

        /* work in HSV space; img0 is released at the end of each pass */
        img0 = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
        cvCvtColor(img, img0, CV_BGR2HSV);

        /* display current frame */
        cvShowImage( "image", img0 );

        /* locate the markers: 2nd argument selects the axis
           (1 -> x coordinate, 0 -> y coordinate) */
        rpx = corp(img0, 1);
        ryx = cory(img0, 1);
        bx  = corr(img0, 1);
        rpy = corp(img0, 0);
        ryy = cory(img0, 0);
        by  = corr(img0, 0);

        /* vectors from the yellow marker to the pink marker / the ball.
           BUG FIX: rpy1 previously read "rpx - ryy" (x mixed with y). */
        rpx1 = rpx - ryx;
        rpy1 = rpy - ryy;
        bx1  = bx - ryx;
        by1  = by - ryy;

        theta1 = atan((double)rpy1 / (double)rpx1);
        theta2 = atan((double)by1 / (double)bx1);

        /* steering decision table */
        if (theta1 > 0 && theta1 - theta2 > 0 && rpx1 > 0)
            com->sendChar('r');
        else if (theta1 <= 0 && M_PI + theta1 - theta2 > 0)
            com->sendChar('l');
        else if (theta1 < 0 && theta2 >= 0 && rpx < ryx)
            com->sendChar('r');
        else if (theta1 > 0 && theta1 - theta2 < 0)
            com->sendChar('l');
        else if (theta1 > 0 && theta1 - theta2 > 0 && rpx1 < 0)
            com->sendChar('l');
        else if (theta1 - theta2 == 0.0 && rpx1 * bx1 > 0)
            com->sendChar('f');
        else if (theta1 - theta2 == 0.0 && rpx1 * bx1 < 0) {
            com->sendChar('r');
            cvWaitKey(5);
        }

        /* exit if user presses 'q' */
        key = cvWaitKey( 1 );
        cvReleaseImage(&img0);
    }

    /* free memory */
    cvDestroyWindow( "image" );
    cvReleaseCapture( &capture );
    com->disconnect();
    return 0;
}
Example #2
0
 int main() {
  // Blob-tracking demo: segments a blue-ish HSV range in a down-scaled camera
  // frame, finds the biggest blob inside a bottom and a top ROI strip
  // (cvBlobsLib), draws markers on the full frame and streams the blob
  // centres over serial via sendchar(). Relies on file-scope globals
  // seq, seqdiv and moddiv (frame down-scale factor) defined elsewhere.
  CvPoint pt1b,pt2b, pt1t,pt2t,ptarry[4];   // bounding-box corners, bottom/top blob
  int tempwidth,tempheight;
  CvRect regt,rectROIbot,rectROItop;
  rectROItop=cvRect(0,0,80,10);             // top strip of the scaled frame
  rectROIbot=cvRect(0,50,80,10);            // bottom strip of the scaled frame
  CvPoint b_cir_center,t_cir_center;        // centres of the bottom/top blobs
  CvPoint frame_center;
  CvPoint A,B,C,D;                          // endpoints of the centre cross-hair
  CvPoint temp;
  double angle,spinsize;
  int cir_radius=1; 
  int frame_width=160, frame_height=120;
  IplImage* frame;
  IplImage* threshframe;
  IplImage* hsvframe;
  IplImage* threshframebot;
  IplImage* threshframetop;
  IplImage* modframe;
  IplImage* dilframetop;
  IplImage* dilframebot;
  int release=0, rmax=100;
  int modfheight, modfwidth;
  // NOTE(review): int_serial() -- possibly a typo for init_serial(); confirm
  // against the serial helper defined elsewhere in this file.
  int_serial();
  unsigned char sendBuf;
   CvCapture* capture = cvCaptureFromCAM( -1 );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 120x160 
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
  cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   while ( 1 ) {
     // Get one frame
      frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }

     // Shrink the frame by moddiv in each dimension before segmentation.
     // NOTE(review): modframe/hsvframe/threshframe and the clones below are
     // created every iteration and never released -- leaks per frame.
     modfheight = frame->height;
     modfwidth = frame->width;
      modframe = cvCreateImage(cvSize((int)(modfwidth/moddiv),(int)(modfheight/moddiv)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
      hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
      threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
    // cvInRangeS(hsvframe,cvScalar(0, 180, 140),cvScalar(15, 230, 235),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame) red
     cvInRangeS(hsvframe,cvScalar(100, 20, 40),cvScalar(140, 120, 100),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     
      threshframebot=cvCloneImage(threshframe);
      cvSetImageROI(threshframebot,rectROIbot);

      threshframetop=cvCloneImage(threshframe);
      cvSetImageROI(threshframetop,rectROItop);
 
//////////////////////////////////////////////////////////////////////////////////////////
   // NOTE(review): this brace does not close until after the drawing/serial
   // code below (the seq==seqdiv branches were commented out and their braces
   // folded in), so everything down to "seq++" runs only when seq==0.
   if (seq==0) {
     threshframebot=cvCloneImage(threshframe);   // re-clones; the clone above is leaked
     cvSetImageROI(threshframebot,rectROIbot);
     dilframebot = cvCreateImage(cvGetSize(threshframebot),8,1);
     cvDilate(threshframebot,dilframebot,NULL,2); //cvDilate(input frame,

     CBlobResult blobs_bot;
     blobs_bot = CBlobResult(dilframebot,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_bot.Filter(blobs_bot,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_bot;
     blobs_bot.GetNthBlob(CBlobGetArea(),0,biggestblob_bot); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     // (*moddiv rescales blob coords back to full-frame space; the +100
     // presumably maps the bottom ROI strip back to its full-frame row -- verify)
     pt1b.x = biggestblob_bot.MinX()*moddiv;
     pt1b.y = biggestblob_bot.MinY()*moddiv+100;
     pt2b.x = biggestblob_bot.MaxX()*moddiv;
     pt2b.y = biggestblob_bot.MaxY()*moddiv+100;
     b_cir_center.x=(pt1b.x+pt2b.x)/2;
     b_cir_center.y=(pt1b.y+pt2b.y)/2;//}
//////////////////////////////////////////////////////////////////////////////////////////
//    if(seq==seqdiv){
      threshframetop=cvCloneImage(threshframe);  // re-clones; the clone above is leaked
      cvSetImageROI(threshframetop,rectROItop);
      dilframetop = cvCreateImage(cvGetSize(threshframetop),8,1);
     cvDilate(threshframetop,dilframetop,NULL,2); //cvDilate(input frame,
     CBlobResult blobs_top;
     blobs_top = CBlobResult(dilframetop,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs_top.Filter(blobs_top,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob_top;
     blobs_top.GetNthBlob(CBlobGetArea(),0,biggestblob_top); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1t.x = biggestblob_top.MinX()*moddiv;
     pt1t.y = biggestblob_top.MinY()*moddiv;
     pt2t.x = biggestblob_top.MaxX()*moddiv;
     pt2t.y = biggestblob_top.MaxY()*moddiv;
     t_cir_center.x=(pt1t.x+pt2t.x)/2;
     t_cir_center.y=(pt1t.y+pt2t.y)/2;// }
//////////////////////////////////////////////////////////////////////////////////////
//   if(seq==seqdiv+1) {
     // Cross-hair at the frame centre plus rectangles/centres of both blobs.
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
     A.x=frame_center.x-4;
     A.y=frame_center.y;
     B.x=frame_center.x+4;
     B.y=frame_center.y;
     C.y=frame_center.y-4;
     C.x=frame_center.x;
     D.y=frame_center.y+4;
     D.x=frame_center.x;
     cvRectangle(frame,pt1t,pt2t,cvScalar(255,0,0),1,8,0);
     cvRectangle(frame,pt1b,pt2b,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
     //cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0);
     cvCircle( frame, b_cir_center, cir_radius, cvScalar(0,255,255), 1, 8, 0 ); // center point of the rectangle
     cvLine(frame, A, B,cvScalar(255,0,255),2,8,0);
     cvLine(frame, C, D,cvScalar(255,0,255),2,8,0);
    

    // Bottom blob found: stream its centre ((255,255) marks "no top data").
    if (b_cir_center.x!=0&&b_cir_center.y!=100) 
    {
    cvLine(frame, b_cir_center, frame_center,cvScalar(0,255,0),1,8,0);
    sendchar(253);sendchar(255);sendchar(255);
    sendchar(254);sendchar(b_cir_center.x);sendchar(b_cir_center.y);
    printf("top:(255, 255);  bottom: (%3d, %3d)\n",b_cir_center.x,b_cir_center.y);
    }

    // Top blob found: stream its centre ((255,255) marks "no bottom data").
    if(t_cir_center.x!=0&&t_cir_center.y!=0)
     {
     cvLine(frame, frame_center, t_cir_center,cvScalar(255,255,0),1,8,0);
    sendchar(253);sendchar(t_cir_center.x);sendchar(t_cir_center.y);
    sendchar(254);sendchar(255); sendchar(255);
    printf("top:(%3d, %3d);  bottom: (255, 255)\n",t_cir_center.x,t_cir_center.y);

    }

     // Both blobs found: connect them and stream both centres.
     if ((b_cir_center.x!=0&&b_cir_center.y!=100)&&(t_cir_center.x!=0&&t_cir_center.y!=0)) 
     {
     cvLine(frame, b_cir_center, t_cir_center,cvScalar(0,255,255),1,8,0);
     printf("top:(%3d, %3d);  bottom: (%3d, %3d)\n",t_cir_center.x,t_cir_center.y,b_cir_center.x,b_cir_center.y);
     sendchar(253);sendchar(t_cir_center.x); sendchar(t_cir_center.y);
     sendchar(254);sendchar(b_cir_center.x);sendchar(b_cir_center.y);
     }
		
    }
    // Advance the frame sequencer; wraps at seqdiv+1.
    seq++;
    seq=seq%(seqdiv+1);
     cvShowImage( "mywindow", frame); // show output image
//     cvShowImage( "bot", threshframebot);
//     cvShowImage( "top", threshframetop);
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27  ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   //v4l.flush();
   cvDestroyWindow( "mywindow" );
   
return 0;
 }
Example #3
0
/*
 * SIFT recognition demo: reads frames from a video file, extracts SIFT
 * keypoints/descriptors with VLFeat, and matches them against two
 * memory-mapped descriptor files ("A" and "B") via kd-forests, overlaying
 * "A shows!!" / "B shows!!" when the match ratio exceeds a per-octave
 * threshold. DESCSIZE and dprintf are defined elsewhere in this file.
 */
int main (int argc, const char * argv[]) {
    int w=1280,h=720;                // working resolution for SIFT
    int i=0;
    int nkeypoints=0;
    int press=0;                     // last key from cvWaitKey (previous frame)
    char img2_file[] = "/Users/quake0day/ana2/MVI_0124_QT 768Kbps_012.mov";
    vl_bool render=1;                // toggled with 'r': draw/match keypoints
    vl_bool first=1;                 // first octave of the current frame
    VlSiftFilt * myFilter=0;
    VlSiftKeypoint const* keys;
    //CvCapture * camera = cvCreateCameraCapture (CV_CAP_ANY);
    CvCapture * camera = cvCreateFileCapture(img2_file);
    vl_sift_pix *descriptorsA, *descriptorsB;
    int ndescA=0, ndescB=0;

    //DescriptorA file
    // NOTE(review): open()/fstat()/mmap() results are never checked -- a
    // missing file makes fstat fail and mmap return MAP_FAILED; verify.
    int dscfd;
    struct stat filestat;
    dscfd = open("/Users/quake0day/ana2/saveC.jpg.dsc", O_RDONLY, 0644);
    fstat(dscfd, &filestat);
    int filesize=filestat.st_size;
    descriptorsA=(vl_sift_pix*)mmap(0, filesize, PROT_READ, MAP_SHARED, dscfd, 0);
    ndescA=(filesize/sizeof(vl_sift_pix))/DESCSIZE;
    printf("number of descriptors: %d\n", ndescA);

    //Build kdtreeA
    VlKDForest *myforest=vl_kdforest_new(VL_TYPE_FLOAT, DESCSIZE, 1);
    vl_kdforest_build(myforest, ndescA, descriptorsA);

    //DescriptorsB file
    dscfd=open("/Users/quake0day/ana2/saveD.jpg.dsc", O_RDONLY, 0644);
    fstat(dscfd, &filestat);
    filesize=filestat.st_size;
    descriptorsB=(vl_sift_pix*)mmap(0, filesize, PROT_READ, MAP_SHARED, dscfd, 0);
    ndescB=(filesize/sizeof(vl_sift_pix))/DESCSIZE;
    printf("number of descriptors: %d\n", ndescB);

    //Build kdtreeB
    VlKDForest *myforestB=vl_kdforest_new(VL_TYPE_FLOAT, DESCSIZE, 1);
    vl_kdforest_build(myforestB, ndescB, descriptorsB);

    //Neighbors (one nearest neighbour per query)
    VlKDForestNeighbor *neighbors=(VlKDForestNeighbor*)malloc(sizeof(VlKDForestNeighbor));
    VlKDForestNeighbor *neighborsB=(VlKDForestNeighbor*)malloc(sizeof(VlKDForestNeighbor));

    //Image variables
    vl_sift_pix* fim;                // grayscale frame as float buffer for VLFeat
    int err=0;
    int octave, nlevels, o_min;
    cvNamedWindow("Hello", 1);

    //For text
    CvFont font;
    double hScale=2;
    double vScale=2;
    int    lineWidth=2;
    cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale,vScale,0,lineWidth, 1);

    // First frame (owned by the capture -- see release note at the bottom)
    IplImage *myCVImage=cvQueryFrame(camera);//cvLoadImage("2.jpg", -1);

    IplImage *afterCVImage=cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
    IplImage *resizingImg=cvCreateImage(cvSize(w, h), myCVImage->depth, myCVImage->nChannels);
    octave=2;
    nlevels=5;
    o_min=1;
    myFilter=vl_sift_new(w, h, octave, nlevels, o_min);
    vl_sift_set_peak_thresh(myFilter, 0.5);
    fim=malloc(sizeof(vl_sift_pix)*w*h);
    float thre;                      // match-percentage threshold per octave


    while (myCVImage) {
        dprintf("%d*%d\n",myCVImage->width,myCVImage->height);

        cvResize(myCVImage, resizingImg, CV_INTER_AREA);
        dprintf("resized scale:%d*%d\n",myCVImage->width,myCVImage->height);
        // Save the frame when 's' was pressed during the PREVIOUS iteration
        if (press=='s') {
            cvSaveImage("save.jpg", resizingImg);
        }
        cvConvertImage(resizingImg, afterCVImage, 0);


        // Copy the 8-bit grayscale image into the float buffer VLFeat expects
        for (i=0; i<h; i++) {
            for (int j=0; j<w; j++) {
                fim[i*w+j]=CV_IMAGE_ELEM(afterCVImage,uchar,i,j);
            }
        }

        //vl_sift_set_peak_thresh(myFilter, 0.5);
        //vl_sift_set_edge_thresh(myFilter, 10.0);
        first=1;
        // Iterate over the SIFT scale-space octaves of this frame
        while (1) {
            printf("~~~~~~~~~~start of octave~~~~~~~~~~~~\n");


            if (first) {
                first=0;
                thre=0.25;       // stricter threshold on the first octave
                err=vl_sift_process_first_octave(myFilter, fim);
            }
            else {
                thre=0.05;
                err=vl_sift_process_next_octave(myFilter);
            }
            if (err) {
                // No more octaves: leave the per-frame loop
                err=VL_ERR_OK;
                break;
            }

            printf("Octave: %d\n", vl_sift_get_octave_index(myFilter));
            vl_sift_detect(myFilter);
            nkeypoints=vl_sift_get_nkeypoints(myFilter);
            dprintf("insider numkey:%d\n",nkeypoints);
            keys=vl_sift_get_keypoints(myFilter);
            dprintf("final numkey:%d\n",nkeypoints);

            int countA=0, countB=0;
            int matchcountA=0, matchcountB=0;
            float avgA=0, avgB=0;
            if (render) {
                for (i=0; i<nkeypoints; i++) {
                    //cvCircle(resizingImg, cvPoint(keys->x, keys->y), keys->sigma, cvScalar(100, 255, 50, 0), 1, CV_AA, 0);
                    dprintf("x:%f,y:%f,s:%f,sigma:%f,\n",keys->x,keys->y,keys->s,keys->sigma);

                    double angles [4] ;
                    int nangles ;


                    /* obtain keypoint orientations ........................... */
                    nangles=vl_sift_calc_keypoint_orientations(myFilter, angles, keys);

                    /* for each orientation (only the first one is used here) . */
                    for (int q = 0 ; q < (unsigned) 1 ; ++q)
                    {
                        vl_sift_pix descr [128] ;


                        /* compute descriptor (if necessary) */
                        vl_sift_calc_keypoint_descriptor(myFilter, descr, keys, angles[q]);

                        // Scale to the conventional 0..255 SIFT descriptor range
                        for (int j=0; j<128; j++)
                        {
                            descr[j]*=512.0;
                            descr[j]=(descr[j]<255.0)?descr[j]:255.0;
                        }

                        // Nearest neighbour in each reference descriptor set
                        vl_kdforest_query(myforest, neighbors, 1, descr);
                        vl_kdforest_query(myforestB, neighborsB, 1, descr);
                        if (neighbors->distance<50000.0)
                        {
                            matchcountA++;
                            cvCircle(resizingImg, cvPoint(keys->x, keys->y), keys->sigma, cvScalar(100, 0, 0, 255), 1, CV_AA, 0);

                        }

                        if (neighborsB->distance<50000.0)
                        {
                            matchcountB++;
                            cvCircle(resizingImg, cvPoint(keys->x, keys->y), keys->sigma, cvScalar(0, 50, 255, 100), 1, CV_AA, 0);

                        }

                        countA++;
                        avgA+=neighbors->distance;
                        countB++;
                        avgB+=neighborsB->distance;

                    }
                    keys++;
                }
            }
            // NOTE(review): countA/countB are 0 when render==0 or no keypoints
            // were found -- these divisions then produce NaN/inf; verify intent.
            avgA=avgA/countA;
            float percentage=((float)matchcountA*2)/ndescA;
            printf("Percentage:%f\n", percentage);
            printf("avg:%f\n",avgA);
            printf("thre==%f\n", thre);
            if (percentage>=thre) {
                printf("A shows!!!\n");
                cvPutText (resizingImg, "A shows!!",cvPoint(50, 100), &font, cvScalar(0,255,255,0));

            }

            avgB=avgB/countB;
            percentage=((float)matchcountB*2.5)/ndescB;
            printf("Percentage:%f\n", percentage);
            printf("avg:%f\n",avgB);
            printf("thre==%f\n", thre);
            if (percentage>=thre) {
                printf("B shows!!!\n");
                cvPutText (resizingImg, "B shows!!",cvPoint(400, 100), &font, cvScalar(0,255,255,0));

            }
            printf("~~~~~~~~~~~end of octave~~~~~~~~~~~~\n");
        }

        cvShowImage("Hello", resizingImg);

        myCVImage = cvQueryFrame(camera);

        press=cvWaitKey(1);
        if( press=='q' )
            break;
        else if( press=='r' )
            render=1-render;     // toggle keypoint rendering/matching
    }
    free(fim);
    free(neighbors);
    free(neighborsB);
    cvReleaseImage(&afterCVImage);
    cvReleaseImage(&resizingImg);
    // NOTE(review): myCVImage comes from cvQueryFrame and is owned by the
    // capture -- releasing it here is invalid; the capture itself is also
    // never released (cvReleaseCapture(&camera) is missing). Verify.
    cvReleaseImage(&myCVImage);

    return 0;
}
Example #4
0
/**
 * @brief Main entry point of the particle-filter colour tracker.
 * @param argc Number of program arguments
 * @param argv Program argument strings (argv[1] = video file,
 *             argv[2] = particles per object, argv[3] = max objects)
 * @return Nothing if correct, or some negative number if incorrect
 */
int main( int argc, char** argv ) {
	
	if( argc < 4 )
		return -1;

	// Variable declarations
	gsl_rng *rng;
	IplImage *frame, *hsv_frame;
	float **ref_histos, histo_aux[1][HTAM];
	CvCapture *video;
	particle **particles, **aux, **nuevas_particulas;
	CvScalar color_rojo = CV_RGB(255,0,0), color_azul = CV_RGB(0,0,255);
	float factor = 1.0 / 255.0, sum = 0.0f;
	CvRect *regions;
	int num_objects = 0;
	int MAX_OBJECTS = atoi(argv[3]), PARTICLES = atoi(argv[2]);
	FILE *datos;
	// NOTE(review): num[3] holds "%02d" + NUL only for k <= 99 -- assumes
	// MAX_OBJECTS < 100; verify.
	char name[45], num[3], *p1, *p2;
	clock_t t_ini, t_fin;
	double ms;
	
	video = cvCaptureFromFile( argv[1] );
	if( !video ) {
		printf("No se pudo abrir el fichero de video %s\n", argv[1]);
		exit(-1);
	}

	int nFrames = (int) cvGetCaptureProperty( video , CV_CAP_PROP_FRAME_COUNT );
	// first_frame is presumably a file-scope global declared elsewhere
	first_frame = cvQueryFrame( video );
	num_objects = get_regions( &regions,  MAX_OBJECTS, argv[1] );
	if( num_objects == 0 )
		exit(-1);

	t_ini = clock();
	// Initialize the random number generator
	gsl_rng_env_setup();
	rng = gsl_rng_alloc( gsl_rng_mt19937 );
	gsl_rng_set(rng, (unsigned long) time(NULL));

	hsv_frame = bgr2hsv( first_frame );
	nuevas_particulas = (particle**) malloc( num_objects * sizeof( particle* ) );
	for( int j = 0; j < num_objects; ++j )
		nuevas_particulas[j] = (particle*) malloc( PARTICLES * sizeof( particle ) );
	
	// Compute the reference histograms and spread the initial particles
	ref_histos = compute_ref_histos( hsv_frame, regions, num_objects );
	particles = init_distribution( regions, num_objects, PARTICLES );

	// Allocate memory for the precomputed bin table
	int **tabla_bin = (int**) malloc( hsv_frame->height * sizeof(int*) );
	for(int f = 0; f < hsv_frame->height; ++f)
		tabla_bin[f] = (int*) malloc( hsv_frame->width * sizeof(int) );

	// Display the tracking
	if( show_tracking ) {

		// Show every particle
		if( show_all )
			for( int k = 0; k < num_objects; ++k )
				for( int j = 0; j < PARTICLES; ++j )
					display_particle( first_frame, particles[k][j], color_azul );

		// Draw each object's most promising particle
		for( int k = 0; k < num_objects; ++k )
			display_particle( first_frame, particles[k][0], color_rojo );

		cvNamedWindow( "RGB", 1 );
		cvShowImage( "RGB", first_frame );
		cvWaitKey( 5 );
	}

	// Export the reference histograms and the frames
	if( exportar ) {
		export_ref_histos( ref_histos, num_objects );
		export_frame( first_frame, 1 );

		for( int k = 0; k < num_objects; ++k ) {
			sprintf( num, "%02d", k );
			strcpy( name, REGION_BASE);
			// NOTE(review): p1 is NULL if argv[1] contains no '/', and the
			// strncat call both modifies p1 (++p1) and reads it (p2-p1) in the
			// same unsequenced argument list -- undefined behavior; verify.
			p1 = strrchr( argv[1], '/' );
			p2 = strrchr( argv[1], '.' );
			strncat( name, (++p1), p2-p1 );
			strcat( name, num );
			strcat( name, ".txt" );
			datos = fopen( name, "a+" );
			if( ! datos ) {
				printf("Error creando fichero para datos\n");
				exit(-1);;
			}
			fprintf( datos, "%d\t%f\t%f\n", 0, particles[k][0].x, particles[k][0].y );
			fclose( datos );
		}
	}

	// Main loop!!
	for(int i = 1; i < nFrames; ++i) {

		// Remember: frame must not be released because it comes from cvQueryFrame
		frame = cvQueryFrame( video );

		// Manual BGR->HSV conversion of the whole frame (float HSV output)
		for( int f = 0 ; f < hsv_frame->height; ++f ) {
			float *ptrHSV = (float*) ( hsv_frame->imageData + hsv_frame->widthStep*f );
			unsigned char *ptrRGB = (unsigned char*) ( frame->imageData + frame->widthStep*f );
			float h;
			for( int col = 0, despH = 0; col < hsv_frame->width; ++col, despH+=3 ) {
				int despS = despH+1;
				int despV = despH+2;
				float b = ptrRGB[despH] * factor;
				float g = ptrRGB[despS] * factor;
				float r = ptrRGB[despV] * factor;
				float min = MIN(MIN(b, g), r);
				float max = MAX(MAX(b, g), r);
				ptrHSV[despV] = max; // v
				if( max != min ) {
					float delta = max - min;
					ptrHSV[despS] = delta / max; // s = (max - min) / max = 1.0 - (min / max)
					if( r == max )
						h = ( g - b ) / delta;
					else if( g == max )
						h = 2.0f + (( b - r ) / delta);
					else
						h = 4.0f + (( r - g ) / delta);
					h *= 60.0f;
					if(h < 0.0f)
						h += 360.0f;
					ptrHSV[despH] = h; // h
				}
				else {
					ptrHSV[despH] = 0.0f; // h
					ptrHSV[despS] = 0.0f; // s
				}

				// Take advantage of the loop iterations to build the precomputed bin table
				int hd, sd, vd;
	
				// If S or V is below its threshold, yield a "colorless" bin
				vd = MIN( (int)(ptrHSV[despV] * NV / V_MAX), NV-1 );
				if( ptrHSV[despS] < S_THRESH  ||  ptrHSV[despV] < V_THRESH )
					tabla_bin[f][col] = NH * NS + vd;

				// Otherwise a "colorful" bin is determined
				else {	
					hd = MIN( (int)(ptrHSV[despH] * NH / H_MAX), NH-1 );
					sd = MIN( (int)(ptrHSV[despS] * NS / S_MAX), NS-1 );
					tabla_bin[f][col] = sd * NH + hd;
				}
			}
		}

		// Perform the prediction and likelihood measurement for each particle
		for( int k = 0; k < num_objects; ++k ) {

			sum = 0.0f;
			for( int j = 0; j < PARTICLES; ++j ) {
				transition( &particles[k][j], frame->width, frame->height, rng );
				particles[k][j].w = likelihood( hsv_frame, &particles[k][j], ref_histos[k], histo_aux[0], tabla_bin );
				sum += particles[k][j].w;
			}
		
			// Normalize the weights (resampling happens below)
			for( int j = 0; j < PARTICLES; ++j )
				particles[k][j].w /= sum;
		}

		// Resample an unweighted particle set
		for (int k = 0; k < num_objects; ++k )
			resample( particles[k], PARTICLES, nuevas_particulas[k] );

		// Swap the particle buffers for the next iteration
		aux = particles;
		particles = nuevas_particulas;
		nuevas_particulas = aux;

		// Display the tracking
		if( show_tracking ) {

			// Show every particle
			if( show_all )
				for( int k = 0; k < num_objects; ++k )
					for( int j = 0; j < PARTICLES; ++j )
						display_particle( frame, particles[k][j], color_azul );

			// Draw each object's most promising particle
			for( int k = 0; k < num_objects; ++k )
				display_particle( frame, particles[k][0], color_rojo );
			cvNamedWindow( "RGB", 1 );
			cvShowImage( "RGB", frame );
			cvWaitKey( 5 );
		}

		// Export the reference histograms and the frames
		if( exportar ) {
			export_frame( frame, i+1 );

			for( int k = 0; k < num_objects; ++k ) {
				sprintf( num, "%02d", k );
				strcpy( name, REGION_BASE);
				// NOTE(review): same unsequenced ++p1 / p2-p1 issue as above
				p1 = strrchr( argv[1], '/' );
				p2 = strrchr( argv[1], '.' );
				strncat( name, (++p1), p2-p1 );
				strcat( name, num );
				strcat( name, ".txt" );
				datos = fopen( name, "a+" );
				if( ! datos ) {
					printf("Error abriendo fichero para datos\n");
					exit(-1);
				}
				fprintf( datos, "%d\t%f\t%f\n", i, particles[k][0].x, particles[k][0].y );
				fclose( datos );
			}
		}
	}
	
	// Free every resource used (mallocs, gsl and frames)
	for (int i = 0; i < hsv_frame->height; ++i)
		free( tabla_bin[i] );

	cvReleaseImage( &hsv_frame );
	cvReleaseCapture( &video );
	gsl_rng_free( rng );
	free( regions );

	for( int i = 0; i < num_objects; ++i ) {
		free( ref_histos[i] );
		free( particles[i] );
		free( nuevas_particulas[i] );
	}

	free( tabla_bin );
	free( particles );
	free( nuevas_particulas );

	t_fin = clock();
	ms = ((double)(t_fin - t_ini) / CLOCKS_PER_SEC) * 1000.0;
	printf("%d\t%d\t%.10g\n", PARTICLES, num_objects, ms);
}
Example #5
0
/* Grab the next frame from the camera's capture device and hand it straight
 * to the conversion routine. The frame is owned by the OpenCV capture, so it
 * is not released here. */
void camera_atualiza(camera *cam) {
  camera_converte(cam, cvQueryFrame(cam->capture));
}
int main(int argc, char* argv[])
{
    // Default capture size - 640x480<br />
    CvSize size = cvSize(640,480);
    // Open capture device. 0 is /dev/video0, 1 is /dev/video1, etc.
    CvCapture* capture = cvCaptureFromCAM( 0 );
    if( !capture )
    {
        fprintf( stderr, "ERROR: capture is NULL \n" );
        getchar();
        return -1;
    }

    // Create a window in which the captured images will be presented
    cvNamedWindow( "Camera", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "HSV", CV_WINDOW_AUTOSIZE );
    cvNamedWindow( "EdgeDetection", CV_WINDOW_AUTOSIZE );

    // Detect a red ball
    CvScalar hsv_min = cvScalar(150, 84, 130, 0);
    CvScalar hsv_max = cvScalar(358, 256, 255, 0);
    IplImage* hsv_frame    = cvCreateImage(size, IPL_DEPTH_8U, 3);
    IplImage* thresholded  = cvCreateImage(size, IPL_DEPTH_8U, 1);

    while( 1 )
    {
        // Get one frame
        IplImage* frame = cvQueryFrame( capture );
        if( !frame )
        {
              fprintf( stderr, "ERROR: frame is null...\n" );
              getchar();
              break;
        }
        // Covert color space to HSV as it is much easier to filter colors in the HSV color-space.
        cvCvtColor(frame, hsv_frame, CV_BGR2HSV);
        // Filter out colors which are out of range.
        cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
        // Memory for hough circles
        CvMemStorage* storage = cvCreateMemStorage(0);
        // hough detector works better with some smoothing of the image
        cvSmooth( thresholded, thresholded, CV_GAUSSIAN, 9, 9 );
        CvSeq* circles = cvHoughCircles(thresholded, storage, CV_HOUGH_GRADIENT, 2,
                                        thresholded->height/4, 100, 50, 10, 400);

        for (int i = 0; i < circles->total; i++)
        {
            float* p = (float*)cvGetSeqElem( circles, i );
            printf("Ball! x=%f y=%f r=%f\n\r", p[0],p[1],p[2] );
            cvCircle( frame, cvPoint(cvRound(p[0]),cvRound(p[1])),
                                    3, CV_RGB(0,255,0), -1, 8, 0 );
            cvCircle( frame, cvPoint(cvRound(p[0]),cvRound(p[1])),
                                    cvRound(p[2]), CV_RGB(255,0,0), 3, 8, 0 );
        }

        cvShowImage( "Camera", frame ); // Original stream with detected ball overlay
        cvShowImage( "HSV", hsv_frame); // Original stream in the HSV color space
        cvShowImage( "After Color Filtering", thresholded ); // The stream after color filtering</p>
        cvReleaseMemStorage(&storage);
        // Do not release the frame!
        //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
        //remove higher bits using AND operator
        if( (cvWaitKey(10) & 255) == 27 ) break;
    }
     // Release the capture device housekeeping
     cvReleaseCapture( &capture );
     cvDestroyWindow( "mywindow" );
     return 0;
   }
Example #7
0
int main(void){

	/*
	 * Colour-segmentation tracker: segments each camera frame (ttSegmenter/
	 * ttImprover), extracts contours (ttContours) and draws the centroid and
	 * outline of the tracked object until ESC is pressed.
	 */
	IplImage *theFrame, *theFrame2, *theFrame3;
	CvCapture *theCam=cvCreateCameraCapture(-1);
	int theChar;                  /* cvWaitKey returns int; avoid char truncation */
	CvSize size;
	CvPoint theCentroid;
	CvSeq* theContour;

	ttModels theModels;
	ttInit(&theModels);

	if(!theCam) {
		printf("\nCamera not found\n");
		return(0);
	}
	/*theFrame=cvLoadImage("colormap.png",1);
	theFrame2=cvLoadImage("colormap.png",1);//*/
	/* Work images sized to the camera's native resolution */
	size=cvSize(cvGetCaptureProperty(theCam,CV_CAP_PROP_FRAME_WIDTH),cvGetCaptureProperty(theCam,CV_CAP_PROP_FRAME_HEIGHT));
	theFrame2=cvCreateImage(size,IPL_DEPTH_8U,1);   /* binary segmentation mask */
	theFrame3=cvCreateImage(size,IPL_DEPTH_8U,3);   /* annotated display copy */

	cvNamedWindow("win1",1);
	cvNamedWindow("win2",1);
	cvMoveWindow("win1",0,0);
	cvMoveWindow("win2",700,0);

	/* Loop until ESC (0x1B) is pressed */
	while ((theChar=cvWaitKey(20))!=0x1B){
		theFrame=cvQueryFrame(theCam);
		if(!theFrame)            /* robustness: camera unplugged / end of stream */
			break;
		cvCopy(theFrame,theFrame3,NULL);
		cvZero(theFrame2);
		ttSegmenter(theFrame,theFrame2,1);
		ttImprover(theFrame2,theFrame2);
		cvShowImage("win2",theFrame2);
		theContour=ttContours(theFrame2,2,&theModels,NULL);

		if (theContour==NULL)
			continue;
		theCentroid=ttFindCentroid(theContour);
		cvCircle(theFrame3,theCentroid,1,CV_RGB(255,255,255),1,8,0);
		cvCircle(theFrame3,theCentroid,6,CV_RGB(255,0,0),1,8,0);
		cvDrawContours(theFrame3,theContour,CV_RGB(255,255,255),CV_RGB(255,255,255),1,5,8,cvPoint(0,0));
		cvShowImage("win1",theFrame3);
	}
	cvDestroyAllWindows();
	/* BUG FIX: theFrame comes from cvQueryFrame and belongs to the capture --
	   it must not be released. The two images we created are released with
	   cvReleaseImage (the proper API) instead of cvReleaseData. */
	cvReleaseImage(&theFrame2);
	cvReleaseImage(&theFrame3);
	cvReleaseCapture(&theCam);
	return(0);
}
Example #8
0
int main(){
  
      CvCapture* capture =0;       
      capture = cvCaptureFromCAM(0);

      if(!capture){
printf("Capture failure\n");
return -1;
      }
      
      frame=0;
      frame = cvQueryFrame(capture);           
      if(!frame) return -1;
  
     //create a blank image and assigned to 'imgTracking' which has the same size of original video
     imgTracking=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U, 3);

     cvZero(imgTracking); //covert the image, 'imgTracking' to black

     cvNamedWindow("Video");     
     cvNamedWindow("Ball");

      //iterate through each frames of the video     
      while(true){

            frame = cvQueryFrame(capture);           
            if(!frame) break;
            frame=cvCloneImage(frame); 
            
           cvSmooth(frame, frame, CV_GAUSSIAN,3,3); //smooth the original image using Gaussian kernel

IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3); 

            cvCvtColor(frame, imgHSV, CV_BGR2HSV); //Change the color format from BGR to HSV

IplImage* imgThresh = GetThresholdedImage(imgHSV, 0, 143, 149, 6, 245, 256);
	    
	     cvSmooth(imgThresh, imgThresh, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel
	     
	     trackObject(imgThresh, 255, 0, 0, 1);

	     
          	
IplImage* imgHSV2 = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3); 
	    
	      cvCvtColor(frame, imgHSV2, CV_BGR2HSV);

IplImage* imgThresh2 = GetThresholdedImage(imgHSV2, 26, 61, 152, 79, 166, 248);

 		cvSmooth(imgThresh2, imgThresh2, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel

		trackObject(imgThresh2, 0, 255, 0, 2);

            // Add the tracking image and the frame

          cvAdd(frame, imgTracking, frame);

          cvShowImage("Ball", imgThresh); 
	  cvShowImage("Ball2", imgThresh2);            
          cvShowImage("Video", frame);
           
           //Clean up used images
           cvReleaseImage(&imgHSV);
	   cvReleaseImage(&imgHSV2);
           cvReleaseImage(&imgThresh);
	   cvReleaseImage(&imgThresh2);             
           cvReleaseImage(&frame);

            //Wait 10mS
            int c = cvWaitKey(10);
            //If 'ESC' is pressed, break the loop
            if((char)c==27 ) break;      
      }

      cvDestroyAllWindows() ;
      cvReleaseImage(&imgTracking);
      cvReleaseCapture(&capture);     

      return 0;
}
Example #9
0
// Fetch the next frame from the underlying capture stream.
// The returned image is owned by the capture; callers must not release it.
IplImage * VideoReader::getImage(void)
{
    IplImage *frame = cvQueryFrame(m_video);
    return frame;
}
Example #10
0
/*
 * Scan video frames for four marker patterns and locate the box they form.
 *
 * result            - output; receives the four corner points of the box
 * capture           - open capture stream to pull frames from
 * numberOfIntervals - number of threshold layers per frame; clamped to
 *                     [1, 100], the capacity of threshold[]/metric[]
 *
 * Returns 1 when a box was found and written to *result, 0 when the user
 * quit first (ESC/'q'/'e') or no frames were available.
 */
int shapeDetector(BoundingBox* result, CvCapture* capture, int numberOfIntervals){

    int numberOfWindows = 0;
    /* BUG FIX: end_t was previously declared uninitialized and then read
       (span_t = end_t - start_t), which is undefined behavior.  Give the
       upper threshold a defined default of start + span. */
    int interval, start_t=45, span_t=65, end_t=start_t+span_t;
    int threshold[100];
    int i,j;
    int frameCounter=0;
    int totalNumberOfPatterns=0;
    int numberOfReducedPatterns=0;

    char dynamicThresholding=1;
    char run=1;
    char got_result = 0; /* 1 = box found, 0 = canceled/no input */

    struct Metric metric[100];
    IplImage* imgGrayScale;
    IplImage* img;

    Pattern allPatterns[100];
    Pattern reducedPatterns[10];

    CvPoint s_list[4];

    CvMemStorage *storage = cvCreateMemStorage(0); /* storage area for all contours */

    box = result;

    srand(time(NULL));

    /* Clamp the layer count to the fixed array capacity and avoid a
       division by zero in the interval computation below. */
    if(numberOfIntervals < 1) numberOfIntervals = 1;
    if(numberOfIntervals > 100) numberOfIntervals = 100;

    //*********************  SET UP IMAGES AND DISPLAY WINDOWS  ***********************
    //*********************************************************************************

    img = cvQueryFrame(capture);
    if(!img) {   /* no frame available: nothing to detect */
        cvReleaseMemStorage(&storage);
        return 0;
    }
    imgGrayScale = cvCreateImage(cvGetSize(img), 8, 1);

    switch( numberOfWindows ) {  /* intentional fallthrough: open N debug windows */
        case 3:
            cvNamedWindow("Threshold 2", CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL);
            /* fallthrough */
        case 2:
            cvNamedWindow("Threshold 3", CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL);
            /* fallthrough */
        case 1:
            cvNamedWindow("Threshold 1", CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL);
    }

    cvNamedWindow("Tracked", CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_NORMAL);
    cvCreateTrackbar("Threshold lower",   "Tracked", &start_t, 255,    NULL);
    cvCreateTrackbar("Threshold upper",   "Tracked", &end_t,  255,    NULL);
    //---------------------------------------------------------------------------------

    /* Spread the initial thresholds evenly across [start_t, end_t] */
    span_t = end_t - start_t;
    interval = span_t/numberOfIntervals;

    for(i=0; i<numberOfIntervals; i++){
        threshold[i] = start_t+((i+1)*interval);
    }

    while(run){ //Main loop

        //*********************  IMAGE PRE-PROCESSING  ****************************
        frameCounter++;
        img = cvQueryFrame(capture);
        if(!img) break;   /* stream ended or device lost */

        //converting the original image into grayscale
        cvCvtColor(img,imgGrayScale,CV_BGR2GRAY);

        //---------------------------------------------------------------------------

        /* Run shape detection once per threshold layer and collect the
           patterns found into one flat list (bounded by its capacity). */
        for(i=0; i<numberOfIntervals; i++){
            metric[i] = shapeProcessing(img, imgGrayScale, threshold[i]);

            for(j=0; j<metric[i].numberOfPatterns; j++){
                if(totalNumberOfPatterns >= 100) break;  /* capacity guard */
                allPatterns[totalNumberOfPatterns] = metric[i].p_list[j];
                totalNumberOfPatterns++;
            }
        }

        /* Merge duplicates found in different layers */
        numberOfReducedPatterns = reducePatterns(allPatterns, reducedPatterns, totalNumberOfPatterns);

        for(i=0; i<numberOfReducedPatterns; i++){
            drawRect(reducedPatterns[i].cv_rect, img);
        }

        /* Exactly four distinct markers means we found the box */
        if(numberOfReducedPatterns == 4){

            findBox_r(&reducedPatterns[0], &s_list[0]);

            box->topLeft = s_list[0];
            box->topRight = s_list[1];
            box->bottomLeft = s_list[2];
            box->bottomRight = s_list[3];
            got_result = 1;
            run = 0;
        }

        /* Adjust thresholds for the next frame: adaptively, or by
           re-spreading them across the trackbar-selected range. */
        if(dynamicThresholding) {
            setThresholds(&threshold[0], &metric[0], numberOfIntervals);
        }
        else{
            span_t = end_t - start_t;
            interval = span_t/numberOfIntervals;

            for(i=0; i<numberOfIntervals; i++){
                threshold[i] = start_t+((i+1)*interval);
            }
        }

        numberOfReducedPatterns=0;
        totalNumberOfPatterns=0;

        //show the image in which identified shapes are marked
        cvShowImage("Tracked",img);

        /* Key handling; the wait is also needed so HighGUI repaints.
           Mask away bits higher than the interesting ones. */
        int input = cvWaitKey(10) & 0xff;

        switch(input){
            case 27: //esc-key
            case 'q':
            case 'e': run=0; break;
            case 'd': draw = !draw; break;
        }

    } //end of main while(run) loop

    //cleaning up
    switch( numberOfWindows ) {  /* intentional fallthrough, mirrors setup */
        case 3:
            cvDestroyWindow("Threshold 2");
            /* fallthrough */
        case 2:
            cvDestroyWindow("Threshold 3");
            /* fallthrough */
        case 1:
            cvDestroyWindow("Threshold 1");
    }
    cvDestroyWindow("Tracked");
    cvReleaseMemStorage(&storage);
    cvReleaseImage(&imgGrayScale);
    /* img is owned by the capture; do not release it */
    printf("Number of frames: %d\n", frameCounter);
    return got_result;
}
Example #11
0
// Fetch the most recent frame from the camera device.
// The buffer remains owned by the capture; it is returned const so callers
// cannot modify the capture's internal frame in place.
const IplImage * Camera::GetFrame() {
    const IplImage *latest = cvQueryFrame(camera);
    return latest;
}
Example #12
0
int main( int argc, char** argv )
{
    IplImage *current_frame=NULL;
	CvSize size;
	size.height = 300; size.width = 200;
	IplImage *corrected_frame = cvCreateImage( size, IPL_DEPTH_8U, 3 );
	IplImage *labelled_image=NULL;
	IplImage *vertical_edge_image=NULL;
    int user_clicked_key=0;
    
    // Load the video (AVI) file
    CvCapture *capture = cvCaptureFromAVI( "./Postboxes.avi" );
    // Ensure AVI opened properly
    if( !capture )
		return 1;    
    
    // Get Frames Per Second in order to playback the video at the correct speed
    int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    
	// Explain the User Interface
    printf( "Hot keys: \n"
		    "\tESC - quit the program\n"
            "\tSPACE - pause/resume the video\n");

	CvPoint2D32f from_points[4] = { {3, 6}, {221, 11}, {206, 368}, {18, 373} };
	CvPoint2D32f to_points[4] = { {0, 0}, {200, 0}, {200, 300}, {0, 300} };
	CvMat* warp_matrix = cvCreateMat( 3,3,CV_32FC1 );
	cvGetPerspectiveTransform( from_points, to_points, warp_matrix );

	// Create display windows for images
	cvNamedWindow( "Input video", 0 );
	cvNamedWindow( "Vertical edges", 0 );
    cvNamedWindow( "Results", 0 );

	// Setup mouse callback on the original image so that the user can see image values as they move the
	// cursor over the image.
    cvSetMouseCallback( "Input video", on_mouse_show_values, 0 );
	window_name_for_on_mouse_show_values="Input video";

    while( user_clicked_key != ESC ) {
		// Get current video frame
        current_frame = cvQueryFrame( capture );
		image_for_on_mouse_show_values=current_frame; // Assign image for mouse callback
        if( !current_frame ) // No new frame available
			break;

		cvWarpPerspective( current_frame, corrected_frame, warp_matrix );

		if (labelled_image == NULL)
		{	// The first time around the loop create the image for processing
			labelled_image = cvCloneImage( corrected_frame );
			vertical_edge_image = cvCloneImage( corrected_frame );
		}
		check_postboxes( corrected_frame, labelled_image, vertical_edge_image );

		// Display the current frame and results of processing
        cvShowImage( "Input video", current_frame );
        cvShowImage( "Vertical edges", vertical_edge_image );
        cvShowImage( "Results", labelled_image );
        
        // Wait for the delay between frames
        user_clicked_key = cvWaitKey( 1000 / fps );
		if (user_clicked_key == ' ')
		{
			user_clicked_key = cvWaitKey(0);
		}
	}
    
    /* free memory */
    cvReleaseCapture( &capture );
    cvDestroyWindow( "video" );
 
    return 0;
}
Example #13
0
// Primary function: grab one frame, segment the orange cone by color,
// find its bounding rectangle, and return the heading adjustment in
// degrees from the camera's optical axis to the cone's center.
// coneExists is set to true iff a cone-colored blob was found.
double Camera::getCameraHeading(bool &coneExists)
{
	coneExists = 0;
	// Take a picture
	frame = cvQueryFrame(capture);

	// Set up direct pixel access into the source frame and binary result
	data = (uchar *)frame->imageData;
	datar = (uchar *)result->imageData;

	// Target cone color.  Pixel channels are stored BGR, so index 0 is
	// blue, 1 is green, 2 is red.
	int idealRed = 255;
	int idealGreen = 117;
	int idealBlue = 10;

	// A pixel counts as "cone" when:
	//   red   > idealRed   - redRange
	//   green < idealGreen + greenRange
	//   blue  < idealBlue  + blueRange
	// (redRange/greenRange/blueRange are member thresholds)

	// Iterate through every pixel, producing a binary mask in `result`
	for(int i = 0; i < (frame->height); i++) {
		for(int j = 0; j < (frame->width); j++) {
			if((data[i*frame->widthStep+j*frame->nChannels+2] > (idealRed-redRange)) &&
			   (data[i*frame->widthStep+j*frame->nChannels+1] < (idealGreen+greenRange)) &&
			   (data[i*frame->widthStep+j*frame->nChannels]   < (idealBlue+blueRange)))
				datar[i*result->widthStep+j*result->nChannels] = 255;
			else
				datar[i*result->widthStep+j*result->nChannels] = 0;
		}
	}

	/* Apply erosion and dilation to eliminate some noise and even out the blob */
	if(erosion >= 0) {
		cvErode(result,result,0,erosion);
	}
	if(dilation >= 0) {
		cvDilate(result,result,0,dilation);
	}

	/* Find contours (edges of polygons) on the binary mask.
	   Note: cvFindContours modifies its input image even though it is
	   declared const in the API. */
	cvFindContours(result,storage,&contours);

	/* Draw the contours on contourimage */
	if(contours) {
		cvDrawContours(contourimage,contours,cvScalarAll(255),cvScalarAll(255),100);
	}

	/* Calculate the bounding rectangle of everything drawn */
	bound = cvBoundingRect(contourimage,0);

	/* Reset the contour sequence and image (otherwise contourimage
	   accumulates drawings like an Etch A Sketch) */
	if(contours) {
		cvClearSeq(contours);
	}

	cvZero(contourimage);

	/* Bounding rectangle's top-left (p1) and bottom-right (p2) vertices.
	   BUG FIX: p2.y previously used bound.x instead of bound.y, producing
	   a wrong bottom edge and skewing the center calculation below. */
	p1.x = bound.x;
	p2.x = bound.x + bound.width;
	p1.y = bound.y;
	p2.y = bound.y + bound.height;

	/* Check if there is a rectangle in frame */
	if (p1.x == 0 && p1.y == 0) {

		// NOTE(review): releasing the capture inside a getter means any
		// subsequent call will query a dead capture -- confirm intended.
		cvReleaseCapture(&capture);
		adjustment = 0;
		coneExists = 0;
		return adjustment;

	} else {

		// Draw the bounding rectangle on the original image
		cvRectangle(frame,p1,p2,CV_RGB(255,0,0),3,8,0);

		// Center of the bounding rectangle:
		// x = left edge plus half the width
		p1.x = bound.x + (bound.width/2);
		// y = bottom edge plus half of (top - bottom)
		p1.y = p2.y + ((p1.y - p2.y)/2);

		// Draw a small circle at the center of the bounding rectangle
		cvCircle(frame,p1,3,CV_RGB(0,0,255),1,8,0);

		// Convert the center's x position into a heading offset in degrees:
		// scale [0, width] onto [-halfField, +halfField].
		// fieldDegrees is the camera's horizontal field of view.
		double fieldDegrees = 43.3;
		double halfField = fieldDegrees/2;
		adjustment = (double)p1.x;
		adjustment = adjustment/frame->width;
		adjustment = adjustment*fieldDegrees;
		adjustment = adjustment-halfField;
		if(adjustment == -0)   // normalize negative zero for display
				adjustment = 0;

		cvZero(result);

		// NOTE(review): see above -- capture released on every call.
		cvReleaseCapture(&capture);

		coneExists = 1;

		return adjustment;
	}
}
Example #14
0
// Interactive HSV threshold calibrator.
//
// Reads previously saved thresholds from config.txt (sections "Ball",
// "Goal", "Lines"), then shows live thresholded frames with trackbars so
// the operator can retune each section in turn.  Moving the WRITE slider
// saves the current slider values for the active section and advances to
// the next one.  ESC quits and returns the collected threshold vectors as
// {ball, goal, lines}.
vector<vector<double>> calibrator(CvCapture* cap){

	// HSV threshold slider values plus UI state
	int h_min, s_min, v_min, h_max, s_max, v_max, write, variablecount, getconfig;

	namedWindow("Thresh");
	namedWindow("Thresitud");
	Mat frame;
	vector<Mat> frames;
	vector<double> ball;
	vector<double> goal;
	vector<double> lines;
	bool onlyonce = true;
	vector<vector<double>> values;

	string filename = "config.txt";
	string variable;
	string line_from_config;
	ofstream fout;
	ifstream fin;
	vector<string> line;
	variablecount = 1;

	fin.open(filename);

	getconfig = 0;
	write = 0;
	h_min = 0;
	s_min = 0;
	v_min = 0;
	h_max = 255;  // BUG FIX: two of these statements previously ended with
	s_max = 255;  // ',' (comma operator) instead of ';'
	v_max = 255;

	createTrackbar( "H min", "Thresh", &h_min, SLIDER_MAX, NULL);
	createTrackbar( "H max", "Thresh", &h_max, SLIDER_MAX, NULL);
	createTrackbar( "S min", "Thresh", &s_min, SLIDER_MAX, NULL);
	createTrackbar( "S max", "Thresh", &s_max, SLIDER_MAX, NULL);
	createTrackbar( "V min", "Thresh", &v_min, SLIDER_MAX, NULL);
	createTrackbar( "V max", "Thresh", &v_max, SLIDER_MAX, NULL);
	// WRITE to file
	createTrackbar("WRITE", "Thresh", &write, 1, NULL);
	createTrackbar("QUIT CONFIG", "Thresh", &getconfig, 1, NULL);

	cout<< "Enne ifi" << endl;

	// Load any previously saved thresholds from config.txt
	if (fin.is_open())
	{
		cout << "IF" << endl;
		getline(fin,line_from_config);
		while (line_from_config != "")
		{
			cout<<"WHILE"<<endl;
			line = split(line_from_config, " ");
			if (line[0] == "Ball"){
				for (size_t i = 1; i < line.size(); i++){
					ball.push_back(atoi(line[i].c_str()));
					cout<<line[i]<<endl;
				}
			}
			else if (line[0] == "Goal"){
				for (size_t i = 1; i < line.size(); i++){
					goal.push_back(atoi(line[i].c_str()));
					cout<<line[i]<<endl;
				}
			}
			else if (line[0] == "Lines"){
				for (size_t i = 1; i < line.size(); i++){
					lines.push_back(atoi(line[i].c_str()));
					cout<<line[i]<<endl;
				}
			}
			else
			{
				break;  // unknown section name: stop parsing
			}

			getline(fin,line_from_config);
		}
		values.push_back(ball);
		values.push_back(goal);
		values.push_back(lines);
		fin.close();  // BUG FIX: input stream was left open
	}
	else
	{
		cout<<"File is empty or not opened"<<endl;
	}

	while (true){

	if (write == 1)	{

		// First write truncates the config file and discards stale values
		if (onlyonce)
		{
			fout.open(filename);
			values.clear();
			onlyonce = false;
		}

		// Section 1 = Ball, 2 = Goal, 3 = Lines
		if(variablecount == 1){
			variable = "Ball";
			ball.push_back(h_min);
			ball.push_back(s_min);
			ball.push_back(v_min);
			ball.push_back(h_max);
			ball.push_back(s_max);
			ball.push_back(v_max);
			values.push_back(ball);

		}
		else if(variablecount == 2){
			variable = "Goal";
			goal.push_back(h_min);
			goal.push_back(s_min);
			goal.push_back(v_min);
			goal.push_back(h_max);
			goal.push_back(s_max);
			goal.push_back(v_max);
			values.push_back(goal);

		}
		else if(variablecount == 3){
			variable = "Lines";
			lines.push_back(h_min);
			lines.push_back(s_min);
			lines.push_back(v_min);
			lines.push_back(h_max);
			lines.push_back(s_max);
			lines.push_back(v_max);
			values.push_back(lines);
		}

		// Persist and echo the section just captured
		fout << variable << " " << h_min << " " << s_min <<
			" " << v_min << " "  << h_max << " " << s_max <<
			" " << v_max << endl;

		cout <<  variable << " " << h_min << " " << s_min <<
			" " << v_min << " "  << h_max << " " << s_max <<
			" " << v_max << endl;

		variablecount = variablecount +1;

		// Reset sliders to their defaults for the next section
		h_min = 0;
		s_min = 0;
		v_min = 0;
		h_max = 255;  // BUG FIX: was ',' here too
		s_max = 255;
		v_max = 255;
		write = 0;

		setTrackbarPos("H min", "Thresh", h_min);
		setTrackbarPos("S min", "Thresh", s_min);
		setTrackbarPos("V min", "Thresh", v_min);
		setTrackbarPos("H max", "Thresh", h_max);
		setTrackbarPos("S max", "Thresh", s_max);
		setTrackbarPos("V max", "Thresh", v_max);
		setTrackbarPos("WRITE", "Thresh", write);
	}

	if (getconfig == 1)
	{
		// TODO: reload config on demand (currently unimplemented)
	}

	// take a frame, threshold it, display the thresholded image.
	// BUG FIX: guard against a lost camera (NULL frame) before wrapping
	// the buffer in a Mat.
	IplImage* raw = cvQueryFrame( cap );
	if (!raw) {
		if (fout.is_open()) fout.close();
		cvDestroyAllWindows();
		return values;
	}
	frame = raw;
	frames = thresholder(frame,h_min, s_min, v_min, h_max, s_max, v_max );

	imshow("Thresh", frames[0]);
	// NOTE(review): the same image is shown twice and window "Thresitud"
	// is never drawn to -- confirm which window/image was intended.
	imshow("CALIBRATOR", frames[0]);

	int c = cvWaitKey(10);
	if( (char)c == 27) {
		if (fout.is_open()) fout.close();  // BUG FIX: flush saved thresholds
		cvDestroyAllWindows();
		return values;
	}

	}

}
/* Convert a side-by-side (type 0) or above/below (type 1) stereo video
   into a grayscale anaglyph written to "anaglyph-gray.avi".
   Usage: SBS-UB-to-anaglyph-video <stereo_video> 0|1 */
int main(int argc, char* argv[]){
    IplImage *frame, *frameL, *frameR, *anaglyph;

    // BUG FIX: argv[1]/argv[2] were previously read without checking argc.
    if(argc < 3){
        printf("Command call: SBS-UB-to-anaglyph-video <side-by-side_video> 0|1\n0:side-by-side and 1:above/below");
        exit(-1);
    }

    CvCapture *capture = cvCreateFileCapture(argv[1]);
    int videoType = atoi(argv[2]); //0:side-by-side or 1:above/below
    
    //Simple error handling
    if(!capture){
       printf("Erro ao abrir o video.");
       exit(-1);
    }
    
    //some verifications regarding the video
    frame = cvQueryFrame(capture);
    if(!frame){
        printf("Video vazio.");
        exit(-1);
    }
    // BUG FIX: the divisibility check now matches the split direction --
    // width must be even for side-by-side, height for above/below
    // (previously only the width was checked, for both modes).
    if(videoType == 0 && frame->width % 2 != 0){
          printf("Video possui largura não divisível por 2. Favor cortar!");
          exit(-1);
    }
    if(videoType == 1 && frame->height % 2 != 0){
          printf("Video possui altura não divisível por 2. Favor cortar!");
          exit(-1);
    }
    
    //prepare anaglyph video: output has half the split dimension
    double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    CvSize videosize;
    switch(videoType){    
        case 0:
             videosize = cvSize(
                                      (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH)/2,
                                      (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT)
                                );
             break;
        case 1:
             videosize = cvSize(
                                      (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH),
                                      (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT)/2
                                );
             break;
        default:                
             printf("Command call: SBS-UB-to-anaglyph-video <side-by-side_video> 0|1\n0:side-by-side and 1:above/below");
             exit(-1);          
    }
    CvVideoWriter *writer = cvCreateVideoWriter("anaglyph-gray.avi",CV_FOURCC('H','F','Y','U'), fps, videosize);
    // BUG FIX: the writer was never checked; a missing codec previously
    // led to a crash in cvWriteFrame.
    if(!writer){
        printf("Erro ao criar o video de saida (codec Huffyuv instalado?).");
        cvReleaseCapture(&capture);
        exit(-1);
    }
    
    //start working on each frame.
    //BUG FIX: the first frame (already grabbed for validation above) was
    //previously discarded; process it before querying the next one.
    while(frame != NULL){ 
        //get width and height from original image
        int width = frame->width;
        int height = frame->height;      
        
        //new images will have half the split dimension of the original
        CvSize size;
        switch(videoType){
            case 0:
                 size = cvSize( width/2, height);
                 break;
            case 1:
                 size = cvSize( width, height/2);
                 break;
            default:
                 printf("Command call: SBS-UB-to-anaglyph-video <side-by-side_video> 0|1\n0:side-by-side and 1:above/below");
                 exit(-1);                    
        }
        
        //copy image properties
        frameL = cvCreateImage(size, frame->depth, frame->nChannels);
        frameR = cvCreateImage(size, frame->depth, frame->nChannels);
        cvZero(frameL);
        cvZero(frameR);
        
        //divide frames in two
        separateImages(frame, &frameL, &frameR, width, height, videoType);           
        
        anaglyph = cvCreateImage(size, frameL->depth, frameL->nChannels);
        cvZero(anaglyph);
        
        //create anaglyph
        createAnaglyph(frameL, frameR, &anaglyph);
        
        //if any error occurs (e.g. segmentation fault), check that the codec
        //is installed: Huffyuv (lossless):
        //http://neuron2.net/www.math.berkeley.edu/benrg/huffyuv.html
        cvWriteFrame(writer, anaglyph);  
        
        cvReleaseImage(&frameL);
        cvReleaseImage(&frameR);
        cvReleaseImage(&anaglyph);

        frame = cvQueryFrame(capture);
    }
        
    //free pointers  
    cvReleaseCapture(&capture);
    cvReleaseVideoWriter(&writer);
    
    return 0;
}
Example #16
0
/* Lucas-Kanade sparse optical-flow demo: tracks user- or auto-selected
   feature points from frame to frame using pyramidal LK.
   argv[1] may be a single-digit camera index or an AVI path.
   Relies on file-scope globals defined elsewhere in this file: image,
   grey, prev_grey, pyramid, prev_pyramid, points, status, count, flags,
   need_to_init, night_mode, add_remove_pt, pt, win_size, swap_temp,
   swap_points, MAX_COUNT, on_mouse. */
int main( int argc, char** argv )
{
    CvCapture* capture = 0;
    
    /* Single-digit argument = camera index, otherwise treat it as an AVI path */
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    /* print a welcome message, and the OpenCV version */
    printf ("Welcome to lkdemo, using OpenCV version %s (%d.%d.%d)\n",
	    CV_VERSION,
	    CV_MAJOR_VERSION, CV_MINOR_VERSION, CV_SUBMINOR_VERSION);

    printf( "Hot keys: \n"
            "\tESC - quit the program\n"
            "\tr - auto-initialize tracking\n"
            "\tc - delete all the points\n"
            "\tn - switch the \"night\" mode on/off\n"
            "To add/remove a feature point click it\n" );

    cvNamedWindow( "LkDemo", 0 );
    cvSetMouseCallback( "LkDemo", on_mouse, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, k, c;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers on the first frame */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            status = (char*)cvAlloc(MAX_COUNT);
            flags = 0;
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, grey, CV_BGR2GRAY );

        /* night mode: black background, only the tracked points are drawn */
        if( night_mode )
            cvZero( image );
        
        if( need_to_init )
        {
            /* automatic initialization: pick strong corners, then refine
               them to sub-pixel accuracy */
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
            double quality = 0.01;
            double min_distance = 10;

            count = MAX_COUNT;
            cvGoodFeaturesToTrack( grey, eig, temp, points[1], &count,
                                   quality, min_distance, 0, 3, 0, 0.04 );
            cvFindCornerSubPix( grey, points[1], count,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );

            add_remove_pt = 0;
        }
        else if( count > 0 )
        {
            /* track existing points from the previous frame into this one */
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                points[0], points[1], count, cvSize(win_size,win_size), 3, status, 0,
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;
            /* compact the point list: drop lost points, and drop any point
               the user clicked near (within 5 px) to remove it */
            for( i = k = 0; i < count; i++ )
            {
                if( add_remove_pt )
                {
                    double dx = pt.x - points[1][i].x;
                    double dy = pt.y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 )
                    {
                        add_remove_pt = 0;
                        continue;
                    }
                }
                
                if( !status[i] )
                    continue;
                
                points[1][k++] = points[1][i];
                cvCircle( image, cvPointFrom32f(points[1][i]), 3, CV_RGB(0,255,0), -1, 8,0);
            }
            count = k;
        }

        /* a pending click that removed nothing adds a new point instead */
        if( add_remove_pt && count < MAX_COUNT )
        {
            points[1][count++] = cvPointTo32f(pt);
            cvFindCornerSubPix( grey, points[1] + count - 1, 1,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            add_remove_pt = 0;
        }

        /* swap buffers so this frame becomes "previous" on the next pass */
        CV_SWAP( prev_grey, grey, swap_temp );
        CV_SWAP( prev_pyramid, pyramid, swap_temp );
        CV_SWAP( points[0], points[1], swap_points );
        need_to_init = 0;
        cvShowImage( "LkDemo", image );

        c = cvWaitKey(10);
        if( (char)c == 27 )
            break;
        switch( (char) c )
        {
        case 'r':
            need_to_init = 1;
            break;
        case 'c':
            count = 0;
            break;
        case 'n':
            night_mode ^= 1;
            break;
        default:
            ;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("LkDemo");

    return 0;
}
Example #17
0
/* FLTK timer callback: grabs the next frame, handles seek-slider
   repositioning, runs face detection, optionally records to AVI, draws the
   frame into the GUI, and updates the FPS display.  Re-arms itself via
   Fl::add_timeout while `started` is set.  Operates entirely on file-scope
   globals (capture, video_pos, old_pos, video_window, video_image, writer,
   fps, total_frames, ...). */
static void get_next_frame(void*)
{
    static int repositioning = 0;
    IplImage* frame = 0;
    double new_pos = video_pos->value();
    
    /* If the user moved the position slider (beyond float noise) and we are
       not already mid-seek, reposition the capture to the requested ratio */
    if( (new_pos-old_pos >= 1e-10 || new_pos-old_pos <= -1e-10) && !repositioning)
    {
        video_window->redraw();
        cvSetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO, new_pos );
        new_pos = cvGetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO );
        printf("Repositioning\n");
        repositioning = 1;
    }
    else
    {
        /* normal playback: reflect the capture's position on the slider */
        new_pos = cvGetCaptureProperty( capture, CV_CAP_PROP_POS_AVI_RATIO );
        video_pos->value(new_pos);
        repositioning = 0;
    }
    old_pos = new_pos;
    frame = cvQueryFrame( capture );

    /* end of an AVI file: stop playback */
    if( frame == 0 && is_avi )
    {
        cb_Stop(0,0);
        return;
    }

    if( video_window && frame )
    {
        /* grow the window if the frame no longer fits */
        if( video_window->w() < frame->width || video_window->h() < frame->height )
            root_window->size( (short)(frame->width + 40), (short)(frame->height + 150));

        CvRect rect = { video_window->x(), video_window->y(),
                        frame->width, frame->height };
        
        /* (re)allocate the display buffer when the frame outgrows it */
        if( !video_image || video_image->width < rect.width ||
            video_image->height < rect.height )
        {
            cvReleaseImage( &video_image );
            video_image = cvCreateImage( cvSize( rect.width, rect.height ), 8, 3 );
        }

        cvSetImageROI( video_image, cvRect(0,0,rect.width, rect.height));
        /* bottom-origin frames must be flipped for display */
        if( frame->origin == 1 )
            cvFlip( frame, video_image, 0 );
        else
            cvCopy( frame, video_image, 0 );

        DetectAndDrawFaces( video_image );
        if( writer && is_recorded )
        {
            cvWriteToAVI( writer, video_image );
        }
        cvCvtColor( video_image, video_image, CV_RGB2BGR );

        /* hand the raw pixels to FLTK for drawing */
        uchar* data = 0;
        int step = 0;
        CvSize size;
        cvGetRawData( video_image, &data, &step, &size );

        video_window->redraw();
        fl_draw_image( (uchar*)data, video_window->x(), video_window->y(),
                       size.width, size.height, 3, step );
    }

    if( started )
    {
        double cur_frame_stamp = get_time_accurate();
        // update fps as an exponential moving average (fps < 0 means "unset")
        if( fps < 0 )
            fps = 1000/(cur_frame_stamp - prev_frame_stamp);
        else
            fps = (1-fps_alpha)*fps + fps_alpha*1000/(cur_frame_stamp - prev_frame_stamp);
        prev_frame_stamp = cur_frame_stamp;
        sprintf( fps_buffer, "FPS: %5.1f", fps );
        fps_box->label( fps_buffer );
        fps_box->redraw();
        if( total_frames > 0 )
        {
            /* NOTE: the `else` below binds to the inner `if( !is_loopy )`,
               which matches the indentation: on the last frame either exit
               or, in loop mode, rewind to the start position. */
            if( --total_frames == 0 )
                if( !is_loopy )
                    cb_Exit(0,0);
                else
                {
                    total_frames = total_frames0;
                    cvSetCaptureProperty( capture, CV_CAP_PROP_POS_FRAMES, start_pos );
                }
        }
        /* re-arm the timer for the next frame */
        Fl::add_timeout( timeout, get_next_frame, 0 );
    }
}
Example #18
0
/* Robot main loop: opens the camera (or a debug video), starts the IMU and
   SBOT serial threads, then displays downscaled frames while handling
   obstacle back-up maneuvers.  ESC quits.  Returns 0 on normal shutdown. */
int main(int argc, char** argv) {

    setlocale(LC_ALL, "C");
    time_t t;
    time(&t);

    // Log file named after the current unix timestamp.
    // BUG FIX: use snprintf so the fixed buffer can never overflow.
    char logname[64];
    snprintf(logname, sizeof logname, "../logs/%ld.log", (long)t);
    mainLog = fopen( logname, "w");
    if( mainLog == NULL ){
        printf("log cannot be created.\n");
        exit(-1);
    }

    IplImage* empty_frame = cvCreateImage( cvSize( 360, 240 ), 8, 3);

    cvNamedWindow( "camera", CV_WINDOW_AUTOSIZE );
    cvShowImage( "camera", empty_frame );

    cvWaitKey(33);
    
    CvCapture* capture;

    if( START_CAM ){
        init_video();
        printf("mplayer init video done.\n");

        capture = cvCaptureFromCAM(1);  //(1);
    }else{
        //debug input
        capture = cvCreateFileCapture( "../../video/b23.avi");
    }
    // BUG FIX: cvQueryFrame on a NULL capture crashes; fail early instead.
    if( capture == NULL ){
        printf("cannot open capture device.\n");
        fclose(mainLog);
        exit(-1);
    }
    ImuThread imu;
    SbotThread sbot;

    BindSerialPorts bs;

    bs.bind( &sbot, &imu );

    imu.init();

    sbot.run();

    // brief forward nudge to verify the drive train responds
    sleep(1);
    sbot.setSpeed(1);
    sleep(1);
    sbot.setSpeed(0);

    imu.run();

    if(START_TIMER){
        timer();
    }

    IplImage* frame;
    setlocale(LC_ALL, "C");

    int frame_counter = 0;

    bool was_obstacle = false;
    long found_obstacle = 0;   // BUG FIX: was read while uninitialized

    while(1) {
        frame = cvQueryFrame( capture );
        if( !frame ){
            printf("no frame, exiting..\n");
            break;
        }

        // process only every third frame to keep up with the stream
        if(frame_counter<2){
            frame_counter++;
            continue;
        }
        frame_counter=0;

        IplImage* tmp_frame = cvCreateImage( cvSize( 360, 240 ), frame->depth,frame->nChannels);

        cvResize( frame, tmp_frame );

        // camera is mounted upside down: flip both axes
        cvFlip( tmp_frame, tmp_frame, 0);
        cvFlip( tmp_frame, tmp_frame, 1);

        SbotData sdata = sbot.getData();
        ImuData idata = imu.getData();

        // remember when the obstacle was first seen
        if (sdata.obstacle && (!was_obstacle))
        {
            time_t tobs = time(NULL);
            found_obstacle = tobs;
            printf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!OBSTACLE\n");
        }
        time_t tmnow = time(NULL);
        if ((sdata.obstacle) && (tmnow - found_obstacle > 40))
        {
            // blocked for over 40 s: back up, turn away, then resume
            printf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! BACKING UP\n");
            found_obstacle = found_obstacle +120;  // postpone the next backup
            sbot.ignoreObstacle(true);
            sbot.setDirection( 0 );
            sbot.setSpeed( -2 );
            sleep(4);
            sbot.setDirection( -40 );
            sbot.setSpeed( 1 );
            sleep(2);
            sbot.setDirection( 0 );
            sbot.setSpeed( 0 );
            sbot.ignoreObstacle(false);
        }
        else if (sdata.obstacle)
        {
            printf("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! %ld\n", tmnow - found_obstacle);
        }
        was_obstacle = sdata.obstacle;

        log_data( sdata, idata);

        cvShowImage( "camera", tmp_frame );

        cvReleaseImage( &tmp_frame );

        char c = cvWaitKey(33);
        if( c == 27 ) break;
    }

    sbot.stop();
    imu.stop();
    
    cvReleaseImage( &empty_frame );   // BUG FIX: was leaked
    cvReleaseCapture( &capture );
    cvDestroyWindow( "camera" );
    cvDestroyWindow( "debug" );

    fclose(mainLog);

    return 0;
}
/**************************** Main ********************************/
/* Particle-filter object tracker demo.
   Opens a camera or AVI (vid_file from arg_parse), lets the user select an
   initial region, then tracks it frame-by-frame with a particle filter,
   drawing every particle in blue and the most probable one in red. */
int main( int argc, char** argv )
{
    IplImage *frame;
    CvCapture* video;
    int frame_num = 0;
    int i;

    arg_parse( argc, argv );

    // initialization
    cvParticleObserveInitialize( featsize );

    // read a video: single-digit argument = camera index, otherwise AVI path
    if( !vid_file || (isdigit(vid_file[0]) && vid_file[1] == '\0') )
        video = cvCaptureFromCAM( !vid_file ? 0 : vid_file[0] - '0' );
    else
        video = cvCaptureFromAVI( vid_file ); 
    // BUG FIX: check the capture itself before querying it;
    // cvQueryFrame( NULL ) is not safe.
    if( video == NULL || (frame = cvQueryFrame( video )) == NULL )
    {
        fprintf( stderr, "Video %s is not loadable.\n", vid_file );
        usage();
        exit(1);
    }

    // allows user to select initial region
    CvRect region;
    icvGetRegion( frame, &region );

    // configure particle filter: per-state transition noise std-devs
    bool logprob = true;
    CvParticle *particle = cvCreateParticle( num_states, num_particles, logprob );
    CvParticleState std = cvParticleState (
        std_x,
        std_y,
        std_w,
        std_h,
        std_r
    );
    cvParticleStateConfig( particle, cvGetSize(frame), std );

    // initialize particle filter from the user-selected region (centerized)
    CvParticleState s;
    CvParticle *init_particle;
    init_particle = cvCreateParticle( num_states, 1 );
    CvRect32f region32f = cvRect32fFromRect( region );
    CvBox32f box = cvBox32fFromRect32f( region32f ); // centerize
    s = cvParticleState( box.cx, box.cy, box.width, box.height, 0.0 );
    cvParticleStateSet( init_particle, 0, s );
    cvParticleInit( particle, init_particle );
    cvReleaseParticle( &init_particle );

    // template: the selected region resized to the feature size
    IplImage* reference = cvCreateImage( featsize, frame->depth, frame->nChannels );
    IplImage* tmp = cvCreateImage( cvSize(region.width,region.height), frame->depth, frame->nChannels );
    cvCropImageROI( frame, tmp, region32f );
    cvResize( tmp, reference );
    cvReleaseImage( &tmp );

    while( ( frame = cvQueryFrame( video ) ) != NULL )
    {
        // Draw new particles
        cvParticleTransition( particle );
        // Measurements against the reference template
        cvParticleObserveMeasure( particle, frame, reference );

        // Draw all particles
        for( i = 0; i < particle->num_particles; i++ )
        {
            CvParticleState s = cvParticleStateGet( particle, i );
            cvParticleStateDisplay( s, frame, CV_RGB(0,0,255) );
        }
        // Draw most probable particle
        int maxp_id = cvParticleGetMax( particle );
        CvParticleState maxs = cvParticleStateGet( particle, maxp_id );
        cvParticleStateDisplay( maxs, frame, CV_RGB(255,0,0) );
        
        // Save pictures
        if( arg_export ) {
            sprintf( export_filename, export_format, vid_file, frame_num );
            printf( "Export: %s\n", export_filename ); fflush( stdout );
            cvSaveImage( export_filename, frame );
        }
        cvShowImage( "Select an initial region > SPACE > ESC", frame );

        // Normalize weights, then resample for the next iteration
        cvParticleNormalize( particle);
        cvParticleResample( particle );

        char c = cvWaitKey( 1000 );
        if(c == '\x1b')   // ESC quits
            break;
    }

    cvParticleObserveFinalize();
    cvDestroyWindow( "Select an initial region > SPACE > ESC" );
    cvReleaseImage( &reference );
    cvReleaseParticle( &particle );
    cvReleaseCapture( &video );
    return 0;
}
int traceTrajectory() {
    /***** DECLARATIONS *****/
    FILE *currentFile = NULL;
    char *video_path = (char*) malloc(sizeof (char) * MAX_PATH);
    char *pattern_path = (char*) malloc(sizeof (char) * MAX_PATH);
    char *center_path = (char*) malloc(sizeof (char) * MAX_PATH);
    CvCapture* capture = 0;
    bwimage_t *image_target, *image_pattern, *image_center;
    Pixel peak;
    error_e retval = EEA_OK;

    /***** MAIN PROCEDURE *****/
    do {
        /*** Find the paths to the images that need to be processed ***/
        currentFile = fopen("paths_center.txt", "r"); // file containing
                                                      // the paths to the images
        if (currentFile != NULL) {
            fgetstr(video_path, MAX_PATH, currentFile);
            fgetstr(pattern_path, MAX_PATH, currentFile);
            fgetstr(center_path, MAX_PATH, currentFile);
        } else {
            printf("paths_center.txt cannot be opened");
            exit(EXIT_FAILURE);
        }
        fclose(currentFile);

        /*** Load the input images ***/
        image_target = EEACreateImage();
        image_pattern = EEACreateImage();
        retval = EEALoadImage(pattern_path, image_pattern);
        if (retval != EEA_OK) break;
        // free paths
        free(video_path);
        free(pattern_path);

        /*** Initialize the video frame capture ***/
        capture = cvCreateFileCapture("samples/parabolic_drop_256.avi");
        if (!capture) {
            return -1;
        }
        IplImage *frame = cvQueryFrame(capture); //Init the video reading
        double fps = cvGetCaptureProperty(
                capture,
                CV_CAP_PROP_FPS
                );
        CvSize size = cvSize(
                (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH),
                (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT)
                );
        // Convert to grayscale frame
        IplImage *frameGray = cvCreateImage(size, IPL_DEPTH_8U, 1);

        /*** Process the images ***/
        bwimage_t *image_temp = createBlackBwimage(image_pattern->height,
                                                   image_pattern->width);
        Pixel center;
        copyImage(image_temp, image_pattern);
        trim(&image_temp, 0.08);
        findCenter(&center, image_temp);
        EEAFreeImage(image_temp);
        cvCvtColor(frame, frameGray, CV_RGB2GRAY);
        iplImageToBwimage(&image_target, frameGray);
        locateShape(image_target, image_pattern, 0.08, &peak);
        createCenterImage(&image_center, image_target->height,
                                image_target->width, peak, center);
        while (1) {
            frame = cvQueryFrame(capture);
            if (!frame) break;
            // Convert to grayscale frame
            cvCvtColor(frame, frameGray, CV_RGB2GRAY);
            iplImageToBwimage(&image_target, frameGray);
            locateShape(image_target, image_pattern, 0.08, &peak);
            addPosition(image_center, peak, center);
        }
        cvReleaseImage(&frame);
        cvReleaseImage(&frameGray);
        cvReleaseCapture(&capture);

        /*** Write the output images at the specified path ***/
        if (EEA_OK != (retval = EEADumpImage(center_path, image_center))) break;
        free(center_path);
    } while (0);

    /***** FREE HEAP MEMORY *****/
    EEAFreeImage(image_target);
    EEAFreeImage(image_pattern);
    EEAFreeImage(image_center);

    /***** ERROR HANDLING *****/
    switch (retval) {
        case EEA_OK:
            break;
        case EEA_ENOFILE:
            fprintf(stderr, "Cannot open file\n");
            break;
        case EEA_EBADBPS:
            fprintf(stderr, "Number of bits per sample must be equal to 8\n");
            break;
        case EEA_EBADPHOTOMETRIC:
            fprintf(stderr, "Not a colormap image\n");
            break;
        case EEA_ENOCOLORMAP:
            fprintf(stderr, "Image does not have a colormap\n");
            break;
        case EEA_ENOTGRAY:
            fprintf(stderr, "At least one entry in the colormap is not gray\n");
        case EEA_ENOMEM:
            fprintf(stderr, "Cannot allocate memory\n");
            break;
        default:
            ; /* Can't happen  */
    }

    return(EXIT_SUCCESS);
}
/* ---------------------------- Example #21 (0) ---------------------------- */
int main(int argc, char **argv)
{
  /*
   * Adaptive background subtraction.
   * Learns a per-pixel luminance mean (imgAverage) and amplitude (imgSgm)
   * over INIT_TIME frames, then extracts a foreground silhouette for each
   * frame and keeps updating the background and object models at different
   * rates.  SPACE pauses/resumes, 'q' quits.
   */
  bool isStop = false;
  const int INIT_TIME = 50;
  const double BG_RATIO = 0.02;   // background region update rate
  const double OBJ_RATIO = 0.005; // object region update rate
  const double Zeta = 10.0;       // fixed tolerance added to the luminance range
  IplImage *img = NULL;

  CvCapture *capture = NULL;
  capture = cvCreateCameraCapture(0);
  //capture = cvCaptureFromAVI("test.avi");
  if(capture == NULL){
    printf("capture device not found!!");
    return -1;
  }

  img = cvQueryFrame(capture);
  if(img == NULL){ // bug fix: a NULL first frame was dereferenced below
    printf("cannot query frame from capture device!!");
    cvReleaseCapture(&capture);
    return -1;
  }
  int w = img->width;
  int h = img->height;

  IplImage *imgAverage = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgSgm = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgTmp = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *img_lower = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *img_upper = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 3);
  IplImage *imgSilhouette = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgSilhouetteInv = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgResult = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 3);

  printf("背景初期化中...\n");
  // Accumulate the mean luminance over INIT_TIME frames
  cvSetZero(imgAverage);
  for(int i = 0; i < INIT_TIME; i++){
    img = cvQueryFrame(capture);
    if(img == NULL) break; // bug fix: cvAcc crashed on a dropped frame
    cvAcc(img, imgAverage);
    printf("輝度平均 %d/%d\n", i, INIT_TIME);
  }
  cvConvertScale(imgAverage, imgAverage, 1.0 / INIT_TIME);
  // Accumulate the mean luminance amplitude: sqrt(2) * |frame - mean|
  cvSetZero(imgSgm);
  for(int i = 0; i < INIT_TIME; i++){
    img = cvQueryFrame(capture);
    if(img == NULL) break; // bug fix: same NULL-frame guard as above
    cvConvert(img, imgTmp);
    cvSub(imgTmp, imgAverage, imgTmp);
    cvPow(imgTmp, imgTmp, 2.0);
    cvConvertScale(imgTmp, imgTmp, 2.0);
    cvPow(imgTmp, imgTmp, 0.5);
    cvAcc(imgTmp, imgSgm);
    printf("輝度振幅 %d/%d\n", i, INIT_TIME);
  }
  cvConvertScale(imgSgm, imgSgm, 1.0 / INIT_TIME);
  printf("背景初期化完了\n");

  char winNameCapture[] = "Capture";
  char winNameSilhouette[] = "Silhouette";
  cvNamedWindow(winNameCapture, CV_WINDOW_AUTOSIZE);
  cvNamedWindow(winNameSilhouette, CV_WINDOW_AUTOSIZE);

  while(1){
    if(!isStop){
      img = cvQueryFrame(capture);
      if(img == NULL) break;
      cvConvert(img, imgTmp);

      // Background range: [mean - amplitude - Zeta, mean + amplitude + Zeta]
      cvSub(imgAverage, imgSgm, img_lower);
      cvSubS(img_lower, cvScalarAll(Zeta), img_lower);
      cvAdd(imgAverage, imgSgm, img_upper);
      cvAddS(img_upper, cvScalarAll(Zeta), img_upper);
      cvInRange(imgTmp, img_lower, img_upper, imgSilhouette);

      // Current luminance amplitude of this frame
      cvSub(imgTmp, imgAverage, imgTmp);
      cvPow(imgTmp, imgTmp, 2.0);
      cvConvertScale(imgTmp, imgTmp, 2.0);
      cvPow(imgTmp, imgTmp, 0.5);

      // Update the background model only where classified as background
      cvRunningAvg(img, imgAverage, BG_RATIO, imgSilhouette);
      cvRunningAvg(imgTmp, imgSgm, BG_RATIO, imgSilhouette);

      // Update the object (foreground) region at the slower rate
      cvNot(imgSilhouette, imgSilhouetteInv);
      cvRunningAvg(imgTmp, imgSgm, OBJ_RATIO, imgSilhouetteInv);

      cvErode(imgSilhouette, imgSilhouette, NULL, 1);  // erosion
      cvDilate(imgSilhouette, imgSilhouette, NULL, 2); // dilation
      cvErode(imgSilhouette, imgSilhouette, NULL, 1);  // erosion

      cvMerge(imgSilhouette, imgSilhouette, imgSilhouette, NULL, imgResult);
      cvShowImage(winNameCapture, img);
      cvShowImage(winNameSilhouette, imgResult);
    }
    int waitKey = cvWaitKey(33);
    if(waitKey == 'q') break;
    if(waitKey == ' '){
      isStop = !isStop;
      if(isStop) printf("stop\n");
      else printf("start\n");
    }
  }

  // Release everything created above (bug fix: all work images leaked)
  cvReleaseImage(&imgAverage);
  cvReleaseImage(&imgSgm);
  cvReleaseImage(&imgTmp);
  cvReleaseImage(&img_lower);
  cvReleaseImage(&img_upper);
  cvReleaseImage(&imgSilhouette);
  cvReleaseImage(&imgSilhouetteInv);
  cvReleaseImage(&imgResult);
  cvReleaseCapture(&capture);
  cvDestroyWindow(winNameCapture);
  cvDestroyWindow(winNameSilhouette);

  return 0;
}
/* ---------------------------- Example #22 (0) ---------------------------- */
int main( int argc, char** argv )
{
    /*
     * Line-follower driver: grabs camera frames, runs line/edge detection,
     * derives a one-byte motor command and writes it to the serial port,
     * and overlays debug text on the video stream.  ESC quits.
     */

    ///////////
    // serial stuff
    int fd = 0;
    int baudrate = B19200;
    //int baudrate = B115200;  // default

    fd = serialport_init("/dev/ttyUSB0", baudrate);
    if(fd==-1) return -1;
    usleep(3000 * 1000 ); // give the controller time to reset after opening
    ///////////

    int c = 0;

    //capture from camera
    CvCapture *capture = cvCaptureFromCAM(1);
    //quit if camera not found
    if(!capture) {
        printf("cannot init capture!\n");
        return -1;
    }

    //display original video stream
    cvNamedWindow("stream", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("hue", CV_WINDOW_NORMAL);
    cvResizeWindow("hue", 320, 240);
    cvMoveWindow("hue", 640, 0);

    CvFont font;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0, 2, CV_AA);

    //keep capturing frames until escape
    while(c != 27) //27 is escape key
    {
        //quit if can't grab frame
        if(!(frame = cvQueryFrame(capture))) break;

        //edge detection - todo: HSV color filtering
        findLine(frame);

        //edge detection
        findEdge(frame);
        if(flag == 2)
            cvPutText(frame, "right edge", cvPoint(30, 400), &font, cvScalar(255, 0, 0, 0));
        else if(flag == 1)
            cvPutText(frame, "left edge", cvPoint(30, 400), &font, cvScalar(255, 0, 0, 0));

        //display center of gravity in coordinates
        COG(&cog_x, &cog_y);
        char x_coord[16];
        char y_coord[16];
        // bug fix: bounded snprintf instead of sprintf into small buffers
        snprintf(x_coord, sizeof x_coord, "%1.2f", cog_x);
        snprintf(y_coord, sizeof y_coord, "%1.2f", cog_y);

        //find center of y coordinate in left and right edge
        COG_edges(&left_y, &right_y);

        printf("%1.2f\n", left_y);
        printf("%1.2f\n", right_y);

        uint8_t b = 0b00000000;
        //motor logic: xxxx_xxxx = motor1_motor2, 1-15 -> -7,7 -> 8 = 0 = 1000
        char motor1[10];
        char motor2[10];
        if(flag == 1)
        {
            b = 0b00010001;
            snprintf(motor1, sizeof motor1, "%s", "backward");
            snprintf(motor2, sizeof motor2, "%s", "backward");
        }
        else if(flag == 2)
        {
            b = 0b10011001;
            snprintf(motor1, sizeof motor1, "%s", "forward");
            snprintf(motor2, sizeof motor2, "%s", "forward");
        }
        else if((int)(left_y/10.0) - (int)(right_y/10.0) < -4) //rotate right
        {
            b = 0b10010001;
            snprintf(motor1, sizeof motor1, "%s", "forward");
            snprintf(motor2, sizeof motor2, "%s", "backward");
        }
        else if((int)(right_y/10.0) - (int)(left_y/10.0) < -4) //rotate left
        {
            b = 0b00011001;
            snprintf(motor1, sizeof motor1, "%s", "backward");
            snprintf(motor2, sizeof motor2, "%s", "forward");
        }
        else
        {
            b = 0;
            snprintf(motor1, sizeof motor1, "%s", "STOP");
            snprintf(motor2, sizeof motor2, "%s", "STOP");
        }

        //SERIAL - send the motor command byte
        if(write(fd, &b, 1) != 1) // bug fix: the write result was ignored
            perror("serial write");

        //cvPutText(frame, x_coord, cvPoint(30,300), &font, cvScalar(255, 0, 0, 0));
        cvPutText(frame, "y:", cvPoint(0,350), &font, cvScalar(255, 0, 0, 0));
        cvPutText(frame, y_coord, cvPoint(30,350), &font, cvScalar(255, 0, 0, 0));

        cvPutText(frame, "motor1:", cvPoint(0,150), &font, cvScalar(255, 0, 0, 0));
        cvPutText(frame, motor1, cvPoint(150,150), &font, cvScalar(255, 0, 0, 0));
        cvPutText(frame, "motor2:", cvPoint(0,200), &font, cvScalar(255, 0, 0, 0));
        cvPutText(frame, motor2, cvPoint(150,200), &font, cvScalar(255, 0, 0, 0));

        cvShowImage("stream", frame);
        c = cvWaitKey(10);
    }

    cvReleaseCapture(&capture);
    //avoid memory leaks
    cvReleaseImage(&image);
    cvReleaseImage(&red);
    cvReleaseImage(&green);
    cvReleaseImage(&red_edge);
    cvReleaseImage(&green_edge);
    cvReleaseImage(&edge);
    cvReleaseImage(&final);
    /* bug fix: `frame` comes from cvQueryFrame and is owned by the capture;
     * cvReleaseImage(&frame) was removed — releasing it is invalid. */
    cvReleaseImage(&bw);
    cvReleaseImage(&gray);

    return 0;
}
void send_frame(union sigval sv_data) {
  /*
   * Timer callback: advances `speed` frames on the capture, JPEG-encodes a
   * 250x250 thumbnail of the last one, wraps it in an interleaved RTP
   * packet ("$" framing byte + 12-byte RTP header + JPEG payload) and
   * sends it to the client socket.  Updates frame_num/time_stamp in *data.
   */
  int i;
  IplImage *image = NULL; /* bug fix: was read uninitialized when speed == 0 */
  CvMat *encoded = NULL;
  CvMat *thumb = NULL;
  send_frame_data_t *data = sv_data.sival_ptr;

  printf("speed: %d\n",data->speed);
  printf("client_fd: %d\n",data->client_fd);
  /* advance `speed` frames; only the last one is kept */
  for(i=0; i < data->speed; i++) {
    image = cvQueryFrame(data->video);
    if (!image) {
      printf("%s\n","Could not get image!");
    }
  }

  if(!image) {
    puts("could not get frame");
    return;
  }

  // set the size of the thumb
  thumb = cvCreateMat(250, 250, CV_8UC3);
  cvResize(image, thumb, CV_INTER_AREA);

  // Encode the frame in JPEG format with JPEG quality 30%.
  static const int encodeParams[] = { CV_IMWRITE_JPEG_QUALITY, 30 };
  encoded = cvEncodeImage(".jpeg", thumb, encodeParams);
  if (!encoded) { // bug fix: encoding failure was not checked
    puts("could not encode frame");
    cvReleaseMat(&thumb);
    return;
  }

  unsigned short int seq_num = data->frame_num;
  //unsigned int time_stamp = (data->speed *40) + data->time_stamp;
  unsigned int time_stamp = 40 + data->time_stamp;

  // NOTE(review): reseeding every call makes ssrc time-correlated; RTP
  // expects a per-session random SSRC — consider seeding once at startup.
  srandom((unsigned)time(NULL));
  unsigned int ssrc = random();

  unsigned short int payloadlength =  encoded->cols;
  unsigned short int packet_len = 12 + payloadlength; //12 byte header

  // Create the rtp_packet ("$" interleaved framing adds 4 prefix bytes)
  unsigned char *rtp_packet = malloc(packet_len+4);
  if(rtp_packet == NULL) {
    printf("%s\n", "Error allocating memory to rtp_packet");
    cvReleaseMat(&thumb);
    cvReleaseMat(&encoded);
    return;
  }

  // prefix data
  rtp_packet[0] = 0x24; // $ sign
  rtp_packet[1] = 0x00;
  rtp_packet[2] = (packet_len >> 8) & 0xFF;
  rtp_packet[3] = packet_len & 0xFF; /* bug fix: mask was 0x00FFF */

  // RTP header, big-endian fields.
  // Bug fix: the 32-bit timestamp/ssrc bytes were masked without shifting,
  // so every byte except the lowest was stored as 0 after truncation.
  rtp_packet[4] = head_first & 0xFF;
  rtp_packet[5] = head_second & 0xFF;
  rtp_packet[6] = (seq_num >> 8) & 0xFF;
  rtp_packet[7] = seq_num & 0xFF;
  rtp_packet[8] = (time_stamp >> 24) & 0xFF;
  rtp_packet[9] = (time_stamp >> 16) & 0xFF;
  rtp_packet[10] = (time_stamp >> 8) & 0xFF;
  rtp_packet[11] = time_stamp & 0xFF;
  rtp_packet[12] = (ssrc >> 24) & 0xFF;
  rtp_packet[13] = (ssrc >> 16) & 0xFF;
  rtp_packet[14] = (ssrc >> 8) & 0xFF;
  rtp_packet[15] = ssrc & 0xFF;
  //payload
  printf("1\n");
  /* bug fix: `sizeof rtp_packet` printed the pointer size, not the length */
  printf("rtp_packet length: %d, payloadlength: %d, encoded length: %d\n",
  packet_len + 4, payloadlength, encoded->cols);

  memcpy(rtp_packet+16, encoded->data.ptr, payloadlength);

  // return the packet
  if(send(data->client_fd, rtp_packet, packet_len+4,0) == -1) {
    puts("error sending packet");
    perror("send");
  }

  free(rtp_packet);
  cvReleaseMat(&thumb);   /* bug fix: thumb and encoded leaked every call */
  cvReleaseMat(&encoded);
  printf("%d\n",data->frame_num);
  printf("%d\n",data->time_stamp);
  data->frame_num = data->frame_num + data->speed;
  data->time_stamp = time_stamp;
}
/* ---------------------------- Example #24 (0) ---------------------------- */
int main(int argc, char **argv)
{
    /*
     * Hand-gesture tracker: segments the hand in each frame, finds its
     * contour, convex hull and fingers, and prints a movement direction
     * whenever the hand center shifts by more than a pixel threshold.
     * 'q' quits.
     */
    struct ctx ctx = { };
    int key;
    CvPoint last_center;
    last_center.x = 0;
    last_center.y = 0;
    int threshold_x = 50; // min horizontal shift (pixels) to report a move
    int threshold_y = 50; // min vertical shift (pixels) to report a move

    init_capture(&ctx);
    init_windows();
    init_ctx(&ctx);

    do
    {
        ctx.image = cvQueryFrame(ctx.capture);
        if (!ctx.image) // bug fix: NULL frame at stream end crashed the filters
            break;

        filter_and_threshold(&ctx);
        find_contour(&ctx);
        find_convex_hull(&ctx);
        find_fingers(&ctx);

        display(&ctx);

        // Only track when a hand is actually detected this frame
        if(ctx.num_fingers && ctx.hand_radius)
        {
            if(!last_center.x)
            {
                /* first detection: just remember the position */
                last_center = ctx.hand_center;
            }
            else
            {
                // Check if the position changed beyond the thresholds.
                // NOTE(review): +x is reported as "move left" — presumably a
                // mirrored camera view; confirm the intended mapping.
                if( abs(ctx.hand_center.x - last_center.x)  > threshold_x )
                {
                    if( ctx.hand_center.x - last_center.x > 0 )
                    {
                        printf("move left\n");
                    }
                    else
                    {
                        printf("move right\n");
                    }
                }

                if( abs(ctx.hand_center.y - last_center.y)  > threshold_y )
                {
                    if( ctx.hand_center.y - last_center.y > 0 )
                    {
                        printf("move down\n");
                    }
                    else
                    {
                        printf("move up\n");
                    }
                }

                last_center = ctx.hand_center;
            }
        }

        key = cvWaitKey(1);
    }
    while (key != 'q');

    return 0;
}
/* ---------------------------- Example #25 (0) ---------------------------- */
int main(int argc, char **argv)
{
  std::cout << "Using OpenCV " << CV_MAJOR_VERSION << "." << CV_MINOR_VERSION << "." << CV_SUBMINOR_VERSION << std::endl;

  CvCapture *capture = 0;
  int resize_factor = 100;

  if(argc > 1)
  {
    std::cout << "Openning: " << argv[1] << std::endl;
    capture = cvCaptureFromAVI(argv[1]);
  }
  else
  {
    capture = cvCaptureFromCAM(0);
    resize_factor = 50; // set size = 50% of original image
  }

  if(!capture)
  {
    std::cerr << "Cannot initialize video!" << std::endl;
    return -1;
  }
  
  IplImage *frame_aux = cvQueryFrame(capture);
  IplImage *frame = cvCreateImage(cvSize((int)((frame_aux->width*resize_factor)/100) , (int)((frame_aux->height*resize_factor)/100)), frame_aux->depth, frame_aux->nChannels);
  cvResize(frame_aux, frame);

  /* Background Subtraction Methods */
  IBGS *bgs;

  /*** Default Package ***/
  bgs = new FrameDifferenceBGS;
  //bgs = new StaticFrameDifferenceBGS;
  //bgs = new WeightedMovingMeanBGS;
  //bgs = new WeightedMovingVarianceBGS;
  //bgs = new MixtureOfGaussianV1BGS;
  //bgs = new MixtureOfGaussianV2BGS;
  //bgs = new AdaptiveBackgroundLearning;
  //bgs = new AdaptiveSelectiveBackgroundLearning;
  //bgs = new GMG;
  
  /*** DP Package (thanks to Donovan Parks) ***/
  //bgs = new DPAdaptiveMedianBGS;
  //bgs = new DPGrimsonGMMBGS;
  //bgs = new DPZivkovicAGMMBGS;
  //bgs = new DPMeanBGS;
  //bgs = new DPWrenGABGS;
  //bgs = new DPPratiMediodBGS;
  //bgs = new DPEigenbackgroundBGS;
  //bgs = new DPTextureBGS;

  /*** TB Package (thanks to Thierry Bouwmans, Fida EL BAF and Zhenjie Zhao) ***/
  //bgs = new T2FGMM_UM;
  //bgs = new T2FGMM_UV;
  //bgs = new T2FMRF_UM;
  //bgs = new T2FMRF_UV;
  //bgs = new FuzzySugenoIntegral;
  //bgs = new FuzzyChoquetIntegral;

  /*** JMO Package (thanks to Jean-Marc Odobez) ***/
  //bgs = new MultiLayerBGS;

  /*** PT Package (thanks to Martin Hofmann, Philipp Tiefenbacher and Gerhard Rigoll) ***/
  //bgs = new PixelBasedAdaptiveSegmenter;

  /*** LB Package (thanks to Laurence Bender) ***/
  //bgs = new LBSimpleGaussian;
  //bgs = new LBFuzzyGaussian;
  //bgs = new LBMixtureOfGaussians;
  //bgs = new LBAdaptiveSOM;
  //bgs = new LBFuzzyAdaptiveSOM;

  /*** LBP-MRF Package (thanks to Csaba Kertész) ***/
  //bgs = new LbpMrf;

  /*** AV Package (thanks to Lionel Robinault and Antoine Vacavant) ***/
  //bgs = new VuMeter;

  /*** EG Package (thanks to Ahmed Elgammal) ***/
  //bgs = new KDE;
  
  /*** DB Package (thanks to Domenico Daniele Bloisi) ***/
  //bgs = new IndependentMultimodalBGS;

  /*** SJN Package (thanks to SeungJong Noh) ***/
  //bgs = new SJN_MultiCueBGS;

  /*** BL Package (thanks to Benjamin Laugraud) ***/
  //bgs = new SigmaDeltaBGS;

  int key = 0;
  while(key != 'q')
  {
    frame_aux = cvQueryFrame(capture);
    if(!frame_aux) break;

    cvResize(frame_aux, frame);
    
    cv::Mat img_input(frame);
    cv::imshow("input", img_input);

    cv::Mat img_mask;
    cv::Mat img_bkgmodel;
    bgs->process(img_input, img_mask, img_bkgmodel); // by default, it shows automatically the foreground mask image
    
    //if(!img_mask.empty())
    //  cv::imshow("Foreground", img_mask);
    //  do something
    
    key = cvWaitKey(33);
  }

  delete bgs;

  cvDestroyAllWindows();
  cvReleaseCapture(&capture);

  return 0;
}
/* ---------------------------- Example #26 (0) ---------------------------- */
void CameraStream::init()
{
	/** Grab the first frame to learn the capture geometry, initialize the
	 *  software scaler (sws_), create the x264 encoder, and wire up the
	 *  RTP send/receive filter graphs.  (Translated from the original
	 *  Chinese comment.)
	 */
	IplImage *img = cvQueryFrame(cap_);
	// FIXME: is the capture format really RGB24? (translated from original)
	sws_ = sws_getContext(img->width, img->height, PIX_FMT_RGB24, WIDTH, HEIGHT, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, 0, 0, 0);

	// x264 encoder configured for low-latency streaming
	x264_param_t param;
	x264_param_default_preset(&param, "veryfast", "zerolatency");
	param.i_threads = 0;
	param.i_width = WIDTH;
	param.i_height = HEIGHT;
	param.i_keyint_max = FPS * 2;    // keyframe at least every 2 seconds
	param.i_fps_den = 1;
	param.i_fps_num = FPS;
	param.i_slice_max_size = 1300;   // keep slices under a typical MTU
	param.b_repeat_headers = 1;      // resend SPS/PPS with keyframes
	param.b_annexb = 1;              // Annex-B start codes
	param.rc.i_rc_method = X264_RC_ABR;
	param.rc.i_bitrate = KBPS;
	param.rc.i_vbv_max_bitrate = KBPS*1.1;
		
	encoder_ = x264_encoder_open(&param);

	avpicture_alloc(&pic_, PIX_FMT_YUV420P, WIDTH, HEIGHT);

	// Bidirectional RTP session towards the server, with an adaptive
	// jitter buffer on the receive side
	rtp_ = rtp_session_new(RTP_SESSION_SENDRECV);
	rtp_session_set_payload_type(rtp_, 100);
	rtp_session_set_remote_addr_and_port(rtp_, server_ip_.c_str(), server_rtp_port_, server_rtcp_port_);
	rtp_session_set_local_addr(rtp_, util_get_myip(), 0, 0);
	JBParameters jb;
	jb.adaptive = 1;
	jb.max_packets = 500;
	jb.max_size = -1;
	jb.min_size = jb.nom_size = 300;
	rtp_session_set_jitter_buffer_params(rtp_, &jb);

	// Sender graph: ZonekeyH264Source -> RTP sender
	filter_rtp_sender_ = ms_filter_new(MS_RTP_SEND_ID);
	ms_filter_call_method(filter_rtp_sender_, MS_RTP_SEND_SET_SESSION, rtp_);

	filter_h264_sender_ = ms_filter_new_from_name("ZonekeyH264Source");
	ms_filter_call_method(filter_h264_sender_, ZONEKEY_METHOD_H264_SOURCE_GET_WRITER_PARAM, &sender_params_);

	// Receiver graph: RTP receiver -> H.264 decoder -> YUV sink
	filter_rtp_recver_ = ms_filter_new(MS_RTP_RECV_ID);
	ms_filter_call_method(filter_rtp_recver_, MS_RTP_RECV_SET_SESSION, rtp_);

	filter_decoder_ = ms_filter_new(MS_H264_DEC_ID);

	filter_yuv_sink_ = ms_filter_new_from_name("ZonekeyYUVSink");
	// TODO: display ... (translated from original)

	ms_filter_link(filter_rtp_recver_, 0, filter_decoder_, 0);
	ms_filter_link(filter_decoder_, 0, filter_yuv_sink_, 0);

	// Separate tickers drive the receive and send pipelines
	ticker_recver_ = ms_ticker_new();
	ms_ticker_attach(ticker_recver_, filter_rtp_recver_);

	ms_filter_link(filter_h264_sender_, 0, filter_rtp_sender_, 0);

	ticker_sender_ = ms_ticker_new();
	ms_ticker_attach(ticker_sender_, filter_h264_sender_);
}
/* ---------------------------- Example #27 (0) ---------------------------- */
void mexFunction(int output_size, mxArray *output[], int input_size, const mxArray *input[]) {
    
    char* input_buf;
    /* copy the string data from input[0] into a C string input_ buf. */
    input_buf = mxArrayToString(I_IN);
    CvCapture* capture = 0;

    capture = cvCaptureFromAVI(input_buf);
    if (!capture) {
        fprintf(stderr, "Could not initialize capturing...\n");
    }

    cvNamedWindow( "LkDemo", 0 );

    for(;;) {
        init = clock();
        IplImage* frame = 0;
        int i, k, c;
        
        frame = cvQueryFrame( capture );
        if (!frame)
            break;

        if (!image) {
            /* allocate all the buffers */
            image = cvCreateImage(cvGetSize(frame), 8, 3);
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT * sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT * sizeof(points[0][0]));
            pointadd[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT * sizeof(points[0][0]));
            ptcolor = (int*)cvAlloc(MAX_COUNT*sizeof(ptcolor[0]));
            status = (char*)cvAlloc(MAX_COUNT);
            flags = 0;
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, grey, CV_BGR2GRAY );
        //CvRect rect = cvRect(image->width/2-50, 0, 100,image->height*0.6);
        
        if (night_mode)
            cvZero( image );

        countlast = ptcount;
        if (need_to_add) {
            /* automatic initialization */
            IplImage* eig = cvCreateImage(cvGetSize(grey), 32, 1);
            IplImage* temp = cvCreateImage(cvGetSize(grey), 32, 1);
            double quality = 0.01;
            double min_distance = 10;
            
            countadd = MAX_COUNT;
            //cvSetImageROI(grey, rect);
            //cvSetImageROI(eig, rect);
            //cvSetImageROI(temp, rect);
            
            cvGoodFeaturesToTrack(grey, eig, temp, pointadd[0], &countadd, quality, min_distance, 0, 3, 0, 0.04);
            cvFindCornerSubPix(grey, pointadd[0], countadd, cvSize(win_size, win_size), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));

            //for(l=0;l<countadd;l++)
            //	pointadd[0][l].x = pointadd[0][l].x + image->width/2-50;
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );
            //cvResetImageROI(grey);
            for (m = 0; m < countadd; m++) {
                flag = 1;
                for (i = 0; i < countlast; i++) {
                    double dx = pointadd[0][m].x - points[0][i].x;
                    double dy = pointadd[0][m].y - points[0][i].y;

                    if( dx*dx + dy*dy <= 100 ) {
                        flag = 0;
                        break;
                    }
                }

                if (flag==1) {
                    points[0][ptcount++] = pointadd[0][m];
                    cvCircle(image, cvPointFrom32f(points[1][ptcount-1]), 3, CV_RGB(255, 0, 0), -1, 8, 0);
                }
                if (ptcount >= MAX_COUNT) {
                    break;
                }
            }
        }

        if (need_to_init) {
            /* automatic initialization */
            IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
            IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
            double quality = 0.01;
            double min_distance = 10;
            
            ptcount = MAX_COUNT;
            cvGoodFeaturesToTrack(grey, eig, temp, points[1], &ptcount, quality, min_distance, 0, 3, 0, 0.04);
            cvFindCornerSubPix(grey, points[1], ptcount, cvSize(win_size, win_size), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
            cvReleaseImage( &eig );
            cvReleaseImage( &temp );
            add_remove_pt = 0;
            /* set the point color */
            for( i=0; i<ptcount; i++ ){
                switch (i%5) {
                    case 0:
                        ptcolor[i] = 0;
                        break;
                    case 1:
                        ptcolor[i] = 1;
                        break;
                    case 2:
                        ptcolor[i] = 2;
                        break;
                    case 3:
                        ptcolor[i] = 3;
                        break;
                    case 4:
                        ptcolor[i] = 4;
                        break;
                    default:
                        ptcolor[i] = 0;
                }
            }
        }
        else if( ptcount > 0 ) {
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                    points[0], points[1], ptcount, cvSize(win_size, win_size), 3, status, 0,
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;
            for( i = k = 0; i < ptcount; i++ ) {
                if( add_remove_pt ) {
                    double dx = pointadd[0][m].x - points[1][i].x;
                    double dy = pointadd[0][m].y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 ) {
                        add_remove_pt = 0;
                        continue;
                    }
                }

                pt = cvPointFrom32f(points[1][i]);
                pttl.x = pt.x-3; pttl.y = pt.y-3; // point top left
                ptdr.x = pt.x+3; ptdr.y = pt.y+3; // point down right

                if( !status[i] ){
                    pt = cvPointFrom32f(points[0][i]);
                    cvCircle( image, pt, 3, CV_RGB(0, 0, 255), -1, 8, 0);
                    continue;
                }

                pt = cvPointFrom32f(points[1][i]);
                points[1][k] = points[1][i];
                if(i<countlast){
                    /* matched feats */
                    ptcolor[k] = ptcolor[i];
                    switch (ptcolor[k]) {
                        case 0:
                            cvCircle( image, pt, 3, CV_RGB(0, 255, 0), -1, 8, 0);
                            break;
                        case 1:
                            cvCircle( image, pt, 3, CV_RGB(255, 255, 0), -1, 8, 0);
                            break;
                        case 2:
                            cvCircle( image, pt, 3, CV_RGB(0, 255, 255), -1, 8, 0);
                            break;
                        case 3:
                            cvCircle( image, pt, 3, CV_RGB(255, 0, 255), -1, 8, 0);
                            break;
                        case 4:
                            cvCircle( image, pt, 3, CV_RGB(255, 0, 0), -1, 8, 0);                            
                            break;
                        default:
                            cvCircle( image, pt, 3, CV_RGB(0, 255, 0), -1, 8, 0);
                    }
                }
                else
                    /* new feats */
                    switch (k%5) {
                        case 0:
                            //  void cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, double color, int thickness=1 );
                            cvRectangle( image, pttl, ptdr, CV_RGB(0, 255, 0), -1, 8, 0);
                            ptcolor[k] = 0;
                            break;
                        case 1:
                            cvRectangle( image, pttl, ptdr, CV_RGB(255, 255, 0), -1, 8, 0);
                            ptcolor[k] = 1;
                            break;
                        case 2:
                            cvRectangle( image, pttl, ptdr, CV_RGB(0, 255, 255), -1, 8, 0);
                            ptcolor[k] = 2;
                            break;
                        case 3:
                            cvRectangle( image, pttl, ptdr, CV_RGB(255, 0, 255), -1, 8, 0);
                            ptcolor[k] = 3;
                            break;
                        case 4:
                            cvRectangle( image, pttl, ptdr, CV_RGB(255, 0, 0), -1, 8, 0);
                            ptcolor[k] = 4;
                            break;
                        default:
                            cvRectangle( image, pttl, ptdr, CV_RGB(0, 255, 0), -1, 8, 0);
                    }
                    k++;
            }
            ptcount = k;
        }

        if( add_remove_pt && ptcount < MAX_COUNT ) {
            points[1][ptcount++] = cvPointTo32f(pt);
            cvFindCornerSubPix( grey, points[1] + ptcount - 1, 1,
                    cvSize(win_size, win_size), cvSize(-1, -1),
                    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
            add_remove_pt = 0;
        }

        CV_SWAP( prev_grey, grey, swap_temp );
        CV_SWAP( prev_pyramid, pyramid, swap_temp );
        CV_SWAP( points[0], points[1], swap_points );
        need_to_init = 0;
        cvShowImage( "LkDemo", image );

        std::string filename = "Rst/Rst";
        std::string seq;
        std::ostringstream fs;
        fs << imgseq << "\n";
        std::istringstream input(fs.str());
        input >> seq>> imgseq;
        filename += seq + ".jpg";
        cvSaveImage(filename.c_str(), image);
        imgseq++;
        if(imgseq>500)
            break;

        c = cvWaitKey(10);
        if( (char)c == 27 )
            break;
        switch( (char) c ) {
            case 'r':
                need_to_init = 1;
                break;
            case 'c':
                ptcount = 0;
                break;
            case 'n':
                night_mode ^= 1;
                break;
            default:
                ;
        }
        if (ptcount<100) {
            need_to_init =1;
        }
        if (ptcount>50&&ptcount<MAX_COUNT) {
            need_to_add = 1;
        }
        final = clock()-init;
    }
/* ---------------------------- Example #28 (0) ---------------------------- */
int main(void)
{
	int key = 0;
	int seek_line_idx = 120;  // image row scanned for intensity values

	// End points of the horizontal seek line drawn on each frame.
	CvPoint pt1 = cvPoint(0, seek_line_idx);
	CvPoint pt2 = cvPoint(350, seek_line_idx);

	// Vertical guide lines (the right one is drawn only in commented-out code).
	CvPoint pt1_beam_right = cvPoint(180, 0);
	CvPoint pt2_beam_right = cvPoint(180, 250);

	CvPoint pt1_beam_left = cvPoint(160, 0);
	CvPoint pt2_beam_left = cvPoint(160, 250);

	CvScalar red = CV_RGB(250, 0, 0);
	CvScalar white = CV_RGB(255, 255, 255);

	int thickness = 1;
	int connectivity = 8;
	int sub, res;
	int j;
	char buffer[BUFSIZ*2], *ptr;  // BUFSIZ = 1024
	int array[350];               // per-column background intensity profile

	printf("Array Length: %d\n", (int)ARRAYSIZE(array));

	// Load the background profile: one comma-separated line of integers.
	const char filename[] = "example.txt";
	FILE *file = fopen(filename, "r");
	if ( file ){
		// Read one newline-terminated line; bail out on an empty file
		// (the original ignored the fgets result and would then parse
		// whatever stale data happened to be in 'buffer').
		if ( !fgets(buffer, sizeof(buffer), file) ){
			printf("Can't load example.txt");
			fclose(file);
			return 0;
		}

		printf("Buf Length: %d\n", (int)ARRAYSIZE(buffer));
		//printf("%s\n\n", buffer);

		// Parse the comma-separated values from the line into 'array'.
		// ++ptr skips the separator character left behind by strtol.
		for (ptr = buffer, j = 0; j < ARRAYSIZE(array); ++j, ++ptr )
		{
			array[j] = (int)strtol(ptr, &ptr, 10);
			//printf("%d: %d\n", j, array[j]);
		}
		fclose(file);
	}
	else{
		printf("Can't load example.txt");
		return 0;
	}

	// Initialize capture source and OpenCV images.
	//CvCapture *capture = cvCaptureFromCAM( 0 );
	CvCapture *capture = cvCaptureFromAVI( "sample_plug.avi" );
	if ( !capture ){
		// Guard added: cvQueryFrame(NULL) below would otherwise crash.
		printf("Can't open sample_plug.avi\n");
		return 0;
	}

	IplImage *frame = cvQueryFrame( capture );
	IplImage *gray_frame = cvCreateImage( cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1 );

	int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
	// Some containers report no FPS metadata (0 or negative); that would
	// make "1000 / fps" in the display loop divide by zero. Fall back to 25.
	if ( fps <= 0 )
		fps = 25;

	printf("\nFPS: %f\n", float( fps ));
	printf("Image Width: %d\n", int( frame->width ));
	printf("Image Height: %d\n", int( frame->height ));

	cvNamedWindow("video", CV_WINDOW_AUTOSIZE );
	cvNamedWindow("Plot", CV_WINDOW_AUTOSIZE );

	// Background-subtracted intensity along the seek line (one per column).
	float clean_signal[frame->width];

	CvSize gauss_size = cvSize( 11, 11 );  // only used by the commented-out blur below

	while( key != 'x' )
	{
		frame = cvQueryFrame( capture );

		if( !frame )
			break;

		cvCvtColor( frame, gray_frame, CV_RGB2GRAY );
		//cvGaussianBlur(gray_frame, gray_frame, gauss_size, 0);

		// 'p' toggles pause: poll until 'p' (resume) or ESC is pressed.
		if( key == 'p'){
			key = 0;
			while( key != 'p' && key != 27 ){
				key = cvWaitKey( 250 );
			}
		}

		for( int i = gray_frame->width-1; i >= 0 ; i-- )
		{
			// Image intensity on the seek line:
			//uchar val = gray_frame.at<uchar>(seek_line_idx, i);
			uchar val = CV_IMAGE_ELEM( gray_frame, uchar, seek_line_idx, i );

			// Background intensity at the same column:
			sub = array[i];

			// Avoid chaos if intensity - background < 0
			res = (255-val) + uchar( sub )-250;
			if(res < 0)
				res = 1;

			// Save intensity-minus-background value
			clean_signal[i] = res;

			// plot curve:
			//plt.at<uchar>(res, i) = 250;
			//std::cout << res << "\n";
		}

		// Scan right-to-left for the first column whose cleaned signal
		// exceeds the threshold, and mark it with a vertical line.
		// BUGFIX: this loop previously started at i == width, reading one
		// element past the end of clean_signal (out-of-bounds / UB).
		for( int i = gray_frame->width - 1; i >= 0; i-- )
		{
			if( double(clean_signal[i]) > 80.0 )
			{
				CvPoint pt1_plug = cvPoint( i, 0 );
				CvPoint pt2_plug = cvPoint( i, 250 );

				cvLine( gray_frame, pt1_plug, pt2_plug, white, thickness, connectivity );
				//line_location = i;
				break;
			}
		}

		cvLine(gray_frame, pt1, pt2, red, thickness, connectivity);
		//cvLine(gray_frame, pt1_beam_right, pt2_beam_right, red, thickness, connectivity);
		cvLine(gray_frame, pt1_beam_left, pt2_beam_left, red, thickness, connectivity);

		cvShowImage( "Plot", gray_frame );

		key = cvWaitKey( 1000 / fps );
	}

	cvReleaseCapture( &capture );
	// NOTE(review): frame is owned by the capture, which is why releasing it
	// crashed; gray_frame is ours, so it is safe (and correct) to release.
	cvReleaseImage( &gray_frame );
	cvDestroyWindow( "video" );
	cvDestroyWindow( "Plot" );

	return 0;

} //end
Example #29
0
//
//USAGE:  ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
//
int main(int argc, char** argv)
{
	IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
	IplImage *ImaskAVG = 0,*ImaskAVGCC = 0;
	IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
	CvCapture* capture = 0;

	int startcapture = 1;   // first frame used to learn the background
	int endcapture = 30;    // last frame used to learn the background
	int c,n;

	maxMod[0] = 3;  //Set color thresholds to default values
	minMod[0] = 10;
	maxMod[1] = 1;
	minMod[1] = 1;
	maxMod[2] = 1;
	minMod[2] = 1;
	float scalehigh = HIGH_SCALE_NUM;
	float scalelow = LOW_SCALE_NUM;

	if(argc < 3) {
		printf("ERROR: Too few parameters\n");
		help();
	}else{
		if(argc == 3){
			printf("Capture from Camera\n");
			capture = cvCaptureFromCAM( 0 );
		}
		else {
			printf("Capture from file %s\n",argv[3]);
	//		capture = cvCaptureFromFile( argv[3] );
			capture = cvCreateFileCapture( argv[3] );
			if(!capture) { printf("Couldn't open %s\n",argv[3]); return -1;}

			// BUGFIX: arrays cannot be brace-assigned after declaration
			// ("minMod = {42,24,33};" does not compile in C or C++).
			// Set the file-capture threshold defaults element by element.
			minMod[0] = 42; minMod[1] = 24; minMod[2] = 33;
			maxMod[0] = 14; maxMod[1] = 3;  maxMod[2] = 2;
		}
		if(isdigit(argv[1][0])) { //Start frame of background capture
			startcapture = atoi(argv[1]);
			printf("startcapture = %d\n",startcapture);
		}
		if(isdigit(argv[2][0])) { //End frame of background capture
			endcapture = atoi(argv[2]);
			printf("endcapture = %d\n",endcapture);
		}

		if(argc > 4){ //See if parameters are set from command line
			//FOR AVG MODEL
			if(argc >= 5){
				if(isdigit(argv[4][0])){
					scalehigh = (float)atoi(argv[4]);
				}
			}
			if(argc >= 6){
				if(isdigit(argv[5][0])){
					scalelow = (float)atoi(argv[5]);
				}
			}
			//FOR CODEBOOK MODEL, CHANNEL 0
			if(argc >= 7){
				if(isdigit(argv[6][0])){
					maxMod[0] = atoi(argv[6]);
				}
			}
			if(argc >= 8){
				if(isdigit(argv[7][0])){
					minMod[0] = atoi(argv[7]);
				}
			}
			//Channel 1
			if(argc >= 9){
				if(isdigit(argv[8][0])){
					maxMod[1] = atoi(argv[8]);
				}
			}
			if(argc >= 10){
				if(isdigit(argv[9][0])){
					minMod[1] = atoi(argv[9]);
				}
			}
			//Channel 2
			if(argc >= 11){
				if(isdigit(argv[10][0])){
					maxMod[2] = atoi(argv[10]);
				}
			}
			if(argc >= 12){
				if(isdigit(argv[11][0])){
					minMod[2] = atoi(argv[11]);
				}
			}

		}
	}

	/*dancer jiwei*/
	double vdfps = 0.0;
	CvSize vdsize = cvSize(0,0);
	//vdfps = cvGetCaptureProperty ( capture, CV_CAP_PROP_FPS);
	getVideoInfo( capture, vdfps, vdsize);
	CvVideoWriter* writer = cvCreateVideoWriter( "dancer.avi",
	                                            CV_FOURCC('D','X','5','0'),
	                                            vdfps,
	                                            vdsize);
	//end dancer jiwei


	//MAIN PROCESSING LOOP:
	bool pause = false;
	bool singlestep = false;

	if( capture )
	{
		cvNamedWindow( "Raw", 1 );
		cvNamedWindow( "AVG_ConnectComp",1);
		cvNamedWindow( "ForegroundCodeBook",1);
		cvNamedWindow( "CodeBook_ConnectComp",1);
		cvNamedWindow( "ForegroundAVG",1);
		//Only dancer jiwei 2012.3.3
		cvNamedWindow( "OnlyDancer",1);
		cvNamedWindow( "RectDancer",1);
		int i = -1;

		for(;;)
		{
			if(!pause){
//				if( !cvGrabFrame( capture ))
//					break;
//				rawImage = cvRetrieveFrame( capture );
				rawImage = cvQueryFrame( capture );
				++i;//count it
//				printf("%d\n",i);
				if(!rawImage)
					break;
				//REMOVE THIS FOR GENERAL OPERATION, JUST A CONVIENIENCE WHEN RUNNING WITH THE SMALL tree.avi file
				//if(i == 56){
				// NOTE(review): pauses at frame 0 although the message says
				// frame 50 (and a commented variant used 56) -- confirm the
				// intended demo frame before changing either.
				if(i==0){
					pause = 1;
					printf("\n\nVideo paused for your convienience at frame 50 to work with demo\n"
					"You may adjust parameters, single step or continue running\n\n");
					help();
				}
			}
			if(singlestep){
				pause = true;
			}
			//First time: allocate the AVG and codebook background models.
			if(0 == i) {
				printf("\n . . . wait for it . . .\n"); //Just in case you wonder why the image is white at first
				//AVG METHOD ALLOCATION
				AllocateImages(rawImage);
				scaleHigh(scalehigh);
				scaleLow(scalelow);
				ImaskAVG = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskAVGCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				cvSet(ImaskAVG,cvScalar(255));
				//CODEBOOK METHOD ALLOCATION:
				yuvImage = cvCloneImage(rawImage);
				ImaskCodeBook = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				ImaskCodeBookCC = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 1 );
				cvSet(ImaskCodeBook,cvScalar(255));
				imageLen = rawImage->width*rawImage->height;
				cB = new codeBook [imageLen];  //one codebook per pixel
				for(int f = 0; f<imageLen; f++)
				{
					cB[f].numEntries = 0;
				}
				for(int nc=0; nc<nChannels;nc++)
				{
					cbBounds[nc] = 10; //Learning bounds factor
				}
				ch[0] = true; //Allow threshold setting simultaneously for all channels
				ch[1] = true;
				ch[2] = true;
			}
			//If we've got an rawImage and are good to go:
			if( rawImage )
			{
				cvCvtColor( rawImage, yuvImage, CV_BGR2YCrCb );//YUV For codebook method
				//This is where we build our background model
				if( !pause && i >= startcapture && i < endcapture  ){
					//LEARNING THE AVERAGE AND AVG DIFF BACKGROUND
					accumulateBackground(rawImage);
					//LEARNING THE CODEBOOK BACKGROUND
					pColor = (uchar *)((yuvImage)->imageData);
					for(int c=0; c<imageLen; c++)
					{
						cvupdateCodeBook(pColor, cB[c], cbBounds, nChannels);
						pColor += 3;
					}
				}
				//When done, create the background model
				if(i == endcapture){
					createModelsfromStats();
				}
				//Find the foreground if any
				if(i >= endcapture) {
					//FIND FOREGROUND BY AVG METHOD:
					backgroundDiff(rawImage,ImaskAVG);
					cvCopy(ImaskAVG,ImaskAVGCC);
					cvconnectedComponents(ImaskAVGCC);
					//FIND FOREGROUND BY CODEBOOK METHOD
					uchar maskPixelCodeBook;
					pColor = (uchar *)((yuvImage)->imageData); //3 channel yuv image
					uchar *pMask = (uchar *)((ImaskCodeBook)->imageData); //1 channel image
					for(int c=0; c<imageLen; c++)
					{
						maskPixelCodeBook = cvbackgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);
						*pMask++ = maskPixelCodeBook;
						pColor += 3;
					}
					//This part just to visualize bounding boxes and centers if desired
					cvCopy(ImaskCodeBook,ImaskCodeBookCC);
					cvconnectedComponents(ImaskCodeBookCC);
				}

				/* Only Dancer
				    jiwei 2012.3.3*/
				IplImage *ImaDancer = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );
				cvZero(ImaDancer);
				cvCopy( rawImage, ImaDancer, ImaskCodeBookCC);
				cvShowImage( "OnlyDancer", ImaDancer);
				//cvWriteToAVI( writer,  ImaDancer);
				/*IplImage *ImaCBvideo = cvCreateImage( cvGetSize(rawImage), IPL_DEPTH_8U, 3 );
				cvConvertImage(ImaskCodeBook, ImaCBvideo, CV_GRAY2RGB);
				cvWriteToAVI( writer,  ImaCBvideo);*/
				IplImage * imgRect = cvCreateImage( cvGetSize( ImaDancer), ImaDancer->depth,
				                              ImaDancer->nChannels);
				CvPoint pntmin, pntmax;
				drawRect( ImaDancer, pntmin, pntmax);
				cvCopy( rawImage, imgRect);
				cvRectangle( imgRect, pntmin, pntmax, cvScalar(0,0,255), 1);
				CvFont font;
				double hScale=0.4;
				double vScale=0.4;
				int    lineWidth=1;
				cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale,vScale,0,lineWidth);
				cvPutText (imgRect,"The Dancer", pntmin, &font, cvScalar(255,255,255));
				cvShowImage( "RectDancer", imgRect);
				cvWriteToAVI( writer,  imgRect);
				// LEAKFIX: these two scratch images are re-allocated every
				// frame; without releasing them here the loop leaked one
				// full-size color image pair per frame.
				cvReleaseImage( &imgRect );
				cvReleaseImage( &ImaDancer );
				/*end of Only Dancer*/
				//Display
				cvShowImage( "Raw", rawImage );
				cvShowImage( "AVG_ConnectComp",ImaskAVGCC);
				cvShowImage( "ForegroundAVG",ImaskAVG);
				cvShowImage( "ForegroundCodeBook",ImaskCodeBook);
				cvShowImage( "CodeBook_ConnectComp",ImaskCodeBookCC);


				//USER INPUT:
				c = cvWaitKey(10)&0xFF;
				//End processing on ESC, q or Q
				if(c == 27 || c == 'q' || c == 'Q')
					break;
				//Else check for user input
				switch(c)
				{
					case 'h':
						help();
						break;
					case 'p':
						pause ^= 1;
						break;
					case 's':
						singlestep = 1;
						pause = false;
						break;
					case 'r':
						pause = false;
						singlestep = false;
						break;
					//AVG BACKROUND PARAMS
					case '-':
						if(i > endcapture){
							scalehigh += 0.25;
							printf("AVG scalehigh=%f\n",scalehigh);
							scaleHigh(scalehigh);
						}
						break;
					case '=':
						if(i > endcapture){
							scalehigh -= 0.25;
							printf("AVG scalehigh=%f\n",scalehigh);
							scaleHigh(scalehigh);
						}
						break;
					case '[':
						if(i > endcapture){
							scalelow += 0.25;
							printf("AVG scalelow=%f\n",scalelow);
							scaleLow(scalelow);
						}
						break;
					case ']':
						if(i > endcapture){
							scalelow -= 0.25;
							printf("AVG scalelow=%f\n",scalelow);
							scaleLow(scalelow);
						}
						break;
					//CODEBOOK PARAMS
					case 'y':
					case '0':
						ch[0] = 1;
						ch[1] = 0;
						ch[2] = 0;
						printf("CodeBook YUV Channels active: ");
						for(n=0; n<nChannels; n++)
							printf("%d, ",ch[n]);
						printf("\n");
						break;
					case 'u':
					case '1':
						ch[0] = 0;
						ch[1] = 1;
						ch[2] = 0;
						printf("CodeBook YUV Channels active: ");
						for(n=0; n<nChannels; n++)
							printf("%d, ",ch[n]);
						printf("\n");
						break;
					case 'v':
					case '2':
						ch[0] = 0;
						ch[1] = 0;
						ch[2] = 1;
						printf("CodeBook YUV Channels active: ");
						for(n=0; n<nChannels; n++)
							printf("%d, ",ch[n]);
						printf("\n");
						break;
					case 'a': //All
					case '3':
						ch[0] = 1;
						ch[1] = 1;
						ch[2] = 1;
						printf("CodeBook YUV Channels active: ");
						for(n=0; n<nChannels; n++)
							printf("%d, ",ch[n]);
						printf("\n");
						break;
					case 'b':  //both u and v together
						ch[0] = 0;
						ch[1] = 1;
						ch[2] = 1;
						printf("CodeBook YUV Channels active: ");
						for(n=0; n<nChannels; n++)
							printf("%d, ",ch[n]);
						printf("\n");
						break;
					case 'i': //modify max classification bounds (max bound goes higher)
						for(n=0; n<nChannels; n++){
							if(ch[n])
								maxMod[n] += 1;
							printf("%.4d,",maxMod[n]);
						}
						printf(" CodeBook High Side\n");
						break;
					case 'o': //modify max classification bounds (max bound goes lower)
						for(n=0; n<nChannels; n++){
							if(ch[n])
								maxMod[n] -= 1;
							printf("%.4d,",maxMod[n]);
						}
						printf(" CodeBook High Side\n");
						break;
					case 'k': //modify min classification bounds (min bound goes lower)
						for(n=0; n<nChannels; n++){
							if(ch[n])
								minMod[n] += 1;
							printf("%.4d,",minMod[n]);
						}
						printf(" CodeBook Low Side\n");
						break;
					case 'l': //modify min classification bounds (min bound goes higher)
						for(n=0; n<nChannels; n++){
							if(ch[n])
								minMod[n] -= 1;
							printf("%.4d,",minMod[n]);
						}
						printf(" CodeBook Low Side\n");
						break;
				}

			}
		}
		cvReleaseCapture( &capture );
		cvDestroyWindow( "Raw" );
		cvDestroyWindow( "ForegroundAVG" );
		cvDestroyWindow( "AVG_ConnectComp");
		cvDestroyWindow( "ForegroundCodeBook");
		cvDestroyWindow( "CodeBook_ConnectComp");
		cvDestroyWindow( "RectDancer");
		DeallocateImages();
		if(yuvImage) cvReleaseImage(&yuvImage);
		if(ImaskAVG) cvReleaseImage(&ImaskAVG);
		if(ImaskAVGCC) cvReleaseImage(&ImaskAVGCC);
		if(ImaskCodeBook) cvReleaseImage(&ImaskCodeBook);
		if(ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);
		delete [] cB;
		/*dancer*/
		cvDestroyWindow( "OnlyDancer");
	}
	else{ printf("\n\nDarn, Something wrong with the parameters\n\n"); help();
	}
	// LEAKFIX: releasing the writer also finalizes dancer.avi; without this
	// the output file could be left truncated or unplayable.
	if(writer) cvReleaseVideoWriter( &writer );
	return 0;
}
Example #30
0
void* query_frame(void* capture){
  return (void*)cvQueryFrame((CvCapture*)capture);
}