template<class T> void ShowPCM(T* pData,int nData,int nChannels,int maxWidth,int rowHeight,char* name)
{
	const int margin = 10;
	if (maxWidth & 0x3)
		maxWidth = (maxWidth & ~0x3) + 4;	// round up to a multiple of 4 (original precedence bug zeroed maxWidth)
	std::string winName = "debugPCM_";

	int row = ceil(float(nData)/maxWidth);
	IplImage *pImg = cvCreateImage(cvSize(maxWidth,rowHeight*row+margin*(row+1)),IPL_DEPTH_8U,3);
	cvSetZero(pImg);
	float value;
	int ypos,xpos;
	for (int r=0;r<row;r++)
	{
		ypos = margin*(r+1) + rowHeight*r;
		cvLine(pImg,cvPoint(0,ypos),cvPoint(pImg->width-1,ypos),CV_RGB(255,255,255),1);
		ypos += rowHeight/2;
		cvLine(pImg,cvPoint(0,ypos),cvPoint(pImg->width-1,ypos),CV_RGB(100,100,100),1);
		ypos += rowHeight/2;
		cvLine(pImg,cvPoint(0,ypos),cvPoint(pImg->width-1,ypos),CV_RGB(255,255,255),1);
	}

	if (typeid(double) == typeid(T))
		winName+="double_";
	else if (typeid(float) == typeid(T))
		winName+="float_";
	else if (typeid(int) == typeid(T))
		winName+="int_";
	else if (typeid(short) == typeid(T))
		winName+="short_";
	else if (typeid(unsigned char) == typeid(T))
		winName+="byte_";
	else
		assert(false);

	for (int d=0;d<nData;d++)
	{
		int data = 0;
		xpos = d%maxWidth;
		ypos = d/maxWidth;
		ypos = margin*(ypos+1) + rowHeight*ypos + rowHeight/2;
		if (typeid(double) == typeid(T) || typeid(float) == typeid(T))
			value = pData[d*nChannels+0];
		else if (typeid(int) == typeid(T))
			value = float(data = pData[d*nChannels+0]) / 2147483648.f;
		else if (typeid(short) == typeid(T))
			value = float(pData[d*nChannels+0]) / 32768.f;
		else if (typeid(unsigned char) == typeid(T))
			value = float(pData[d*nChannels+0]) / 128.f - 1.f;
		else
			assert(false);
		assert(value<=1.001 && value>=-1.001);

		value *= rowHeight/2;
		cvLine(pImg,cvPoint(xpos,ypos),cvPoint(xpos,ypos+value),CV_RGB(0,255,0),1);


	}

	
	winName+=name;
	cvNamedWindow(winName.c_str());
	cvShowImage(winName.c_str(),pImg);
	cvReleaseImage(&pImg);
	cvWaitKey(1);

}
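
A minimal driver for the debug helper above, offered only as a sketch: it assumes ShowPCM is visible from a shared header, that the legacy highgui header/library is available (the include path varies by OpenCV version), and the sine-wave buffer and window suffix are made up for illustration.

#include <cmath>
#include <vector>
#include <opencv/highgui.h>

int main()
{
	// Fill one channel of 16-bit PCM with a 440 Hz tone sampled at 44.1 kHz.
	const int nSamples = 2048, nChannels = 1;
	std::vector<short> pcm(nSamples * nChannels);
	for (int i = 0; i < nSamples; ++i)
		pcm[i] = short(30000.0 * sin(2.0 * 3.14159265 * 440.0 * i / 44100.0));

	// 512 samples per row, each row 100 pixels tall.
	ShowPCM(&pcm[0], nSamples, nChannels, 512, 100, (char*)"sine440");
	cvWaitKey(0);
	return 0;
}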
Example No. 2
int cvCreateTrainingSamplesFromInfo( const char* infoname, const char* vecfilename,
                                     int num,
                                     int showsamples,
                                     int winwidth, int winheight )
{
    char fullname[PATH_MAX];
    char* filename;

    FILE* info;
    FILE* vec;
    IplImage* src = NULL;
    IplImage* sample;
    int line;
    int error;
    int i;
    int x, y, width, height;
    int total;

    assert( infoname != NULL );
    assert( vecfilename != NULL );

    total = 0;
    if( !icvMkDir( vecfilename ) )
    {

#if CV_VERBOSE
        fprintf( stderr, "Unable to create directory hierarchy: %s\n", vecfilename );
#endif /* CV_VERBOSE */

        return total;
    }

    info = fopen( infoname, "r" );
    if( info == NULL )
    {

#if CV_VERBOSE
        fprintf( stderr, "Unable to open file: %s\n", infoname );
#endif /* CV_VERBOSE */

        return total;
    }

    vec = fopen( vecfilename, "wb" );
    if( vec == NULL )
    {

#if CV_VERBOSE
        fprintf( stderr, "Unable to open file: %s\n", vecfilename );
#endif /* CV_VERBOSE */

        fclose( info );

        return total;
    }

    sample = cvCreateImage( cvSize( winwidth, winheight ), IPL_DEPTH_8U, 1 );

    icvWriteVecHeader( vec, num, sample->width, sample->height );

    if( showsamples )
    {
        cvNamedWindow( "Sample", CV_WINDOW_AUTOSIZE );
    }
    
    strcpy( fullname, infoname );
    filename = strrchr( fullname, '\\' );
    if( filename == NULL )
    {
        filename = strrchr( fullname, '/' );
    }
    if( filename == NULL )
    {
        filename = fullname;
    }
    else
    {
        filename++;
    }

    for( line = 1, error = 0, total = 0; total < num ;line++ )
    {
        int count = 0; /* stays 0 if the fscanf below fails, so the sample loop is skipped */

        error = ( fscanf( info, "%s %d", filename, &count ) != 2 );
        if( !error )
        {
            src = cvLoadImage( fullname, 0 );
            error = ( src == NULL );
            if( error )
            {

#if CV_VERBOSE
                fprintf( stderr, "Unable to open image: %s\n", fullname );
#endif /* CV_VERBOSE */

            }
        }
        for( i = 0; (i < count) && (total < num); i++, total++ )
        {
            error = ( fscanf( info, "%d %d %d %d", &x, &y, &width, &height ) != 4 );
            if( error ) break;
            cvSetImageROI( src, cvRect( x, y, width, height ) );
            cvResize( src, sample, width >= sample->width &&
                      height >= sample->height ? CV_INTER_AREA : CV_INTER_LINEAR );
            
            if( showsamples )
            {
                cvShowImage( "Sample", sample );
                if( cvWaitKey( 0 ) == 27 )
                {
                    showsamples = 0;
                }
            }            
            icvWriteVecSample( vec, sample );
        }
        
        if( src )
        {
            cvReleaseImage( &src );
        }

        if( error )
        {

#if CV_VERBOSE
            fprintf( stderr, "%s(%d) : parse error", infoname, line );
#endif /* CV_VERBOSE */

            break;
        }
    }
    
    if( sample )
    {
        cvReleaseImage( &sample );
    }

    fclose( vec );
    fclose( info );

    return total;
}
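
For context, the info file parsed above follows the usual createsamples annotation format: an image path, an object count, and one "x y width height" quadruple per object. A hypothetical call is sketched below; the file names and counts are illustrative only.

//   positives/info.dat might contain lines such as:
//     img/img1.jpg 1 140 100 45 45
//     img/img2.jpg 2 100 200 50 50  300 30 25 25
int packed = cvCreateTrainingSamplesFromInfo( "positives/info.dat", "samples.vec",
                                              1000, 0 /* showsamples */, 24, 24 );
printf( "packed %d samples into samples.vec\n", packed );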
Example No. 3
void cvShowVecSamples( const char* filename, int winwidth, int winheight,
                       double scale )
{
    CvVecFile file;
    short tmp; 
    int i;
    CvMat* sample;
    
    tmp = 0;
    file.input = fopen( filename, "rb" );

    if( file.input != NULL )
    {
        fread( &file.count, sizeof( file.count ), 1, file.input );
        fread( &file.vecsize, sizeof( file.vecsize ), 1, file.input );
        fread( &tmp, sizeof( tmp ), 1, file.input );
        fread( &tmp, sizeof( tmp ), 1, file.input );
        
        if( file.vecsize != winwidth * winheight )
        {
            int guessed_w = 0;
            int guessed_h = 0;
            
            fprintf( stderr, "Warning: specified sample width=%d and height=%d "
                "does not correspond to .vec file vector size=%d.\n",
                winwidth, winheight, file.vecsize );
            if( file.vecsize > 0 )
            {
                guessed_w = cvFloor( sqrt( (float) file.vecsize ) );
                if( guessed_w > 0 )
                {
                    guessed_h = file.vecsize / guessed_w;
                }
            }

            if( guessed_w <= 0 || guessed_h <= 0 || guessed_w * guessed_h != file.vecsize)
            {
                fprintf( stderr, "Error: failed to guess sample width and height\n" );
                fclose( file.input );

                return;
            }
            else
            {
                winwidth = guessed_w;
                winheight = guessed_h;
                fprintf( stderr, "Guessed width=%d, guessed height=%d\n",
                    winwidth, winheight );
            }
        }

        if( !feof( file.input ) && scale > 0 )
        {
            CvMat* scaled_sample = 0;

            file.last = 0;
            file.vector = (short*) cvAlloc( sizeof( *file.vector ) * file.vecsize );
            sample = scaled_sample = cvCreateMat( winheight, winwidth, CV_8UC1 );
            if( scale != 1.0 )
            {
                scaled_sample = cvCreateMat( MAX( 1, cvCeil( scale * winheight ) ),
                                             MAX( 1, cvCeil( scale * winwidth ) ),
                                             CV_8UC1 );
            }
            cvNamedWindow( "Sample", CV_WINDOW_AUTOSIZE );
            for( i = 0; i < file.count; i++ )
            {
                icvGetHaarTraininDataFromVecCallback( sample, &file );
                if( scale != 1.0 ) cvResize( sample, scaled_sample, CV_INTER_LINEAR);
                cvShowImage( "Sample", scaled_sample );
                if( cvWaitKey( 0 ) == 27 ) break;
            }
            if( scaled_sample && scaled_sample != sample ) cvReleaseMat( &scaled_sample );
            cvReleaseMat( &sample );
            cvFree( &file.vector );
        }
        fclose( file.input );
    }
}
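
The four fread calls above consume the .vec header written by icvWriteVecHeader. The sketch below matches the fields read here; the record description is the conventional createsamples layout and is stated as an assumption, not verified against this file.

// .vec header as read by cvShowVecSamples, field by field:
//   int   count;      // number of samples stored in the file
//   int   vecsize;    // pixels per sample, i.e. winwidth * winheight
//   short tmp1, tmp2; // two unused/reserved fields
// Each of the count records that follow is assumed to be one marker byte plus
// vecsize 16-bit pixel values, which is what file.vector is sized for above.
struct VecFileHeader { int count; int vecsize; short tmp1, tmp2; };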
Example No. 4
char Gui::getKey()
{
    return cvWaitKey(10);
}
Example No. 5
MDA_TASK_RETURN_CODE MDA_TASK_BUOY::run_single_buoy(int buoy_index, BUOY_COLOR color) {
    puts("Press q to quit");

    assert (buoy_index >= 0 && buoy_index <= 1);
    
    MDA_VISION_MODULE_BUOY buoy_vision;
    MDA_TASK_RETURN_CODE ret_code = TASK_MISSING;

    /// Here we store the starting attitude vector, so we can return to this attitude later
    int starting_yaw = attitude_input->yaw();
    printf("Starting yaw: %d\n", starting_yaw);

    //set (DEPTH, 400);

    TASK_STATE state = STARTING;
    bool done_buoy = false;
    static TIMER timer;
    static TIMER master_timer;
    timer.restart();
    master_timer.restart();


//###### hack code for competition
    set (SPEED, 0);
    
    int hack_depth, hack_time;
    read_mv_setting ("hacks.csv", "BUOY_DEPTH", hack_depth);
    read_mv_setting ("hacks.csv", "BUOY_TIME", hack_time);
    printf ("Buoy: going to depth %d\n", hack_depth);
        fflush(stdout);
    if (hack_depth > 500)
	set (DEPTH, 500);
    if (hack_depth > 600)
        set (DEPTH, 600);
    set (DEPTH, hack_depth);
    set (YAW, starting_yaw);

    printf ("Buoy: moving forward for %d seconds\n", hack_time);
    fflush(stdout);
    timer.restart();
    while (timer.get_time() < hack_time) {
        set (SPEED, 8);
    }
    set(SPEED, 0);

    if (hack_depth > 600)
         set (DEPTH, 600);
    if (hack_depth > 500)
        set (DEPTH, 500);
    set (YAW, starting_yaw);
    return TASK_DONE;
//###### end hack code for competition
    /**
    * Basic Algorithm
    *  - Assume for now that we just want to hit the cylindrical buoys when they're red
    *  - We want to search for 1 or 2 buoys. If we find both:
    *    - Are both non-red and/or cycling? Go for the one indicated by buoy_index
    *    - Is one non-red and/or cycling? Go for that one
    *    - Do we only see one buoy? Go there if non-red and/or cycling
    *
    */
    
    while (1) {
        IplImage* frame = image_input->get_image();
        if (!frame) {
            ret_code = TASK_ERROR;
            break;
        }

        MDA_VISION_RETURN_CODE vision_code = buoy_vision.filter(frame);
        (void) vision_code;
        // clear dwn image - RZ: do we need this?
        //int down_frame_ready = image_input->ready_image(DWN_IMG);
        //(void) down_frame_ready;

        bool valid[2] = {false, false};
        int ang_x[2], ang_y[2];
        int range[2];
        int color[2];
        static bool color_cycling[2] = {false, false};
        static int curr_index = -1;
        static int prev_time = -1;
        static int prev_color = -1;
        static int n_valid_color_find_frames = 0;

        if (!done_buoy) {
            // state machine
            if (state == STARTING) {
                // here we just move forward until we find something, and pan if we haven't found anything for a while
                printf ("Starting: Moving Forward for 1 meter\n");
                move (FORWARD, 1);

                if (timer.get_time() > 1) {
                    set (SPEED, 0);
                    timer.restart();
                    buoy_vision.clear_frames();
                    state = STOPPED;
                }
            }
            else if (state == STOPPED) {
                if (timer.get_time() < 0) {
                    printf ("Stopped: Collecting Frames\n");
                }
                else {
                    get_data_from_frame (&buoy_vision, valid, ang_x, ang_y, range, color);

                    if (!valid[0] && !valid[1]) { // no buoys
                        printf ("Stopped: No target\n");
                        if (master_timer.get_time() > 60) { // we've seen nothing for 60 seconds
                            printf ("Master Timer Timeout!!\n");
                            return TASK_MISSING;
                        }
                        if (timer.get_time() > 2) {
                            printf ("Stopped: Timeout\n");
                            timer.restart();
                            state = STARTING;
                        }
                    }
                    else { // turn towards the buoy we want
                        // choose a buoy
                        if (valid[0] && valid[1])
                            curr_index = buoy_index;
                        else
                            curr_index = valid[0] ? 0 : 1;

                        printf ("Stopped: Identified buoy %d (%s) as target\n", curr_index, color_int_to_string(color[curr_index]).c_str());
                        move (RIGHT, ang_x[curr_index]);
                        
                        timer.restart();
                        master_timer.restart();
                        prev_color = color[curr_index];
                        prev_time = -1;
                        buoy_vision.clear_frames();
                        n_valid_color_find_frames = 0;
                        state = FIND_COLOR;
                    }
                }
            }
            else if (state == FIND_COLOR) {
                // stare for 6 seconds, check if the buoy color changes
                int t = timer.get_time();
                if (t >= 4) {
                    if (n_valid_color_find_frames <= 2) {
                        printf ("Find Color: Not enough good frames (%d).\n", n_valid_color_find_frames);
                        timer.restart();
                        master_timer.restart();
                        state = STARTING;
                    }
                    else if (color_cycling[curr_index] || prev_color != MV_RED) {
                        printf ("Find Color: Finished. Must approach buoy %d.\n", curr_index);
                        timer.restart();
                        master_timer.restart();
                        state = APPROACH;
                    }
                    else {
                        printf ("Find Color: Finished. No need to do buoy %d.\n", curr_index);
                        done_buoy = true;
                        return TASK_QUIT;
                    }
                }
                else if (t != prev_time) {
                    printf ("Find_Color: examined buoy %d for %d seconds.\n", curr_index, t);
                    get_data_from_frame (&buoy_vision, valid, ang_x, ang_y, range, color, curr_index);

                    if (valid[curr_index]) {  
                        if (color[curr_index] != prev_color) {
                            printf ("\tFound buoy %d as color cycling.\n", curr_index);
                            color_cycling[curr_index] = true;
                        }
                        prev_time = t;
                        n_valid_color_find_frames++;
                    } 
                }
            }
            else if (state == APPROACH) {
                get_data_from_frame (&buoy_vision, valid, ang_x, ang_y, range, color, curr_index);
                if (valid[curr_index]) {
                    printf ("Approach[%d]: range=%d, ang_x=%d\n", curr_index, range[curr_index], ang_x[curr_index]);
                    if (range[curr_index] > 100) { // long range = more freedom to turn/sink
                        if (abs(ang_x[curr_index]) > 5) {
                            set(SPEED, 0);
                            move(RIGHT, ang_x[curr_index]);
                            buoy_vision.clear_frames();
                        }    
                        /*else if (tan(ang_y[curr_index])*range[curr_index] > 50) { // depth in centimeters
                            set(SPEED, 0);
                            move (SINK, 25); // arbitrary rise for now
                        }*/
                        else {
                            set(SPEED, 1);
                        }
                    }
                    else {
                        if (abs(ang_x[curr_index]) > 10) {
                            set(SPEED, 0);
                            move(RIGHT, ang_x[curr_index]);
                            buoy_vision.clear_frames();
                        }
                        else {
                            done_buoy = true;
                        }   
                    }

                    timer.restart();
                    master_timer.restart();
                }
                else {
                    set(SPEED, 0);
                    if (timer.get_time() > 4) { // 4 secs without valid input
                        printf ("Approach: Timeout");
                        timer.restart();
                        state = STOPPED;
                    }
                }
            }
        } // done_buoy
        else { // done_buoy
            // charge forwards, then retreat back some number of meters, then realign sub to starting attitude
            printf("Ramming buoy\n");
            timer.restart();
            while (timer.get_time() < 2)
                move(FORWARD, 2);
            stop();

            // retreat backwards
            printf("Reseting Position\n");
            timer.restart();
            while (timer.get_time() < 3)
                move(REVERSE, 2);
            stop();

            ret_code = TASK_DONE;
            break;
        }

        // Ensure debug messages are printed
        fflush(stdout);
        // Exit if instructed to
        char c = cvWaitKey(TASK_WK);
        if (c != -1) {
            CharacterStreamSingleton::get_instance().write_char(c);
        }
        if (CharacterStreamSingleton::get_instance().wait_key(1) == 'q'){
            stop();
            ret_code = TASK_QUIT;
            break;
        }
    }

    return ret_code;
}
Example No. 6
void DeleteAll8TypeArea(IplImage* srcImage,IplImage* binaryImage,vector< vector<CvPoint> >& AllOutline) 
{
	int i,j,k,m,n;
	int direction[8][2] = {{-1,0},{-1,1},{0,1},{1,1},{1,0},{1,-1},{0,-1},{-1,-1}}; // neighbours traversed clockwise in the image
	bool **flag;                      // need to be released
	flag=new bool*[binaryImage->height];
    for(i=0;i<binaryImage->height;++i)
	{
		flag[i]=new bool[binaryImage->width];
		memset(flag[i],false,sizeof(bool)*binaryImage->width);
	}	
	for(i=0;i<binaryImage->height;++i)
	{
		for(j=0;j<binaryImage->width;++j)
		{
			int val=(unsigned char)binaryImage->imageData[i*binaryImage->widthStep+j];
			if(val!=0) continue;
			for(k=0;k<GetArraySize(direction);++k)
			{
				int m=i+direction[k][0];
				int n=j+direction[k][1];
				if(m<0||n<0||m>=binaryImage->height||n>=binaryImage->width)
					continue;
				val=(unsigned char)binaryImage->imageData[m*binaryImage->widthStep+n];
				if(val==0)
					break;
			}
			if(k==GetArraySize(direction))
				binaryImage->imageData[i*binaryImage->widthStep+j]=(unsigned char)255;
		}
	}
	for(i=0;i<binaryImage->height;++i)
		for(j=0;j<binaryImage->width;++j)
		{
			if(IsBoundary(binaryImage,cvPoint(j,i)) == false )
				continue;
			if(flag[i][j]) continue;
            vector<CvPoint> outline;
			outline.push_back(cvPoint(j,i));
			flag[i][j] = true;
			CvPoint StartPoint=cvPoint(j,i);
			int preDir=0;
            while ( !outline.empty() )
            {
				CvPoint pt=outline.back();
				int times;
				for(k=preDir,times=0;times<GetArraySize(direction);k=(k+1)%GetArraySize(direction),++times)
				{
					m=pt.y+direction[k][0];
					n=pt.x+direction[k][1];
					if(m<0||m>=binaryImage->height || n<0 || n>= binaryImage->width)
						continue;
					int val=(unsigned char)binaryImage->imageData[m*binaryImage->widthStep+n];
					if(val == 255)
						continue;
					if(flag[m][n] == false)
					{
						outline.push_back(cvPoint(n,m));
						flag[m][n]=true;
						preDir=k-GetArraySize(direction)/4;
						preDir=(preDir+GetArraySize(direction))%GetArraySize(direction);
						break;
					}
					if( cvPoint(n,m)== StartPoint )
					{
						AllOutline.push_back(outline);
						{							
							for(int i=0;i<outline.size();++i)
							{
								CvPoint pt=outline[i];
								srcImage->imageData[pt.y*srcImage->widthStep+pt.x]=0;
							}
							printf("first point:%d %d\n",outline[0].y,outline[0].x);
							cvShowImage("srcImage2",srcImage);
							cvWaitKey();
						}
                        outline.clear();
						break;
					}
					else
					{
						vector<CvPoint> SmallOutline;
						CvPoint temp=outline.back();
						vector<CvPoint>::reverse_iterator rit=outline.rbegin();
						while (*rit != temp)
						     ++rit;
						copy(outline.rbegin(),rit,back_insert_iterator< vector<CvPoint> >(SmallOutline));
						if(SmallOutline.size()>1)
						{
							AllOutline.push_back(SmallOutline);
							for(int i=0;i<SmallOutline.size();++i)
							{
								CvPoint pt=SmallOutline[i];
								srcImage->imageData[pt.y*srcImage->widthStep+pt.x]=0;
							}
							printf("first point:%d %d\n",SmallOutline[0].y,SmallOutline[0].x);
							cvShowImage("srcImage2",srcImage);
							cvWaitKey();
						}
					    outline.erase((++rit).base(),outline.end());
						binaryImage->imageData[temp.y*binaryImage->widthStep+temp.x]=(unsigned char)255;
						break;
					}
				}
			}
		}
	for(i=0;i<binaryImage->height;++i)
		delete [](flag[i]);
	delete []flag;

}
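
DeleteAll8TypeArea relies on helpers that are not part of this snippet (GetArraySize, IsBoundary, and comparison operators for CvPoint). The definitions below are only a plausible sketch of what the code above assumes.

// Number of rows in a statically sized array such as direction[8][2].
#define GetArraySize(a) ((int)(sizeof(a) / sizeof((a)[0])))

// A pixel is treated as a boundary pixel if it is foreground (value 0) and at
// least one 4-neighbour is background (value 255) or lies outside the image.
static bool IsBoundary(IplImage* bin, CvPoint p)
{
	if ((unsigned char)bin->imageData[p.y * bin->widthStep + p.x] != 0)
		return false;
	const int dx[4] = {1, -1, 0, 0}, dy[4] = {0, 0, 1, -1};
	for (int k = 0; k < 4; ++k)
	{
		int x = p.x + dx[k], y = p.y + dy[k];
		if (x < 0 || y < 0 || x >= bin->width || y >= bin->height)
			return true;
		if ((unsigned char)bin->imageData[y * bin->widthStep + x] == 255)
			return true;
	}
	return false;
}

// Equality/inequality for CvPoint, used by the outline-closing checks above.
static inline bool operator==(const CvPoint& a, const CvPoint& b) { return a.x == b.x && a.y == b.y; }
static inline bool operator!=(const CvPoint& a, const CvPoint& b) { return !(a == b); }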
Example No. 7
void ForegroundSeperation(RGBTYPE* src,RGBTYPE* dst){
	UINT8T* markers, *m;
	UINT32T** pt;
	UINT32T i,j,pos,en = 256, qt;
	UINT8T active_queue;
	UINT8T t,dr,db,dg,lab,cnt;
	UINT16T idx;
	UINT16T ha[C],he[R],right;
	
    //WSQ q[SIZE]; // queue that stores the mask and image coordinates
    WSQ* q;
	#ifdef WIN32
	UINT8T pRGB[SIZE * 3]; 
	CvPoint start_pt;
	CvPoint end_pt;
	IplImage *img;
#endif
    q = (WSQ*)malloc(sizeof(WSQ)*SIZE);
    markers = (UINT8T*)malloc(sizeof(UINT8T)*SIZE);
	for(i = 0; i != SIZE; i++) // initialize the queue
	{q[i].next = i; q[i].flag = 0;q[i].en = 0; q[i].pos = 0;}
	// set up the markers here
	for( i = 0; i != R; i++)
		for( j = 0; j != C; j++)
		{
			if(i == 0 || j == 0 || i == R-1 || j == C-1)
				markers[i*C+j] = WSHED;
			else if( i > R/2 && j < C/3 )
				markers[i*C+j] = FGND;
			else if( i < R/20 || j > C/2 || (j>30)&&(i>(-(j*0.4)+660)))
				//
				markers[i*C+j] = BGND;
			else
				markers[i*C+j] = 0;
		}
		showImage_1ch(markers,"marker");
	//draw a pixel-wide border of dummy "watershed" (i.e. boundary) pixels
	//for( j = 0; j < C; j++)
		//markers[j] = markers[j+(R-1)*C] = WSHED;
    // initial phase: put all the neighbor pixels of each marker to the ordered queue -
    // determine the initial boundaries of the basins
	for( i = 1; i < R-1; i++)
	{
		//markers[i*C] = markers[i*C+C-1] = WSHED;
		for( j = 1; j < C-1; j++)
		{
			m = markers + i*C+j;
			if( m[0]  > 2 ) m[0] = 0;
			if( m[0] == 0 && ( (m[-1] < 3 && m[-1] > 0) || (m[1] < 3 && m[1] > 0) || (m[-C] < 3 && m[-C] > 0) || (m[C] < 3 && m[C] > 0) ))
			{
				pos = i*C + j;
				idx = 256;
				if( m[-1] < 3 && m[-1] > 0 )
					c_diff(src[pos],src[pos-1],idx);
				if( m[1] < 3 && m[1] > 0 )
				{
					c_diff(src[pos],src[pos+1],t);
					idx = ws_min(idx,t);
				}
				if( m[-C] < 3 && m[-C] > 0 )
				{
					c_diff(src[pos],src[pos-C],t);
					idx = ws_min(idx,t);
				}
				if( m[C] < 3 && m[C] > 0 )
				{
					c_diff(src[pos],src[pos+C],t);
					idx = ws_min(idx,t);
				}
				ws_push(idx, pos);

				m[0] = IN_QUEUE;
			}
		}
	}
// find the first non-empty queue
  	for( i = 0; i < NQ; i++)
		if(q[i].flag)
			break;
	if( i == NQ )
		return;

	active_queue = i;
	m = markers;
	while(1)
	{
		lab = 0;
		if(!q[active_queue].flag)
		{
			for( i = active_queue+1; i<NQ; i++)
				if( q[i].flag)
					break;
			if( i == NQ)
				break;
			active_queue = i;
		}
		ws_pop(active_queue,pos);

		m = markers + pos;
		t = m[-1];
		if( t < 3 && t > 0 ) lab = t;
		t = m[1];
        if( t < 3 && t > 0 )
        {
            if( lab == 0 ) lab = t;
            else if( t != lab ) lab = WSHED;
        }
        t = m[-C];
        if( t < 3 && t > 0 )
        {
            if( lab == 0 ) lab = t;
            else if( t != lab ) lab = WSHED;
        }
        t = m[C];
        if( t < 3 && t > 0 )
        {
            if( lab == 0 ) lab = t;
            else if( t != lab ) lab = WSHED;
        }
		if(lab == 0)
			lab = 0;
		m[0] = lab;
		if( !(lab == FGND || lab == BGND))
			continue;
		if( m[-1] == 0 )
		{
			c_diff( src[pos], src[pos-1],t);
			pos--;
			ws_push(t,pos); 
			pos++;
			active_queue = ws_min(active_queue,t);
			m[-1] = IN_QUEUE;
		}
		if( m[1] == 0 )
		{
			c_diff( src[pos], src[pos+1],t);
			pos++;
			ws_push(t,pos);
			pos--;
			active_queue = ws_min(active_queue,t);
			m[1] = IN_QUEUE;
		}
		if( m[-C] == 0 )
		{
			c_diff( src[pos], src[pos-C],t);
			pos-=C;
			ws_push(t,pos);
			pos+=C;
			active_queue = ws_min(active_queue,t);
			m[-C] = IN_QUEUE;
		}
		if( m[C] == 0 )
		{
			c_diff( src[pos], src[pos+C],t);
			pos+=C;
			ws_push(t,pos);
			pos-=C;
			active_queue = ws_min(active_queue,t);
			m[C] = IN_QUEUE;
		}
	}
	for( i = 0; i < R; i++)
		for( j = 0; j < C; j++)
		{
			pos = i*C+j;
			if(markers[pos] == BGND)
			{
				dst[pos].r = 0;
				dst[pos].g = 0;
				dst[pos].b = 0;
			}
			else if(markers[pos] == WSHED)
			{
				dst[pos].r = 255;
				dst[pos].g = 0;
				dst[pos].b = 0;
			}
			else dst[pos] = src[pos];
		}
	for( i = 0; i < C; i++ ) ha[i] = 0;
	for( i = 0; i < R; i++ ) he[i] = 0;
	for( i = 1; i < C-1; i++)
		for( j = 1; j < R-1; j++ )
			if(markers[j*C+i] == WSHED)
			{ha[i]++;he[j]++;}
	//for( i = 0; i < C; i++) printf("%d   ",ha[i]);
	//printf("\n");
	//for( i = 0; i < R; i++) printf("%d   ",he[i]);

	// now detect the right boundary
	cnt = 0;
	for( i = 1; i < C-1; i++)
	{
		if(ha[i]<10)
		{
			cnt = 0;
			continue;
		}
		else{
			if(++cnt > 3)
			{right = i-2;break;}
		}
	}

#ifdef WIN32
	start_pt = cvPoint(right,1);
	end_pt = cvPoint(right,R-1);
	memset(pRGB, 0, SIZE * 3); // initialize
	for (i = 0; i < R; i++)  // a single int index is not enough, so use a nested loop
	{
		for (j = 0; j < C; j++)
		{
			pRGB[i*C * 3 + j * 3] = dst[i*C + j].b;
			pRGB[i*C * 3 + j * 3 + 1] = dst[i*C + j].g;
			pRGB[i*C * 3 + j * 3 + 2] = dst[i*C + j].r;
		}
	}
	img = cvCreateImageHeader(cvSize(C,R),IPL_DEPTH_8U,3);
	cvSetData(img,pRGB,3*C);
	cvLine(img, start_pt, end_pt, CV_RGB(0, 0, 255), 1, CV_AA, 0);
#endif
	// now detect the top boundary
	for( i = 0; he[i] == 0;i++);
	up_st.y = i;
	up_st.x = 1;
	cnt = 0;
	for( i = up_st.y; i < R-1; i++)
	{
		if(he[i] != 1)
		{cnt = 0; continue;}
		else {
			cnt++;
			if( cnt > 3 )
				break;
		}
	}
	up_en.y = i-2;
	for( i = 1; i < C-1; i++ )
		if( markers[up_en.y*C+i] == WSHED)
			break;
	up_en.x = i;
#ifdef WIN32
	start_pt = cvPoint(1,up_st.y);
	end_pt = cvPoint(i,up_en.y);
	cvLine(img, start_pt, end_pt, CV_RGB(0, 0, 255), 1, CV_AA, 0);
	cvNamedWindow("hahaha", 0);
	cvShowImage("hahaha", img);
	cvWaitKey(0);
#endif
	// inaccurate bottom-boundary detection (disabled)
/*	for( i = up_en.y; i < R-1; i++){
		if(he[i] == 1)
		{cnt = 0; continue;}
		else{
			cnt++;
			if(cnt == 3)
				break;
		}
	}
	down_st.y = i - 3;
	for( i = 1; i < C-1; i++)
		if(markers[i+down_st.y*C] == WSHED) break;
	down_st.x = i;
	for( i = down_st.y; he[i]; i++);
	down_en.y = i-1;
	for( i = 1; i < C-1; i++)
		if(markers[i+down_en.y*C] == WSHED) break;
	down_en.x = i;
#ifdef WIN32
	start_pt = cvPoint(down_st.x,down_st.y);
	end_pt = cvPoint(down_en.x,down_en.y);
	printf("%d %d %d %d",down_st.x,down_st.y,down_en.x,down_en.y);
	cvLine(img, start_pt, end_pt, CV_RGB(0, 0, 255), 1, CV_AA, 0);
	cvNamedWindow("hahaha", 0);
	cvShowImage("hahaha", img);
	cvWaitKey(0);
#endif*/
	free(markers);
	free(q);
}
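
ForegroundSeperation is a marker-driven watershed in the spirit of OpenCV's cvWatershed; its queue and colour-difference macros (ws_push, ws_pop, ws_min, c_diff) are defined elsewhere. The sketch below only illustrates the two simple ones for the RGBTYPE pixels used above; the queue macros remain the assumption described in the trailing comment.

// Assumed helpers (sketch only).
#define ws_min(a, b)  ((a) < (b) ? (a) : (b))
#define ws_max(a, b)  ((a) > (b) ? (a) : (b))

// Colour distance between two RGBTYPE pixels as the largest per-channel
// absolute difference; this is the priority used to order the flood queues.
#define c_diff(p1, p2, diff)                                  \
	do {                                                  \
		int _dr = abs((int)(p1).r - (int)(p2).r);     \
		int _dg = abs((int)(p1).g - (int)(p2).g);     \
		int _db = abs((int)(p1).b - (int)(p2).b);     \
		(diff) = ws_max(_dr, ws_max(_dg, _db));       \
	} while (0)

// ws_push(idx, pos) / ws_pop(idx, pos) are assumed to enqueue and dequeue the
// pixel offset `pos` on priority bucket `idx` of the WSQ array q declared above.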
int main(int argc, char* argv[])
{
	// Set up variables
	CvPoint2D32f srcTri[3], dstTri[3];
	CvMat* rot_mat = cvCreateMat(2,3,CV_32FC1);
	CvMat* warp_mat = cvCreateMat(2,3,CV_32FC1);
	IplImage *src, *dst;
	const char* name = "Affine_Transform";

	// Load image
	src=cvLoadImage("airplane.jpg");
	dst = cvCloneImage( src );
	dst->origin = src->origin;
	cvZero( dst );
	cvNamedWindow( name, 1 );

	// Create angle and scale
	double angle = 0.0;
	double scale = 1.0;

	// Create trackbars
	cvCreateTrackbar( "Angle", name, &angle_switch_value, 4, switch_callback_a );
	cvCreateTrackbar( "Scale", name, &scale_switch_value, 4, switch_callback_s );

	// Compute warp matrix
	srcTri[0].x = 0;
	srcTri[0].y = 0;
	srcTri[1].x = src->width - 1;
	srcTri[1].y = 0;
	srcTri[2].x = 0;
	srcTri[2].y = src->height - 1;

	dstTri[0].x = src->width*0.0;
	dstTri[0].y = src->height*0.25;
	dstTri[1].x = src->width*0.90;
	dstTri[1].y = src->height*0.15;
	dstTri[2].x = src->width*0.10;
	dstTri[2].y = src->height*0.75;

	cvGetAffineTransform( srcTri, dstTri, warp_mat );
	cvWarpAffine( src, dst, warp_mat );
	cvCopy ( dst, src );

	while( 1 ) {
		switch( angleInt ){
			case 0:
				angle = 0.0;
				break;
			case 1:
				angle = 20.0;
				break;
			case 2:
				angle = 40.0;
				break;
			case 3:
				angle = 60.0;
				break;
			case 4:
				angle = 90.0;
				break;
		}
		switch( scaleInt ){
			case 0:
				scale = 1.0;
				break;
			case 1:
				scale = 0.8;
				break;
			case 2:
				scale = 0.6;
				break;
			case 3:
				scale = 0.4;
				break;
			case 4:
				scale = 0.2;
				break;
		}

		// Compute rotation matrix
		CvPoint2D32f center = cvPoint2D32f( src->width/2, src->height/2 );
		cv2DRotationMatrix( center, angle, scale, rot_mat );

		// Do the transformation
		cvWarpAffine( src, dst, rot_mat );

		cvShowImage( name, dst );

		if( cvWaitKey( 15 ) == 27 )
			break;
	}

	cvReleaseImage( &src );
	cvReleaseImage( &dst );
	cvReleaseMat( &rot_mat );
	cvReleaseMat( &warp_mat );

	return 0;
}
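
The affine-transform example above reads trackbar state through globals and callbacks that are defined elsewhere in its file; a minimal sketch of what they presumably look like (the names come from the calls above, the bodies are assumptions) is:

// Globals bound to the trackbars and read by the switch statements in main().
int angle_switch_value = 0, angleInt = 0;
int scale_switch_value = 0, scaleInt = 0;

// cvCreateTrackbar callbacks: latch the current slider position so the main
// loop can map it to a rotation angle or a scale factor.
void switch_callback_a( int position ) { angleInt = position; }
void switch_callback_s( int position ) { scaleInt = position; }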
Example No. 9
int main(int argc, char *argv[ ]){

	RASPIVID_CONFIG * config = (RASPIVID_CONFIG*)malloc(sizeof(RASPIVID_CONFIG));
	
	config->width=320;
	config->height=240;
	config->bitrate=0;	// zero: leave as default
	config->framerate=0;
	config->monochrome=0;

	int opt;

	while ((opt = getopt(argc, argv, "lxm")) != -1)
	{
		switch (opt)
		{
			case 'l':					// large
				config->width = 640;
				config->height = 480;
				break;
			case 'x':	   				// extra large
				config->width = 960;
				config->height = 720;
				break;
			case 'm':					// monochrome
				config->monochrome = 1;
				break;
			default:
				fprintf(stderr, "Usage: %s [-x] [-l] [-m] \n", argv[0], opt);
				fprintf(stderr, "-l: Large mode\n");
				fprintf(stderr, "-x: Extra large mode\n");
				fprintf(stderr, "-l: Monochrome mode\n");
				exit(EXIT_FAILURE);
		}
	}

	/*
	Could also use hard coded defaults method: raspiCamCvCreateCameraCapture(0)
	*/
    RaspiCamCvCapture * capture = (RaspiCamCvCapture *) raspiCamCvCreateCameraCapture2(0, config); 
	free(config);
	
	CvFont font;
	double hScale=0.4;
	double vScale=0.4;
	int    lineWidth=1;

	cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale, vScale, 0, lineWidth, 8);

	cvNamedWindow("RaspiCamTest", 1);
	int exit =0;
	do {
		IplImage* image = raspiCamCvQueryFrame(capture);
		
		char text[200];
		sprintf(
			text
			, "w=%.0f h=%.0f fps=%.0f bitrate=%.0f monochrome=%.0f"
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_FRAME_WIDTH)
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_FRAME_HEIGHT)
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_FPS)
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_BITRATE)
			, raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_MONOCHROME)
		);
		cvPutText (image, text, cvPoint(05, 40), &font, cvScalar(255, 255, 0, 0));
		
		sprintf(text, "Press ESC to exit");
		cvPutText (image, text, cvPoint(05, 80), &font, cvScalar(255, 255, 0, 0));
		
		cvShowImage("RaspiCamTest", image);
		
		char key = cvWaitKey(10);
		
		switch(key)	
		{
			case 27:		// Esc to exit
				exit = 1;
				break;
			case 60:		// < (less than)
				raspiCamCvSetCaptureProperty(capture, RPI_CAP_PROP_FPS, 25);	// Currently NOOP
				break;
			case 62:		// > (greater than)
				raspiCamCvSetCaptureProperty(capture, RPI_CAP_PROP_FPS, 30);	// Currently NOOP
				break;
		}
		
	} while (!exit);

	cvDestroyWindow("RaspiCamTest");
	raspiCamCvReleaseCapture(&capture);
	return 0;
}
Example No. 10
void my_mouse_callback( int event, int x, int y, int flags, void* param )
{
	unsigned int hsv[3] = {};

	for(k=0;k<channels;k++)
	{
		char c = 0;

		if (k==0)
			c = 'H';

		else if (k==1)
			c='S';

		else
			c='V';

		hsv[k] = hsv_data[y*step+x*channels+k];
		//printf("%c: %d\t", c, hsv[k]);
	}

	// Save <H,S,V>
	if (event == CV_EVENT_LBUTTONDOWN)
	{
		hues[recorded_count] = hsv[0];
		sats[recorded_count] = hsv[1];
		vals[recorded_count] = hsv[2];
		++recorded_count;
	}

	// Draw new image with whites in places that match
	else if (event == CV_EVENT_RBUTTONDOWN)
	{
		h_min = getMinFromArray(hues,recorded_count);
		s_min = getMinFromArray(sats,recorded_count);
		v_min = getMinFromArray(vals,recorded_count);

		h_max = getMaxFromArray(hues,recorded_count);
		s_max = getMaxFromArray(sats,recorded_count);
		v_max = getMaxFromArray(vals,recorded_count);
		
		IplImage* img = cvCreateImage(cvSize(width,height), IPL_DEPTH_8U, 1);
		cvInRangeS(img_hsv,cvScalar(h_min,s_min,v_min,0),cvScalar(h_max,s_max,v_max,0),img);

		int count = cvCountNonZero(img);
		printf("count: %d\n", count);

		cvNamedWindow("secondwindow",CV_GUI_EXPANDED);
		cvShowImage("secondwindow",img);
		key = 0;

		while (key != 's')
		{
			key = cvWaitKey(0);			
		}

		memset(hues,0,recorded_count*sizeof(hues[0]));
		memset(sats,0,recorded_count*sizeof(sats[0]));
		memset(vals,0,recorded_count*sizeof(vals[0]));
		recorded_count=0;

		cvReleaseImage(&img);
		cvDestroyWindow("secondwindow");
	}
	
	//printf("\n");	
}
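
The callback above depends on getMinFromArray and getMaxFromArray, which are not included in the snippet; plausible definitions, offered as a sketch (the unsigned int element type is an assumption matching the hsv[] samples recorded above), are:

// Smallest of the first n recorded values (0 if nothing was recorded yet).
static unsigned int getMinFromArray(const unsigned int* arr, int n)
{
	if (n <= 0) return 0;
	unsigned int m = arr[0];
	for (int i = 1; i < n; ++i)
		if (arr[i] < m) m = arr[i];
	return m;
}

// Largest of the first n recorded values (0 if nothing was recorded yet).
static unsigned int getMaxFromArray(const unsigned int* arr, int n)
{
	if (n <= 0) return 0;
	unsigned int m = arr[0];
	for (int i = 1; i < n; ++i)
		if (arr[i] > m) m = arr[i];
	return m;
}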
Example No. 11
int main(int argc, char** argv)
{
	// declare IplImage pointers
	IplImage* pFrame = NULL;
	IplImage* pFrImg = NULL;
	IplImage* pBkImg = NULL;

	CvMat* pFrameMat = NULL;
	CvMat* pFrMat = NULL;
	CvMat* pBkMat = NULL;

	CvCapture* pCapture = NULL;

	int nFrmNum = 0;

	// create the display windows
	cvNamedWindow("video", 1);
	cvNamedWindow("background", 1);
	cvNamedWindow("foreground", 1);
	// arrange the windows neatly
	cvMoveWindow("video", 30, 0);
	cvMoveWindow("background", 360, 0);
	cvMoveWindow("foreground", 690, 0);



	if (argc > 2)
	{
		fprintf(stderr, "Usage: bkgrd [video_file_name]\n");
		return -1;
	}

	// open the camera
	if (argc == 1)
		if (!(pCapture = cvCaptureFromCAM(-1)))
		{
			fprintf(stderr, "Can not open camera.\n");
			return -2;
		}

	// open the video file
	if (argc == 2)
		if (!(pCapture = cvCaptureFromFile(argv[1])))
		{
			fprintf(stderr, "Can not open video file %s\n", argv[1]);
			return -2;
		}

	// read the video frame by frame
	while ((pFrame = cvQueryFrame(pCapture)) != NULL)
	{
		nFrmNum++;

		// on the first frame, allocate memory and initialize
		if (nFrmNum == 1)
		{
			pBkImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U, 1);
			pFrImg = cvCreateImage(cvSize(pFrame->width, pFrame->height), IPL_DEPTH_8U, 1);

			pBkMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);
			pFrameMat = cvCreateMat(pFrame->height, pFrame->width, CV_32FC1);

			// convert to a single-channel image before processing
			cvCvtColor(pFrame, pBkImg, CV_BGR2GRAY);
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);

			cvConvert(pFrImg, pFrameMat);
			cvConvert(pFrImg, pFrMat);
			cvConvert(pFrImg, pBkMat);
		}
		else
		{
			cvCvtColor(pFrame, pFrImg, CV_BGR2GRAY);
			cvConvert(pFrImg, pFrameMat);
			// Gaussian filtering first, to smooth the image
			//cvSmooth(pFrameMat, pFrameMat, CV_GAUSSIAN, 3, 0, 0);

			// subtract the background from the current frame
			cvAbsDiff(pFrameMat, pBkMat, pFrMat);

			// threshold the foreground into a binary image
			cvThreshold(pFrMat, pFrImg, 60, 255.0, CV_THRESH_BINARY);

			// morphological filtering to remove noise
			//cvErode(pFrImg, pFrImg, 0, 1);
			//cvDilate(pFrImg, pFrImg, 0, 1);

			// update the background model
			cvRunningAvg(pFrameMat, pBkMat, 0.003, 0);
			// convert the background back to image format for display
			cvConvert(pBkMat, pBkImg);

			// display the images
			cvShowImage("video", pFrame);
			cvShowImage("background", pBkImg);
			cvShowImage("foreground", pFrImg);

			// break out of the loop on any key press;
			// this wait also gives cvShowImage time to finish drawing,
			// and the delay can be tuned to the CPU speed
			if (cvWaitKey(2) >= 0)
				break;


		}

	}




	// destroy the windows
	cvDestroyWindow("video");
	cvDestroyWindow("background");
	cvDestroyWindow("foreground");

	// release the images and matrices
	cvReleaseImage(&pFrImg);
	cvReleaseImage(&pBkImg);

	cvReleaseMat(&pFrameMat);
	cvReleaseMat(&pFrMat);
	cvReleaseMat(&pBkMat);

	cvReleaseCapture(&pCapture);

	return 0;
}
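
For reference, cvRunningAvg as used above maintains the background as an exponential moving average, B = (1 - alpha) * B + alpha * F with alpha = 0.003 per frame. A hand-rolled equivalent for the CV_32FC1 matrices in this example would look roughly like this sketch:

// Sketch of cvRunningAvg(frame, bg, alpha, NULL) for single-channel float data.
static void runningAvg32f(const CvMat* frame, CvMat* bg, double alpha)
{
	for (int y = 0; y < bg->rows; ++y)
	{
		const float* f = (const float*)(frame->data.ptr + y * frame->step);
		float* b = (float*)(bg->data.ptr + y * bg->step);
		for (int x = 0; x < bg->cols; ++x)
			b[x] = (float)((1.0 - alpha) * b[x] + alpha * f[x]);
	}
}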
Example No. 12
int _tmain(int argc, _TCHAR* argv[])
{
	/*

	Initial parameter setup. This code only
	needs to be run once.

	TVAInitParams params;
	memcpy(&params.Camera, &g_camera, sizeof(TVACamera));
	params.NumZones = 0;
	params.EventSens = 0.5;
	params.EventTimeSens = 1000;
	SaveInitParams("params.xml", &params);
	*/
	// initialize the surveillance zones.
	for (int i = 0; i < C_MAX_OBJECTS; i++)
	{
		g_contours[i].IsRect = false;
		g_contours[i].NumPoints = C_MAX_POINTS;
		g_contours[i].Points = (TVAPoint*)malloc(C_MAX_POINTS*sizeof(TVAPoint));
	}
	
	cvInitFont(&g_font, CV_FONT_HERSHEY_PLAIN,1, 1);

	CvCapture* capture = NULL;
	if (argc < 2)
		capture = cvCaptureFromCAM(0);
	else
		capture = cvCaptureFromFile(argv[1]);
	

	if (capture == NULL)
	{
		printf("%s\n", "Cannot open camera.");
		return -1;
	}

    double fps = cvGetCaptureProperty ( // get the frame rate
        capture,
        CV_CAP_PROP_FPS
    );

    CvSize size = cvSize( // get the frame size
       (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH),
       (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT)
    );
	g_mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
	CvVideoWriter* writer = NULL;
	cvNamedWindow(_MODULE_);
	cvSetMouseCallback(_MODULE_, on_mouse);

	/*
		Frame acquisition and processing loop.
	*/
	for (;;) 
	{
		IplImage* frame = NULL;
		frame = cvQueryFrame(capture);
		if (!frame)
			break;
		/*
		 draw the rectangles
		*/
		for (int i = 0; i < g_rects_count; i++)
		{
			CvPoint p1 = cvPoint(g_rects[i].x, g_rects[i].y);
			CvPoint p2 = cvPoint(p1.x + g_rects[i].width, p1.y + g_rects[i].height);
			
			cvRectangle(frame, p1, p2, CV_RGB(255,0,0));
		}

		/*
			Draw the surveillance zones.
		*/
		for (int i = 0; i < g_contours_count; i++)
		{
			if (g_contours[i].NumPoints > 0)
			{
				for (int j = 1; j < g_contours[i].NumPoints; j++)
				{
					CvPoint p1 = cvPoint((int)g_contours[i].Points[j-1].X, (int)g_contours[i].Points[j-1].Y);
					CvPoint p2 = cvPoint((int)g_contours[i].Points[j].X, (int)g_contours[i].Points[j].Y);
					cvLine(frame, p1,p2, CV_RGB(255,0,0));
				}
				CvPoint p1 = cvPoint((int)g_contours[i].Points[g_contours[i].NumPoints-1].X, (int)g_contours[i].Points[g_contours[i].NumPoints-1].Y);
				CvPoint p2 = cvPoint((int)g_contours[i].Points[0].X, (int)g_contours[i].Points[0].Y);
				cvLine(frame, p1,p2, CV_RGB(255,0,0));			
			}
		}

		/*
			Display the resulting image in the window.
		*/
		ProcessFrame(frame);
		if (g_grid_visible)
			DrawGrid(frame);
		DrawStatus(frame);
		cvShowImage(_MODULE_, frame);
		/*
			Write the frame to the video file
		*/
		if (g_record_video)
			cvWriteFrame( writer, frame );

		/*
			Handle keyboard input
		*/
		bool state = g_set_rects || g_set_contours || g_set_zones;
		int c;
		c = cvWaitKey(10);
		if ((char)c == 27)
			break;

		if ((char)c == 's')
		{
			cvSaveImage("out.png", frame);
		}
		else if ((char)c == 'l')
		{
			if (!state)
				LoadRects(size.width, size.height);
		}
		else if ((char)c == 'g')
		{
			if (!state)
				LoadContours(size.width, size.height);
		}
		else if ((char)c == 'k')
		{
			if (!state)
				LoadZones(size.width, size.height);
		}
		else if ((char)c == 'r')
   		{
			if (g_record_video)
			{
			    // stop recording to disk
				cvReleaseVideoWriter( &writer );
				writer = NULL;
				g_record_video = false;
				printf("Stop recording.\n");
			}
			else
			{
				// open the output file for writing and bind
				// the writer variable to it
				writer = cvCreateVideoWriter("out.avi",CV_FOURCC('D','I','V','X'), fps, size );

				if (writer == NULL)
				{
					printf("%s\n", "Cannot create writer.");
				}
				else
				{
					g_record_video = true;
					printf("Start recording.\n");

				}
			}
		}
		else if ((char)c == 't')
		{
			if (g_set_rects)
			{
				SaveRects(size.width, size.height);
				if (!g_set_zones)
				{
					g_rects_count = 0;
					ClearMask(g_mask);
				}
				g_set_rects = false;
			}
			else if (!g_set_contours)
			{
				g_set_rects = true;
			}
		}
		else if ((char)c == 'c')
		{
			
			if (g_set_contours)
			{
				SaveContours(size.width, size.height);		
				if (!g_set_zones)
				{
					g_contours_count = 0;
					ClearMask(g_mask);
				}
				g_set_contours = false;
				g_open_contour = false;
			}
			else if (!g_set_rects)
			{
				g_set_contours = true;
			}
		}
		else if ((char)c == 'z')
		{
			if (g_set_zones)
			{
				SaveZones(size.width, size.height);
				g_set_zones = false;
				g_contours_count = 0;
				g_rects_count = 0;
				ClearMask(g_mask);
			}
			else if (!g_set_rects && !g_set_contours)
			{
				g_set_zones = true;
			}
		}
		else if ((char)c == 'w')
		{
			g_grid_visible = !g_grid_visible;
		}
	}

	cvReleaseVideoWriter( &writer );
	cvDestroyWindow(_MODULE_);
	cvReleaseCapture(&capture);
	cvReleaseImage(&g_mask);
	// free memory
	for (int i = 0; i < C_MAX_OBJECTS; i++)
	{
		free(g_contours[i].Points);
	}

	return 0;
}
Example No. 13
void initialize(int argc, char** argv ){
	IplImage* image;
    // Structure for getting video from camera or avi
    CvCapture* capture = 0;

    // Images to capture the frame from video or camera or from file
    IplImage *frame, *frame_copy = 0;

    // Used for calculations
    int optlen = strlen("--cascade=");

    // Input file name for avi or image file.
    const char* input_name;

    // Check for the correct usage of the command line
    if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 ){
        cascade_name = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    }else{
    	cerr<< cascade_name << " Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n"<<endl;
    	throw BAD_ARGUMENT();
    }

    // Load the HaarClassifierCascade
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    // Check whether the cascade has loaded successfully. Else report and error and quit
    if( !cascade ){
        cerr<<"ERROR: Could not load classifier cascade\n"<<endl;
        throw BAD_ARGUMENT();
    }

    // Allocate the memory storage
   // storage = cvCreateMemStorage(0);

    // Find whether to detect the object from file or from camera.
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') )
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    else
        capture = cvCaptureFromAVI( input_name );
    cvWaitKey(2000);
    //cvSetCaptureProperty(capture, CV_CAP_PROP_POS_AVI_RATIO, (double)0.9);
    //cvSetCaptureProperty(capture,CV_CAP_PROP_FORMAT,1);
    //cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 256);
    //cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 256);
    // Create a new named window with title: result
    //cvNamedWindow( "result", 1 );

    // Find if the capture is loaded successfully or not.
    // If loaded succesfully, then:
    if( capture ){
        // Capture from the camera.
        for(;;){
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture )){
                break;
            }

            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame ){
                break;
            }

            // Allocate framecopy as the same size of the frame
            //if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

            // Check the origin of image. If top left, copy the image frame to frame_copy.
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            // Else flip and copy the image
            else
            	cvFlip( frame, frame_copy, 0 );

            extractFace(frame_copy,cascade);
            //cvShowImage("Result",frame_copy);
            // Wait for a while before proceeding to the next frame
//            cvWaitKey(20);
            if( cvWaitKey( 20 ) == 27 ) //exit if Esc is pressed(repeatedly)
                break;
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }

    // If the capture is not loaded successfully, then:
    else{
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

        // Load the image from that filename
        image = cvLoadImage( filename, 1 );

        // If Image is loaded successfully, then:
        if( image ){
        	extractFace(image,cascade);
        	//cvShowImage("result",image);
            // Wait for user input
            cvWaitKey(0);

            // Release the image memory
           // cvReleaseImage( &image );
        }
        else{
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f ){
                char buf[1000+1];

                // Get the line from the file
                while(fgets( buf, 1000, f )){

                    // Remove the spaces if any, and clean up the name
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    // Load the image from the filename present in the buffer
                    image = cvLoadImage( buf, 1 );

                    // If the image was loaded successfully, then:
                    if( image ){
						//return image;
                    	//cvShowImage("result",image);
                    	extractFace(image,cascade);
                        // Wait for the user input, and release the memory
                        //cvWaitKey(0);
                        //cvReleaseImage( &image );
                    }
                }
                // Close the file
                fclose(f);
            }
        }
    }
    // Destroy the window previously created with filename: "result"
    //cvDestroyWindow("result");
}
Example No. 14
    void Camera::calibrate(Capture* cpt, Presentation* prs){
#ifdef NO_CALIBRATION
        printf("DEBUG: camera calibration disabled!\n");

        IplImage* cf = cvQueryFrame(cpt->getCapture());

        this->width = cf->width;
        this->height = cf->height;
        this->projectorWidth = this->width;
        this->projectorHeight = this->height;
        this->projectorOrigin = cvPoint(0,0);

//        cvReleaseImage (&cf);

        return;
#endif

        printf("DEBUG: calibrating camera\n");

        CvSize nsquares = cvSize(6,4);
        CvPoint2D32f* corners = new CvPoint2D32f[ 6*4 ];
        IplImage *cb = cvLoadImage("res/chessboard.png",1);
        prs->putImage(cvPoint(0,0), cvPoint(prs->getScreenWidth(), prs->getScreenHeight()), NULL, NULL, cb);
        prs->applyBuffer();

        //IplImage *fake = cvLoadImage("fake.jpg", 1);
        //IplImage *src = cvCreateImage(cvGetSize(fake), IPL_DEPTH_8U, 1);
        //cvCvtColor(fake, src, CV_RGB2GRAY);

        IplImage *frame;
        bool patternFound = false;
        int cc;

        while (!patternFound) {
            printf("trying to find chessboard\n");
            frame = cvQueryFrame(cpt->getCapture());
            patternFound = cvFindChessboardCorners(frame, nsquares, corners, &cc,
                                                   CV_CALIB_CB_ADAPTIVE_THRESH | 
                                                   CV_CALIB_CB_FILTER_QUADS | 
                                                   CV_CALIB_CB_FAST_CHECK |
                                                   CV_CALIB_CB_NORMALIZE_IMAGE);
//            prs->putImage(cvPoint(0,0), cvPoint(prs->getScreenWidth(), prs->getScreenHeight()), frame);

//            prs->applyBuffer();
            cvWaitKey(5);
        }
        this->width = frame->width;
        this->height = frame->height;
        printf("\n");
        //float x = 2*corners[0].x-corners[1].x,
        //      y = 2*corners[0].y-corners[7].y;
        float x = corners[0].x-1.5*(corners[1].x-corners[0].x),
              y = corners[0].y-1.5*(corners[6].y-corners[0].y);

        this->projectorOrigin = cvPoint((int)x, (int)y);

        //x = 2*corners[34].x-corners[33].x;
        //y = 2*corners[34].y-corners[27].y;
        x = corners[23].x+1.5*(corners[23].x-corners[22].x);
        y = corners[23].y+1.5*(corners[23].y-corners[17].y);


        this->projectorWidth = (int)(x-this->projectorOrigin.x);
        this->projectorHeight = (int)(y-this->projectorOrigin.y);

        printf("Projector: (%d, %d): %dx%d)\n", this->projectorOrigin.x, this->projectorOrigin.y, this->projectorWidth,this->projectorHeight);

        //cpt->saveFrame("cbfound.jpg", frame);

        // improve result (though in testing it seems to make it worse - disabling)
        // could use simple linear regression to get a "better" calibration.
//        IplImage *frame_gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
//        cvFindCornerSubPix( frame_gray, corners, cc, cvSize( 11, 11 ), 
//                        cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

        // draw calibration result
        cvDrawChessboardCorners( frame, nsquares , corners, cc, patternFound );
        IplImage* bg;
        bg = cvLoadImage("res/bg.jpg", CV_LOAD_IMAGE_UNCHANGED);
        prs->putImage(cvPoint(0,0), cvPoint(prs->getScreenWidth(), prs->getScreenHeight()), NULL, NULL, bg);
        prs->applyBuffer();
        // display result longer
        cvWaitKey(2000);
        prs->clearArea(cvPoint(0,0), cvPoint(prs->getScreenWidth(), prs->getScreenHeight()));
        prs->applyBuffer();
        cvWaitKey(1000);
        
        for (int i=0; i<100; i++)
            cvQueryFrame(cpt->getCapture());
        //cvNamedWindow("Foobar", CV_WINDOW_AUTOSIZE);
        //cvShowImage("Foobar", cvQueryFrame(cpt->getCapture()));
        //cvWaitKey(500);

//        cvReleaseImage(&frame);


// generate chessboard programatically. (htf does cvFillPoly work?! - using image for now)
//        IplImage *cb = cvCreateImage(cvSize(prs->getScreenWidth(), prs->getScreenHeight()), IPL_DEPTH_8U, 1);
//        int x,y, dx = prs->getScreenWidth()/nsquares.width, dy = prs->getScreenHeight()/nsquares.height;
//        CvPoint *corners[4];
//        for (x=0; x<prs->getScreenWidth(); x+=dx) {
//            for (y=0; y<prs->getScreenHeight(); y+=dy) {
//                corners[0] = cvPoint(x,y);
//                corners[1] = cvPoint(x+dx,y);
//                corners[2] = cvPoint(x+dx,y+dy);
//                corners[3] = cvPoint(x,y+dy);
//                cvFillPoly(...)
//            }
//        }
    }
Example No. 15
int main(int argc, char** argv)
{
	IplImage* img = NULL;
	IplImage* dst = NULL;
	IplImage* dst2 = NULL;
	IplImage* dst3 = NULL;
	IplImage* dst4 = NULL;
	IplImage* dst5 = NULL;

	char* cha = "./temp/";//le dossier de base
	DIR* rep = NULL;
	struct dirent* fichierLu = NULL; /* Pointer to a dirent structure. */
	struct dirent* repLu = NULL; /* Pointer to a dirent structure. */
	char* ch;
	rep = opendir(cha);
	if (rep == NULL)
	{
		exit(1);
	}
	while((repLu = readdir(rep)) != NULL )
	{
		while (repLu != NULL && (strcmp(repLu->d_name, ".") == 0 || strcmp(repLu->d_name, "..") == 0))
		{
			repLu = readdir(rep);
			if (repLu != NULL)
				printf("%s\n", repLu->d_name);
		}
		if (repLu == NULL)
			break;
		DIR* rep2 = NULL;
		ch = concat(cha,repLu->d_name);
		rep2 = opendir(ch);
		char* c = ch;
		c = concat(ch,"/");
		while ((fichierLu = readdir(rep2)) != NULL )
		{
			if (strcmp(fichierLu->d_name, ".") != 0 && strcmp(fichierLu->d_name, "..") != 0)
			{
				printf("Le fichier lu s'appelle %s\n", fichierLu->d_name);

				ch = concat(c,fichierLu->d_name);
				img = cvLoadImage( ch , CV_LOAD_IMAGE_COLOR);
				if (img == NULL)
				{
					printf("Oups j'ai eu un problème.\n");
					return -1;
				}

				IplImage *hsv;
				hsv = cvCloneImage(img);
				cvCvtColor(img, hsv, CV_BGR2HSV);
				IplImage *hsv2;
				hsv2 = cvCloneImage(img);
				cvCvtColor(img, hsv2, CV_BGR2HSV);
				IplImage *hsv3;
				hsv3 = cvCloneImage(img);
				cvCvtColor(img, hsv3, CV_BGR2HSV);

				IplImage *maskrouge;
				maskrouge = cvCreateImage(cvGetSize(img), img->depth, 1);
				IplImage *maskbleu;
				maskbleu = cvCreateImage(cvGetSize(img), img->depth, 1);
				IplImage *maskblanc;
				maskblanc = cvCreateImage(cvGetSize(img), img->depth, 1);

				int h1=0, s1=200, v1= 80;
				int h2=106, s2=138;
				int h3=350, s3=83;
				cvInRangeS(hsv, cvScalar(h1 -tolerance, s1 - tolerance, v1 - tolerance, 0.0), cvScalar(h1 + tolerance, s1 + tolerance, v1 + tolerance, 0.0), maskrouge);
				cvInRangeS(hsv2, cvScalar(h2 -tolerance, s2 - tolerance, 0, 0.0), cvScalar(h2 + tolerance, s2 + tolerance, 255, 0.0), maskbleu);
				cvInRangeS(hsv3, cvScalar(h3 -tolerance, s3 - tolerance, 0, 0.0), cvScalar(h3 + tolerance, s3 + tolerance, 255, 0.0), maskblanc);

				//IplConvKernel *kernel;
				//kernel = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);

				//cvDilate(maskrouge, maskrouge, kernel, 1);
				//cvDilate(maskbleu, maskbleu, kernel, 1);
				//cvDilate(maskblanc, maskblanc, kernel, 1);cvDilate(maskrouge, maskrouge, kernel, 1);
				//cvDilate(maskbleu, maskbleu, kernel, 1);
				//cvDilate(maskblanc, maskblanc, kernel, 1);
				//cvErode(maskrouge, maskrouge, kernel, 1);

				// display

				dst = cvCreateImage(cvSize( img->width / 6, img->height / 6 ), img->depth,img->nChannels );
				dst2 = cvCreateImage(cvSize( maskrouge->width / 6, maskrouge->height / 6 ), maskrouge->depth,maskrouge->nChannels );
				dst3 = cvCreateImage(cvSize( maskbleu->width / 6, maskbleu->height / 6 ), maskbleu->depth,maskbleu->nChannels );
				dst4 = cvCreateImage(cvSize( maskblanc->width / 6, maskblanc->height / 6 ), maskblanc->depth,maskblanc->nChannels );
				//dst5 = cvCreateImage(cvSize( hsv->width / 2, hsv->height / 2 ), hsv->depth, hsv->nChannels );

				cvResize(img, dst, CV_INTER_AREA );
				cvResize(maskrouge, dst2, CV_INTER_AREA );
				cvResize(maskbleu, dst3, CV_INTER_AREA );
				cvResize(maskblanc, dst4, CV_INTER_AREA );
				//cvResize(hsv, dst5, CV_INTER_AREA );

				cvShowImage("test1",dst);
				cvShowImage("test2",dst2);
				cvShowImage("test3",dst3);
				cvShowImage("test4",dst4);
				//cvShowImage("test5",dst5);
				cvMoveWindow("test1" ,0,0);
				cvMoveWindow("test2" ,dst->width,dst->height+60);
				cvMoveWindow("test3" ,dst->width,0);
				cvMoveWindow("test4" ,0,dst->height+60);
				//cvMoveWindow("test5" , 0, 0);

				cvWaitKey(0);

				cvReleaseImage(&maskrouge);
				cvReleaseImage(&maskbleu);
				cvReleaseImage(&maskblanc);
				cvReleaseImage(&hsv);
				cvReleaseImage(&hsv2);
				cvReleaseImage(&hsv3);
				cvReleaseImage(&img);
				cvReleaseImage(&dst);
				cvReleaseImage(&dst2);
				cvReleaseImage(&dst3);
				cvReleaseImage(&dst4);
				cvReleaseImage(&dst5);
				cvDestroyWindow("test1");
				cvDestroyWindow("test2");
				cvDestroyWindow("test3");
				cvDestroyWindow("test4");
				//cvDestroyWindow("test5");
			}
		}
	}


	if (closedir(rep) == -1)
		exit(-1);

	return 0;
}
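
The directory walker above relies on a concat helper that is not shown and never frees the strings it gets back; a plausible definition, offered as a sketch only, is:

#include <stdlib.h>
#include <string.h>

// Returns a newly malloc'd string holding s1 immediately followed by s2
// (the caller is responsible for freeing it).
char* concat(const char* s1, const char* s2)
{
	char* out = (char*)malloc(strlen(s1) + strlen(s2) + 1);
	if (out != NULL)
	{
		strcpy(out, s1);
		strcat(out, s2);
	}
	return out;
}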
Example No. 16
int main(int argc, char** argv)
{
    IplImage *img = NULL;
    char *file1 = "haarcascade_frontalface_alt.xml";
    char *file2 = "haarcascade_eye.xml";

    /* usage: eyedetect <image> */
    //assert(argc == 2);

    /* load the face classifier */
    cascade_f = (CvHaarClassifierCascade*)cvLoad(file1, 0, 0, 0);

    /* load the eye classifier */
    cascade_e = (CvHaarClassifierCascade*)cvLoad(file2, 0, 0, 0);

    /* setup memory storage, needed by the object detector */
    storage = cvCreateMemStorage(0);

    cv::VideoCapture cap(0);
    if(!cap.isOpened()) return -1;

    //cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    //cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);


    /* load image */
    //img = cvLoadImage(argv[1], 1);

    /* always check (img is only assigned inside the capture loop below) */
    assert(cascade_f && cascade_e && storage);

    cvNamedWindow("Faces", 1);
    cvNamedWindow("Eyes", 1);
    cvNamedWindow("Eyes before", 1);

    //add hough controls
    cvNamedWindow("Controls", 1);
    cvCreateTrackbar("Param 1", "Controls", &param1, 300);
    cvCreateTrackbar("Param 2", "Controls", &param2, 300);
    cvCreateTrackbar("ScaleA*10", "Controls", &scaleA, 300);
    cvCreateTrackbar("-ScaleB*10", "Controls", &scaleB, 300);

    cv::Mat frame;

    for(;;)
    {

        cap >> frame;

        IplImage tmp_img = frame;

        img = &tmp_img;

        /* detect eyes and display image */
        detectEyes(img);
        cvShowImage("Faces", img);

		if(cvWaitKey(30) == 'q') break;
    }

    cvWaitKey(0);
    cvDestroyWindow("Faces");
    /* img points at a stack IplImage header borrowed from cv::Mat, so it must not be released here */

    return 0;
}