Code example #1
void CXCCRA2RadarCustomizerDlg::update_previews()
{
	Cvirtual_binary d;
	m_snow_preview_control.SetBitmap(create_bitmap(scale_image(m_snow_preview, m_snow_preview_mask, d)));
	// d.export("c:/temp/snow_preview_mask.png");
	m_temperate_preview_control.SetBitmap(create_bitmap(scale_image(m_temperate_preview, m_temperate_preview_mask, d)));
	// d.export("c:/temp/temperate_preview_mask.png");
	m_urban_preview_control.SetBitmap(create_bitmap(scale_image(m_urban_preview, m_urban_preview_mask, d)));
	// d.export("c:/temp/urban_preview_mask.png");
}
Code example #2
File: captcha.c  Project: Nerei/darknet
void test_captcha(char *cfgfile, char *weightfile)
{
    setbuf(stdout, NULL);
    srand(time(0));
    //char *base = basecfg(cfgfile);
    //printf("%s\n", base);
    network net = parse_network_cfg(cfgfile);
    set_batch_network(&net, 1);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    char filename[256];
    while(1){
        //printf("Enter filename: ");
        fgets(filename, 256, stdin);
        strtok(filename, "\n");
        image im = load_image_color(filename, 200, 60);
        translate_image(im, -128);
        scale_image(im, 1/128.);
        float *X = im.data;
        float *predictions = network_predict(net, X);
        print_letters(predictions, 10);
        free_image(im);
    }
}
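Here and in the darknet-style examples below (#3, #5, #11, and apparently #6), scale_image does not resize anything: it multiplies every pixel value by the given factor, and translate_image adds an offset, so translate_image(im, -128) followed by scale_image(im, 1/128.) shifts and rescales the input before inference. The sketch below only illustrates that behaviour as inferred from these call sites; it is not darknet's verbatim source.

/* Illustrative sketch (assumed behaviour, not copied from darknet):
 * scale_image() scales pixel values, translate_image() shifts them.
 * Geometric resizing is done by the separate resize_image()/crop_image()
 * calls seen in the other examples. */
typedef struct { int w, h, c; float *data; } image;

void scale_image(image m, float s)
{
    int i;
    for (i = 0; i < m.w * m.h * m.c; ++i) m.data[i] *= s;
}

void translate_image(image m, float s)
{
    int i;
    for (i = 0; i < m.w * m.h * m.c; ++i) m.data[i] += s;
}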
Code example #3
File: captcha.c  Project: Nerei/darknet
void decode_captcha(char *cfgfile, char *weightfile)
{
    setbuf(stdout, NULL);
    srand(time(0));
    network net = parse_network_cfg(cfgfile);
    set_batch_network(&net, 1);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    char filename[256];
    while(1){
        printf("Enter filename: ");
        fgets(filename, 256, stdin);
        strtok(filename, "\n");
        image im = load_image_color(filename, 300, 57);
        scale_image(im, 1./255.);
        float *X = im.data;
        float *predictions = network_predict(net, X);
        image out  = float_to_image(300, 57, 1, predictions);
        show_image(out, "decoded");
        #ifdef OPENCV
        cvWaitKey(0);
        #endif
        free_image(im);
    }
}
Code example #4
File: vf_screenshot.c  Project: C3MA/fc_mplayer
static int put_image(struct vf_instance *vf, mp_image_t *mpi, double pts)
{
    mp_image_t *dmpi = (mp_image_t *)mpi->priv;

    if(!(mpi->flags&(MP_IMGFLAG_DIRECT|MP_IMGFLAG_DRAW_CALLBACK))){
        dmpi=vf_get_image(vf->next,mpi->imgfmt,
                                    MP_IMGTYPE_EXPORT, 0,
                                    mpi->width, mpi->height);
        vf_clone_mpi_attributes(dmpi, mpi);
        dmpi->planes[0]=mpi->planes[0];
        dmpi->planes[1]=mpi->planes[1];
        dmpi->planes[2]=mpi->planes[2];
        dmpi->stride[0]=mpi->stride[0];
        dmpi->stride[1]=mpi->stride[1];
        dmpi->stride[2]=mpi->stride[2];
        dmpi->width=mpi->width;
        dmpi->height=mpi->height;
    }

    if(vf->priv->shot) {
        if (vf->priv->shot==1)
            vf->priv->shot=0;
        gen_fname(vf->priv);
        if (vf->priv->fname[0]) {
            if (!vf->priv->store_slices)
              scale_image(vf->priv, dmpi);
            write_png(vf->priv);
        }
        vf->priv->store_slices = 0;
    }

    return vf_next_put_image(vf, dmpi, pts);
}
Code example #5
File: nightmare.c  Project: GYZHikari/darknet
void reconstruct_picture(network net, float *features, image recon, image update, float rate, float momentum, float lambda, int smooth_size)
{
    scale_image(recon, 2);
    translate_image(recon, -1);

    image delta = make_image(recon.w, recon.h, recon.c);

    network_state state = {0};
#ifdef GPU
    state.input = cuda_make_array(recon.data, recon.w*recon.h*recon.c);
    state.delta = cuda_make_array(delta.data, delta.w*delta.h*delta.c);
    state.truth = cuda_make_array(features, get_network_output_size(net));

    forward_network_gpu(net, state);
    backward_network_gpu(net, state);

    cuda_pull_array(state.delta, delta.data, delta.w*delta.h*delta.c);

    cuda_free(state.input);
    cuda_free(state.delta);
    cuda_free(state.truth);
#else
    state.input = recon.data;
    state.delta = delta.data;
    state.truth = features;

    forward_network(net, state);
    backward_network(net, state);
#endif

    axpy_cpu(recon.w*recon.h*recon.c, 1, delta.data, 1, update.data, 1);
    smooth(recon, update, lambda, smooth_size);

    axpy_cpu(recon.w*recon.h*recon.c, rate, update.data, 1, recon.data, 1);
    scal_cpu(recon.w*recon.h*recon.c, momentum, update.data, 1);

    translate_image(recon, 1);
    scale_image(recon, .5);
    constrain_image(recon);
    free_image(delta);
}
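Note: the scale_image(recon, 2) / translate_image(recon, -1) pair at the top maps each pixel value x to 2x - 1 (i.e. [0, 1] to [-1, 1] for normalized data), and the translate_image(recon, 1) / scale_image(recon, .5) pair at the bottom applies the inverse (x + 1) / 2 before constrain_image clamps the result; optimize_picture in example #11 performs the same round trip.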
Code example #6
void rescale_filters(convolutional_layer l, float scale, float trans)
{
    int i;
    for(i = 0; i < l.n; ++i){
        image im = get_convolutional_filter(l, i);
        if (im.c == 3) {
            scale_image(im, scale);
            float sum = sum_array(im.data, im.w*im.h*im.c);
            l.biases[i] += sum*trans;
        }
    }
}
Code example #7
void backend_gtk_size_allocate( gpointer _data, GtkWidget *widget, GtkAllocation *allocation )
{
   struct backend_data *data = _data;

   if( data->scale_to_fit )
   {
      data->output_width = allocation->width;
      data->output_height = allocation->height;
      if( data->pause_state )
      {
	 scale_image( _data );
	 backend_gtk_redraw( _data );
      }
   }
}
Code example #8
File: CPP_TestJB.cpp  Project: archeos/micmac-archeos
string drawMatches_process_image(const string &aFilename, int aScale, const string &aDetectTool, const string &aDetectToolOptions)
{
	string scaledFilename = scaled_filename(aFilename, aScale);
	scale_image(aFilename, aScale, scaledFilename);

	string pointsFilename = points_filename(scaledFilename, aDetectTool);
	detect_points(scaledFilename, aDetectTool, aDetectToolOptions, pointsFilename);

	double scaleToOriginal = 100./double(aScale);
	string originalSizePointsFilename = pointsFilename;
	if (aScale != 100)
	{
		originalSizePointsFilename = original_size_points_filename(pointsFilename);
		toOriginalSize(pointsFilename, scaleToOriginal, originalSizePointsFilename);
	}

	return originalSizePointsFilename;
}
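Note: here scale_image operates on files rather than in-memory buffers: it writes a copy of aFilename scaled to aScale (evidently a percentage, since scaleToOriginal = 100./aScale), the point detector runs on that scaled copy, and toOriginalSize maps the detected points back to the original resolution whenever aScale != 100.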
Code example #9
File: depth.c  Project: xing2fan/x264
static int get_frame( hnd_t handle, cli_pic_t *output, int frame )
{
    depth_hnd_t *h = handle;

    if( h->prev_filter.get_frame( h->prev_hnd, output, frame ) )
        return -1;

    if( h->bit_depth < 16 && output->img.csp & X264_CSP_HIGH_DEPTH )
    {
        dither_image( &h->buffer.img, &output->img, h->error_buf );
        output->img = h->buffer.img;
    }
    else if( h->bit_depth > 8 && !(output->img.csp & X264_CSP_HIGH_DEPTH) )
    {
        scale_image( &h->buffer.img, &output->img );
        output->img = h->buffer.img;
    }
    return 0;
}
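In this x264 depth filter, scale_image is the up-conversion counterpart of dither_image: it widens samples from the source bit depth into the filter's high-bit-depth buffer. As a rough, hypothetical illustration of that kind of conversion (not x264's actual scale_image, which also handles strides and multiple planes), widening 8-bit samples is essentially a per-sample left shift:

#include <stdint.h>

/* Hypothetical example: widen 8-bit samples to bit_depth bits. */
static void scale_plane_8_to_n(uint16_t *dst, const uint8_t *src,
                               int width, int height, int bit_depth)
{
    int shift = bit_depth - 8;                /* e.g. 2 for 10-bit output */
    for (int i = 0; i < width * height; i++)
        dst[i] = (uint16_t)(src[i] << shift);
}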
Code example #10
void backend_gtk_display_image( gpointer _data )
{
   struct backend_data *data = _data;

   data->current_buffer = ( data->current_buffer + 1 ) % NUM_BUFFERS;
   
   if( data->scale_to_fit )
   {
      if( ( data->format.size.width != data->output_width ) &&
	  ( data->format.size.height != data->output_height ) )
      {
	 if( data->image_data[ data->current_buffer ] )
	 {
	    scale_image( _data );
	 }
      }
   }

   backend_gtk_redraw( _data );
}
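Note: in this GTK backend (examples #7 and #10), scale_image(_data) is passed the whole backend_data struct; from the surrounding code it evidently rescales the currently buffered frame to the output_width x output_height recorded in the size-allocate handler, after which backend_gtk_redraw() repaints it.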
Code example #11
File: nightmare.c  Project: GYZHikari/darknet
void optimize_picture(network *net, image orig, int max_layer, float scale, float rate, float thresh, int norm)
{
    scale_image(orig, 2);
    translate_image(orig, -1);
    net->n = max_layer + 1;

    int dx = rand()%16 - 8;
    int dy = rand()%16 - 8;
    int flip = rand()%2;

    image crop = crop_image(orig, dx, dy, orig.w, orig.h);
    image im = resize_image(crop, (int)(orig.w * scale), (int)(orig.h * scale));
    if(flip) flip_image(im);

    resize_network(net, im.w, im.h);
    layer last = net->layers[net->n-1];
    //net->layers[net->n - 1].activation = LINEAR;

    image delta = make_image(im.w, im.h, im.c);

    network_state state = {0};

#ifdef GPU
    state.input = cuda_make_array(im.data, im.w*im.h*im.c);
    state.delta = cuda_make_array(im.data, im.w*im.h*im.c);

    forward_network_gpu(*net, state);
    copy_ongpu(last.outputs, last.output_gpu, 1, last.delta_gpu, 1);

    cuda_pull_array(last.delta_gpu, last.delta, last.outputs);
    calculate_loss(last.delta, last.delta, last.outputs, thresh);
    cuda_push_array(last.delta_gpu, last.delta, last.outputs);

    backward_network_gpu(*net, state);

    cuda_pull_array(state.delta, delta.data, im.w*im.h*im.c);
    cuda_free(state.input);
    cuda_free(state.delta);
#else
    state.input = im.data;
    state.delta = delta.data;
    forward_network(*net, state);
    copy_cpu(last.outputs, last.output, 1, last.delta, 1);
    calculate_loss(last.output, last.delta, last.outputs, thresh);
    backward_network(*net, state);
#endif

    if(flip) flip_image(delta);
    //normalize_array(delta.data, delta.w*delta.h*delta.c);
    image resized = resize_image(delta, orig.w, orig.h);
    image out = crop_image(resized, -dx, -dy, orig.w, orig.h);

    /*
       image g = grayscale_image(out);
       free_image(out);
       out = g;
     */

    //rate = rate / abs_mean(out.data, out.w*out.h*out.c);

    if(norm) normalize_array(out.data, out.w*out.h*out.c);
    axpy_cpu(orig.w*orig.h*orig.c, rate, out.data, 1, orig.data, 1);

    /*
       normalize_array(orig.data, orig.w*orig.h*orig.c);
       scale_image(orig, sqrt(var));
       translate_image(orig, mean);
     */

    translate_image(orig, 1);
    scale_image(orig, .5);
    //normalize_image(orig);

    constrain_image(orig);

    free_image(crop);
    free_image(im);
    free_image(delta);
    free_image(resized);
    free_image(out);

}
Code example #12
File: packed_tilegen.c  Project: kyroskoh/png_tilegen
int main(int argc, char **argv) {

    int ret, tmp;
    img_t img;
    char *output_dir;
    FILE *out_file;

    if (argc < 5) {
        printf("Usage:\npacked_tilegen <input> <output_dir> <tileX> <tileY> [cardinal direction N|E|S|W\n");
        exit(1);
    }

    printf("Read file \"%s\"\n", argv[1]);
    if (strstr(argv[1], ".png"))
        ret = read_png(argv[1], &img);
    else if (strstr(argv[1], ".ppm"))
        ret = read_ppm(argv[1], &img);
    else {
        printf("Unknown format!\n");
        exit(2);
    }

    output_dir = argv[2];

    if (ret) {
        printf("Could not read png input file! %d\n", ret);
        exit(ret);
    }

    img.tileX = atoi(argv[3]);
    img.tileY = atoi(argv[4]);

    if (argc > 5) {
        switch(argv[5][0]) {
        case 'n':
        case 'N':
            // keep defaults
            break;
        case 'e':
        case 'E':
            tmp = img.tileX;
            img.tileX = img.tileY;
            img.tileY = (2<<12) - 1 - tmp;
            break;
        case 's':
        case 'S':
            img.tileX = (2<<12) - 1 - img.tileX;
            img.tileY = (2<<12) - 1 - img.tileY;
            break;
        case 'w':
        case 'W':
            tmp = img.tileX;
            img.tileX = (2<<12) - 1 - img.tileY;
            img.tileY = tmp;
            break;
        default:
            fprintf(stderr, "Unknown cardinal direction \"%s\". Default to north\n", argv[5]);
        }
    }

    // create pack file for writing
    out_file = open_packed_file(output_dir, img.tileX, img.tileY);
    tmp = 0;

    while (1) {

        // crop image at given zoom level..
        crop_image(&img, tmp++, out_file);

        scale_image(&img);

        if (img.sizeX == TILE_SIZE) {
            save_image(&img, output_dir);
            break;
        }
    }

    close_packed_file(out_file);

    return 0;
}
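The loop above emits one packed tile set per zoom level: crop_image writes tiles at the current resolution, then scale_image(&img) shrinks the image (presumably by half, one zoom level) until img.sizeX reaches TILE_SIZE. png_tilegen's own scale_image is not shown on this page; the sketch below is a hypothetical stand-in that halves a packed-RGB image in place with a 2x2 box average, and its struct layout is an assumption made only for this illustration, not png_tilegen's img_t.

/* Hypothetical half-scaler over an assumed, simplified image layout. */
typedef struct { int sizeX, sizeY; unsigned char *rgb; } img_half_t;

static void scale_image_half(img_half_t *img)
{
    int w = img->sizeX / 2, h = img->sizeY / 2;
    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
            for (int c = 0; c < 3; c++) {
                const unsigned char *row0 = img->rgb + ((2 * y)     * img->sizeX + 2 * x) * 3;
                const unsigned char *row1 = img->rgb + ((2 * y + 1) * img->sizeX + 2 * x) * 3;
                int sum = row0[c] + row0[3 + c] + row1[c] + row1[3 + c];
                /* in raster order the write position never overtakes unread source pixels */
                img->rgb[(y * w + x) * 3 + c] = (unsigned char)(sum / 4);
            }
    img->sizeX = w;
    img->sizeY = h;
}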
Code example #13
int testfaceLib_pThread ( const char* str_video, int trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster )
{
    FILE* fp_imaginfo = fopen( "imaginfo.txt", "w" );

	bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	int  sampleRate =1;
	
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	int  prob_estimate[7];
	char sState[256];
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster =  true;

	CxlibFaceAnalyzer faceAnalyzer(viewAngle, (EnumTrackerType)trackerType, blink, smile, gender, age, recog, sampleRate, str_facesetxml, recognizerType, bEnableAutoCluster); 

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256] = "";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);

	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{   // smile icon
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{   // gender/age/smile icons
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}

	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );
	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType== TRA_HAAR ? "haar" : (recognizerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play    = true;
	mouse_faceparam.ret_online_collecting = 0;

	static const int MAX_FACES = 16; 
	if(! quiet)
	{
		mouse_faceparam.play    = true;
		mouse_faceparam.updated = false;
		mouse_faceparam.face_num  = faceAnalyzer.getMaxFaceNum();
		mouse_faceparam.rects     = faceAnalyzer.getFaceRects();
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= faceAnalyzer.getBigCutFace();
		mouse_faceparam.typeRecognizer = 0;
		mouse_faceparam.faceRecognizer = &faceAnalyzer;
		mouse_faceparam.ret_online_collecting = 0;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
		faceAnalyzer.setMouseParam(&mouse_faceparam);
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks;
	double tracker_fps, total_fps; 

	start_ticks         = total_ticks  = 0;
	tracker_total_ticks = 0;
		
	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			break;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		///////////////////////////////////////////////////////////////////
		// do face tracking and face recognition
		start_ticks = ticks = cvGetTickCount();	

        if( is_piclist )
            faceAnalyzer.detect(gray_image, prob_estimate, sState);
        else
		    faceAnalyzer.track(gray_image, prob_estimate, sState, image);   // track face in each frame but recognize by pthread
		//faceAnalyzer.detect(gray_image, prob_estimate, sState);// track and recognizer face in each frame 

		int face_num = faceAnalyzer.getFaceNum();

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

		
		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

		// blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		{
			// get face rect and id from face tracker
			CvRectItem rectItem = faceAnalyzer.getFaceRect(i);
			CvRect rect = rectItem.rc;
			int    face_trackid = rectItem.fid;
			float  probSmile = faceAnalyzer.getFaceSmileProb(i);
			int    bBlink  = faceAnalyzer.getFaceBlink(i);
			int    bSmile  = faceAnalyzer.getFaceSmile(i);
			int    bGender = faceAnalyzer.getFaceGender(i);
			int    nAgeID  = faceAnalyzer.getFaceAge(i);
			int    nFaceID = faceAnalyzer.getFaceID(i);
			float  fFaceProb= faceAnalyzer.getFaceProb(i);
			
			char *sFaceCaption = NULL;
			char sFaceNameBuff[256];
			char *sFaceName = faceAnalyzer.getFaceName(i);
			if(sFaceName[0] != '\0')
			{
				sprintf(sFaceNameBuff, "%s %.2f", sFaceName, fFaceProb);
				sFaceCaption = sFaceName;
				sFaceCaption = sFaceNameBuff;
			}

			if( ! quiet )
			{
				CvPoint2D32f *landmark6 = NULL;
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

				int trackid = -1; //face_trackid , don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d", rect.x, rect.y, rect.width, rect.height );
		}
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		///////////////////////////////////////////////////////////////////
		total_ticks += (cvGetTickCount() - start_ticks);
		
		// frame face_num
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);

			CvRectItem *rects = faceAnalyzer.getFaceRects();
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
		
		//show Image
		if (image.width() <= 800)
			cvShowImage( str_title, image );
		else
		{   // display scaled smaller image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey(1);
		//int key = cvWaitKey(0);
		if( key == ' ' )     // press space bar to pause the video play
			cvWaitKey( 0 );                           
		else if( key == 27 ) // press 'esc' to exit
			break;	                                   
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = faceAnalyzer.getFaceRect(0).rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   //enable flag to collect face exemplars for the selected face name
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting face exemplars
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{
				// save faceset xml model
				faceAnalyzer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );
	
	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: %.1f ", tracker_fps);

	//save updated faceset model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceAnalyzer.saveFaceModelXML("faceset_model.xml");
	}

	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight =10; 
	faceAnalyzer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceAnalyzer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);

	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );

    return 0;
}
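Note: in this example and in example #19 below, scale_image is not a call to an image-scaling function at all; it is a local CvImage named scale_image holding an 800-pixel-wide copy of the frame, filled with cvResize so that oversized frames fit in the display window.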
Code example #14
File: gfx.c  Project: mjmeehan/engine9
/***
 *	load a single frame of the player
 */
static void gfx_load_menusmall_players ()
{
    SDL_Surface *orgimg, *tmpimg;
    int i, r, g, b;
    float sfkt;
    char filename[255];

    for (i = 0; i < gfx.player_gfx_count; i++) {
        if (gfx.players[i].small_image == NULL || gfx.players[i].menu_image == NULL) {
            SDL_Surface *img;
            SDL_Rect rect;

            sprintf (filename, "%s/player/player%d.png", bman.datapath, i);
            orgimg = IMG_Load (filename);
            if (orgimg == NULL) {
                printf ("Can't load image: %s\n", SDL_GetError ());
                exit (1);
            }
            rect.x = 3 * (orgimg->w/4);
            rect.y = 0;
            rect.w = orgimg->w/4;
            rect.h = GFX_PLAYERIMGSIZE_Y;
            img = gfx_copyfrom (orgimg, &rect);

            SDL_FreeSurface (orgimg);

            /* small image */
            sfkt = (float)(((float)(GFX_SMALLPLAYERIMGSIZE_X * 2)) / ((float)img->h));
            if (gfx.players[i].small_image == NULL) {
                tmpimg = scale_image (img, (int)(((float)img->w)*sfkt), GFX_SMALLPLAYERIMGSIZE_X * 2);
                getRGBpixel (tmpimg, 0, 0, &r, &g, &b);
                SDL_SetColorKey (tmpimg, SDL_SRCCOLORKEY, SDL_MapRGB (tmpimg->format, r, g, b));
                gfx.players[i].small_image = SDL_DisplayFormat (tmpimg);
                SDL_FreeSurface (tmpimg);
            }
            /* menu image */
            sfkt = (float)(((float)(GFX_MENUPLAYERIMGSIZE_X * 2)) / ((float)img->h));
            if (gfx.players[i].menu_image == NULL) {
                tmpimg = scale_image (img, (int)(((float)img->w)*sfkt), GFX_MENUPLAYERIMGSIZE_X * 2);
                getRGBpixel (tmpimg, 0, 0, &r, &g, &b);
                SDL_SetColorKey (tmpimg, SDL_SRCCOLORKEY, SDL_MapRGB (tmpimg->format, r, g, b));
                gfx.players[i].menu_image = SDL_DisplayFormat (tmpimg);
                SDL_FreeSurface (tmpimg);
            }
            SDL_FreeSurface (img);
        }
    }

    /* load the ghost player */
    sprintf (filename, "%s/player/ghost.png", bman.datapath);
    orgimg = IMG_Load (filename);
    sfkt = (float)(((float)(GFX_MENUPLAYERIMGSIZE_X * 2)) / ((float)orgimg->h));
    gfx.ghost = scale_image (orgimg, (int)(((float)orgimg->w)*sfkt), GFX_MENUPLAYERIMGSIZE_X * 2);
    if (gfx.ghost == NULL) {
        printf ("Can't load image: %s\n", SDL_GetError ());
        exit (1);
    }
    sfkt = (float)(((float)(GFX_SMALLPLAYERIMGSIZE_X * 2)) / ((float)orgimg->h));
    gfx.ghost_small = scale_image (orgimg, (int)(((float)orgimg->w)*sfkt), GFX_SMALLPLAYERIMGSIZE_X * 2);

    SDL_FreeSurface (orgimg);
}
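Examples #14, #16, #17 and #21 (and example #18 from a different project) call a scale_image(surface, new_w, new_h) helper that returns a new SDL_Surface at the requested size. The engine's actual implementation is not reproduced on this page; purely as a hypothetical stand-in with the same shape, a nearest-neighbour version could look like this:

#include <string.h>
#include <SDL.h>

/* Hypothetical nearest-neighbour scaler with the same signature as the
 * scale_image() used above; the real scaler may filter differently. */
SDL_Surface *scale_image_nn(SDL_Surface *src, int new_w, int new_h)
{
    SDL_Surface *dst = SDL_CreateRGBSurface(SDL_SWSURFACE, new_w, new_h,
                                            src->format->BitsPerPixel,
                                            src->format->Rmask, src->format->Gmask,
                                            src->format->Bmask, src->format->Amask);
    if (dst == NULL)
        return NULL;

    SDL_LockSurface(src);
    SDL_LockSurface(dst);
    int bpp = src->format->BytesPerPixel;
    for (int y = 0; y < new_h; y++) {
        int sy = y * src->h / new_h;              /* nearest source row */
        for (int x = 0; x < new_w; x++) {
            int sx = x * src->w / new_w;          /* nearest source column */
            memcpy((Uint8 *)dst->pixels + y * dst->pitch + x * bpp,
                   (Uint8 *)src->pixels + sy * src->pitch + sx * bpp, bpp);
        }
    }
    SDL_UnlockSurface(dst);
    SDL_UnlockSurface(src);
    return dst;
}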
Code example #15
File: window.cpp  Project: cnr-isti-vclab/piccante
void Window::zoom_in()
{
    scale_image(1.25);
}
Code example #16
File: gfx.c  Project: mjmeehan/engine9
/***
 * load a single frame from the powerups
 */
static void gfx_load_fieldtype_menu ()
{
    int i, ft, r, g, b;
    SDL_Surface *background = NULL, *orgimg = NULL, *tmpimg = NULL;
    char filename[255];
    SDL_Rect rect;

    for (i = 0; i < FT_max; i++) {
        if (gfx.menu_field[i]!=NULL) SDL_FreeSurface (gfx.menu_field[i]);
        gfx.menu_field[i] = NULL;
    }

    rect.x = 0;
    rect.y = 0;
    rect.w = GFX_IMGSIZE;
    rect.h = GFX_IMGSIZE;

    for (ft = 0; ft < FT_max; ft++) if (ft != FT_mixed) {
            /*
             * load background image
             */
            if (ft == 0) {
                if (background != NULL) SDL_FreeSurface (background);
                sprintf (filename, "%s/tileset/default/background.png", bman.datapath);
                orgimg = IMG_Load (filename);
                if (!orgimg) {
                    printf ("Can't load image. :%s\n", SDL_GetError ());
                    exit (1);
                }

                tmpimg = gfx_copyfrom (orgimg, &rect);
                SDL_FreeSurface (orgimg);
                orgimg = scale_image (tmpimg, GFX_MENUFIELDIMGSIZE, GFX_MENUFIELDIMGSIZE);
                SDL_FreeSurface (tmpimg);
                background = SDL_DisplayFormat (orgimg);
                SDL_FreeSurface (orgimg);
            }

            if (ft == FT_death) {
                if (background != NULL) SDL_FreeSurface (background);
                sprintf (filename, "%s/tileset/default/powerbad.png", bman.datapath);
                orgimg = IMG_Load (filename);
                if (!orgimg) {
                    printf ("Can't load image. :%s\n", SDL_GetError ());
                    exit (1);
                }

                tmpimg = gfx_copyfrom (orgimg, &rect);
                SDL_FreeSurface (orgimg);
                orgimg = scale_image (tmpimg, GFX_MENUFIELDIMGSIZE, GFX_MENUFIELDIMGSIZE);
                SDL_FreeSurface (tmpimg);
                background = SDL_DisplayFormat (orgimg);
                SDL_FreeSurface (orgimg);
            }

            if (ft == FT_fire) {
                if (background != NULL) SDL_FreeSurface (background);
                sprintf (filename, "%s/tileset/default/powerup.png", bman.datapath);
                orgimg = IMG_Load (filename);
                if (!orgimg) {
                    printf ("Can't load image. :%s\n", SDL_GetError ());
                    exit (1);
                }

                tmpimg = gfx_copyfrom (orgimg, &rect);
                SDL_FreeSurface (orgimg);
                orgimg = scale_image (tmpimg, GFX_MENUFIELDIMGSIZE, GFX_MENUFIELDIMGSIZE);
                SDL_FreeSurface (tmpimg);
                background = SDL_DisplayFormat (orgimg);
                SDL_FreeSurface (orgimg);
            }

            if (ft == FT_sp_trigger) {
                if (background != NULL) SDL_FreeSurface (background);
                sprintf (filename, "%s/tileset/default/powersp.png", bman.datapath);
                orgimg = IMG_Load (filename);
                if (!orgimg) {
                    printf ("Can't load image. :%s\n", SDL_GetError ());
                    exit (1);
                }

                tmpimg = gfx_copyfrom (orgimg, &rect);
                SDL_FreeSurface (orgimg);
                orgimg = scale_image (tmpimg, GFX_MENUFIELDIMGSIZE, GFX_MENUFIELDIMGSIZE);
                SDL_FreeSurface (tmpimg);
                background = SDL_DisplayFormat (orgimg);
                SDL_FreeSurface (orgimg);
            }

            /*
             * load fieldgfx for the menu
             */
            gfx.menu_field[ft] = gfx_copyfrom (background, NULL);
            sprintf (filename, "%s/tileset/default/%s.png", bman.datapath, ft_filenames[ft]);

            orgimg = IMG_Load (filename);
            if (!orgimg) {
                printf ("Can't load image. :%s\n", SDL_GetError ());
                exit (1);
            }

            tmpimg = gfx_copyfrom (orgimg, &rect);
            SDL_FreeSurface (orgimg);
            orgimg = scale_image (tmpimg, GFX_MENUFIELDIMGSIZE, GFX_MENUFIELDIMGSIZE);
            SDL_FreeSurface (tmpimg);
            getRGBpixel (orgimg, 0, 0, &r, &g, &b);
            SDL_SetColorKey (orgimg, SDL_SRCCOLORKEY, SDL_MapRGB (orgimg->format, r, g, b));
            tmpimg = SDL_DisplayFormat (orgimg);
            SDL_FreeSurface (orgimg);
            SDL_BlitSurface (tmpimg, NULL, gfx.menu_field[ft], NULL);
            SDL_FreeSurface (tmpimg);
        }

    if (background)
        SDL_FreeSurface (background);
}
Code example #17
File: gfx.c  Project: mjmeehan/engine9
/*
 * Load all players graphics we have
 */
void
gfx_load_players (int sx, int sy)
{
    float sfkt;
          /* ssfkt; */
    char filename[255];
    int i,
        r,
        g,
        b;
    SDL_Surface *tmpimage,
                *tmpimage1;
    sfkt = ((float) sx) / ((float) GFX_IMGSIZE);
    /*ssfkt = ((float) GFX_SMALLPLAYERIMGSIZE_X) / ((float) GFX_IMGSIZE); */

    d_printf ("gfx_load_players (%d, %d)\n", sx, sy);

    /* loading the player images */
    for (i = 0; i < gfx.player_gfx_count; i++) {
        sprintf (filename, "%s/player/player%d.png", bman.datapath, i);
        tmpimage = IMG_Load (filename);
        if (tmpimage == NULL) {
            printf ("Can't load image: %s\n", SDL_GetError ());
            exit (1);
        }

        else {
            /* load the game player image */
            gfx.players[i].ani.h = sy * 2;
            gfx.players[i].ani.w = (tmpimage->w / 4) * sfkt;
            gfx.players[i].ani.frames = tmpimage->h / GFX_PLAYERIMGSIZE_Y;

            tmpimage1 = scale_image (tmpimage, gfx.players[i].ani.w * 4, gfx.players[i].ani.frames * gfx.players[i].ani.h);
            getRGBpixel (tmpimage1, 0, 0, &r, &g, &b);
            SDL_SetColorKey (tmpimage1, SDL_SRCCOLORKEY, SDL_MapRGB (tmpimage1->format, r, g, b));
            gfx.players[i].ani.image = SDL_DisplayFormat (tmpimage1);
            SDL_FreeSurface (tmpimage1);

            /* calculate the numbers of images for the animation */
            gfx.players[i].offset.x = (sx - gfx.players[i].ani.w) / 2;
            gfx.players[i].offset.y = -sy;
            SDL_FreeSurface (tmpimage);
        }
    }

    /* load the death image */
    sprintf (filename, "%s/player/dead0.png", bman.datapath);
    tmpimage = IMG_Load (filename);
    if (tmpimage == NULL) {
        /* no image found - set field clear */
        printf ("Player Animation Could not be loaded (%s)\n", filename);
        exit (1);
    }

    gfx.dead.frames = tmpimage->h / (2* GFX_IMGSIZE);
    tmpimage1 = scale_image (tmpimage, ((2 * sx * tmpimage->w) / (2 * GFX_IMGSIZE)), gfx.dead.frames * (2 * sy));
    getRGBpixel (tmpimage1, 0, 0, &r, &g, &b);
    SDL_SetColorKey (tmpimage1, SDL_SRCCOLORKEY, SDL_MapRGB (tmpimage1->format, r, g, b));
    gfx.dead.image = SDL_DisplayFormat (tmpimage1);
    SDL_FreeSurface (tmpimage1);
    SDL_FreeSurface (tmpimage);

    /* load the illnessthing */
    sprintf (filename, "%s/player/playersick.png", bman.datapath);
    tmpimage = IMG_Load (filename);
    if (tmpimage == NULL) {
        printf ("Can't load image: %s\n", SDL_GetError ());
        exit (1);
    }
    gfx.ill.frames = tmpimage->h / (2 * GFX_IMGSIZE);
    tmpimage1 = scale_image (tmpimage, (2 * sx * tmpimage->w) / (2 * GFX_IMGSIZE), gfx.ill.frames * (2 * sy));
    getRGBpixel (tmpimage1, 0, 0, &r, &g, &b);
    SDL_SetColorKey (tmpimage1, SDL_SRCCOLORKEY, SDL_MapRGB (tmpimage1->format, r, g, b));
    gfx.ill.image = SDL_DisplayFormat (tmpimage1);
    SDL_FreeSurface (tmpimage);
    SDL_FreeSurface (tmpimage1);

    /* load the respawn gfx */
    sprintf (filename, "%s/player/respawn.png", bman.datapath);
    tmpimage = IMG_Load (filename);
    if (tmpimage == NULL) {
        printf ("Can't load image: %s\n", SDL_GetError ());
        exit (1);
    }
    gfx.respawn.frames = tmpimage->h / (2 * GFX_IMGSIZE);
    gfx.respawn.image  = scale_image (tmpimage, (2 * sx * tmpimage->w) / (2 * GFX_IMGSIZE), gfx.respawn.frames * (2 * sy));
    SDL_FreeSurface (tmpimage);
};
Code example #18
SDL_Surface *slideshow::prep_image(int index)
{
	std::string filename = m_file_list->get(index);
	SDL_Surface *image = IMG_Load(filename.c_str());

	if ( !image )
	{
		return create_placeholder_image( filename, IMG_GetError() );
	}

	// convert the image to an acceptable pixel format
	// -----------------------------------------------------------------------------

	if ( !is_true_color(image) )
	{
		SDL_Surface *tc_image = convert_to_true_color(image);
		SDL_FreeSurface(image); 

		if ( !tc_image )
			return NULL;

		image = tc_image;
	}
	
	// scale the image if necessary
	// -----------------------------------------------------------------------------

	int w = image->w;
	int h = image->h;

	if (w != m_options->width)
	{
		h = (h * m_options->width) / w;
		w = m_options->width;
	}

	if (h > m_options->height)
	{
		w = (w * m_options->height) / h;
		h = m_options->height;
	}

	if ( w != image->w || h != image->h)
	{
		// scale the image
		SDL_Surface *scaled_image = scale_image(image, w, h);
		SDL_FreeSurface(image);

		if ( !scaled_image )
			return NULL;

		image = scaled_image;
	}

	// frame the scaled image, if necessary
	// -----------------------------------------------------------------------------

	if ( m_options->width != image->w || m_options->height != image->h)
	{
		SDL_Surface *framed_image = frame_image(image, m_options->width, m_options->height);
		SDL_FreeSurface(image); 

		if ( !framed_image )
			return NULL;

		image = framed_image;
	}

	// return the processed image
	// -----------------------------------------------------------------------------
	
	return image;
}
Code example #19
int testfaceLib_sThread ( const char* str_video, int  trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster)
{
	int  faceimgID = 0;
	char driver[8];
	char dir[1024];
	char fname[1024];
	char ext[8];
	char sImgPath[1024];

	if(sfolder)
	{
		char sysCommand[128];
		sprintf (sysCommand, "mkdir %s", sfolder);
		system (sysCommand);

		sprintf(sImgPath, "%s//%s", sfolder,  "imaginfo.txt");
		sprintf(fname,   "%s//%s", sfolder,  "faceinfo.txt");
	}
	else
	{
		sprintf(sImgPath, "%s", "imaginfo.txt");
		sprintf(fname,   "%s", "faceinfo.txt");
	}

	FILE* fp_imaginfo = fopen( sImgPath, "wt" );
    FILE* fp_faceinfo = fopen( fname, "wt" );

    bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256]="";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);
	
	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}
	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );

	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType == TRA_HAAR ? "haar" : (trackerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// config face tracker
	const int  face_max = 16;
	CvRectItem rects[face_max];
	
	tagDetectConfig configParam;
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;

	CxlibFaceDetector detector;
	detector.init(viewAngle, (EnumFeaType)trackerType);
	detector.config( configParam );

	CxlibFaceTracker tracker;
	tracker.init(viewAngle, (EnumTrackerType)trackerType);
	tracker.config( configParam, TR_NLEVEL_3 );

	if( cvGetErrStatus() < 0 )
	{
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// config landmark detector
	CvPoint2D32f   landmark6[6+1]; // consider both 6-pt and 7-pt
	float          parameters[16];
	bool      bLandmark = false;
	CxlibLandmarkDetector landmarkDetector(LDM_6PT);

	int size_smallface = 64;
	int size_bigface   = 128;
	CxlibAlignFace cutFace(size_smallface, size_bigface);
	
	// config blink/smile/gender detector
	int    bBlink = 0, bSmile = 0, bGender = 0, bAge = 0;  //+1, -1, otherwise 0: no process 
	float  probBlink = 0, probSmile = 0, probGender = 0, probAge[4];
	int    nAgeID = 0;

	CxlibBlinkDetector  blinkDetector(size_smallface);
	CxlibSmileDetector  smileDetector(size_smallface);
	CxlibGenderDetector genderDetector(size_smallface);
	CxlibAgeDetector    ageDetector(size_bigface);

	// config face recognizer
	float probFaceID = 0;
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	CxlibFaceRecognizer faceRecognizer( size_bigface, recognizerType );
	if(recog) faceRecognizer.loadFaceModelXML(str_facesetxml);
	
	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play = true;
	mouse_faceparam.ret_online_collecting = 0;
		
	if(! quiet)
	{
		mouse_faceparam.face_num  = face_max;
		mouse_faceparam.rects     = rects;
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= cutFace.getBigCutFace();
		mouse_faceparam.typeRecognizer = 1;
		mouse_faceparam.faceRecognizer = &faceRecognizer;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks, landmark_total_ticks, align_total_ticks,
		   blink_total_ticks, smile_total_ticks, gender_total_ticks, age_total_ticks, recg_total_ticks;
	double frame_fps, tracker_fps, landmark_fps, align_fps, blink_fps, smile_fps, gender_fps, age_fps, recg_fps, total_fps; 

	start_ticks         = total_ticks          = 0;
	tracker_total_ticks = landmark_total_ticks = align_total_ticks  = 0;
	blink_total_ticks   = smile_total_ticks    = gender_total_ticks = age_total_ticks = recg_total_ticks = 0;

	tracker_fps = landmark_fps = align_fps = blink_fps = smile_fps = gender_fps = age_fps = recg_fps = total_fps = 0.0;        

	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;
	int   print_faceid=-1;
	float print_score = 0;
	std::string  print_facename;

	bool bRunLandmark = blink || smile|| gender|| age|| recog || saveface;
	IplImage *thumbnailImg   = cvCreateImage(cvSize(THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT), IPL_DEPTH_8U, 3);   
	
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster = true;
	if( is_piclist ) bEnableAutoCluster = false;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			continue;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		// do face tracking
		start_ticks = ticks = cvGetTickCount();	
       
		int face_num = 0;
        if( is_piclist )
            face_num = detector.detect( gray_image, rects, face_max );
        else
            face_num = tracker.track( gray_image, rects, face_max, image ); // track in a video for faster speed
		  //face_num = tracker.detect( gray_image, rects, face_max ); // detect in an image

		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

        // blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		//for( int i=0; i< MIN(1,face_num); i++ )
		{
			// get face rect and id from face tracker
			CvRect rect = rects[i].rc;

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d %f", rect.x, rect.y, rect.width, rect.height, rects[i].prob );

			int    face_trackid = rects[i].fid;
			float  like = rects[i].prob;
			int    angle= rects[i].angle;

			// filter out outer faces
			if( rect.x+rect.width  > gray_image.width()   || rect.x < 0 ) continue;
			if( rect.y+rect.height > gray_image.height() || rect.y < 0 ) continue;

			//tracker.getThumbnail(image, rect, thumbnailImg);

			// detect landmark points 
			ticks = cvGetTickCount();	

			if(bRunLandmark)
			{
                if( is_piclist )
				    bLandmark = landmarkDetector.detect( gray_image, &rect, landmark6, parameters, angle ); //detect in an image
                else
				    bLandmark = landmarkDetector.track( gray_image, &rect, landmark6, parameters, angle ); // track in a video for faster speed

				ticks = cvGetTickCount() - ticks;
				landmark_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
				landmark_total_ticks += ticks;
			}
			else
				bLandmark = false;

	
			if(quiet == false && bLandmark == false) 
			{
				//DrawFaceRect
				cxlibDrawFaceRect(image, rect);
				continue;
			}

			// warped align face and hist eq to delighting
			ticks = cvGetTickCount();	

			cutFace.init(gray_image, rect, landmark6);

			ticks = cvGetTickCount() - ticks;
			if(ticks > 1)
				align_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
			else
			{	align_fps = 0;
				ticks = 0;
			}
			align_total_ticks += ticks;

			if(saveface)   //save face icon for training later
			{
				//save cutfaces
				if(sfolder)
				{
#ifdef WIN32
					_splitpath(vidcap->filename(),driver,dir,fname,ext);
					sprintf(sImgPath, "%s//%s%s", sfolder, fname,ext);
#else
					sprintf(sImgPath, "%s//%06d.jpg", sfolder, faceimgID++);
#endif
				}
				else
					sprintf(sImgPath, "%s#.jpg", vidcap->filename());
				
				cvSaveImage(sImgPath, cutFace.getBigCutFace());
			}

			// detect blink
			bBlink = 0;	
			probBlink = 0;
			if(blink && bLandmark)
			{
				ticks = cvGetTickCount();	
				float blink_threshold = blinkDetector.getDefThreshold();//0.5;
				int ret = blinkDetector.predict( &cutFace, &probBlink);
			
				if(probBlink > blink_threshold )
					bBlink = 1;  //eye close
				else 
					bBlink = -1; //eye open

				ticks = cvGetTickCount() - ticks;
				blink_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				blink_total_ticks += ticks;

				print_score = probBlink;
			}
			else blink_fps = 0;

			// detect smile
			bSmile    = 0;	
			probSmile = 0;
			if ( smile && bLandmark )
			{	
				ticks = cvGetTickCount();
				float smile_threshold = smileDetector.getDefThreshold(); //0.48;  
				int ret = smileDetector.predict(&cutFace, &probSmile);

				if(probSmile > smile_threshold)
					bSmile = 1;  //smile
				else 
					bSmile = -1; //not smile

				ticks	  = cvGetTickCount() - ticks;
				smile_fps = 1000.0 /( 1e-3 * ticks / cvGetTickFrequency() );
				smile_total_ticks += ticks;

				print_score = probSmile;
			}
			else smile_fps = 0;

			//detect gender
			bGender    = 0;	
			probGender = 0;
			if(gender && bLandmark)
			{
				ticks = cvGetTickCount();	
				float gender_threshold = genderDetector.getDefThreshold(); // 0.42; 
				int ret = genderDetector.predict(&cutFace, &probGender);

				if(probGender > gender_threshold)
					bGender =  1; //female
				else
					bGender = -1; //male

				//bGender = -1:male, 1:female, 0: null
				// smooth prediction result
                if( ! is_piclist )
				    bGender = genderDetector.voteLabel(face_trackid, bGender);
				
				ticks = cvGetTickCount() - ticks;
				gender_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				gender_total_ticks += ticks;

				print_score = probGender; 
			}
			else gender_fps = 0;

			//detect age
			nAgeID  = -1;
			if(age && bLandmark && rect.width*rect.height > 40*40)
			{
				ticks = cvGetTickCount();	

				//nAgeID = 0:"Baby", 1:"Kid", 2:"Adult", 3:"Senior"
				nAgeID = ageDetector.predict(&cutFace, probAge);

				// smooth prediction result
                if( ! is_piclist )
				    nAgeID = ageDetector.voteLabel(face_trackid, nAgeID); 

				ticks = cvGetTickCount() - ticks;
				age_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				age_total_ticks += ticks;

				print_score = probAge[nAgeID]; 
				//if( ! quiet )	cxDrawAignFace2Image(image, pCutFace2);
			}
			else 
			{
				age_fps = 0;
			}

			// recognize the face id
			// we only do recognition every 5 frames,interval
			char  *sFaceCaption = NULL;
			char  sFaceCaptionBuff[256];
            int face_id = 0;
			probFaceID = 0;
			if ( recog && bLandmark )
			{
				ticks = cvGetTickCount();
				float face_threshold = faceRecognizer.getDefThreshold(); 
				/////////////////////////////////////////////////////////////////////////////////////////
				face_id  = -1; // assign the outer face_id declared above so the log line below sees the result
				if(bEnableAutoCluster & !is_piclist)
				{
					bool bAutocluster = true;
					if(mouse_faceparam.ret_online_collecting) bAutocluster = false;
					//face clustering
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID, bAutocluster, face_trackid, frames);
				}
				else//face recognition
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID);
				/////////////////////////////////////////////////////////////////////////////////////////

				ticks    = cvGetTickCount() - ticks;
				recg_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );
				recg_total_ticks += ticks;
				
				// smooth prediction result
                if( ! is_piclist && !bEnableAutoCluster)
                {
				    if(probFaceID > face_threshold*1.0)
					    face_id = faceRecognizer.voteLabel(face_trackid, face_id); 
				    else
					    face_id = faceRecognizer.voteLabel(face_trackid, -1);
                }
				else if(probFaceID <= face_threshold)
				{
					face_id =-1;
				}

				//set face name caption
				if(face_id >= 0)
				{
					// recognized face name
					const char* sFaceName = faceRecognizer.getFaceName(face_id);
					sprintf(sFaceCaptionBuff, "%s %.2f", sFaceName, probFaceID);
					//sprintf(sFaceCaptionBuff, "%s", sFaceName); //dispaly score
					sFaceCaption = sFaceCaptionBuff;
					
					print_score  = probFaceID;
					print_faceid = face_id;
				}
				else
				{   // failed to recognize 
					//sprintf(sFaceCaptionBuff, "N\A %.2f", probFaceID);
					//sFaceCaption = sFaceCaptionBuff;
				}

				// collect and save unknown face exemplars
				if(probFaceID < face_threshold*0.9 || face_id != mouse_faceparam.ret_faceset_id )
				{
					if(mouse_faceparam.ret_online_collecting && (face_num ==1 || face_trackid == mouse_faceparam.ret_facetrack_id))
					{
						if( rect.x > gray_image.width()/4 && rect.x+rect.width < gray_image.width()*3/4 ) 
						{
							mouse_faceparam.updated = true;
							int nFaceSetIdx = faceRecognizer.getFaceSetIdx(mouse_faceparam.ret_faceset_id);
							bool bflag = faceRecognizer.tryInsertFace(cutFace.getBigCutFace(), nFaceSetIdx);
							//printf("insert flag %d\n", bflag);
						}
					}
				}
			}
			else recg_fps = 0;

			if( ! quiet )
			{
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

				//sprintf(sFaceCaptionBuff, "%.2f", print_score);
				//sFaceCaption = sFaceCaptionBuff;

				int trackid = -1; //face_trackid. don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            // log file
            if( fp_faceinfo != NULL )
            {
                // index,  rect,  landmark6,  bBlink, probBlink, bSmile, probSmile, bGender, probGender, nAgeID, probAge[nAgeID], face_id, probFaceID
				//fprintf( fp_faceinfo, "#%s# @%s@ ",    vidcap->filename(), sImgPath);
				fprintf( fp_faceinfo, "#%s# ",    vidcap->filename());
                fprintf( fp_faceinfo, "faceidx=( %06d %02d )", vidcap->index(), i+1 );
				fprintf( fp_faceinfo, "   rect=( %3d %3d %3d %3d )", rect.x, rect.y, rect.width, rect.height );
                fprintf( fp_faceinfo, "   landmark6=(" );
                int l;
                for( l = 0; l < 6; l++ )
                    fprintf( fp_faceinfo, " %3.0f %3.0f", landmark6[l].x, landmark6[l].y );
                fprintf( fp_faceinfo, " )");
                fprintf( fp_faceinfo, "   blink=( %+d %f )", bBlink, probBlink );
                fprintf( fp_faceinfo, "   smile=( %+d %f )", bSmile, probSmile );
                fprintf( fp_faceinfo, "   gender=( %+d %f )", bGender, probGender );
                fprintf( fp_faceinfo, "   agegroup=( %+d %f )", nAgeID, (nAgeID >= 0 && nAgeID < 4) ? probAge[nAgeID] : 1.0f );
                fprintf( fp_faceinfo, "   identity=( %+d %f )", face_id, probFaceID );
                fprintf( fp_faceinfo, "\n" );
            }
        }
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		ticks    = cvGetTickCount() - start_ticks;
		total_ticks += (ticks);
		frame_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );

		// frame face_num
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
	
		//show Image
		if (image.width() <= 800)
		{
			//show image
			cvShowImage( str_title, image );
		}
		else
		{   // show scaled smaller image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey( 30 );
		//int key = cvWaitKey( );
		if( key == ' ' ) // press the spacebar to pause the video play 
			cvWaitKey( 0 );                           
		else if( key == 27 )
			break;	    // press 'esc' to exit
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = rects[0].rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   // collect face exemplars for current selected facename
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			// turn on/off the autofocus flag
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting faces
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{   // save face models
				faceRecognizer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print speed info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );

	if (landmark_total_ticks != 0.0)
		landmark_fps = 1.0f  / ( landmark_total_ticks * temp / frames );

	if (align_total_ticks != 0.0)
		align_fps    = 1.0f  / ( align_total_ticks * temp / frames );

	if (blink_total_ticks != 0.0)
		blink_fps  = 1.0f  / (blink_total_ticks * temp / frames);

	if (smile_total_ticks != 0.0)
		smile_fps  = 1.0f  / (smile_total_ticks * temp / frames);

	if (gender_total_ticks != 0.0)
		gender_fps = 1.0f  / (gender_total_ticks * temp / frames);

	if (age_total_ticks != 0.0)
		age_fps = 1.0f  / (age_total_ticks * temp / frames);

	if (recg_total_ticks != 0.0)
		recg_fps   = 1.0f  / (recg_total_ticks  * temp / frames);

	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: Fd:%.1f Ld:%.1f Fa:%.1f Bl:%.1f Sm:%.1f Ge:%.1f Ag:%.1f Rc:%.1f",
		tracker_fps, landmark_fps, align_fps, 
		blink_fps,   smile_fps,    gender_fps, age_fps, recg_fps);

	//save updated face model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceRecognizer.saveFaceModelXML("faceset_model.xml");
	}

	
	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight = 10;
	faceRecognizer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceRecognizer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);
	//faceRecognizer.saveFaceModelXML("faceset_modelMerged#.xml");

	//release buff 
	
	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&thumbnailImg);
	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );
	
    if( fp_faceinfo != NULL )
        fclose( fp_faceinfo );

    return 0;
}
Code example #20
File: window.cpp  Project: cnr-isti-vclab/piccante
void Window::zoom_out()
{
    scale_image(0.8);
}
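Note: zoom_in() (example #15) scales by 1.25 and zoom_out() by 0.8 = 1/1.25, so one step out exactly undoes one step in, up to any rounding done inside scale_image.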
Code example #21
File: gfx.c  Project: mjmeehan/engine9
void
gfx_loaddata ()
{
    int i, j;
    char filename[255];
    SDL_Surface *tmpimage,
                *tmpimage1;

    /* load the logo */
    sprintf (filename, "%s/gfx/logo.png", bman.datapath);
    tmpimage = IMG_Load (filename);
    if (tmpimage == NULL) {
        printf ("Can't load image: %s\n", SDL_GetError ());
        exit (1);
    }
    tmpimage1 = scale_image (tmpimage, gfx.res.x, gfx.res.y);
    SDL_FreeSurface (tmpimage);
    SDL_SetColorKey (tmpimage1, SDL_SRCCOLORKEY, SDL_MapRGB (tmpimage1->format, 255, 255, 0));
    gfx.logo = SDL_DisplayFormat (tmpimage1);
    SDL_FreeSurface (tmpimage1);

    font_load ();

    /* load the menugraphics */
    for (i = 0; i < 9; i++) {
        sprintf (filename, "%s/gfx/menu%d.png", bman.datapath, i);
        menuimages[i] = IMG_Load (filename);
        if (menuimages[i] == NULL) {
            printf ("Can't load image: %s\n", SDL_GetError ());
            exit (1);
        }
    }

    /* load menu buttongraphic */
    for (j = 0; j < 3; j++)
        for (i = 0; i < 3; i++) {
            sprintf (filename, "%s/gfx/menubutton%d_%d.png", bman.datapath, j, i);
            tmpimage = IMG_Load (filename);
            if (tmpimage == NULL) {
                printf ("Can't load image: %s\n", SDL_GetError ());
                exit (1);
            }
            SDL_SetColorKey (tmpimage, SDL_SRCCOLORKEY, SDL_MapRGB (tmpimage->format, 255, 255, 255));
            menubuttonimages[j][i] = SDL_DisplayFormat (tmpimage);
            SDL_FreeSurface (tmpimage);
        }

    /* load menu buttongraphic */
    for (j = 0; j < 2; j++)
        for (i = 0; i < 3; i++) {
            sprintf (filename, "%s/gfx/menuentry%d_%d.png", bman.datapath, j, i);
            tmpimage = IMG_Load (filename);
            if (tmpimage == NULL) {
                printf ("Can't load image: %s\n", SDL_GetError ());
                exit (1);
            }
            SDL_SetColorKey (tmpimage, SDL_SRCCOLORKEY, SDL_MapRGB (tmpimage->format, 255, 255, 255));
            menuentryimages[j][i] = SDL_DisplayFormat (tmpimage);
            SDL_FreeSurface (tmpimage);
        }

    /* load menu listgraphic */
    for (j = 0; j < 2; j++)
        for (i = 0; i < 9; i++) {
            sprintf (filename, "%s/gfx/menulist%d_%d.png", bman.datapath, j, i);
            tmpimage = IMG_Load (filename);
            if (tmpimage == NULL) {
                printf ("Can't load image: %s\n", SDL_GetError ());
                exit (1);
            }
            SDL_SetColorKey (tmpimage, SDL_SRCCOLORKEY, SDL_MapRGB (tmpimage->format, 255, 255, 255));
            menulistimages[j][i] = SDL_DisplayFormat (tmpimage);
            SDL_FreeSurface (tmpimage);
        }

    /* load menuselect animation */
    sprintf (filename, "%s/gfx/menuselect.png", bman.datapath);
    gfx.menuselect.image = IMG_Load (filename);
    if (gfx.menuselect.image == NULL) {
        printf ("Can't load image: %s\n", SDL_GetError ());
        exit (1);
    }
    gfx.menuselect.frames = gfx.menuselect.image->h / (2 * GFX_IMGSIZE); /* tmpimage was already freed above */

    gfx_load_fieldtype_menu ();
    gfx_load_menusmall_players ();
};