int CAMERA_MISC::close() {
    if (running) stop();
    if (running) return -1;   // stop() failed, camera is still running
    if (!ready)  return -1;   // nothing to close
    raspiCamCvReleaseCapture(&capture);
    ready = false;
    //log->writeLogLine("Connection to Camera closed");
    return 0;
}
void acq_image_thread_func(void * lpParam) {
    int i = 0;
    unsigned long t_start, t_stop;
    unsigned long t_diff;

    RASPIVID_CONFIG * config = (RASPIVID_CONFIG*)malloc(sizeof(RASPIVID_CONFIG));
    RASPIVID_PROPERTIES * properties = (RASPIVID_PROPERTIES*)malloc(sizeof(RASPIVID_PROPERTIES));

    config->width = 640;
    config->height = 480;
    config->bitrate = 0;      // zero: leave as default
    config->framerate = 30;
    config->monochrome = 1;

    properties->hflip = 1;
    properties->vflip = 1;
    properties->sharpness = 0;
    properties->contrast = 0;
    properties->brightness = 45;
    properties->saturation = 0;
    properties->exposure = SPORTS;
    properties->shutter_speed = 0;    // 0 is auto

    printf("Init sensor \n");
    capture = (RaspiCamCvCapture *) raspiCamCvCreateCameraCapture3(0, config, properties, 1);
    free(config);

    // Let the sensor settle: grab and discard the first 30 frames
    printf("Wait stable sensor \n");
    for (i = 0; i < 30; ) {
        int success = raspiCamCvGrab(capture);
        if (success) {
            IplImage* image = raspiCamCvRetrieve(capture);
            (void)image;    // warm-up frame, intentionally discarded
            i++;
        }
    }
    i = 0;

    printf("Start Capture !\n");
    t_start = get_long_time();
    while (i < nb_frames && thread_alive) {
        int success = raspiCamCvGrab(capture);
        if (success) {
            IplImage* image = raspiCamCvRetrieve(capture);
            t_diff = get_long_time();    // timestamp for this frame
            if (push_frame(image, t_diff, &my_frame_buffer) < 0)
                printf("lost frame %d ! \n", i);
            i++;
            usleep(1000);
        }
    }
    t_stop = get_long_time();

    printf("Capture done \n");
    t_diff = t_stop - t_start;
    printf("Capture took %lu ms\n", t_diff / 1000L);
    //printf("Actual frame-rate was %f \n", nb_frames/t_diff);
    raspiCamCvReleaseCapture(&capture);
}
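A minimal sketch of how this acquisition thread might be started and stopped, assuming a pthread-based launcher. The globals capture, nb_frames, thread_alive and my_frame_buffer are the ones referenced above; the wrapper function, start_acquisition() and the 300-frame budget are illustrative assumptions, not part of the original code.

#include <pthread.h>

/* Hypothetical launcher for acq_image_thread_func (sketch only).
 * pthread expects a void*(*)(void*) entry point, so a thin wrapper adapts the signature. */
static void * acq_thread_entry(void * lpParam) {
    acq_image_thread_func(lpParam);   // runs until nb_frames reached or thread_alive is cleared
    return NULL;
}

int start_acquisition(void) {
    pthread_t tid;
    thread_alive = 1;                 // global flag polled by the capture loop
    nb_frames = 300;                  // illustrative frame budget
    if (pthread_create(&tid, NULL, acq_thread_entry, NULL) != 0)
        return -1;
    /* ... a consumer would pop frames from my_frame_buffer here ... */
    pthread_join(tid, NULL);          // or set thread_alive = 0 to stop early
    return 0;
}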
RaspiCamCvCapture * raspiCamCvCreateCameraCapture(int index) {
    RaspiCamCvCapture * capture = (RaspiCamCvCapture*)malloc(sizeof(RaspiCamCvCapture));
    // Our main data storage vessel..
    RASPIVID_STATE * state = (RASPIVID_STATE*)malloc(sizeof(RASPIVID_STATE));
    capture->pState = state;

    MMAL_STATUS_T status = -1;
    MMAL_PORT_T *camera_video_port = NULL;
    MMAL_PORT_T *camera_still_port = NULL;

    bcm_host_init();

    // read default status
    default_status(state);
    int w = state->width;
    int h = state->height;

    state->py = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);          // Y component of YUV I420 frame
    if (state->graymode == 0) {
        state->pu = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);  // U component of YUV I420 frame
        state->pv = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);  // V component of YUV I420 frame
    }
    vcos_semaphore_create(&state->capture_sem, "Capture-Sem", 0);
    vcos_semaphore_create(&state->capture_done_sem, "Capture-Done-Sem", 0);
    if (state->graymode == 0) {
        state->pu_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
        state->pv_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
        state->yuvImage = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);
        state->dstImage = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3); // final picture to display
    }

    // create camera
    if (!create_camera_component(state)) {
        vcos_log_error("%s: Failed to create camera component", __func__);
        raspiCamCvReleaseCapture(&capture);
        return NULL;
    }

    camera_video_port = state->camera_component->output[MMAL_CAMERA_VIDEO_PORT];
    camera_still_port = state->camera_component->output[MMAL_CAMERA_CAPTURE_PORT];

    // assign data to use for callback
    camera_video_port->userdata = (struct MMAL_PORT_USERDATA_T *)state;

    // start capture
    if (mmal_port_parameter_set_boolean(camera_video_port, MMAL_PARAMETER_CAPTURE, 1) != MMAL_SUCCESS) {
        vcos_log_error("%s: Failed to start capture", __func__);
        raspiCamCvReleaseCapture(&capture);
        return NULL;
    }

    // Send all the buffers to the video port
    int num = mmal_queue_length(state->video_pool->queue);
    int q;
    for (q = 0; q < num; q++) {
        MMAL_BUFFER_HEADER_T *buffer = mmal_queue_get(state->video_pool->queue);
        if (!buffer)
            vcos_log_error("Unable to get a required buffer %d from pool queue", q);
        if (mmal_port_send_buffer(camera_video_port, buffer) != MMAL_SUCCESS)
            vcos_log_error("Unable to send a buffer to encoder output port (%d)", q);
    }

    //mmal_status_to_int(status);
    // Disable all our ports that are not handled by connections
    //check_disable_port(camera_still_port);
    //if (status != 0)
    //    raspicamcontrol_check_configuration(128);

    vcos_semaphore_wait(&state->capture_done_sem);
    return capture;
}
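For reference, the client-side pattern for this capture object, as used by the programs below, is simply create, query frames in a loop, release. The short sketch below assumes only the functions already shown in these listings; the header name is an assumption.

#include <opencv/highgui.h>
#include "RaspiCamCV.h"   // assumed header exposing the raspiCamCv* API

int main(void) {
    // The index argument is not meaningful for the wrapper; see raspiCamCvCreateCameraCapture above
    RaspiCamCvCapture * capture = raspiCamCvCreateCameraCapture(0);
    if (!capture) return -1;

    cvNamedWindow("preview", 1);
    while (cvWaitKey(10) < 0) {                              // loop until any key is pressed
        IplImage * frame = raspiCamCvQueryFrame(capture);    // blocks until the next frame is ready
        cvShowImage("preview", frame);
    }
    cvDestroyWindow("preview");
    raspiCamCvReleaseCapture(&capture);
    return 0;
}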
int main(int argc, char** argv) {
    // Initial time measurement
    clock_t t0, t_load, t_loop, t_loop0;
    double dtime;
    t0 = clock();

    // Declaration of variables
    CascadeClassifier face_cascade;             // Cascade classifier
    Mat captureFrame, grayscaleFrame;           // Captured frame and its grayscale conversion
    double x_face_pos, y_face_pos, area_face;   // Coordinates of the detected face
    vector< Rect_<int> > faces;                 // Vector of faces
#ifdef RASPBERRY
    RaspiCamCvCapture * captureDevice;          // Video input
#else
    CvCapture * captureDevice;                  // Video input
#endif
    char sTmp[255];

#ifdef TRACE
    sprintf(sTmp, "\n Program execution directory: ");
    trace(sTmp);
    cout << get_ProgramDirectory();
#endif

#ifdef RECOGNITION
    // Declaration of variables for face recognition
    string people[MAX_PEOPLE];      // Each person in the face recognition model
    int im_width, im_height;        // Width and height of the first images of the face recognition model
    int prediction_seuil;           // Prediction threshold
    Ptr<FaceRecognizer> model;      // Face recognition model

    // Prediction threshold depending on the algorithm
#ifdef EIGENFACES
    prediction_seuil = 10;
#else
    prediction_seuil = 1000;
#endif

    // Face recognition model
#ifdef EIGENFACES
    model = createEigenFaceRecognizer();
#else
    model = createFisherFaceRecognizer();
#endif

    // Read measures file
    read_measures_file(im_width, im_height, '=');
    // Read people file
    read_people_file(people, '(', ')', '=');
    // Load model
    load_model_recognition(model);
    t_load = clock();

#ifdef DEBUG
    dtime = difftime(t_load, t0);
    sprintf(sTmp, "\n (Face Tracking) model load time = ");
    debug(sTmp);
    cout << print_time(dtime);
#endif
#endif

    // Video input depending on the device
#ifdef RASPBERRY
    captureDevice = raspiCamCvCreateCameraCapture(0);   // Index doesn't really matter
#else
    captureDevice = cvCreateCameraCapture(0);
#endif

    // Load the Haar cascade
    if (!load_haar_cascade(face_cascade)) { return -1; }

    // Create the display window (used when SHOW is defined)
    cvNamedWindow("Face tracking", 1);

    do {
        t_loop0 = clock();
#ifdef RASPBERRY
        IplImage* image = raspiCamCvQueryFrame(captureDevice);  // Get an image from the video input
#else
        IplImage* image = cvQueryFrame(captureDevice);          // Get an image from the video input
#endif
        captureFrame = cvarrToMat(image);                        // Convert the image to Mat
        cvtColor(captureFrame, grayscaleFrame, CV_RGB2GRAY);     // Convert the image to grayscale

        // Face detection
        face_detection(face_cascade, grayscaleFrame, captureFrame, &faces, x_face_pos, y_face_pos, area_face);

#ifdef RECOGNITION
        // Face recognition
        face_recognition(people, grayscaleFrame, captureFrame, &faces, im_width, im_height, model, prediction_seuil, x_face_pos, y_face_pos, area_face);
#endif

        // Display results
#ifdef SHOW
        imshow("Face tracking", captureFrame);
#endif
        t_loop = clock();
#ifdef DEBUG
        dtime = difftime(t_loop, t_loop0);
        sprintf(sTmp, "\n (Face Tracking) face recognition loop time = ");
        debug(sTmp);
        cout << print_time(dtime);
#endif
    } while (cvWaitKey(10) < 0);

    // Destroy window
#ifdef SHOW
    cvDestroyWindow("Face tracking");
#endif
#ifdef TRACE
    trace("\n");
#endif
#ifdef RASPBERRY
    raspiCamCvReleaseCapture(&captureDevice);
#else
    cvReleaseCapture(&captureDevice);
#endif
    return 0;
}
RaspiCamCvCapture * raspiCamCvCreateCameraCapture(int index) {
    RaspiCamCvCapture * capture = (RaspiCamCvCapture*)malloc(sizeof(RaspiCamCvCapture));
    // Our main data storage vessel..
    RASPIVID_STATE * state = (RASPIVID_STATE*)malloc(sizeof(RASPIVID_STATE));
    capture->pState = state;

    MMAL_STATUS_T status = -1;
    MMAL_PORT_T *camera_video_port = NULL;
    MMAL_PORT_T *camera_video_port_2 = NULL;
    //MMAL_PORT_T *camera_still_port = NULL;
    MMAL_PORT_T *encoder_input_port = NULL;
    MMAL_PORT_T *encoder_output_port = NULL;

    bcm_host_init();

    // read default status
    default_status(state);
    int w = state->width;
    int h = state->height;

    state->py = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);          // Y plane of the YUV I420 frame (allocated with 3 channels in this variant)
    if (state->graymode == 0) {
        state->pu = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);  // U component of YUV I420 frame
        state->pv = cvCreateImage(cvSize(w/2,h/2), IPL_DEPTH_8U, 1);  // V component of YUV I420 frame
    }
    vcos_semaphore_create(&state->capture_sem, "Capture-Sem", 0);
    vcos_semaphore_create(&state->capture_done_sem, "Capture-Done-Sem", 0);
    if (state->graymode == 0) {
        state->pu_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
        state->pv_big = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1);
        state->yuvImage = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3);
        state->dstImage = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 3); // final picture to display
    }
    //printf("point2.0\n");

    // create camera and encoder components
    if (!create_camera_component(state)) {
        vcos_log_error("%s: Failed to create camera component", __func__);
        raspiCamCvReleaseCapture(&capture);
        return NULL;
    }
    else if ((status = create_encoder_component(state)) != MMAL_SUCCESS) {
        vcos_log_error("%s: Failed to create encode component", __func__);
        destroy_camera_component(state);
        return NULL;
    }
    //printf("point2.1\n");
    //create_encoder_component(state);

    camera_video_port   = state->camera_component->output[MMAL_CAMERA_VIDEO_PORT];
    camera_video_port_2 = state->camera_component->output[MMAL_CAMERA_VIDEO_PORT_2];
    //camera_still_port = state->camera_component->output[MMAL_CAMERA_CAPTURE_PORT];
    encoder_input_port  = state->encoder_component->input[0];
    encoder_output_port = state->encoder_component->output[0];

    // assign data to use for callbacks
    camera_video_port->userdata   = (struct MMAL_PORT_USERDATA_T *)state;
    camera_video_port_2->userdata = (struct MMAL_PORT_USERDATA_T *)state;

    // Now connect the camera to the encoder
    status = connect_ports(camera_video_port_2, encoder_input_port, &state->encoder_connection);
    if (status != MMAL_SUCCESS) {
        state->encoder_connection = NULL;
        vcos_log_error("%s: Failed to connect camera video port to encoder input", __func__);
        return NULL;
    }

    if (state->filename) {
        if (state->filename[0] == '-') {
            state->callback_data.file_handle = stdout;
        }
        else {
            state->callback_data.file_handle = open_filename(state);
        }
        if (!state->callback_data.file_handle) {
            // Notify user, carry on but discarding encoded output buffers
            vcos_log_error("%s: Error opening output file: %s\nNo output file will be generated\n", __func__, state->filename);
        }
    }

    // Set up our userdata - this is passed through to the callback where we need the information.
    state->callback_data.pstate = state;
    state->callback_data.abort = 0;
    encoder_output_port->userdata = (struct MMAL_PORT_USERDATA_T *)&state->callback_data;

    // Enable the encoder output port and tell it its callback function
    status = mmal_port_enable(encoder_output_port, encoder_buffer_callback);
    if (status != MMAL_SUCCESS) {
        vcos_log_error("Failed to setup encoder output");
        return NULL;
    }
    //mmal_port_enable(encoder_output_port, encoder_buffer_callback);

    // Only encode stuff if we have a filename and it opened
    // Note we use the copy in the callback, as the callback MIGHT change the file handle
    if (state->callback_data.file_handle) {
        int running = 1;
        // Send all the buffers to the encoder output port
        {
            int num = mmal_queue_length(state->encoder_pool->queue);
            int q;
            for (q = 0; q < num; q++) {
                MMAL_BUFFER_HEADER_T *buffer2 = mmal_queue_get(state->encoder_pool->queue);
                if (!buffer2)
                    vcos_log_error("Unable to get a required buffer %d from pool queue", q);
                if (mmal_port_send_buffer(encoder_output_port, buffer2) != MMAL_SUCCESS)
                    vcos_log_error("Unable to send a buffer to encoder output port (%d)", q);
            }
        }
    }

    // start capture
    if (mmal_port_parameter_set_boolean(camera_video_port_2, MMAL_PARAMETER_CAPTURE, 1) != MMAL_SUCCESS) {
        vcos_log_error("%s: Failed to start capture", __func__);
        raspiCamCvReleaseCapture(&capture);
        return NULL;
    }

    // Send all the buffers to the video port
    int num = mmal_queue_length(state->video_pool->queue);
    int q;
    for (q = 0; q < num; q++) {
        MMAL_BUFFER_HEADER_T *buffer = mmal_queue_get(state->video_pool->queue);
        if (!buffer)
            vcos_log_error("Unable to get a required buffer %d from pool queue", q);
        if (mmal_port_send_buffer(camera_video_port, buffer) != MMAL_SUCCESS)
            vcos_log_error("Unable to send a buffer to encoder output port (%d)", q);
    }

    //mmal_status_to_int(status);
    // Disable all our ports that are not handled by connections
    //check_disable_port(camera_still_port);
    //if (status != 0)
    //    raspicamcontrol_check_configuration(128);

    vcos_semaphore_wait(&state->capture_done_sem);
    return capture;
}
int main(int argc, char* argv[]) {
    printf("Press Esc-Key to Exit Process.\n");

    RASPIVID_CONFIG * config = new RASPIVID_CONFIG();
    if (!config) {
        printf("failed to create RASPIVID_CONFIG.\n");
        return -1;
    }
    config->width = static_cast<int>(WIN_WIDTH);
    config->height = static_cast<int>(WIN_HEIGHT);
    config->bitrate = 0;     // zero: leave as default
    config->framerate = 0;
    config->monochrome = 0;

    cvNamedWindow(DISP_WIN, CV_WINDOW_AUTOSIZE);

    RaspiCamCvCapture* capture = NULL;
    capture = raspiCamCvCreateCameraCapture2(0, config);
    if (config) {
        delete config;
        config = NULL;
    }
    if (!capture) {
        printf("failed to create capture\n");
        return -1;
    }

    // Set the capture size
    double w = WIN_WIDTH;
    double h = WIN_HEIGHT;
    raspiCamCvSetCaptureProperty(capture, RPI_CAP_PROP_FRAME_WIDTH, w);
    raspiCamCvSetCaptureProperty(capture, RPI_CAP_PROP_FRAME_HEIGHT, h);

    // Load the frontal face detector
    CvHaarClassifierCascade* cvHCC = (CvHaarClassifierCascade*)cvLoad(CASCADE, NULL, NULL, NULL);
    // Allocate the memory storage needed for detection
    CvMemStorage* cvMStr = cvCreateMemStorage(0);

    while (1) {
        IplImage* frame = raspiCamCvQueryFrame(capture);
        if (!frame) {
            printf("failed to query frame.\n");
            break;
        }

        // Detect faces in the image
        CvSeq* face = cvHaarDetectObjects(frame, cvHCC, cvMStr, 1.2, 2, CV_HAAR_DO_CANNY_PRUNING, minsiz, minsiz);
        if (!face) {
            printf("failed to detect objects.\n");
            break;
        }

        int i = 0;
        for (i = 0; i < face->total; i++) {
            // Get the face position from the detection results
            CvRect* faceRect = (CvRect*)cvGetSeqElem(face, i);
            if (!faceRect) {
                printf("failed to get Face-Rect.\n");
                break;
            }
            // Draw a rectangle around the detected face
            cvRectangle(frame
                , cvPoint(faceRect->x, faceRect->y)
                , cvPoint(faceRect->x + faceRect->width, faceRect->y + faceRect->height)
                , CV_RGB(255, 0, 0)
                , 2
                , CV_AA
                , 0);
        }

        cvShowImage(DISP_WIN, frame);

        char c = cvWaitKey(DELAY_MSEC);
        if (c == 27) {   // ESC key
            break;
        }
        sleep(0);
    }

    // Release the memory storage
    cvReleaseMemStorage(&cvMStr);
    // Release the cascade classifier
    cvReleaseHaarClassifierCascade(&cvHCC);
    raspiCamCvReleaseCapture(&capture);
    cvDestroyWindow(DISP_WIN);
    return 0;
}
int main(int argc, char *argv[]) {
    char SETTINGS_FILE[] = "settings.txt";

    RASPIVID_CONFIG * config = (RASPIVID_CONFIG*)malloc(sizeof(RASPIVID_CONFIG));
    int full_width = 2592;
    int full_height = 1944;
    config->width = full_width;
    config->height = full_height;
    config->bitrate = 0;     // zero: leave as default
    config->framerate = 15;
    config->monochrome = 1;

    RaspiCamCvCapture * capture = (RaspiCamCvCapture *) raspiCamCvCreateCameraCapture2(0, config);
    free(config);

    CvFont font;
    double hScale = 0.8;
    double vScale = 0.8;
    int lineWidth = 2;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale, vScale, 0, lineWidth, 8);

    IplImage* tmp = 0;
    IplImage* image = 0;

    cvNamedWindow("RaspiCamTest", 1);
    cvMoveWindow("RaspiCamTest", 100, 100);

    int viewport_x = 0;
    int viewport_y = -400;
    int viewport_width = 640;
    int viewport_height = 480;
    CvScalar sharpness = {100, 5.0, 1.0, .10};
    int flip = 0;
    int full_view = 0;

    // Restore the saved viewport position, if any
    FILE * f = fopen(SETTINGS_FILE, "r");
    if (f) {
        fscanf(f, "%d %d %d\n", &viewport_x, &viewport_y, &flip);
        fclose(f);
    }

    int exit = 0;
    do {
        IplImage* big_img = raspiCamCvQueryFrame(capture);
        if (!image) {
            image = cvCreateImage(cvSize(viewport_width, viewport_height), big_img->depth, 3);
        }
        if (!tmp) {
            tmp = cvCreateImage(cvSize(viewport_width, viewport_height), big_img->depth, 1);
        }

        if (!full_view) {
            // Crop the viewport out of the full-resolution frame
            CvRect cropRect = cvRect(
                (big_img->width - viewport_width) / 2 + viewport_x
                , (big_img->height - viewport_height) / 2 + viewport_y
                , viewport_width
                , viewport_height);
            cvSetImageROI(big_img, cropRect);
            CvRect destRect = cvRect(0, 0, viewport_width, viewport_height);
            cvSetImageROI(image, destRect);
            cvCvtColor(big_img, image, CV_GRAY2BGR);
            destRect = cvRect(0, 0, viewport_width, viewport_height);
            cvSetImageROI(image, destRect);
        }
        else {
            // Show the whole sensor frame, with the viewport outlined
            CvRect cropRect = cvRect(0, 0, full_width, full_height);
            cvSetImageROI(big_img, cropRect);
            cvRectangle(big_img
                , cvPoint((big_img->width - viewport_width) / 2 + viewport_x, (big_img->height - viewport_height) / 2 + viewport_y)
                , cvPoint((big_img->width + viewport_width) / 2 + viewport_x, (big_img->height + viewport_height) / 2 + viewport_y)
                , CV_RGB(0, 255, 0));
            cvResize(big_img, tmp);
            cvCvtColor(tmp, image, CV_GRAY2BGR);
        }

        if (flip) {
            cvFlip(image, 0, -1);
        }

        sharpness = GetSharpness(big_img, image);
        double threshold = 2.8;
        char text[200];
        if (full_view) {
            sprintf(text, "full view");
            cvPutText(image, text, cvPoint(05, 360), &font, cvScalar(255, 0, 0, 0));
        }
        sprintf(text, (sharpness.val[1] > threshold ? "** OK **" : "!! keep going !!"));
        cvPutText(image, text, cvPoint(05, 400), &font, cvScalar(255, 0, 0, 0));
        sprintf(text, "Sharpness: %.0f (%.2f) x=%d y=%d", sharpness.val[0], sharpness.val[1], viewport_x, viewport_y);
        cvPutText(image, text, cvPoint(05, 440), &font, cvScalar(255, 0, 0, 0));

        // Crosshair for centering
        cvLine(image, cvPoint(0, 240), cvPoint(639, 240), CV_RGB(255, 0, 0));
        cvLine(image, cvPoint(320, 0), cvPoint(320, 479), CV_RGB(255, 0, 0));
        cvCircle(image, cvPoint(320, 240), 100, CV_RGB(255, 0, 0));

        cvShowImage("RaspiCamTest", image);

        char key = cvWaitKey(10);
        // Arrow-key handling: the codes are remapped when the image is flipped
        if (flip) {
            switch (key) {
            case 83: // left
                viewport_x -= 10;
                if (viewport_x < -(full_width - viewport_width) / 2) { viewport_x = -(full_width - viewport_width) / 2; }
                break;
            case 84: // up
                viewport_y -= 10;
                if (viewport_y < -(full_height - viewport_height) / 2) { viewport_y = -(full_height - viewport_height) / 2; }
                break;
            case 81: // right
                viewport_x += 10;
                if (viewport_x > (full_width - viewport_width) / 2) { viewport_x = (full_width - viewport_width) / 2; }
                break;
            case 82: // down
                viewport_y += 10;
                if (viewport_y > (full_height - viewport_height) / 2) { viewport_y = (full_height - viewport_height) / 2; }
                break;
            }
        }
        else {
            switch (key) {
            case 81: // left
                viewport_x -= 10;
                if (viewport_x < -(full_width - viewport_width) / 2) { viewport_x = -(full_width - viewport_width) / 2; }
                break;
            case 82: // up
                viewport_y -= 10;
                if (viewport_y < -(full_height - viewport_height) / 2) { viewport_y = -(full_height - viewport_height) / 2; }
                break;
            case 83: // right
                viewport_x += 10;
                if (viewport_x > (full_width - viewport_width) / 2) { viewport_x = (full_width - viewport_width) / 2; }
                break;
            case 84: // down
                viewport_y += 10;
                if (viewport_y > (full_height - viewport_height) / 2) { viewport_y = (full_height - viewport_height) / 2; }
                break;
            }
        }

        switch (key) {
        case 's': // save current position
        {
            FILE *f = fopen(SETTINGS_FILE, "w");
            if (f) {
                fprintf(f, "%d %d %d\n", viewport_x, viewport_y, flip);
                fclose(f);
            }
            break;
        }
        case 'r': // read position
        {
            FILE *f = fopen(SETTINGS_FILE, "r");
            if (f) {
                fscanf(f, "%d %d %d\n", &viewport_x, &viewport_y, &flip);
                fclose(f);
            }
            break;
        }
        case 'c':
            viewport_x = viewport_y = 0;
            break;
        case 'f':
            flip = !flip;
            break;
        case 'x':
            system("sudo halt");
            break;
        case 27: // Esc to exit
            exit = 1;
            break;
        }
    } while (!exit);

    cvDestroyWindow("RaspiCamTest");
    raspiCamCvReleaseCapture(&capture);
    return 0;
}
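GetSharpness() is defined elsewhere in this project. As a rough illustration of what such a focus metric can look like, the sketch below computes a Laplacian-based score on the cropped grayscale frame. The name and return convention (val[0] raw score, val[1] normalised score compared against the 2.8 threshold) match the caller above, but the implementation and the normalisation constant are assumptions, not the project's actual code.

/* Illustrative focus metric (assumption: the real GetSharpness may differ).
 * gray: single-channel capture with the viewport ROI set; disp: unused here. */
CvScalar GetSharpness(IplImage * gray, IplImage * disp) {
    CvSize sz = cvGetSize(gray);                     // honours the current ROI
    IplImage * lap = cvCreateImage(sz, IPL_DEPTH_16S, 1);
    cvLaplace(gray, lap, 3);                         // second-derivative response

    CvScalar mean, stddev;
    cvAvgSdv(lap, &mean, &stddev, NULL);             // spread of the Laplacian over the ROI
    cvReleaseImage(&lap);

    CvScalar result;
    result.val[0] = stddev.val[0] * stddev.val[0];   // raw score: Laplacian variance
    result.val[1] = stddev.val[0] / 10.0;            // assumed normalisation for the threshold
    result.val[2] = result.val[3] = 0;
    (void)disp;
    return result;
}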
int main(int argc, char *argv[]) {
    RASPIVID_CONFIG * config = (RASPIVID_CONFIG*)malloc(sizeof(RASPIVID_CONFIG));
    config->width = 320;
    config->height = 240;
    config->bitrate = 0;     // zero: leave as default
    config->framerate = 0;
    config->monochrome = 0;

    int opt;
    while ((opt = getopt(argc, argv, "lxm")) != -1) {
        switch (opt) {
        case 'l': // large
            config->width = 640;
            config->height = 480;
            break;
        case 'x': // extra large
            config->width = 960;
            config->height = 720;
            break;
        case 'm': // monochrome
            config->monochrome = 1;
            break;
        default:
            fprintf(stderr, "Usage: %s [-x] [-l] [-m] \n", argv[0]);
            fprintf(stderr, "-l: Large mode\n");
            fprintf(stderr, "-x: Extra large mode\n");
            fprintf(stderr, "-m: Monochrome mode\n");
            exit(EXIT_FAILURE);
        }
    }

    /* Could also use the hard-coded defaults method: raspiCamCvCreateCameraCapture(0) */
    RaspiCamCvCapture * capture = (RaspiCamCvCapture *) raspiCamCvCreateCameraCapture2(0, config);
    free(config);

    CvFont font;
    double hScale = 0.4;
    double vScale = 0.4;
    int lineWidth = 1;
    cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, hScale, vScale, 0, lineWidth, 8);

    cvNamedWindow("RaspiCamTest", 1);

    int exit = 0;
    do {
        IplImage* image = raspiCamCvQueryFrame(capture);

        char text[200];
        sprintf(text, "w=%.0f h=%.0f fps=%.0f bitrate=%.0f monochrome=%.0f"
            , raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_FRAME_WIDTH)
            , raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_FRAME_HEIGHT)
            , raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_FPS)
            , raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_BITRATE)
            , raspiCamCvGetCaptureProperty(capture, RPI_CAP_PROP_MONOCHROME));
        cvPutText(image, text, cvPoint(05, 40), &font, cvScalar(255, 255, 0, 0));

        sprintf(text, "Press ESC to exit");
        cvPutText(image, text, cvPoint(05, 80), &font, cvScalar(255, 255, 0, 0));

        cvShowImage("RaspiCamTest", image);

        char key = cvWaitKey(10);
        switch (key) {
        case 27: // Esc to exit
            exit = 1;
            break;
        case 60: // < (less than)
            raspiCamCvSetCaptureProperty(capture, RPI_CAP_PROP_FPS, 25); // Currently NOOP
            break;
        case 62: // > (greater than)
            raspiCamCvSetCaptureProperty(capture, RPI_CAP_PROP_FPS, 30); // Currently NOOP
            break;
        }
    } while (!exit);

    cvDestroyWindow("RaspiCamTest");
    raspiCamCvReleaseCapture(&capture);
    return 0;
}