int main(int argc, char** argv) { struct stat sb; if (!(stat(OUTPUT_DIR, &sb) == 0 && S_ISDIR(sb.st_mode))) { if (mkdir(OUTPUT_DIR, S_IRWXU | S_IRGRP | S_IROTH | S_IXGRP | S_IXOTH) != 0) { printf("Error creating directory %s\n",OUTPUT_DIR); return -1; } } char tempFileName[512]; sprintf(tempFileName, "%stest.txt",OUTPUT_DIR); FILE* tempFile = fopen(tempFileName, "w+"); if (tempFile == NULL) { printf("Failed to create file in current folder. Please check permissions.\n"); return -1; } fclose(tempFile); remove(tempFileName); //if we get here, we know the directory exists and we can write to it //now set up, start, and grab image from camera fc2Context context; fc2PGRGuid guid; fc2Image raw_image, converted_image; Imlib_Image temp_image; check_point_grey(fc2CreateContext(&context)); check_point_grey(fc2GetCameraFromIndex(context,0,&guid)); check_point_grey(fc2Connect(context,&guid)); check_point_grey(fc2SetDefaultColorProcessing(FC2_IPP)); check_point_grey(fc2SetVideoModeAndFrameRate(context, FC2_VIDEOMODE_1280x960Y8, FC2_FRAMERATE_15)); PrintCameraInfo(context); check_point_grey(fc2CreateImage(&raw_image)); check_point_grey(fc2CreateImage(&converted_image)); check_point_grey(fc2StartCapture(context)); check_point_grey(fc2RetrieveBuffer(context,&raw_image)); check_point_grey(fc2ConvertImageTo(FC2_PIXEL_FORMAT_BGRU,&raw_image,&converted_image)); temp_image = imlib_create_image_using_copied_data(converted_image.cols, converted_image.rows, (unsigned int *) converted_image.pData); imlib_context_set_image(temp_image); char filename[512]; sprintf(filename,"%stest.ppm",OUTPUT_DIR); imlib_save_image(filename); printf("Saved %s\n",filename); //image saved, now clean up imlib_free_image_and_decache(); check_point_grey(fc2StopCapture(context)); check_point_grey(fc2DestroyContext(context)); check_point_grey(fc2DestroyImage(&raw_image)); check_point_grey(fc2DestroyImage(&converted_image)); printf("finished cleanup\n"); return 0; }
// // idlpgr_CreateImage // IDL_VPTR IDL_CDECL idlpgr_CreateImage(int argc, IDL_VPTR argv[]) { fc2Error error; fc2Image *image; IDL_MEMINT dim; dim = (IDL_MEMINT) sizeof(fc2Image); image = (fc2Image *) IDL_MemAlloc(dim, NULL, 0); error = fc2CreateImage(image); if (error) IDL_MessageFromBlock(msgs, M_IDLPGR_ERRORCODE, IDL_MSG_LONGJMP, "Could create image", error); return IDL_GettmpULong64((IDL_ULONG64) image); }
/*
 * Grab one frame from the camera into image_buf and report its hardware
 * timestamp.
 *
 * camera     - open camera wrapper whose ->context is an active capture.
 * image_buf  - destination buffer; must be at least rawImage.dataSize bytes
 *              (caller's responsibility -- size is not passed in).
 * frame_time - out: timestamp in 1394 cycle-timer units
 *              (cycleSeconds * 8000 + cycleCount; cycle counter runs at 8 kHz).
 *
 * Returns 0 on success, -1 if the buffer retrieval fails.
 */
int capture(fleaCamera* camera, void* image_buf, float* frame_time)
{
    fc2Error error;
    fc2Image rawImage;
    fc2TimeStamp ts;

    error = fc2CreateImage( &rawImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2CreateImage: %d\n", error );
    }

    // Retrieve the image
    error = fc2RetrieveBuffer( camera->context, &rawImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in retrieveBuffer: %d\n", error);
        // FIX: rawImage was leaked on this error path.
        fc2DestroyImage( &rawImage );
        return -1;
    }

    // Get and print out the time stamp
    ts = fc2GetImageTimeStamp( &rawImage);
    (*frame_time) = (ts.cycleSeconds * 8000) + ts.cycleCount;
    /*
    diff = (ts.cycleSeconds - prevTimestamp.cycleSeconds) * 8000
            + (ts.cycleCount - prevTimestamp.cycleCount);
    prevTimestamp = ts;
    printf( "timestamp [%d %d] - %d\n", ts.cycleSeconds, ts.cycleCount, diff );
    */
    //PrintImageInfo(&rawImage);

    memcpy(image_buf, rawImage.pData, rawImage.dataSize);

    error = fc2DestroyImage( &rawImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2DestroyImage: %d\n", error );
    }

    return 0;
}
/* Allocate and initialize a fresh FlyCapture2 image wrapper.
 * Aborts via assert() if the SDK reports a creation error; the caller
 * owns the returned object. */
FC2_Image FC2_Image__create(void)
{
    FC2_Image new_image = Memory__new(FC2_Image);
    FC2_Error status = fc2CreateImage(new_image);
    assert(status == FC2_ERROR_OK);
    return new_image;
}
/*
 * Grab numImagesToGrab frames from an already-started camera, printing the
 * inter-frame timestamp delta for each, then convert the last frame to BGR
 * and save it to fc2TestImage.png in the current directory.
 *
 * context - FlyCapture2 context with capture already started by the caller.
 * numImagesToGrab - number of frames to retrieve; if 0, the conversion/save
 *                   still runs on a never-filled image (pre-existing behavior).
 */
void GrabImages( fc2Context context, int numImagesToGrab )
{
    fc2Error error;
    fc2Image rawImage;
    fc2Image convertedImage;
    fc2TimeStamp prevTimestamp = {0};
    int i;

    error = fc2CreateImage( &rawImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2CreateImage: %d\n", error );
        // FIX: previously fell through and used the uninitialized image.
        return;
    }

    error = fc2CreateImage( &convertedImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2CreateImage: %d\n", error );
        fc2DestroyImage( &rawImage );
        return;
    }

    // If externally allocated memory is to be used for the converted image,
    // simply assigning the pData member of the fc2Image structure is
    // insufficient. fc2SetImageData() should be called in order to populate
    // the fc2Image structure correctly. This can be done at this point,
    // assuming that the memory has already been allocated.

    for ( i=0; i < numImagesToGrab; i++ )
    {
        // Retrieve the image
        error = fc2RetrieveBuffer( context, &rawImage );
        if ( error != FC2_ERROR_OK )
        {
            printf( "Error in retrieveBuffer: %d\n", error);
        }
        else
        {
            // Get and print out the time stamp.
            // The 1394 cycle counter runs at 8 kHz, hence the * 8000.
            fc2TimeStamp ts = fc2GetImageTimeStamp( &rawImage);
            int diff = (ts.cycleSeconds - prevTimestamp.cycleSeconds) * 8000
                        + (ts.cycleCount - prevTimestamp.cycleCount);
            prevTimestamp = ts;
            printf( "timestamp [%d %d] - %d\n",
                    ts.cycleSeconds, ts.cycleCount, diff );
        }
    }

    if ( error == FC2_ERROR_OK )
    {
        // Convert the final image to BGR
        error = fc2ConvertImageTo(FC2_PIXEL_FORMAT_BGR, &rawImage,
                                  &convertedImage);
        if ( error != FC2_ERROR_OK )
        {
            printf( "Error in fc2ConvertImageTo: %d\n", error );
        }

        // Save it to PNG
        printf("Saving the last image to fc2TestImage.png \n");
        error = fc2SaveImage( &convertedImage, "fc2TestImage.png", FC2_PNG );
        if ( error != FC2_ERROR_OK )
        {
            printf( "Error in fc2SaveImage: %d\n", error );
            printf( "Please check write permissions.\n");
        }
    }

    error = fc2DestroyImage( &rawImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2DestroyImage: %d\n", error );
    }

    error = fc2DestroyImage( &convertedImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2DestroyImage: %d\n", error );
    }
}
int main(int argc, char *argv[]) { const unsigned int cameraIndex = 0u; const unsigned int numImagesPerFPSMeasurement = 240u; const int windowWidth = 1440; const int windowHeight = 900; const char cascadeFilename[] = "haarcascade_frontalface_alt.xml"; const double detectionScaleFactor = 1.25; const int detectionMinNeighbours = 4; const int detectionFlags = CV_HAAR_SCALE_IMAGE; const cv::Size detectionMinSize(120, 120); const cv::Size detectionMaxSize; const cv::Scalar detectionDrawColor(255.0, 0.0, 255.0); char strBuffer[256u]; const size_t strBufferSize = 256u; int matType; cv::Mat equalizedGrayMat; #ifdef _WIN32 snprintf(strBuffer, strBufferSize, "%s/../%s", argv[0], cascadeFilename); cv::CascadeClassifier detector(strBuffer); #else cv::CascadeClassifier detector(cascadeFilename); #endif if (detector.empty()) { snprintf(strBuffer, strBufferSize, "%s could not be loaded.", cascadeFilename); SDL_ShowSimpleMessageBox( SDL_MESSAGEBOX_ERROR, "Failed to Load Cascade File", strBuffer, NULL); return EXIT_FAILURE; } std::vector<cv::Rect> detectionRects; fc2Error error; fc2Image image; error = fc2CreateImage(&image); if (error != FC2_ERROR_OK) { showFC2Error(error); return EXIT_FAILURE; } fc2Context context; error = fc2CreateContext(&context); if (error != FC2_ERROR_OK) { showFC2Error(error); return EXIT_FAILURE; } fc2PGRGuid cameraGUID; error = fc2GetCameraFromIndex(context, cameraIndex, &cameraGUID); if (error != FC2_ERROR_OK) { showFC2Error(error); return EXIT_FAILURE; } error = fc2Connect(context, &cameraGUID); if (error != FC2_ERROR_OK) { showFC2Error(error); return EXIT_FAILURE; } error = fc2StartCapture(context); if (error != FC2_ERROR_OK) { fc2Disconnect(context); showFC2Error(error); return EXIT_FAILURE; } if (SDL_Init(SDL_INIT_VIDEO) < 0) { fc2StopCapture(context); fc2Disconnect(context); showSDLError(); return EXIT_FAILURE; } SDL_Window *window = SDL_CreateWindow( "LookSpry", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, windowWidth, windowHeight, 0u); if 
(window == NULL) { fc2StopCapture(context); fc2Disconnect(context); showSDLError(); return EXIT_FAILURE; } SDL_Renderer *renderer = SDL_CreateRenderer(window, -1, 0u); if (renderer == NULL) { fc2StopCapture(context); fc2Disconnect(context); SDL_DestroyWindow(window); showSDLError(); return EXIT_FAILURE; } SDL_RendererInfo rendererInfo; SDL_GetRendererInfo(renderer, &rendererInfo); if (strcmp(rendererInfo.name, "direct3d") == 0) { SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "best"); } else if (strcmp(rendererInfo.name, "opengl") == 0) { SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear"); } snprintf(strBuffer, strBufferSize, "LookSpry | %s", rendererInfo.name); SDL_SetWindowTitle(window, strBuffer); SDL_Texture *videoTex = NULL; void *videoTexPixels; int pitch; clock_t startTicks = clock(); clock_t endTicks; unsigned int numImagesCaptured = 0u; bool running = true; bool detecting = true; bool mirroring = true; SDL_Event event; while (running) { while (SDL_PollEvent(&event)) { if (event.type == SDL_QUIT) { running = false; break; } else if (event.type == SDL_KEYUP) { switch(event.key.keysym.sym) { // When 'd' is pressed, start or stop [d]etection. case SDLK_d: detecting = !detecting; break; // When 'm' is pressed, [m]irror or un-mirror the video. case SDLK_m: mirroring = !mirroring; break; default: break; } } } error = fc2RetrieveBuffer(context, &image); if (error != FC2_ERROR_OK) { fc2Disconnect(context); SDL_DestroyTexture(videoTex); SDL_DestroyRenderer(renderer); SDL_DestroyWindow(window); showFC2Error(error); return EXIT_FAILURE; } if (videoTex == NULL) { equalizedGrayMat.create(image.rows, image.cols, CV_8UC1); SDL_RenderSetLogicalSize(renderer, image.cols, image.rows); Uint32 videoTexPixelFormat; switch (image.format) { // For monochrome capture modes, plan to render captured data to the Y // plane of a planar YUV texture. 
case FC2_PIXEL_FORMAT_RAW8: case FC2_PIXEL_FORMAT_MONO8: videoTexPixelFormat = SDL_PIXELFORMAT_YV12; matType = CV_8UC1; break; // For color capture modes, plan to render captured data to the entire // space of a texture in a matching color format. case FC2_PIXEL_FORMAT_422YUV8: videoTexPixelFormat = SDL_PIXELFORMAT_UYVY; matType = CV_8UC2; break; case FC2_PIXEL_FORMAT_RGB: videoTexPixelFormat = SDL_PIXELFORMAT_RGB24; matType = CV_8UC3; break; case FC2_PIXEL_FORMAT_BGR: videoTexPixelFormat = SDL_PIXELFORMAT_BGR24; matType = CV_8UC3; break; default: fc2StopCapture(context); fc2Disconnect(context); SDL_DestroyTexture(videoTex); SDL_DestroyRenderer(renderer); SDL_DestroyWindow(window); SDL_ShowSimpleMessageBox( SDL_MESSAGEBOX_ERROR, "Unsupported FlyCapture2 Pixel Format", "LookSpry supports RAW8, MONO8, 422YUV8, RGB, and BGR.", NULL); return EXIT_FAILURE; } videoTex = SDL_CreateTexture( renderer, videoTexPixelFormat, SDL_TEXTUREACCESS_STREAMING, image.cols, image.rows); if (videoTex == NULL) { fc2StopCapture(context); fc2Disconnect(context); SDL_DestroyRenderer(renderer); SDL_DestroyWindow(window); showSDLError(); return EXIT_FAILURE; } snprintf( strBuffer, strBufferSize, "LookSpry | %s | %dx%d --> %dx%d", rendererInfo.name, image.cols, image.rows, windowWidth, windowHeight); SDL_SetWindowTitle(window, strBuffer); } cv::Mat srcMat(image.rows, image.cols, matType, image.pData, image.stride); if (detecting) { switch (image.format) { // For monochrome capture modes, just equalize. case FC2_PIXEL_FORMAT_RAW8: case FC2_PIXEL_FORMAT_MONO8: cv::equalizeHist(srcMat, equalizedGrayMat); break; // For color capture modes, convert to gray and equalize. 
case FC2_PIXEL_FORMAT_422YUV8: cv::cvtColor(srcMat, equalizedGrayMat, cv::COLOR_YUV2GRAY_UYVY); cv::equalizeHist(equalizedGrayMat, equalizedGrayMat); break; case FC2_PIXEL_FORMAT_RGB: cv::cvtColor(srcMat, equalizedGrayMat, cv::COLOR_RGB2GRAY); cv::equalizeHist(equalizedGrayMat, equalizedGrayMat); break; case FC2_PIXEL_FORMAT_BGR: cv::cvtColor(srcMat, equalizedGrayMat, cv::COLOR_BGR2GRAY); cv::equalizeHist(equalizedGrayMat, equalizedGrayMat); break; default: break; } // Run the detector on the equalized image. detector.detectMultiScale( equalizedGrayMat, detectionRects, detectionScaleFactor, detectionMinNeighbours, detectionFlags, detectionMinSize, detectionMaxSize); // Draw the resulting detection rectangles on the original image. for (cv::Rect detectionRect : detectionRects) { cv::rectangle(srcMat, detectionRect, detectionDrawColor); } } SDL_LockTexture(videoTex, NULL, &videoTexPixels, &pitch); switch (image.format) { case FC2_PIXEL_FORMAT_RAW8: case FC2_PIXEL_FORMAT_MONO8: // Make the planar YUV video gray by setting all bytes in its U and V // planes to 128 (the middle of the range). memset(((unsigned char *)videoTexPixels + image.dataSize), 128, image.dataSize / 2u); break; default: break; } if (mirroring) { // Flip the image data while copying it to the texture. cv::Mat dstMat(image.rows, image.cols, matType, videoTexPixels, image.stride); cv::flip(srcMat, dstMat, 1); } else { // Copy the image data, as-is, to the texture. // Note that the PointGrey image and srcMat have pointers to the same // data, so the following code does reference the data that we modified // earlier via srcMat. 
memcpy(videoTexPixels, image.pData, image.dataSize); } SDL_UnlockTexture(videoTex); SDL_RenderCopy(renderer, videoTex, NULL, NULL); SDL_RenderPresent(renderer); numImagesCaptured++; if (numImagesCaptured >= numImagesPerFPSMeasurement) { endTicks = clock(); snprintf( strBuffer, strBufferSize, "LookSpry | %s | %dx%d --> %dx%d | %ld FPS", rendererInfo.name, image.cols, image.rows, windowWidth, windowHeight, numImagesCaptured * CLOCKS_PER_SEC / (endTicks - startTicks)); SDL_SetWindowTitle(window, strBuffer); startTicks = endTicks; numImagesCaptured = 0u; } } fc2StopCapture(context); fc2Disconnect(context); SDL_DestroyTexture(videoTex); SDL_DestroyRenderer(renderer); SDL_DestroyWindow(window); return EXIT_SUCCESS; }
int main(int argc, char** argv){ int numPics = 100; fc2VideoMode mode1 = FC2_VIDEOMODE_640x480Y8; char *mode1_str = "FC2_VIDEOMODE_640x480Y8"; //fc2FrameRate rate1 = FC2_FRAMERATE_30; //char *rate1_str = "FC2_FRAMERATE_30"; fc2VideoMode mode2 = FC2_VIDEOMODE_1280x960Y8; char *mode2_str = "FC2_VIDEOMODE_1280x960Y8"; fc2FrameRate rate = FC2_FRAMERATE_15; char *rate_str = "FC2_FRAMERATE_15"; fc2VideoMode mode; char *mode_str; //fc2FrameRate rate; //char *rate_str; //int prog_mode; int camOffset = 0; PrintBuildInfo(); /* if (argc != 2) */ /* { */ /* printf("Error: Must chose mode\n"); */ /* printf("Usage: %s {1, 2, 3, 4, 5, 6} \n",argv[0]); */ /* printf("Modes: 1 = first camera at 1280x960Y8\n" */ /* " 2 = second camera at 1280x960Y8\n" */ /* " 3 = both cameras at 1280x960Y8\n" */ /* " 4 = first camera at 640x480Y8\n" */ /* " 5 = second camera at 640x480Y8\n" */ /* " 6 = both cameras at 640x480Y8\n"); */ /* return -1; */ /* } */ /* prog_mode = atoi(argv[1]); */ /* if ((prog_mode > 6) || (prog_mode < 1)) */ /* { */ /* printf("Must chose valid mode, 1 through 6\n"); */ /* } */ if (CheckSaving(OUTPUT_DIR) != 0) { printf("Cannot save to %s, please check permissions\n",OUTPUT_DIR); return -1; } //have correct number of arguments and can save fc2Context tempContext; check_point_grey(fc2CreateContext(&tempContext)); unsigned int numCams; check_point_grey(fc2GetNumOfCameras(tempContext, &numCams)); check_point_grey(fc2DestroyContext(tempContext)); if (numCams == 0) { //no cameras printf("No cameras found, exiting.\n"); return -1; } //if we get here, we know we have at least 1 camera connected /* if (prog_mode < 4) */ /* { */ /* mode = FC2_VIDEOMODE_1280x960Y8; */ /* mode_str = "FC2_VIDEOMODE_1280x960Y8"; */ /* rate = FC2_FRAMERATE_15; */ /* rate_str = "FC2_FRAMERATE_15"; */ /* } */ // printf("Using resolution %s and frame rate %s\n",mode_str,rate_str); /* if ((prog_mode == 1) || (prog_mode == 4)) */ /* { // run only the first camera */ /* numCams = 1; */ /* } */ /* if 
((prog_mode == 2) || (prog_mode == 5)) */ /* { // run only the second camera */ /* camOffset = 1; */ /* } */ printf("Taking %i pictures per camera with %i camera(s).\n",numPics, (numCams - camOffset)); struct point_grey *pg_ptr[numCams - camOffset]; for (int i = 0; i < (numCams - camOffset); i++) //initialization loop { if (i == 0) { mode = mode2; mode_str = mode2_str; //rate = rate2; //rate_str = rate2_str; } else { mode = mode1; mode_str = mode1_str; //rate = rate1; //rate_str = rate1_str; } pg_ptr[i] = (struct point_grey *)point_grey_malloc(sizeof(struct point_grey)); check_point_grey(fc2CreateContext(&pg_ptr[i]->context)); check_point_grey(fc2GetCameraFromIndex(pg_ptr[i]->context, (i + camOffset), &pg_ptr[i]->guid)); check_point_grey(fc2Connect(pg_ptr[i]->context, &pg_ptr[i]->guid)); check_point_grey(fc2SetDefaultColorProcessing(FC2_NEAREST_NEIGHBOR_FAST)); check_point_grey(fc2SetVideoModeAndFrameRate(pg_ptr[i]->context, mode, rate)); AssignName(pg_ptr[i]->context, pg_ptr[i]->name); check_point_grey(fc2CreateImage(&pg_ptr[i]->raw_image)); check_point_grey(fc2CreateImage(&pg_ptr[i]->converted_image)); PrintCameraInfo(pg_ptr[i]->context); printf("Using resolution %s and frame rate %s\n",mode_str,rate_str); //**CALLING THIS HERE WILL WORK WITH 2 CAMERAS AT 640X480 BUT NOT AT //1280X960** check_point_grey(fc2StartCapture(pg_ptr[i]->context)) printf("completed initialization loop iteration %d, %s\n",i,pg_ptr[i]->name); } // initialization loop Imlib_Image temp_image; double start = current_time(); for (int j = 0; j < numPics; j++) //loop through numPics { for (int i = 0; i < (numCams - camOffset); i++) //picture taking loop { check_point_grey(fc2RetrieveBuffer(pg_ptr[i]->context, &pg_ptr[i]->raw_image)); check_point_grey(fc2ConvertImageTo(FC2_PIXEL_FORMAT_BGRU, &pg_ptr[i]->raw_image, &pg_ptr[i]->converted_image)); temp_image = imlib_create_image_using_copied_data(pg_ptr[i]->converted_image.cols, pg_ptr[i]->converted_image.rows, (unsigned int *) 
pg_ptr[i]->converted_image.pData); printf("%simage_%d\n",pg_ptr[i]->name,j); if (j == (numPics - 1)) {//save the final image from each camera SaveImlibImage(temp_image, pg_ptr[i]->name, mode_str); } else { imlib_context_set_image(temp_image); //this is where we would do something else with the image imlib_free_image_and_decache(); } } //picture taking loop } //numPics loop double stop = current_time(); //check elapsed time double elapsed = stop - start; double images_per_sec = (double)numPics / elapsed; printf("%d images per camera taken in %f seconds (%f images/sec/cam)\n", numPics, elapsed, images_per_sec); for (int i = 0; i < (numCams - camOffset); i++) //cleanup loop { //**CALLING THIS HERE WILL WORK WITH 2 CAMERAS AT 640X480 BUT NOT AT //1280X960** check_point_grey(fc2StopCapture(pg_ptr[i]->context)); check_point_grey(fc2DestroyContext(pg_ptr[i]->context)); check_point_grey(fc2DestroyImage(&pg_ptr[i]->raw_image)); check_point_grey(fc2DestroyImage(&pg_ptr[i]->converted_image)); free(pg_ptr[i]); printf("completed cleanup loop iteration %d\n",i); } //cleanup loop printf("Program complete!\n"); return 0; }
/*
 * Connect to a GigE camera, print its info and stream-channel configuration,
 * set full-frame MONO8 image settings, capture k_numImages frames (converting
 * each to MONO8; the save step is intentionally disabled), then stop capture
 * and disconnect.
 *
 * Returns 0 on success, -1 on any SDK error.
 *
 * FIXES in this revision: imageSettings is zero-initialized so members not
 * explicitly set below are not passed to the SDK as garbage; error paths
 * after capture starts now stop capture and destroy the images instead of
 * leaking them; the final fc2DestroyImage results are reported.
 */
int RunSingleCamera( fc2Context context, fc2PGRGuid guid)
{
    const int k_numImages = 1000;
    fc2Error error;
    fc2CameraInfo camInfo;
    unsigned int numStreamChannels = 0;
    fc2GigEImageSettingsInfo imageSettingsInfo;
    fc2Image rawImage, convertedImage;
    fc2GigEImageSettings imageSettings = {0};  /* FIX: was uninitialized */
    int imageCnt;
    unsigned int i;
    char filename[512];
    int rc = 0;

    printf( "Connecting to camera...\n" );

    // Connect to a camera
    error = fc2Connect( context, &guid );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2Connect: %s\n", fc2ErrorToDescription(error) );
        return -1;
    }

    // Get the camera information
    error = fc2GetCameraInfo( context, &camInfo);
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2GetCameraInfo: %s\n", fc2ErrorToDescription(error) );
        return -1;
    }
    PrintCameraInfo(&camInfo);

    error = fc2GetNumStreamChannels( context, &numStreamChannels );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2GetNumStreamChannels: %s\n",
                fc2ErrorToDescription(error) );
        return -1;
    }

    for ( i = 0; i < numStreamChannels; i++)
    {
        fc2GigEStreamChannel streamChannel;
        error = fc2GetGigEStreamChannelInfo( context, i, &streamChannel );
        if ( error != FC2_ERROR_OK )
        {
            printf( "Error in fc2GetGigEStreamChannelInfo: %s\n",
                    fc2ErrorToDescription(error) );
            return -1;
        }
        printf( "\nPrinting stream channel information for channel %u:\n", i );
        PrintStreamChannelInfo( &streamChannel );
    }

    printf( "Querying GigE image setting information...\n" );
    error = fc2GetGigEImageSettingsInfo( context, &imageSettingsInfo );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2GetGigEImageSettingsInfo: %s\n",
                fc2ErrorToDescription(error) );
        return -1;
    }

    // Full-frame, 8-bit monochrome capture.
    imageSettings.offsetX = 0;
    imageSettings.offsetY = 0;
    imageSettings.height = imageSettingsInfo.maxHeight;
    imageSettings.width = imageSettingsInfo.maxWidth;
    imageSettings.pixelFormat = FC2_PIXEL_FORMAT_MONO8;

    printf( "Setting GigE image settings...\n" );
    error = fc2SetGigEImageSettings( context, &imageSettings );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2SetGigEImageSettings: %s\n",
                fc2ErrorToDescription(error) );
        return -1;
    }

    printf( "Starting image capture...\n" );

    // Start capturing images
    error = fc2StartCapture( context);
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2StartCapture: %s\n", fc2ErrorToDescription(error) );
        return -1;
    }

    // Prepare images
    error = fc2CreateImage( &rawImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2CreateImage: %s\n", fc2ErrorToDescription(error) );
        fc2StopCapture( context );   /* FIX: capture was left running */
        return -1;
    }
    error = fc2CreateImage( &convertedImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2CreateImage: %s\n", fc2ErrorToDescription(error) );
        fc2DestroyImage( &rawImage );
        fc2StopCapture( context );
        return -1;
    }

    for ( imageCnt=0; imageCnt < k_numImages; imageCnt++ )
    {
        // Retrieve an image
        error = fc2RetrieveBuffer( context, &rawImage );
        if ( error != FC2_ERROR_OK )
        {
            printf( "Error in fc2RetrieveBuffer: %s\n",
                    fc2ErrorToDescription(error) );
            rc = -1;   /* FIX: fall through to common cleanup (was a raw return) */
            break;
        }
        printf( "Grabbed image %d\n", imageCnt );

        // Convert the raw image
        error = fc2ConvertImageTo( FC2_PIXEL_FORMAT_MONO8, &rawImage,
                                   &convertedImage );
        if ( error != FC2_ERROR_OK )
        {
            printf( "Error in fc2ConvertImage: %s\n",
                    fc2ErrorToDescription(error) );
            rc = -1;
            break;
        }

        // Create a unique filename
        snprintf( filename, sizeof filename, "GigEGrabEx-%u-%d.pgm",
                  camInfo.serialNumber, imageCnt );

        /*
        // Save the image. If a file format is not passed in, then the file
        // extension is parsed to attempt to determine the file format.
        error = fc2SaveImage( &convertedImage, filename, FC2_PGM );
        if ( error != FC2_ERROR_OK )
        {
            printf( "Error in fc2SaveImage: %s\n", fc2ErrorToDescription(error) );
            return -1;
        }
        */
    }

    printf( "Stopping capture...\n" );

    // Stop capturing images
    error = fc2StopCapture( context);
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2StopCapture: %s\n", fc2ErrorToDescription(error) );
    }

    // Disconnect the camera
    error = fc2Disconnect( context);
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2Disconnect: %s\n", fc2ErrorToDescription(error) );
    }

    /* FIX: destroy results were assigned and silently discarded. */
    error = fc2DestroyImage( &rawImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2DestroyImage: %s\n", fc2ErrorToDescription(error) );
    }
    error = fc2DestroyImage( &convertedImage );
    if ( error != FC2_ERROR_OK )
    {
        printf( "Error in fc2DestroyImage: %s\n", fc2ErrorToDescription(error) );
    }

    return rc;
}