/*
 * Grab the current camera image and, only when a client has actually
 * requested one, JPEG-compress it into the server's shared data buffer.
 * Always notifies waiters afterwards and clears the request flag.
 *
 * Returns true unconditionally (keeps the streamer loop running).
 */
bool MJPEGStreamer::execute()
{
    /* Raw YUYV frame geometry: 800x600 at 2 bytes per pixel.
     * NOTE(review): must match the capture configuration — confirm. */
    static const size_t kFrameWidth       = 800;
    static const size_t kFrameHeight      = 600;
    static const size_t kYuyvBytesPerPixel = 2;
    static const int    kJpegQuality      = 80;

    pthread_mutex_lock(&(mServer->Mutex));

    /* Compress on demand only: skip the expensive JPEG encode when no
     * client is currently waiting for a frame. */
    if (mServer->getClientRequest()) {
        mServer->setDataBufferSize(
            compress_yuyv_to_jpeg(getImage(),
                                  mServer->getDataBuffer(),
                                  kFrameWidth * kFrameHeight * kYuyvBytesPerPixel,
                                  kJpegQuality));
    }

    /* Wake consumers; the buffer (size) may have changed. */
    mServer->notifyDataBufferUpdate();
    mServer->setClientRequest(false);

    pthread_mutex_unlock(&(mServer->Mutex));
    return true;
}
/*
 * getFrame - dequeue one captured frame from the V4L2 device, JPEG-compress
 * it and copy the result into the caller-supplied buffer.
 *
 * p: destination buffer; must be able to hold at least JPEG_FRAME_LEN bytes.
 *
 * Returns the number of JPEG bytes copied into p, or -1 if the driver
 * refused to hand out / take back a capture buffer.
 */
int getFrame(unsigned char *p)
{
    int len, size, fIndex;

    /* Dequeue a filled capture buffer from the driver.
     * FIX: the ioctl result was previously ignored; on failure the old code
     * went on to read a stale/invalid buffer index. */
    memset(&vd.queue_buf, 0, sizeof(vd.queue_buf));
    vd.queue_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd.queue_buf.memory = V4L2_MEMORY_MMAP;
    if (ioctl(vd.fd, VIDIOC_DQBUF, &vd.queue_buf) < 0) {
        perror("VIDIOC_DQBUF");
        return -1;
    }

    vd.frame_buf = vd.buffers[vd.queue_buf.index].start;
    len = vd.buffers[vd.queue_buf.index].length;
    fIndex = vd.queue_buf.index;

    /* Compress the raw YUYV frame to JPEG at quality 80.
     * NOTE(review): 640x480 is hard-coded — confirm it matches the
     * negotiated capture resolution. */
    memset(vd.jpeg_buf, 0, JPEG_FRAME_LEN);
    size = compress_yuyv_to_jpeg(vd.frame_buf, vd.jpeg_buf, 640, 480, len, 80);
    /* memcpy overwrites exactly [0, size), so no pre-clear of p is needed */
    // DBG("size: %d\n", size);
    memcpy(p, vd.jpeg_buf, size);

    /* Re-queue ("unget") the buffer so the driver can fill it again. */
    memset(&vd.queue_buf, 0, sizeof(vd.queue_buf));
    vd.queue_buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd.queue_buf.memory = V4L2_MEMORY_MMAP;
    vd.queue_buf.index = fIndex;
    if (ioctl(vd.fd, VIDIOC_QBUF, &vd.queue_buf) < 0) {
        perror("VIDIOC_QBUF");
        return -1;
    }

    return size;
}
/****************************************************************************** Description.: this thread worker grabs a frame and copies it to the global buffer Input Value.: unused Return Value: unused, always NULL ******************************************************************************/ void *cam_thread(void *arg) { context *pcontext = arg; pglobal = pcontext->pglobal; /* set cleanup handler to cleanup allocated ressources */ pthread_cleanup_push(cam_cleanup, pcontext); while(!pglobal->stop) { while(pcontext->videoIn->streamingState == STREAMING_PAUSED) { usleep(1); // maybe not the best way so FIXME } if(stop_camera == 1) { /* check active outputs */ pthread_mutex_lock(&pglobal->in[pcontext->id].out); if(pglobal->in[pcontext->id].num_outs == 0) { /* stop camera */ uvcStopGrab(pcontext->videoIn); /* wait for active outputs */ pthread_cond_wait(&pglobal->in[pcontext->id].out_update, &pglobal->in[pcontext->id].out); } /* allow others to access the global buffer again */ pthread_mutex_unlock(&pglobal->in[pcontext->id].out); } /* grab a frame */ if(uvcGrab(pcontext->videoIn) < 0) { IPRINT("Error grabbing frames\n"); exit(EXIT_FAILURE); } DBG("received frame of size: %d from plugin: %d\n", pcontext->videoIn->buf.bytesused, pcontext->id); /* * Workaround for broken, corrupted frames: * Under low light conditions corrupted frames may get captured. * The good thing is such frames are quite small compared to the regular pictures. * For example a VGA (640x480) webcam picture is normally >= 8kByte large, * corrupted frames are smaller. */ if(pcontext->videoIn->buf.bytesused < minimum_size) { DBG("dropping too small frame, assuming it as broken\n"); continue; } /* copy JPG picture to global buffer */ pthread_mutex_lock(&pglobal->in[pcontext->id].db); /* * If capturing in YUV mode convert to JPEG now. * This compression requires many CPU cycles, so try to avoid YUV format. 
* Getting JPEGs straight from the webcam, is one of the major advantages of * Linux-UVC compatible devices. */ if(pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) { DBG("compressing frame from input: %d\n", (int)pcontext->id); pglobal->in[pcontext->id].size = compress_yuyv_to_jpeg(pcontext->videoIn, pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, gquality); } else { DBG("copying frame from input: %d\n", (int)pcontext->id); pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->buf.bytesused); } #if 0 /* motion detection can be done just by comparing the picture size, but it is not very accurate!! */ if((prev_size - global->size)*(prev_size - global->size) > 4 * 1024 * 1024) { DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024); } prev_size = global->size; #endif /* copy this frame's timestamp to user space */ pglobal->in[pcontext->id].timestamp = pcontext->videoIn->buf.timestamp; /* signal fresh_frame */ pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update); pthread_mutex_unlock(&pglobal->in[pcontext->id].db); /* only use usleep if the fps is below 5, otherwise the overhead is too long */ if(pcontext->videoIn->fps < 5) { DBG("waiting for next frame for %d us\n", 1000 * 1000 / pcontext->videoIn->fps); usleep(1000 * 1000 / pcontext->videoIn->fps); } else { DBG("waiting for next frame\n"); } } DBG("leaving input thread, calling cleanup function now\n"); pthread_cleanup_pop(1); return NULL; }
/*
 * Snapshot tool entry point: opens a V4L2 device, optionally tunes camera
 * controls, then periodically grabs a frame and writes it to `outputfile`
 * as JPEG (raw MJPEG passthrough, or software-compressed when capturing
 * YUYV), optionally running a post-capture command after each save.
 */
int main (int argc, char *argv[])
{
  char *videodevice = "/dev/video0";
  char *outputfile = "snap.jpg";
  char *post_capture_command[3];
  int format = V4L2_PIX_FMT_MJPEG;
  int grabmethod = 1;                 /* 1 = mmap, 0 = read */
  int width = 320;
  int height = 240;
  int brightness = 0, contrast = 0, saturation = 0, gain = 0;
  int verbose = 0;
  int delay = 0;                      /* seconds between snapshots; 0 = one-shot */
  int quality = 95;
  int post_capture_command_wait = 0;
  time_t ref_time;
  struct vdIn *videoIn;
  FILE *file;

  /* install the shutdown handler for the usual termination signals.
   * NOTE(review): SIGKILL can neither be caught nor ignored (POSIX) — the
   * SIGKILL registration below always fails and has no effect. */
  (void) signal (SIGINT, sigcatch);
  (void) signal (SIGQUIT, sigcatch);
  (void) signal (SIGKILL, sigcatch);
  (void) signal (SIGTERM, sigcatch);
  (void) signal (SIGABRT, sigcatch);
  (void) signal (SIGTRAP, sigcatch);

  // set post_capture_command to default values
  post_capture_command[0] = NULL;
  post_capture_command[1] = NULL;
  post_capture_command[2] = NULL;

  //Options Parsing (FIXME)
  while ((argc > 1) && (argv[1][0] == '-')) {
    switch (argv[1][1]) {
    case 'v':                         /* increase verbosity */
      verbose++;
      break;
    case 'o':                         /* output file name */
      outputfile = &argv[1][2];
      break;
    case 'd':                         /* video device node */
      videodevice = &argv[1][2];
      break;
    case 'x':                         /* capture width */
      width = atoi (&argv[1][2]);
      break;
    case 'y':                         /* capture height */
      height = atoi (&argv[1][2]);
      break;
    case 'r':                         /* use read() instead of mmap */
      grabmethod = 0;
      break;
    case 'm':                         /* capture YUYV instead of MJPEG */
      format = V4L2_PIX_FMT_YUYV;
      break;
    case 't':                         /* seconds between snapshots */
      delay = atoi (&argv[1][2]);
      break;
    case 'c':                         /* command to run after each capture */
      post_capture_command[0] = &argv[1][2];
      break;
    case 'w':                         /* wait for the post-capture command */
      post_capture_command_wait = 1;
      break;
    case 'B':
      brightness = atoi (&argv[1][2]);
      break;
    case 'C':
      contrast = atoi (&argv[1][2]);
      break;
    case 'S':
      saturation = atoi (&argv[1][2]);
      break;
    case 'G':
      gain = atoi (&argv[1][2]);
      break;
    case 'q':                         /* JPEG quality (forces YUYV below) */
      quality = atoi (&argv[1][2]);
      break;
    case 'h':
      usage ();
      break;
    default:
      fprintf (stderr, "Unknown option %s \n", argv[1]);
      usage ();
    }
    ++argv;
    --argc;
  }

  /* large frames or a non-default quality require software compression,
   * which only works on raw YUYV input */
  if ((width > 960) || (height > 720) || (quality != 95))
    format = V4L2_PIX_FMT_YUYV;

  /* the post-capture command receives the output file as its argument */
  if (post_capture_command[0])
    post_capture_command[1] = outputfile;

  if (verbose >= 1) {
    fprintf (stderr, "Using videodevice: %s\n", videodevice);
    fprintf (stderr, "Saving images to: %s\n", outputfile);
    fprintf (stderr, "Image size: %dx%d\n", width, height);
    fprintf (stderr, "Taking snapshot every %d seconds\n", delay);
    if (grabmethod == 1)
      fprintf (stderr, "Taking images using mmap\n");
    else
      fprintf (stderr, "Taking images using read\n");
    if (post_capture_command[0])
      fprintf (stderr, "Executing '%s' after each image capture\n",
               post_capture_command[0]);
  }

  videoIn = (struct vdIn *) calloc (1, sizeof (struct vdIn));
  if (init_videoIn
      (videoIn, (char *) videodevice, width, height, format, grabmethod) < 0)
    exit (1);

  //Reset all camera controls
  if (verbose >= 1)
    fprintf (stderr, "Resetting camera settings\n");
  v4l2ResetControl (videoIn, V4L2_CID_BRIGHTNESS);
  v4l2ResetControl (videoIn, V4L2_CID_CONTRAST);
  v4l2ResetControl (videoIn, V4L2_CID_SATURATION);
  v4l2ResetControl (videoIn, V4L2_CID_GAIN);

  //Setup Camera Parameters (0 means "leave at driver default")
  if (brightness != 0) {
    if (verbose >= 1)
      fprintf (stderr, "Setting camera brightness to %d\n", brightness);
    v4l2SetControl (videoIn, V4L2_CID_BRIGHTNESS, brightness);
  } else if (verbose >= 1) {
    fprintf (stderr, "Camera brightness level is %d\n",
             v4l2GetControl (videoIn, V4L2_CID_BRIGHTNESS));
  }
  if (contrast != 0) {
    if (verbose >= 1)
      fprintf (stderr, "Setting camera contrast to %d\n", contrast);
    v4l2SetControl (videoIn, V4L2_CID_CONTRAST, contrast);
  } else if (verbose >= 1) {
    fprintf (stderr, "Camera contrast level is %d\n",
             v4l2GetControl (videoIn, V4L2_CID_CONTRAST));
  }
  if (saturation != 0) {
    if (verbose >= 1)
      fprintf (stderr, "Setting camera saturation to %d\n", saturation);
    v4l2SetControl (videoIn, V4L2_CID_SATURATION, saturation);
  } else if (verbose >= 1) {
    fprintf (stderr, "Camera saturation level is %d\n",
             v4l2GetControl (videoIn, V4L2_CID_SATURATION));
  }
  if (gain != 0) {
    if (verbose >= 1)
      fprintf (stderr, "Setting camera gain to %d\n", gain);
    v4l2SetControl (videoIn, V4L2_CID_GAIN, gain);
  } else if (verbose >= 1) {
    fprintf (stderr, "Camera gain level is %d\n",
             v4l2GetControl (videoIn, V4L2_CID_GAIN));
  }

  ref_time = time (NULL);

  /* main capture loop: grab continuously, save every `delay` seconds */
  while (run) {
    if (verbose >= 2)
      fprintf (stderr, "Grabbing frame\n");
    if (uvcGrab (videoIn) < 0) {
      fprintf (stderr, "Error grabbing\n");
      close_v4l2 (videoIn);
      free (videoIn);
      exit (1);
    }

    if ((difftime (time (NULL), ref_time) > delay) || delay == 0) {
      if (verbose >= 1)
        fprintf (stderr, "Saving image to: %s\n", outputfile);
      file = fopen (outputfile, "wb");
      if (file != NULL) {
        switch (videoIn->formatIn) {
        case V4L2_PIX_FMT_YUYV:
          /* raw YUYV: software-compress to JPEG */
          compress_yuyv_to_jpeg (videoIn, file, quality);
          break;
        default:
          /* camera already delivers (M)JPEG: write it out, including the
           * appended Huffman table data */
          fwrite (videoIn->tmpbuffer, videoIn->buf.bytesused + DHT_SIZE, 1,
                  file);
          break;
        }
        fclose (file);
        videoIn->getPict = 0;
      }
      if (post_capture_command[0]) {
        if (verbose >= 1)
          fprintf (stderr, "Executing '%s %s'\n", post_capture_command[0],
                   post_capture_command[1]);
        if (spawn (post_capture_command, post_capture_command_wait, verbose)) {
          fprintf (stderr, "Command exited with error\n");
          close_v4l2 (videoIn);
          free (videoIn);
          exit (1);
        }
      }
      ref_time = time (NULL);
    }
    if (delay == 0)
      break;                          /* one-shot mode */
  }

  close_v4l2 (videoIn);
  free (videoIn);
  return 0;
}
/****************************************************************************** Description.: this thread worker grabs a frame and copies it to the global buffer Input Value.: unused Return Value: unused, always NULL ******************************************************************************/ void *cam_thread( void *arg ) { /* set cleanup handler to cleanup allocated ressources */ pthread_cleanup_push(cam_cleanup, NULL); while( !pglobal->stop ) { /* grab a frame */ if( uvcGrab(videoIn) < 0 ) { IPRINT("Error grabbing frames\n"); exit(EXIT_FAILURE); } DBG("received frame of size: %d\n", videoIn->buf.bytesused); /* * Workaround for broken, corrupted frames: * Under low light conditions corrupted frames may get captured. * The good thing is such frames are quite small compared to the regular pictures. * For example a VGA (640x480) webcam picture is normally >= 8kByte large, * corrupted frames are smaller. */ if ( videoIn->buf.bytesused < minimum_size ) { DBG("dropping too small frame, assuming it as broken\n"); continue; } /* copy JPG picture to global buffer */ pthread_mutex_lock( &pglobal->db ); /* * If capturing in YUV mode convert to JPEG now. * This compression requires many CPU cycles, so try to avoid YUV format. * Getting JPEGs straight from the webcam, is one of the major advantages of * Linux-UVC compatible devices. */ if (videoIn->formatIn != V4L2_PIX_FMT_MJPEG ) { DBG("compressing frame\n"); pglobal->size = compress_yuyv_to_jpeg(videoIn, pglobal->buf, videoIn->framesizeIn, gquality,videoIn->fmt.fmt.pix.pixelformat); } else { DBG("copying frame\n"); pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer, videoIn->buf.bytesused); } #if 0 /* motion detection can be done just by comparing the picture size, but it is not very accurate!! 
*/ if ( (prev_size - global->size)*(prev_size - global->size) > 4*1024*1024 ) { DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024); } prev_size = global->size; #endif /* signal fresh_frame */ pthread_cond_broadcast(&pglobal->db_update); pthread_mutex_unlock( &pglobal->db ); DBG("waiting for next frame\n"); /* only use usleep if the fps is below 5, otherwise the overhead is too long */ if ( videoIn->fps < 5 ) { usleep(1000*1000/videoIn->fps); } } DBG("leaving input thread, calling cleanup function now\n"); pthread_cleanup_pop(1); return NULL; }
/****************************************************************************** Description.: this thread worker grabs a frame and copies it to the global buffer Input Value.: unused Return Value: unused, always NULL ******************************************************************************/ void *cam_thread(void *arg) { int cid = 0, id = (int)arg; context *activecam, *pcontext = &cams[id]; pglobal = pcontext->pglobal; activecam = pcontext; /* set cleanup handler to cleanup allocated ressources */ pthread_cleanup_push(cam_cleanup, pcontext); while(!pglobal->stop) { if(cid != camera) { video_pause(activecam->videoIn); cid = camera; activecam = &cams[id + cid]; IPRINT("Switch to camera..: %s\n", mdev[cid]); video_unpause(activecam->videoIn); } while(activecam->videoIn->streamingState == STREAMING_PAUSED) { usleep(1); // maybe not the best way so FIXME } /* grab a frame */ if(uvcGrab(activecam->videoIn) < 0) { IPRINT("Error grabbing frames\n"); exit(EXIT_FAILURE); } DBG("received frame of size: %d from plugin: %d\n", activecam->videoIn->buf.bytesused, activecam->id); /* * Workaround for broken, corrupted frames: * Under low light conditions corrupted frames may get captured. * The good thing is such frames are quite small compared to the regular pictures. * For example a VGA (640x480) webcam picture is normally >= 8kByte large, * corrupted frames are smaller. */ if(activecam->videoIn->buf.bytesused < minimum_size) { DBG("dropping too small frame, assuming it as broken\n"); continue; } /* copy JPG picture to global buffer */ pthread_mutex_lock(&pglobal->in[pcontext->id].db); /* * If capturing in YUV mode convert to JPEG now. * This compression requires many CPU cycles, so try to avoid YUV format. * Getting JPEGs straight from the webcam, is one of the major advantages of * Linux-UVC compatible devices. 
*/ if(activecam->videoIn->formatIn == V4L2_PIX_FMT_YUYV) { DBG("compressing frame from input: %d\n", (int)activecam->id); pglobal->in[pcontext->id].size = compress_yuyv_to_jpeg(activecam->videoIn, pglobal->in[pcontext->id].buf, activecam->videoIn->framesizeIn, gquality); } else { DBG("copying frame from input: %d\n", (int)activecam->id); pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, activecam->videoIn->tmpbuffer, activecam->videoIn->buf.bytesused); } /* copy this frame's timestamp to user space */ pglobal->in[pcontext->id].timestamp = activecam->videoIn->buf.timestamp; /* signal fresh_frame */ pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update); pthread_mutex_unlock(&pglobal->in[pcontext->id].db); /* only use usleep if the fps is below 5, otherwise the overhead is too long */ if(activecam->videoIn->fps < 5) { DBG("waiting for next frame for %d us\n", 1000 * 1000 / activecam->videoIn->fps); usleep(1000 * 1000 / activecam->videoIn->fps); } else { DBG("waiting for next frame\n"); } } DBG("leaving input thread, calling cleanup function now\n"); pthread_cleanup_pop(1); return NULL; }
/******************************************************************************
Description.: this thread worker grabs a frame and copies it to the global buffer
Input Value.: unused
Return Value: unused, always NULL
******************************************************************************/
void *cam_thread(void *arg)
{
    g_settings.init();
    setCameraExposure();
    CVideoFrame* pFrame = NULL;

#ifndef TEST_USE_JPEGS_NOT_CAMERA
    int width = VIEW_PIXEL_X_WIDTH;
    int height = VIEW_PIXEL_Y_HEIGHT;
    IplImage * img = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3); // obraz OpenCV
#endif

    frameGrinder.init();

#ifdef TEST_USE_JPEGS_NOT_CAMERA
    /* Test mode: instead of a camera, alternate between two JPEGs loaded
     * from the home directory (currently both paths point at the same file). */
    std::string sBasePath = "/home/";
    sBasePath += HOME_NAME;
    std::string sPath = sBasePath;
    sPath += "/0243-20150125-22-21-46.jpg";
    //sPath += "/0007-20150125-22-36-25.jpg";
    cv::Mat frame1 = cv::imread(sPath.c_str(), CV_LOAD_IMAGE_COLOR);
    if (frame1.empty()) {
        dbgMsg_s("Failed to read image data from a file1\n");
    }
    sPath = sBasePath;
    sPath += "/0243-20150125-22-21-46.jpg";
    //sPath += "/0007-20150125-22-36-25.jpg";
    cv::Mat frame2 = cv::imread(sPath.c_str(), CV_LOAD_IMAGE_COLOR);
    if (frame2.empty()) {
        dbgMsg_s("Failed to read image data from a file2\n");
    }
    bool toggle = false;
#endif

    context *pcontext = (context*) arg;
    pglobal = pcontext->pglobal;

    /* set cleanup handler to cleanup allocated ressources */
    pthread_cleanup_push(cam_cleanup, pcontext);

    while (!pglobal->stop) {
        while (pcontext->videoIn->streamingState == STREAMING_PAUSED) {
            usleep(1); // maybe not the best way so FIXME
        }

#ifdef TEST_USE_JPEGS_NOT_CAMERA
        /* feed the alternating test images into the processing pipeline */
        if (frameGrinder.safeGetFreeFrame(&pFrame)) {
            if (toggle) {
                pFrame->m_frame = frame1;
            } else {
                pFrame->m_frame = frame2;
            }
            toggle = (!toggle);
            if (!pFrame->m_frame.empty()) {
                frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT);
            } else {
                dbgMsg_s("Frame is empty\n");
                frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_FREE);
            }
            frameGrinder.m_testMonitor.m_nTasksDone[CTestMonitor::TASK_DONE_CAMERA]++;
        }
#else
        /* grab a frame */
        if (uvcGrab(pcontext->videoIn) < 0) {
            IPRINT("Error grabbing frames\n");
            exit(EXIT_FAILURE);
        }

        DBG("received frame of size: %d from plugin: %d\n", pcontext->videoIn->buf.bytesused, pcontext->id);

        /*
         * Workaround for broken, corrupted frames:
         * Under low light conditions corrupted frames may get captured.
         * The good thing is such frames are quite small compared to the regular pictures.
         * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
         * corrupted frames are smaller.
         */
        if (pcontext->videoIn->buf.bytesused < minimum_size) {
            DBG("dropping too small frame, assuming it as broken\n");
            continue;
        }

        /* re-read and apply the exposure setting when dynamic settings are on */
        if (g_settings.isDynamicSettingsEnabled()) {
            g_settings.getValueFromFile(CSetting::SETTING_EXPOSURE);
        }
        if(g_settings.isValueChanged(CSetting::SETTING_EXPOSURE)) {
            setCameraExposure();
        }

#ifdef NO_CV_JUST_STREAM_THE_CAMERA
        /* copy JPG picture to global buffer */
        pthread_mutex_lock(&pglobal->in[pcontext->id].db);

        /*
         * If capturing in YUV mode convert to JPEG now.
         * This compression requires many CPU cycles, so try to avoid YUV format.
         * Getting JPEGs straight from the webcam, is one of the major advantages of
         * Linux-UVC compatible devices.
         */
        if (pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) {
            DBG("compressing frame from input: %d\n", (int) pcontext->id);
            pglobal->in[pcontext->id].size = compress_yuyv_to_jpeg(pcontext->videoIn, pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, gquality);
        } else {
            DBG("copying frame from input: %d\n", (int) pcontext->id);
            pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->buf.bytesused);
        }

        /* copy this frame's timestamp to user space */
        pglobal->in[pcontext->id].timestamp = pcontext->videoIn->buf.timestamp;

        /* signal fresh_frame */
        pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
        pthread_mutex_unlock(&pglobal->in[pcontext->id].db);
#else // #ifndef NO_CV_JUST_STREAM_THE_CAMERA
        /* decode the captured buffer with OpenCV and queue it for blob detect.
         * NOTE(review): only (height * width) bytes of tmpbuffer are wrapped
         * here — for 2-byte-per-pixel YUYV that is half a frame; presumably
         * tmpbuffer holds compressed (M)JPEG for imdecode — confirm. */
        if (frameGrinder.safeGetFreeFrame(&pFrame)) {
            std::vector<uchar> vectordata(pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbuffer + (height * width));
            cv::Mat data_mat(vectordata, false);
            cv::Mat image(cv::imdecode(data_mat, 1)); //put 0 if you want greyscale
            pFrame->m_frame = image;
            if (!pFrame->m_frame.empty()) {
                frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT);
            } else {
                dbgMsg_s("Frame is empty\n");
                frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_FREE);
            }
            frameGrinder.m_testMonitor.m_nTasksDone[CTestMonitor::TASK_DONE_CAMERA]++;
        }
#endif // #ifndef NO_CV_JUST_STREAM_THE_CAMERA
#endif // TEST_USE_JPEGS_NOT_CAMERA
    }

    DBG("leaving input thread, calling cleanup function now\n");
    pthread_cleanup_pop(1);

    return NULL;
}
int main (int argc, char *argv[]) { char *input_file = NULL; struct timeval start_time, stop_time; char outnameBuff[1024]; int quality = 95; int soft_compress_yuv = 0; int convert_yuv_to_ppm = 0; char *sep; int inp; uint16_t width, height; void *pixels; //Options Parsing (FIXME) while ((argc > 1) && (argv[1][0] == '-')) { switch (argv[1][1]) { case 'j': soft_compress_yuv = 1; break; case 'p': convert_yuv_to_ppm = 1; break; case 'h': usage ("yuv_decode"); break; default: fprintf (stderr, "Unknown option %s \n", argv[1]); usage ("yuv_decode"); } ++argv; --argc; } if (argc <= 1) usage("yuv_decode"); input_file = argv[1]; strcpy(outnameBuff, input_file); /* sep = rindex(input_file, '.'); if (!sep) strcpy(outnameBuff, input_file); else { memset(outnameBuff, 0, sizeof(outnameBuff)); memcpy(outnameBuff, input_file, sep - input_file); } */ inp = open(input_file, O_RDONLY); if (inp < 0) { perror("Error opening input file"); exit(0); } if (sizeof(width) != read(inp, &width, sizeof(width))) { perror("Read width"); exit(0); } if (sizeof(height) != read(inp, &height, sizeof(height))) { perror("Read height"); exit(0); } pixels = mmap(NULL, width * height * 2 + sizeof(width) + sizeof(height), PROT_READ, MAP_SHARED | MAP_FILE, inp, 0); if (MAP_FAILED == pixels) { perror("mmap"); exit(0); } printf("%s (%d x %d)\n", outnameBuff, width, height); if (soft_compress_yuv) compress_yuyv_to_jpeg (outnameBuff, quality, width, height, pixels + sizeof(width) + sizeof(height)); if (convert_yuv_to_ppm) convert_yuyv_to_ppm(outnameBuff, width, height, pixels + sizeof(width) + sizeof(height)); munmap(pixels, width * height * 2); return 0; }