/* the single writer thread */
void *cam_thread(void *arg)
{
    while (!stop) {
        /* grab a frame */
        if (uvcGrab(cd.videoIn) < 0) {
            fprintf(stderr, "Error grabbing\n");
            exit(1);
        }

        /* copy frame to global buffer */
        pthread_mutex_lock(&db);
        g_size = cd.videoIn->buf.bytesused;
        memcpy(g_buf, cd.videoIn->tmpbuffer, cd.videoIn->buf.bytesused);

        /* signal fresh_frame */
        pthread_cond_broadcast(&db_update);
        pthread_mutex_unlock(&db);

        /* only use usleep if the fps is below 5, otherwise the overhead is too long */
        if (cd.videoIn->fps < 5) {
            usleep(1000 * 1000 / cd.videoIn->fps);
        }
    }
    return NULL;
}
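/*
 * For context: a consumer of the writer above blocks on the same mutex and
 * condition variable until the broadcast arrives. A minimal reader sketch,
 * assuming the `stop`, `db`, `db_update`, `g_buf` and `g_size` globals from
 * the snippet above; MAX_FRAME_SIZE and process_frame() are hypothetical.
 */
void *reader_thread(void *arg)
{
    static unsigned char local[MAX_FRAME_SIZE]; /* assumed upper bound on frame size */
    size_t local_size;

    while (!stop) {
        pthread_mutex_lock(&db);
        /* pthread_cond_wait atomically releases the mutex while waiting and
           re-acquires it on wakeup; a spurious wakeup just re-copies the
           latest frame, which is harmless here */
        pthread_cond_wait(&db_update, &db);
        local_size = g_size;
        memcpy(local, g_buf, local_size);
        pthread_mutex_unlock(&db);

        process_frame(local, local_size); /* hypothetical consumer */
    }
    return NULL;
}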
void grabUVC(struct vdIn *v, Ipp8u *p)
{
    if (uvcGrab(v) < 0) {
        printf("Error grabbing\n");
        exit(1);
    }

    IppiSize roiSize = { v->width, v->height };
    int steps[3] = { v->width, v->width / 2, v->width / 2 };
    Ipp8u *dest[3] = {
        p,
        p + (v->width * v->height),
        p + (v->width * v->height + v->width * v->height / 4)
    };
    ippiYCbCr422ToYCbCr420_8u_C2P3R(v->framebuffer, v->width * 2, dest, steps, roiSize);

    /*
    // copy the gray plane
    int k, d = 0;
    unsigned char *s = v->framebuffer;
    for (k = 0; k < (v->width * v->height); k++) {
        p[k] = s[d];
        d += 2;
    }
    */

    // capture without format conversion
    // memcpy(p, videoIn->framebuffer, videoIn->width * (videoIn->height) * 2);
}
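/*
 * Note on buffer sizing: grabUVC() writes a planar YCbCr 4:2:0 image into p,
 * i.e. a full-resolution Y plane followed by quarter-size Cb and Cr planes,
 * width*height*3/2 bytes in total. A minimal allocation sketch, assuming
 * Intel IPP's ippsMalloc_8u()/ippsFree() for aligned buffers:
 */
int lumaSize   = v->width * v->height; /* Y plane, full resolution  */
int chromaSize = lumaSize / 4;         /* Cb and Cr, 2x2 subsampled */
Ipp8u *p = ippsMalloc_8u(lumaSize + 2 * chromaSize);
if (p != NULL) {
    grabUVC(v, p);
    /* ... use the 4:2:0 frame ... */
    ippsFree(p);
}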
/******************************************************************************
Description.: this thread worker grabs a frame and copies it to the global buffer
Input Value.: unused
Return Value: unused, always NULL
******************************************************************************/
void *cam_thread(void *arg)
{
    context_t *pcontext = (context_t *) arg;
    input_t *input = pcontext->input;

    while (!pcontext->stop) {
        while (!pcontext->stop && pcontext->videoIn->streamingState == STREAMING_PAUSED) {
            usleep(100); // maybe not the best way so FIXME
        }
        if (pcontext->stop)
            break;

        /* grab a frame */
        if (uvcGrab(pcontext->videoIn) < 0) {
            PTRACE(0, "Error grabbing frames");
            return NULL;
        }

        /*
         * Workaround for broken, corrupted frames:
         * Under low light conditions corrupted frames may get captured.
         * The good thing is such frames are quite small compared to the regular pictures.
         * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
         * corrupted frames are smaller.
         */
        if (pcontext->videoIn->buf.bytesused < pcontext->minimum_size) {
            PTRACE(3, "dropping too small frame, assuming it as broken");
            continue;
        }

        if (input->callback)
            input->callback(input, pcontext->videoIn->tmpbuffer,
                            pcontext->videoIn->buf.bytesused,
                            pcontext->videoIn->buf.timestamp);

        /* below 60 fps, sleep roughly one frame interval (minus 5 ms);
           above that the sleep overhead exceeds the frame interval */
        if (pcontext->videoIn->fps < 60) {
            usleep(1000 * 1000 * pcontext->videoIn->fps_div / pcontext->videoIn->fps - 5000);
        }
    }

    PTRACE(1, "leaving input thread");
    return NULL;
}
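/*
 * A minimal sketch of a callback matching the call site above. The exact
 * typedef lives in the project's headers; this only mirrors the argument
 * list used in the snippet, and the body is a hypothetical consumer.
 * It would be registered once at setup, e.g. input->callback = my_frame_callback;
 */
static void my_frame_callback(input_t *input, unsigned char *buf,
                              int size, struct timeval timestamp)
{
    /* e.g. hand the (typically JPEG) buffer to a writer or network sender;
       writing to stdout is purely illustrative */
    fwrite(buf, size, 1, stdout);
}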
/**
 * \brief Thread method
 */
void Worker::run()
{
    init();
    while (!_finished) {
        if (uvcGrab() == false)
            qFatal("Worker::run() -> Fatal error: uvcGrab() returned error");
        printFPS();
    }
}
IplImage *image_input::capture_uvc_camera()
{
#ifdef ENABLE_CAMERA
    struct vdIn *videoIn;
    char *videodevice = (char *)"/dev/video0";
    int format = V4L2_PIX_FMT_YUYV;
    int grabmethod = 1;
    IplImage *frame;

    videoIn = (struct vdIn *) calloc(1, sizeof(struct vdIn));
    if (init_videoIn(videoIn, videodevice, camera.width, camera.height, format, grabmethod) < 0) {
        puts("init failed");
        exit(1);
    }

    // Reset all camera controls
    v4l2ResetControl(videoIn, V4L2_CID_BRIGHTNESS);
    v4l2ResetControl(videoIn, V4L2_CID_CONTRAST);
    v4l2ResetControl(videoIn, V4L2_CID_SATURATION);
    v4l2ResetControl(videoIn, V4L2_CID_GAIN);

    // Setup Camera Parameters
    v4l2SetControl(videoIn, V4L2_CID_BRIGHTNESS, camera.brightness);
    v4l2SetControl(videoIn, V4L2_CID_CONTRAST, camera.contrast);
    v4l2SetControl(videoIn, V4L2_CID_SATURATION, camera.saturation);
    v4l2SetControl(videoIn, V4L2_CID_GAIN, camera.gain);

    if (uvcGrab(videoIn) < 0) {
        fprintf(stderr, "Error grabbing\n");
        close_v4l2(videoIn);
        free(videoIn);
        exit(1);
    }

    // allocate the output image before converting into it
    frame = cvCreateImage(cvSize(videoIn->width, videoIn->height), IPL_DEPTH_8U, 3);

    // convert image format
    convert_yuyv_to_rgb(videoIn->framebuffer, (unsigned char *)frame->imageData,
                        videoIn->width, videoIn->height);

    close_v4l2(videoIn);
    free(videoIn);
    return frame;
#else  // ENABLE_CAMERA
    return NULL;
#endif // ENABLE_CAMERA
}
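/*
 * Usage sketch: the function above hands ownership of the returned IplImage
 * to the caller, so the caller must release it with the OpenCV 1.x API used
 * by the snippet. `input` is a hypothetical image_input instance.
 */
IplImage *frame = input.capture_uvc_camera();
if (frame != NULL) {
    /* ... process the RGB frame ... */
    cvReleaseImage(&frame);
}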
void CameraGrabber::run()
{
    int i = 0;
    RoboCompCameraSimplePub::TImg img;
    img.resize(vd[0].buf.length);

    while (true) {
        uvcGrab();
        memcpy(&img[0], vd[0].framebuffer, vd[0].buf.length);
        // try
        // { camera_proxy->putYUVImage(img); }
        // catch(const Ice::Exception &ex)
        // { std::cout << ex << std::endl; }
        i++;
        if (i % 10 == 0)
            printf("publishing %d\n", i);
    }
}
void CameraManager2::start_grabbing()
{
    char outputfile[40];

    for (int i = 0; i < 11; i++) {
        sprintf(outputfile, "snap%i.jpg", i);
        std::cout << i << std::endl;
        if (uvcGrab() < 0) {
            fprintf(stderr, "Error grabbing\n");
        } else {
            FILE *file = fopen(outputfile, "wb");
            if (file != NULL) {
                fwrite(vd->tmpbuffer, vd->buf.bytesused + DHT_SIZE, 1, file);
                fclose(file);
            }
        }
    }
    close_v4l2();
}
/******************************************************************************
Description.: this thread worker grabs a frame and copies it to the global buffer
Input Value.: unused
Return Value: unused, always NULL
******************************************************************************/
void *cam_thread(void *arg)
{
    /* set cleanup handler to cleanup allocated resources */
    pthread_cleanup_push(cam_cleanup, NULL);

    while (!pglobal->stop) {
        /* grab a frame */
        if (uvcGrab(videoIn) < 0) {
            IPRINT("Error grabbing frames\n");
            exit(EXIT_FAILURE);
        }

        DBG("received frame of size: %d\n", videoIn->buf.bytesused);

        /*
         * Workaround for broken, corrupted frames:
         * Under low light conditions corrupted frames may get captured.
         * The good thing is such frames are quite small compared to the regular pictures.
         * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
         * corrupted frames are smaller.
         */
        if (videoIn->buf.bytesused < minimum_size) {
            DBG("dropping too small frame, assuming it as broken\n");
            continue;
        }

        /* copy JPG picture to global buffer */
        pthread_mutex_lock(&pglobal->db);

        /*
         * If capturing in YUV mode convert to JPEG now.
         * This compression requires many CPU cycles, so try to avoid YUV format.
         * Getting JPEGs straight from the webcam is one of the major advantages of
         * Linux-UVC compatible devices.
         */
        if (videoIn->formatIn != V4L2_PIX_FMT_MJPEG) {
            DBG("compressing frame\n");
            pglobal->size = compress_yuyv_to_jpeg(videoIn, pglobal->buf, videoIn->framesizeIn,
                                                  gquality, videoIn->fmt.fmt.pix.pixelformat);
        } else {
            DBG("copying frame\n");
            pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer, videoIn->buf.bytesused);
        }

#if 0
        /* motion detection can be done just by comparing the picture size,
           but it is not very accurate!! */
        if ((prev_size - global->size) * (prev_size - global->size) > 4 * 1024 * 1024) {
            DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
        }
        prev_size = global->size;
#endif

        /* signal fresh_frame */
        pthread_cond_broadcast(&pglobal->db_update);
        pthread_mutex_unlock(&pglobal->db);

        DBG("waiting for next frame\n");

        /* only use usleep if the fps is below 5, otherwise the overhead is too long */
        if (videoIn->fps < 5) {
            usleep(1000 * 1000 / videoIn->fps);
        }
    }

    DBG("leaving input thread, calling cleanup function now\n");
    pthread_cleanup_pop(1);

    return NULL;
}
int main_loop(void *data)
{
    int ret = 0;
    int i, j, k, l, m, n, o;
    unsigned int YUVMacroPix;
    unsigned char *pix8 = (unsigned char *)&YUVMacroPix;
    Pix *pix2;
    char *pix;

    if ((pix2 = malloc(sizeof(Pix))) == NULL) {
        printf("couldn't allocate memory for: pix2\n");
        ret = 1;
        return ret;
    }
    //fprintf(stderr, "Thread started...\n");

    /*
    ImageSurf = SDL_CreateRGBSurface(SDL_SWSURFACE, overlay->w, overlay->h, 24,
                                     0x00ff0000, 0x0000ff00, 0x000000ff, 0);
    */

    while (videoIn->signalquit) {
        currtime = SDL_GetTicks();
        if (currtime - lasttime > 0) {
            frmrate = 1000 / (currtime - lasttime);
        }
        lasttime = currtime;
        // sprintf(capt, "Frame Rate: %d", frmrate);
        // SDL_WM_SetCaption(capt, NULL);

        if (uvcGrab(videoIn) < 0) {
            printf("Error grabbing => Frame Rate is %d\n", frmrate);
            videoIn->signalquit = 0;
            ret = 2;
        }

        SDL_LockYUVOverlay(overlay);
        memcpy(p, videoIn->framebuffer, videoIn->width * (videoIn->height) * 2);
        SDL_UnlockYUVOverlay(overlay);
        SDL_DisplayYUVOverlay(overlay, &drect);

        /* capture Image */
        if (videoIn->capImage) {
            if ((pim = malloc((pscreen->w) * (pscreen->h) * 3)) == NULL) { /* 24 bits -> 3 bytes, 32 bits -> 4 bytes */
                printf("Couldn't allocate memory for: pim\n");
                videoIn->signalquit = 0;
                ret = 3;
            }
            //char *ppmheader = "P6\n# Generated by guvcview\n320 240\n255\n";
            //FILE *out = fopen("Yimage.ppm", "wb"); // saving as ppm
            //fprintf(out, ppmheader);

            k = overlay->h;
            //printf("overlay->h is %i\n", overlay->h);
            //printf("and pitches[0] is %i\n", overlay->pitches[0]);
            for (j = 0; j < (overlay->h); j++) {
                /* lines already written; pitches[0] is the number of bytes in a line (2*width) */
                l = j * overlay->pitches[0];
                /* lines already written; for RGB every pixel is 3 bytes (3*width = 3*pitches/2) */
                m = (k * 3 * overlay->pitches[0]) >> 1;
                for (i = 0; i < ((overlay->pitches[0]) >> 2); i++) {
                    /* iterate every 4 bytes (32 bits): Y-U-V-Y1 => 2 pixels */
                    n = i << 2;
                    pix8[0] = p[n + l];
                    pix8[1] = p[n + 1 + l];
                    pix8[2] = p[n + 2 + l];
                    pix8[3] = p[n + 3 + l];
                    /* get RGB data */
                    pix2 = yuv2rgb(YUVMacroPix, 0, pix2);
                    /* in bitmaps lines are upside down and the pixel format is BGR */
                    o = i * 6;
                    /* first pixel */
                    pim[o + m] = pix2->b;
                    pim[o + 1 + m] = pix2->g;
                    pim[o + 2 + m] = pix2->r;
                    /* second pixel */
                    pim[o + 3 + m] = pix2->b1;
                    pim[o + 4 + m] = pix2->g1;
                    pim[o + 5 + m] = pix2->r1;
                }
                k--;
            }

            /*
            SDL_LockSurface(ImageSurf);
            memcpy(pix, pim, (pscreen->w) * (pscreen->h) * 3); // 24 bits -> 3 bytes, 32 bits -> 4 bytes
            SDL_UnlockSurface(ImageSurf);
            */

            if (SaveBPM(videoIn->ImageFName, width, height, 24, pim)) {
                fprintf(stderr, "Error: Couldn't capture Image to %s \n", videoIn->ImageFName);
            } else {
                printf("Capture Image to %s \n", videoIn->ImageFName);
            }
            free(pim);
            videoIn->capImage = FALSE;
        }

        /* capture AVI */
        if (videoIn->capAVI && videoIn->signalquit) {
            long framesize;
            switch (AVIFormat) {
            case 1:
                framesize = (pscreen->w) * (pscreen->h) * 2; /* YUY2 -> 2 bytes per pixel */
                if (AVI_write_frame(AviOut, p, framesize) < 0)
                    printf("write error on avi out \n");
                break;
            case 2:
                framesize = (pscreen->w) * (pscreen->h) * 3; /* DIB 24/32 -> 3/4 bytes per pixel */
                if ((pim = malloc(framesize)) == NULL) {
                    printf("Couldn't allocate memory for: pim\n");
                    videoIn->signalquit = 0;
                    ret = 4;
                }
                k = overlay->h;
                for (j = 0; j < (overlay->h); j++) {
                    /* lines already written; pitches[0] is the number of bytes in a line (2*width) */
                    l = j * overlay->pitches[0];
                    /* lines already written; for RGB every pixel is 3 bytes (3*width = 3*pitches/2) */
                    m = (k * 3 * overlay->pitches[0]) >> 1;
                    for (i = 0; i < ((overlay->pitches[0]) >> 2); i++) {
                        /* iterate every 4 bytes (32 bits): Y-U-V-Y1 => 2 pixels */
                        n = i << 2;
                        pix8[0] = p[n + l];
                        pix8[1] = p[n + 1 + l];
                        pix8[2] = p[n + 2 + l];
                        pix8[3] = p[n + 3 + l];
                        /* get RGB data */
                        pix2 = yuv2rgb(YUVMacroPix, 0, pix2);
                        /* in bitmaps lines are upside down and the pixel format is BGR */
                        o = i * 6;
                        /* first pixel */
                        pim[o + m] = pix2->b;
                        pim[o + 1 + m] = pix2->g;
                        pim[o + 2 + m] = pix2->r;
                        /* second pixel */
                        pim[o + 3 + m] = pix2->b1;
                        pim[o + 4 + m] = pix2->g1;
                        pim[o + 5 + m] = pix2->r1;
                    }
                    k--;
                }
                if (AVI_write_frame(AviOut, pim, framesize) < 0)
                    printf("write error on avi out \n");
                free(pim);
                break;
            }
            framecount++;
        }
        SDL_Delay(SDL_WAIT_TIME);
    }
int main(int argc, char *argv[])
{
#ifndef EMBEDED_X210 // PC platform
    const SDL_VideoInfo *info;
    char driver[128];
    SDL_Surface *pscreen;
    SDL_Overlay *overlay;
    SDL_Rect drect;
    SDL_Event sdlevent;
    SDL_Thread *mythread;
    SDL_mutex *affmutex;
    Uint32 currtime;
    Uint32 lasttime;
#endif
    int status;
    unsigned char *p = NULL;
    int hwaccel = 0;
    const char *videodevice = NULL;
    const char *mode = NULL;
    int format = V4L2_PIX_FMT_MJPEG;
    int i;
    int grabmethod = 1;
    int width = 320;
    int height = 240;
    int fps = 15;
    unsigned char frmrate = 0;
    char *avifilename = NULL;
    int queryformats = 0;
    int querycontrols = 0;
    int readconfigfile = 0;
    char *separateur;
    char *sizestring = NULL;
    char *fpsstring = NULL;
    int enableRawStreamCapture = 0;
    int enableRawFrameCapture = 0;
    unsigned char *pRGBData = NULL;

    printf("luvcview version %s \n", version);
    for (i = 1; i < argc; i++) {
        /* skip bad arguments */
        if (argv[i] == NULL || *argv[i] == 0 || *argv[i] != '-') {
            continue;
        }
        if (strcmp(argv[i], "-d") == 0) {
            if (i + 1 >= argc) {
                printf("No parameter specified with -d, aborting.\n");
                exit(1);
            }
            videodevice = strdup(argv[i + 1]);
        }
        if (strcmp(argv[i], "-g") == 0) {
            /* Ask for read instead of the default mmap */
            grabmethod = 0;
        }
        if (strcmp(argv[i], "-w") == 0) {
            /* disable hw acceleration */
            hwaccel = 1;
        }
        if (strcmp(argv[i], "-f") == 0) {
            if (i + 1 >= argc) {
                printf("No parameter specified with -f, aborting.\n");
                exit(1);
            }
            mode = strdup(argv[i + 1]);
            if (strncmp(mode, "yuv", 3) == 0) {
                format = V4L2_PIX_FMT_YUYV;
            } else if (strncmp(mode, "jpg", 3) == 0) {
                format = V4L2_PIX_FMT_MJPEG;
            } else {
                format = V4L2_PIX_FMT_MJPEG;
            }
        }
        if (strcmp(argv[i], "-s") == 0) {
            if (i + 1 >= argc) {
                printf("No parameter specified with -s, aborting.\n");
                exit(1);
            }
            sizestring = strdup(argv[i + 1]);
            width = strtoul(sizestring, &separateur, 10);
            if (*separateur != 'x') {
                printf("Error in size use -s widthxheight \n");
                exit(1);
            } else {
                ++separateur;
                height = strtoul(separateur, &separateur, 10);
                if (*separateur != 0)
                    printf("hmm.. dont like that!! trying this height \n");
                printf(" size width: %d height: %d \n", width, height);
            }
        }
        if (strcmp(argv[i], "-i") == 0) {
            if (i + 1 >= argc) {
                printf("No parameter specified with -i, aborting. \n");
                exit(1);
            }
            fpsstring = strdup(argv[i + 1]);
            fps = strtoul(fpsstring, &separateur, 10);
            printf(" interval: %d fps \n", fps);
        }
        if (strcmp(argv[i], "-S") == 0) {
            /* Enable raw stream capture from the start */
            enableRawStreamCapture = 1;
        }
        if (strcmp(argv[i], "-c") == 0) {
            /* Enable raw frame capture for the first frame */
            enableRawFrameCapture = 1;
        }
        if (strcmp(argv[i], "-C") == 0) {
            /* Enable raw frame stream capture from the start */
            enableRawFrameCapture = 2;
        }
        if (strcmp(argv[i], "-o") == 0) {
            /* set the avi filename */
            if (i + 1 >= argc) {
                printf("No parameter specified with -o, aborting.\n");
                exit(1);
            }
            avifilename = strdup(argv[i + 1]);
        }
        if (strcmp(argv[i], "-L") == 0) {
            /* query list of valid video formats */
            queryformats = 1;
        }
        if (strcmp(argv[i], "-l") == 0) {
            /* query list of valid controls */
            querycontrols = 1;
        }
        if (strcmp(argv[i], "-r") == 0) {
            /* read control settings from the configuration file */
            readconfigfile = 1;
        }
        if (strcmp(argv[i], "-h") == 0) {
            printf("usage: uvcview [-h -d -g -f -s -i -c -o -C -S -L -l -r] \n");
            printf("-h print this message \n");
            printf("-d /dev/videoX use videoX device\n");
            printf("-g use read method for grab instead mmap \n");
            printf("-w disable SDL hardware accel. \n");
            printf("-f video format default jpg others options are yuv jpg \n");
            printf("-i fps use specified frame interval \n");
            printf("-s widthxheight use specified input size \n");
            printf("-c enable raw frame capturing for the first frame\n");
            printf("-C enable raw frame stream capturing from the start\n");
            printf("-S enable raw stream capturing from the start\n");
            printf("-o avifile create avifile, default video.avi\n");
            printf("-L query valid video formats\n");
            printf("-l query valid controls and settings\n");
            printf("-r read and set control settings from luvcview.cfg\n");
            exit(0);
        }
    }

#ifndef EMBEDED_X210 // PC platform
    /************* Test SDL capabilities ************/
    if (SDL_Init(SDL_INIT_VIDEO) < 0) {
        fprintf(stderr, "Couldn't initialize SDL: %s\n", SDL_GetError());
        exit(1);
    }

    /* For this version, we'll be safe and disable hardware acceleration */
    if (hwaccel) {
        if (!getenv("SDL_VIDEO_YUV_HWACCEL")) {
            putenv("SDL_VIDEO_YUV_HWACCEL=0");
        }
    }

    if (SDL_VideoDriverName(driver, sizeof(driver))) {
        printf("Video driver: %s\n", driver);
    }
    info = SDL_GetVideoInfo();
    if (info->wm_available) {
        printf("A window manager is available\n");
    }
    if (info->hw_available) {
        printf("Hardware surfaces are available (%dK video memory)\n", info->video_mem);
        SDL_VIDEO_Flags |= SDL_HWSURFACE;
    }
    if (info->blit_hw) {
        printf("Copy blits between hardware surfaces are accelerated\n");
        SDL_VIDEO_Flags |= SDL_ASYNCBLIT;
    }
    if (info->blit_hw_CC) {
        printf("Colorkey blits between hardware surfaces are accelerated\n");
    }
    if (info->blit_hw_A) {
        printf("Alpha blits between hardware surfaces are accelerated\n");
    }
    if (info->blit_sw) {
        printf("Copy blits from software surfaces to hardware surfaces are accelerated\n");
    }
    if (info->blit_sw_CC) {
        printf("Colorkey blits from software surfaces to hardware surfaces are accelerated\n");
    }
    if (info->blit_sw_A) {
        printf("Alpha blits from software surfaces to hardware surfaces are accelerated\n");
    }
    if (info->blit_fill) {
        printf("Color fills on hardware surfaces are accelerated\n");
    }
    if (!(SDL_VIDEO_Flags & SDL_HWSURFACE))
        SDL_VIDEO_Flags |= SDL_SWSURFACE;
#endif

    if (videodevice == NULL || *videodevice == 0) {
        videodevice = "/dev/video0";
    }
    if (avifilename == NULL || *avifilename == 0) {
        avifilename = "video.avi";
    }

    videoIn = (struct vdIn *) calloc(1, sizeof(struct vdIn));
    if (queryformats) {
        /* if we're supposed to list the video formats, do that now and exit */
        check_videoIn(videoIn, (char *) videodevice);
        free(videoIn);
#ifndef EMBEDED_X210
        SDL_Quit();
#endif
        exit(1);
    }
    if (init_videoIn(videoIn, (char *) videodevice, width, height, fps, format,
                     grabmethod, avifilename) < 0)
        exit(1);

    /* if we're supposed to list the controls, do that now */
    if (querycontrols)
        enum_controls(videoIn->fd);

    /* if we're supposed to read the control settings from a configfile, do that now */
    if (readconfigfile)
        load_controls(videoIn->fd);

#ifdef EMBEDED_X210
#ifdef SOFT_COLOR_CONVERT
    init_framebuffer();
#else
    x6410_init_Draw(videoIn->width, videoIn->height);
#endif
#else
    pscreen = SDL_SetVideoMode(videoIn->width, videoIn->height + 30, 0, SDL_VIDEO_Flags);
    overlay = SDL_CreateYUVOverlay(videoIn->width, videoIn->height + 30, SDL_YUY2_OVERLAY, pscreen);
    p = (unsigned char *) overlay->pixels[0];
    drect.x = 0;
    drect.y = 0;
    drect.w = pscreen->w;
    drect.h = pscreen->h;
#endif

    if (enableRawStreamCapture) {
        videoIn->captureFile = fopen("stream.raw", "wb");
        if (videoIn->captureFile == NULL) {
            perror("Unable to open file for raw stream capturing");
        } else {
            printf("Starting raw stream capturing to stream.raw ...\n");
        }
    }
    if (enableRawFrameCapture)
        videoIn->rawFrameCapture = enableRawFrameCapture;

    initLut();

#ifndef EMBEDED_X210
    SDL_WM_SetCaption(title_act[A_VIDEO].title, NULL);
    lasttime = SDL_GetTicks();
    creatButt(videoIn->width, 32);
    SDL_LockYUVOverlay(overlay);
    memcpy(p + (videoIn->width * (videoIn->height) * 2), YUYVbutt, videoIn->width * 64);
    SDL_UnlockYUVOverlay(overlay);

    /* initialize thread data */
    ptdata.ptscreen = &pscreen;
    ptdata.ptvideoIn = videoIn;
    ptdata.ptsdlevent = &sdlevent;
    ptdata.drect = &drect;
    affmutex = SDL_CreateMutex();
    ptdata.affmutex = affmutex;
    mythread = SDL_CreateThread(eventThread, (void *) &ptdata);
#endif

    pRGBData = (unsigned char *) malloc(videoIn->width * videoIn->height * 4 * sizeof(char));
    if (pRGBData == NULL) {
        return -1;
    }

    /* main big loop */
    while (videoIn->signalquit) {
#ifndef EMBEDED_X210
        currtime = SDL_GetTicks();
        if (currtime - lasttime > 0) {
            frmrate = 1000 / (currtime - lasttime);
        }
        lasttime = currtime;
#endif
        if (uvcGrab(videoIn) < 0) {
            printf("Error grabbing \n");
            break;
        }

        /* if we're grabbing video, show the frame rate */
        if (videoIn->toggleAvi)
            printf("\rframe rate: %d ", frmrate);

#ifndef EMBEDED_X210
        SDL_LockYUVOverlay(overlay);
        memcpy(p, videoIn->framebuffer, videoIn->width * (videoIn->height) * 2);
        SDL_UnlockYUVOverlay(overlay);
        SDL_DisplayYUVOverlay(overlay, &drect);
#endif

#ifdef EMBEDED_X210
#ifdef SOFT_COLOR_CONVERT
        // yuv to rgb565, then to framebuffer
        process_image(videoIn->framebuffer, fbp, videoIn->width, videoIn->height, vinfo, finfo);
        // convertYUYVtoRGB565(videoIn->framebuffer, pRGBData, videoIn->width, videoIn->height);
        // Pyuv422torgb24(videoIn->framebuffer, pRGBData, videoIn->width, videoIn->height);
        // memcpy(fbp, pRGBData, videoIn->width * videoIn->height * 2);
#else
        // X6410 post processor converts yuv to rgb; X210 is not supported yet.
        /*
        memcpy(pInbuffer, videoIn->framebuffer, videoIn->width * (videoIn->height) * 2);
        ioctl(dev_fb0, GET_FB_INFO, &fb_info);
        pp_param.SrcFrmSt = ioctl(dev_pp, S3C_PP_GET_RESERVED_MEM_ADDR_PHY); // must be a physical address
        pp_param.DstFrmSt = fb_info.map_dma_f1;                              // must be a physical address
        ioctl(dev_pp, S3C_PP_SET_PARAMS, &pp_param);
        ioctl(dev_pp, S3C_PP_SET_DST_BUF_ADDR_PHY, &pp_param);
        ioctl(dev_pp, S3C_PP_SET_SRC_BUF_ADDR_PHY, &pp_param);
        ioctl(dev_pp, S3C_PP_START);
        */
#endif
#endif

        if (videoIn->getPict) {
            switch (videoIn->formatIn) {
            case V4L2_PIX_FMT_MJPEG:
                get_picture(videoIn->tmpbuffer, videoIn->buf.bytesused);
                break;
            case V4L2_PIX_FMT_YUYV:
                get_pictureYV2(videoIn->framebuffer, videoIn->width, videoIn->height);
                break;
            default:
                break;
            }
            videoIn->getPict = 0;
            printf("get picture !\n");
        }

#ifndef EMBEDED_X210
        SDL_LockMutex(affmutex);
        ptdata.frmrate = frmrate;
        SDL_WM_SetCaption(videoIn->status, NULL);
        SDL_UnlockMutex(affmutex);
#endif

#ifdef EMBEDED_X210
        usleep(10);
#else
        SDL_Delay(10);
#endif
    }

#ifndef EMBEDED_X210
    SDL_WaitThread(mythread, &status);
    SDL_DestroyMutex(affmutex);
#endif

    /* if avifile is defined, we made a video: compute the exact fps and set it in the video */
    if (videoIn->avifile != NULL) {
        float fps = (videoIn->framecount / (videoIn->recordtime / 1000));
        fprintf(stderr, "setting fps to %f\n", fps);
        AVI_set_video(videoIn->avifile, videoIn->width, videoIn->height, fps, "MJPG");
        AVI_close(videoIn->avifile);
    }

    close_v4l2(videoIn);

#ifdef EMBEDED_X210
#ifdef SOFT_COLOR_CONVERT
    close_frambuffer();
#else
    x6410_DeInit_Draw();
#endif
#endif
    free(pRGBData);
    free(videoIn);
    destroyButt();
    freeLut();
    printf(" Clean Up done Quit \n");
#ifndef EMBEDED_X210
    SDL_Quit();
#endif
}
/******************************************************************************
Description.: this thread worker grabs a frame and copies it to the global buffer
Input Value.: unused
Return Value: unused, always NULL
******************************************************************************/
void *cam_thread(void *arg)
{
    g_settings.init();
    setCameraExposure();

    CVideoFrame *pFrame = NULL;

#ifndef TEST_USE_JPEGS_NOT_CAMERA
    int width = VIEW_PIXEL_X_WIDTH;
    int height = VIEW_PIXEL_Y_HEIGHT;
    IplImage *img = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3); // OpenCV image
#endif

    frameGrinder.init();

#ifdef TEST_USE_JPEGS_NOT_CAMERA
    std::string sBasePath = "/home/";
    sBasePath += HOME_NAME;
    std::string sPath = sBasePath;
    sPath += "/0243-20150125-22-21-46.jpg";
    //sPath += "/0007-20150125-22-36-25.jpg";
    cv::Mat frame1 = cv::imread(sPath.c_str(), CV_LOAD_IMAGE_COLOR);
    if (frame1.empty()) {
        dbgMsg_s("Failed to read image data from a file1\n");
    }

    sPath = sBasePath;
    sPath += "/0243-20150125-22-21-46.jpg";
    //sPath += "/0007-20150125-22-36-25.jpg";
    cv::Mat frame2 = cv::imread(sPath.c_str(), CV_LOAD_IMAGE_COLOR);
    if (frame2.empty()) {
        dbgMsg_s("Failed to read image data from a file2\n");
    }
    bool toggle = false;
#endif

    context *pcontext = (context *) arg;
    pglobal = pcontext->pglobal;

    /* set cleanup handler to cleanup allocated resources */
    pthread_cleanup_push(cam_cleanup, pcontext);

    while (!pglobal->stop) {
        while (pcontext->videoIn->streamingState == STREAMING_PAUSED) {
            usleep(1); // maybe not the best way so FIXME
        }

#ifdef TEST_USE_JPEGS_NOT_CAMERA
        if (frameGrinder.safeGetFreeFrame(&pFrame)) {
            if (toggle) {
                pFrame->m_frame = frame1;
            } else {
                pFrame->m_frame = frame2;
            }
            toggle = (!toggle);
            if (!pFrame->m_frame.empty()) {
                frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT);
            } else {
                dbgMsg_s("Frame is empty\n");
                frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_FREE);
            }
            frameGrinder.m_testMonitor.m_nTasksDone[CTestMonitor::TASK_DONE_CAMERA]++;
        }
#else
        /* grab a frame */
        if (uvcGrab(pcontext->videoIn) < 0) {
            IPRINT("Error grabbing frames\n");
            exit(EXIT_FAILURE);
        }

        DBG("received frame of size: %d from plugin: %d\n",
            pcontext->videoIn->buf.bytesused, pcontext->id);

        /*
         * Workaround for broken, corrupted frames:
         * Under low light conditions corrupted frames may get captured.
         * The good thing is such frames are quite small compared to the regular pictures.
         * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
         * corrupted frames are smaller.
         */
        if (pcontext->videoIn->buf.bytesused < minimum_size) {
            DBG("dropping too small frame, assuming it as broken\n");
            continue;
        }

        if (g_settings.isDynamicSettingsEnabled()) {
            g_settings.getValueFromFile(CSetting::SETTING_EXPOSURE);
        }
        if (g_settings.isValueChanged(CSetting::SETTING_EXPOSURE)) {
            setCameraExposure();
        }

#ifdef NO_CV_JUST_STREAM_THE_CAMERA
        /* copy JPG picture to global buffer */
        pthread_mutex_lock(&pglobal->in[pcontext->id].db);

        /*
         * If capturing in YUV mode convert to JPEG now.
         * This compression requires many CPU cycles, so try to avoid YUV format.
         * Getting JPEGs straight from the webcam is one of the major advantages of
         * Linux-UVC compatible devices.
         */
        if (pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) {
            DBG("compressing frame from input: %d\n", (int) pcontext->id);
            pglobal->in[pcontext->id].size = compress_yuyv_to_jpeg(pcontext->videoIn,
                    pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, gquality);
        } else {
            DBG("copying frame from input: %d\n", (int) pcontext->id);
            pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf,
                    pcontext->videoIn->tmpbuffer, pcontext->videoIn->buf.bytesused);
        }

        /* copy this frame's timestamp to user space */
        pglobal->in[pcontext->id].timestamp = pcontext->videoIn->buf.timestamp;

        /* signal fresh_frame */
        pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
        pthread_mutex_unlock(&pglobal->in[pcontext->id].db);
#else  // #ifndef NO_CV_JUST_STREAM_THE_CAMERA
        if (frameGrinder.safeGetFreeFrame(&pFrame)) {
            std::vector<uchar> vectordata(pcontext->videoIn->tmpbuffer,
                                          pcontext->videoIn->tmpbuffer + (height * width));
            cv::Mat data_mat(vectordata, false);
            cv::Mat image(cv::imdecode(data_mat, 1)); // pass 0 if you want grayscale
            pFrame->m_frame = image;
            if (!pFrame->m_frame.empty()) {
                frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT);
            } else {
                dbgMsg_s("Frame is empty\n");
                frameGrinder.safeAddTail(pFrame, CVideoFrame::FRAME_QUEUE_FREE);
            }
            frameGrinder.m_testMonitor.m_nTasksDone[CTestMonitor::TASK_DONE_CAMERA]++;
        }
#endif // #ifndef NO_CV_JUST_STREAM_THE_CAMERA
#endif // TEST_USE_JPEGS_NOT_CAMERA
    }

    DBG("leaving input thread, calling cleanup function now\n");
    pthread_cleanup_pop(1);

    return NULL;
}
int main(int argc, char *argv[])
{
    char *videodevice = "/dev/video0";
    char *outputfile = "snap.jpg";
    char *post_capture_command[3];
    int format = V4L2_PIX_FMT_MJPEG;
    int grabmethod = 1;
    int width = 320;
    int height = 240;
    int brightness = 0, contrast = 0, saturation = 0, gain = 0;
    int verbose = 0;
    int delay = 0;
    int quality = 95;
    int post_capture_command_wait = 0;
    time_t ref_time;
    struct vdIn *videoIn;
    FILE *file;

    (void) signal(SIGINT, sigcatch);
    (void) signal(SIGQUIT, sigcatch);
    (void) signal(SIGKILL, sigcatch);
    (void) signal(SIGTERM, sigcatch);
    (void) signal(SIGABRT, sigcatch);
    (void) signal(SIGTRAP, sigcatch);

    // set post_capture_command to default values
    post_capture_command[0] = NULL;
    post_capture_command[1] = NULL;
    post_capture_command[2] = NULL;

    // Options parsing (FIXME)
    while ((argc > 1) && (argv[1][0] == '-')) {
        switch (argv[1][1]) {
        case 'v':
            verbose++;
            break;
        case 'o':
            outputfile = &argv[1][2];
            break;
        case 'd':
            videodevice = &argv[1][2];
            break;
        case 'x':
            width = atoi(&argv[1][2]);
            break;
        case 'y':
            height = atoi(&argv[1][2]);
            break;
        case 'r':
            grabmethod = 0;
            break;
        case 'm':
            format = V4L2_PIX_FMT_YUYV;
            break;
        case 't':
            delay = atoi(&argv[1][2]);
            break;
        case 'c':
            post_capture_command[0] = &argv[1][2];
            break;
        case 'w':
            post_capture_command_wait = 1;
            break;
        case 'B':
            brightness = atoi(&argv[1][2]);
            break;
        case 'C':
            contrast = atoi(&argv[1][2]);
            break;
        case 'S':
            saturation = atoi(&argv[1][2]);
            break;
        case 'G':
            gain = atoi(&argv[1][2]);
            break;
        case 'q':
            quality = atoi(&argv[1][2]);
            break;
        case 'h':
            usage();
            break;
        default:
            fprintf(stderr, "Unknown option %s \n", argv[1]);
            usage();
        }
        ++argv;
        --argc;
    }

    if ((width > 960) || (height > 720) || (quality != 95))
        format = V4L2_PIX_FMT_YUYV;

    if (post_capture_command[0])
        post_capture_command[1] = outputfile;

    if (verbose >= 1) {
        fprintf(stderr, "Using videodevice: %s\n", videodevice);
        fprintf(stderr, "Saving images to: %s\n", outputfile);
        fprintf(stderr, "Image size: %dx%d\n", width, height);
        fprintf(stderr, "Taking snapshot every %d seconds\n", delay);
        if (grabmethod == 1)
            fprintf(stderr, "Taking images using mmap\n");
        else
            fprintf(stderr, "Taking images using read\n");
        if (post_capture_command[0])
            fprintf(stderr, "Executing '%s' after each image capture\n", post_capture_command[0]);
    }

    videoIn = (struct vdIn *) calloc(1, sizeof(struct vdIn));
    if (init_videoIn(videoIn, (char *) videodevice, width, height, format, grabmethod) < 0)
        exit(1);

    // Reset all camera controls
    if (verbose >= 1)
        fprintf(stderr, "Resetting camera settings\n");
    v4l2ResetControl(videoIn, V4L2_CID_BRIGHTNESS);
    v4l2ResetControl(videoIn, V4L2_CID_CONTRAST);
    v4l2ResetControl(videoIn, V4L2_CID_SATURATION);
    v4l2ResetControl(videoIn, V4L2_CID_GAIN);

    // Setup camera parameters
    if (brightness != 0) {
        if (verbose >= 1)
            fprintf(stderr, "Setting camera brightness to %d\n", brightness);
        v4l2SetControl(videoIn, V4L2_CID_BRIGHTNESS, brightness);
    } else if (verbose >= 1) {
        fprintf(stderr, "Camera brightness level is %d\n",
                v4l2GetControl(videoIn, V4L2_CID_BRIGHTNESS));
    }
    if (contrast != 0) {
        if (verbose >= 1)
            fprintf(stderr, "Setting camera contrast to %d\n", contrast);
        v4l2SetControl(videoIn, V4L2_CID_CONTRAST, contrast);
    } else if (verbose >= 1) {
        fprintf(stderr, "Camera contrast level is %d\n",
                v4l2GetControl(videoIn, V4L2_CID_CONTRAST));
    }
    if (saturation != 0) {
        if (verbose >= 1)
            fprintf(stderr, "Setting camera saturation to %d\n", saturation);
        v4l2SetControl(videoIn, V4L2_CID_SATURATION, saturation);
    } else if (verbose >= 1) {
        fprintf(stderr, "Camera saturation level is %d\n",
                v4l2GetControl(videoIn, V4L2_CID_SATURATION));
    }
    if (gain != 0) {
        if (verbose >= 1)
            fprintf(stderr, "Setting camera gain to %d\n", gain);
        v4l2SetControl(videoIn, V4L2_CID_GAIN, gain);
    } else if (verbose >= 1) {
        fprintf(stderr, "Camera gain level is %d\n",
                v4l2GetControl(videoIn, V4L2_CID_GAIN));
    }

    ref_time = time(NULL);

    while (run) {
        if (verbose >= 2)
            fprintf(stderr, "Grabbing frame\n");
        if (uvcGrab(videoIn) < 0) {
            fprintf(stderr, "Error grabbing\n");
            close_v4l2(videoIn);
            free(videoIn);
            exit(1);
        }

        if ((difftime(time(NULL), ref_time) > delay) || delay == 0) {
            if (verbose >= 1)
                fprintf(stderr, "Saving image to: %s\n", outputfile);
            file = fopen(outputfile, "wb");
            if (file != NULL) {
                switch (videoIn->formatIn) {
                case V4L2_PIX_FMT_YUYV:
                    compress_yuyv_to_jpeg(videoIn, file, quality);
                    break;
                default:
                    fwrite(videoIn->tmpbuffer, videoIn->buf.bytesused + DHT_SIZE, 1, file);
                    break;
                }
                fclose(file);
                videoIn->getPict = 0;
            }
            if (post_capture_command[0]) {
                if (verbose >= 1)
                    fprintf(stderr, "Executing '%s %s'\n",
                            post_capture_command[0], post_capture_command[1]);
                if (spawn(post_capture_command, post_capture_command_wait, verbose)) {
                    fprintf(stderr, "Command exited with error\n");
                    close_v4l2(videoIn);
                    free(videoIn);
                    exit(1);
                }
            }
            ref_time = time(NULL);
        }
        if (delay == 0)
            break;
    }

    close_v4l2(videoIn);
    free(videoIn);

    return 0;
}
void startVideoSrvr()
{
    pthread_t videoSocketThread;

    /* alloc memory for the videoIn struct & initialize */
    videoIn = (struct vdIn *) calloc(1, sizeof(struct vdIn));
    if (init_videoIn(videoIn, (char *) videodevice, width, height, format, grabmethod) < 0)
        exit(1);

    /* alloc memory for the control struct & video out array & initialize */
    ctrlStruct ctrl, *pc;
    if ((ctrl.imgArray = malloc(3 * width * height)) == NULL) // enough space for rgb
        exit(-1);
    ctrl.doCapture = 0;

    // Reset all camera controls
    if (verbose >= 1)
        fprintf(stderr, "Resetting camera settings\n");
    v4l2ResetControl(videoIn, V4L2_CID_BRIGHTNESS);
    v4l2ResetControl(videoIn, V4L2_CID_CONTRAST);
    v4l2ResetControl(videoIn, V4L2_CID_SATURATION);
    v4l2ResetControl(videoIn, V4L2_CID_GAIN);

    // Setup camera parameters
    if (brightness != 0) {
        if (verbose >= 1)
            fprintf(stderr, "Setting camera brightness to %d\n", brightness);
        v4l2SetControl(videoIn, V4L2_CID_BRIGHTNESS, brightness);
    } else if (verbose >= 1) {
        fprintf(stderr, "Camera brightness level is %d\n",
                v4l2GetControl(videoIn, V4L2_CID_BRIGHTNESS));
    }
    if (contrast != 0) {
        if (verbose >= 1)
            fprintf(stderr, "Setting camera contrast to %d\n", contrast);
        v4l2SetControl(videoIn, V4L2_CID_CONTRAST, contrast);
    } else if (verbose >= 1) {
        fprintf(stderr, "Camera contrast level is %d\n",
                v4l2GetControl(videoIn, V4L2_CID_CONTRAST));
    }
    if (saturation != 0) {
        if (verbose >= 1)
            fprintf(stderr, "Setting camera saturation to %d\n", saturation);
        v4l2SetControl(videoIn, V4L2_CID_SATURATION, saturation);
    } else if (verbose >= 1) {
        fprintf(stderr, "Camera saturation level is %d\n",
                v4l2GetControl(videoIn, V4L2_CID_SATURATION));
    }
    if (gain != 0) {
        if (verbose >= 1)
            fprintf(stderr, "Setting camera gain to %d\n", gain);
        v4l2SetControl(videoIn, V4L2_CID_GAIN, gain);
    } else if (verbose >= 1) {
        fprintf(stderr, "Camera gain level is %d\n",
                v4l2GetControl(videoIn, V4L2_CID_GAIN));
    }

    // wait for a video client to connect before proceeding
    fprintf(stderr, "waiting for video client connection on port %d\n", port);
    if ((ctrl.videoSocket = wait4client(port)) <= 0) {
        fprintf(stderr, "error connecting to client: %d\n", ctrl.videoSocket);
        exit(-1);
    }

    // start the thread that handles the video client requests
    pthread_create(&videoSocketThread, NULL, (void *)cmdHandler, (void *)&ctrl);

    while (run) {
        if (verbose >= 2)
            fprintf(stderr, ".");
        if (uvcGrab(videoIn) < 0) {
            fprintf(stderr, "Error grabbing\n");
            close_v4l2(videoIn);
            free(videoIn);
            exit(1);
        }

        if (ctrl.doCapture == 1) {
            if (verbose >= 1) {
                fprintf(stderr, "captured %d byte image at %p %dx%d\n",
                        videoIn->framesizeIn, (void *) videoIn->framebuffer,
                        videoIn->width, videoIn->height);
            } else {
                fprintf(stderr, ".");
            }
            if (outputType == 0)
                yuyv2Y(videoIn, &ctrl);
            else
                yuyv2rgb(videoIn, &ctrl);
            if (verbose >= 1)
                fprintf(stderr, "converted image to luminance in buffer at %p\n",
                        (void *) ctrl.imgArray);
            videoIn->getPict = 0;
            ctrl.doCapture = 0;
        }
    }

    close_v4l2(videoIn);
    free(videoIn);
    return;
}
int main(int argc, char *argv[])
{
    char driver[128];
    int status;
    //Uint32 currtime;
    //Uint32 lasttime;
    unsigned char *p = NULL;
    int hwaccel = 0;
    const char *videodevice = NULL;
    const char *mode = NULL;
    int format = V4L2_PIX_FMT_MJPEG;
    int i;
    int grabmethod = 1;
    int width = 320;
    int height = 240;
    int fps = 15;
    unsigned char frmrate = 0;
    char *avifilename = NULL;
    int queryformats = 0;
    int querycontrols = 0;
    int readconfigfile = 0;
    char *separateur;
    char *sizestring = NULL;
    char *fpsstring = NULL;
    int enableRawStreamCapture = 0;
    int enableRawFrameCapture = 0;

    printf("luvcview version %s \n", version);
    for (i = 1; i < argc; i++) {
        /* skip bad arguments */
        if (argv[i] == NULL || *argv[i] == 0 || *argv[i] != '-') {
            continue;
        }
        if (strcmp(argv[i], "-d") == 0) {
            if (i + 1 >= argc) {
                printf("No parameter specified with -d, aborting.\n");
                exit(1);
            }
            videodevice = strdup(argv[i + 1]);
        }
        if (strcmp(argv[i], "-g") == 0) {
            /* Ask for read instead of the default mmap */
            grabmethod = 0;
        }
        if (strcmp(argv[i], "-w") == 0) {
            /* disable hw acceleration */
            hwaccel = 1;
        }
        if (strcmp(argv[i], "-f") == 0) {
            if (i + 1 >= argc) {
                printf("No parameter specified with -f, aborting.\n");
                exit(1);
            }
            mode = strdup(argv[i + 1]);
            if (strncmp(mode, "yuv", 3) == 0) {
                format = V4L2_PIX_FMT_YUYV;
            } else if (strncmp(mode, "jpg", 3) == 0) {
                format = V4L2_PIX_FMT_MJPEG;
            } else {
                format = V4L2_PIX_FMT_JPEG;
            }
        }
        if (strcmp(argv[i], "-s") == 0) {
            if (i + 1 >= argc) {
                printf("No parameter specified with -s, aborting.\n");
                exit(1);
            }
            sizestring = strdup(argv[i + 1]);
            width = strtoul(sizestring, &separateur, 10);
            if (*separateur != 'x') {
                printf("Error in size use -s widthxheight \n");
                exit(1);
            } else {
                ++separateur;
                height = strtoul(separateur, &separateur, 10);
                if (*separateur != 0)
                    printf("hmm.. dont like that!! trying this height \n");
                printf(" size width: %d height: %d \n", width, height);
            }
        }
        if (strcmp(argv[i], "-i") == 0) {
            if (i + 1 >= argc) {
                printf("No parameter specified with -i, aborting. \n");
                exit(1);
            }
            fpsstring = strdup(argv[i + 1]);
            fps = strtoul(fpsstring, &separateur, 10);
            printf(" interval: %d fps \n", fps);
        }
        if (strcmp(argv[i], "-S") == 0) {
            /* Enable raw stream capture from the start */
            enableRawStreamCapture = 1;
        }
        if (strcmp(argv[i], "-c") == 0) {
            /* Enable raw frame capture for the first frame */
            enableRawFrameCapture = 1;
        }
        if (strcmp(argv[i], "-C") == 0) {
            /* Enable raw frame stream capture from the start */
            enableRawFrameCapture = 2;
        }
        if (strcmp(argv[i], "-o") == 0) {
            /* set the avi filename */
            if (i + 1 >= argc) {
                printf("No parameter specified with -o, aborting.\n");
                exit(1);
            }
            avifilename = strdup(argv[i + 1]);
        }
        if (strcmp(argv[i], "-L") == 0) {
            /* query list of valid video formats */
            queryformats = 1;
        }
        if (strcmp(argv[i], "-l") == 0) {
            /* query list of valid controls */
            querycontrols = 1;
        }
        if (strcmp(argv[i], "-r") == 0) {
            /* read control settings from the configuration file */
            readconfigfile = 1;
        }
        if (strcmp(argv[i], "-O") == 0) {
            /* get picture */
            getpictureflag = 1;
        }
        if (strcmp(argv[i], "-h") == 0) {
            printf("usage: uvcview [-h -d -g -f -s -i -c -o -C -S -L -l -r] \n");
            printf("-h print this message \n");
            printf("-d /dev/videoX use videoX device\n");
            printf("-g use read method for grab instead mmap \n");
            printf("-w disable SDL hardware accel. \n");
            printf("-f video format default jpg others options are yuv jpg \n");
            printf("-i fps use specified frame interval \n");
            printf("-s widthxheight use specified input size \n");
            printf("-c enable raw frame capturing for the first frame\n");
            printf("-C enable raw frame stream capturing from the start\n");
            printf("-S enable raw stream capturing from the start\n");
            printf("-o avifile create avifile, default video.avi\n");
            printf("-L query valid video formats\n");
            printf("-l query valid controls and settings\n");
            printf("-r read and set control settings from luvcview.cfg\n");
            printf("-O get picture.\n");
            exit(0);
        }
    }

    if (videodevice == NULL || *videodevice == 0) {
        videodevice = "/dev/video0";
    }
    if (avifilename == NULL || *avifilename == 0) {
        avifilename = "video.avi";
    }

    videoIn = (struct vdIn *) calloc(1, sizeof(struct vdIn));
    if (queryformats) {
        /* if we're supposed to list the video formats, do that now and exit */
        check_videoIn(videoIn, (char *) videodevice);
        free(videoIn);
        exit(1);
    }
    if (init_videoIn(videoIn, (char *) videodevice, width, height, fps, format,
                     grabmethod, avifilename) < 0)
        exit(1);

    /* if we're supposed to list the controls, do that now */
    if (querycontrols)
        enum_controls(videoIn->fd);

    /* if we're supposed to read the control settings from a configfile, do that now */
    if (readconfigfile)
        load_controls(videoIn->fd);

    printf("Enable Raw Stream Capture\n");
    if (enableRawStreamCapture) {
        videoIn->captureFile = fopen("stream.raw", "wb");
        if (videoIn->captureFile == NULL) {
            perror("Unable to open file for raw stream capturing");
        } else {
            printf("Starting raw stream capturing to stream.raw ...\n");
        }
    }
    if (enableRawFrameCapture)
        videoIn->rawFrameCapture = enableRawFrameCapture;

    initLut();

    printf("Begin main big loop\n");
    int loopNum = 0;
    /* main big loop */
    while (videoIn->signalquit) {
        if (uvcGrab(videoIn) < 0) {
            printf("Error grabbing \n");
            break;
        }

        /* if we're grabbing video, show the frame rate */
        if (videoIn->toggleAvi)
            printf("\rframe rate: %d ", frmrate);

        if (getpictureflag) {
        //if (videoIn->getPict) {
            switch (videoIn->formatIn) {
            case V4L2_PIX_FMT_MJPEG:
                get_picture(videoIn->tmpbuffer, videoIn->buf.bytesused);
                break;
            case V4L2_PIX_FMT_YUYV:
                printf("get picture yuv...\n");
                get_pictureYV2(videoIn->framebuffer, videoIn->width, videoIn->height);
                break;
            default:
                break;
            }
            videoIn->getPict = 0;
            printf("get picture !\n");
        }
        printf("loop number %d\n", loopNum);
        loopNum++;
    }

    /* if avifile is defined, we made a video: compute the exact fps and set it in the video */
    if (videoIn->avifile != NULL) {
        float fps = (videoIn->framecount / (videoIn->recordtime / 1000));
        fprintf(stderr, "setting fps to %f\n", fps);
        AVI_set_video(videoIn->avifile, videoIn->width, videoIn->height, fps, "MJPG");
        AVI_close(videoIn->avifile);
    }

    close_v4l2(videoIn);
    free(videoIn);
    freeLut();
    printf(" Clean Up done Quit \n");
}
int luvcview()
{
    const SDL_VideoInfo *info;
    char driver[128];
    SDL_Surface *pscreen;
    SDL_Surface *pparent;
    SDL_Overlay *overlay;
    SDL_Rect drect;
    int status;
    Uint32 currtime;
    Uint32 lasttime;
    unsigned char *p = NULL;
    int hwaccel = 0;
    const char *videodevice = NULL;
    const char *mode = NULL;
    int format = V4L2_PIX_FMT_MJPEG;
    int i;
    int grabmethod = 1;
    int width = 320;
    int height = 240;
    int fps = 15;
    unsigned char frmrate = 0;
    char *avifilename = NULL;
    int queryformats = 0;
    int querycontrols = 0;
    int readconfigfile = 0;
    char *separateur;
    char *sizestring = NULL;
    char *fpsstring = NULL;
    int enableRawStreamCapture = 0;
    int enableRawFrameCapture = 0;

    format = V4L2_PIX_FMT_YUYV;
    videodevice = "/dev/video0";

    printf("luvcview version %s \n", version);
    if (SDL_VideoDriverName(driver, sizeof(driver))) {
        printf("Video driver: %s\n", driver);
    }
    info = SDL_GetVideoInfo();
    if (info->wm_available) {
        printf("A window manager is available\n");
    }
    if (info->hw_available) {
        printf("Hardware surfaces are available (%dK video memory)\n", info->video_mem);
        SDL_VIDEO_Flags |= SDL_HWSURFACE;
    }
    if (info->blit_hw) {
        printf("Copy blits between hardware surfaces are accelerated\n");
        SDL_VIDEO_Flags |= SDL_ASYNCBLIT;
    }
    if (info->blit_hw_CC) {
        printf("Colorkey blits between hardware surfaces are accelerated\n");
    }
    if (info->blit_hw_A) {
        printf("Alpha blits between hardware surfaces are accelerated\n");
    }
    if (info->blit_sw) {
        printf("Copy blits from software surfaces to hardware surfaces are accelerated\n");
    }
    if (info->blit_sw_CC) {
        printf("Colorkey blits from software surfaces to hardware surfaces are accelerated\n");
    }
    if (info->blit_sw_A) {
        printf("Alpha blits from software surfaces to hardware surfaces are accelerated\n");
    }
    if (info->blit_fill) {
        printf("Color fills on hardware surfaces are accelerated\n");
    }
    if (!(SDL_VIDEO_Flags & SDL_HWSURFACE))
        SDL_VIDEO_Flags |= SDL_SWSURFACE;

    if (avifilename == NULL || *avifilename == 0) {
        avifilename = "video.avi";
    }

    videoIn = (struct vdIn *) calloc(1, sizeof(struct vdIn));
    if (init_videoIn(videoIn, (char *) videodevice, width, height, fps, format,
                     grabmethod, avifilename) < 0)
        exit(1);

    pscreen = SDL_GetVideoSurface();
    overlay = SDL_CreateYUVOverlay(videoIn->width, videoIn->height, SDL_YUY2_OVERLAY, pscreen);
    p = (unsigned char *) overlay->pixels[0];
    drect.x = 0;
    drect.y = 0;
    drect.w = pscreen->w;
    drect.h = pscreen->h;

    initLut();
    lasttime = SDL_GetTicks();

    int loop = 0;
    /* main big loop */
    while (videoIn->signalquit) {
        currtime = SDL_GetTicks();
        if (currtime - lasttime > 0) {
            frmrate = 1000 / (currtime - lasttime);
        }
        lasttime = currtime;
        if (uvcGrab(videoIn) < 0) {
            printf("Error grabbing \n");
            break;
        }
        SDL_LockYUVOverlay(overlay);
        memcpy(p, videoIn->framebuffer, videoIn->width * (videoIn->height) * 2);
        SDL_UnlockYUVOverlay(overlay);
        SDL_DisplayYUVOverlay(overlay, &drect);

        if (loop > 35) {
            printf("loop: %d\n", loop);
            break;
        }
        ++loop;
        SDL_Delay(10);
    }

    close_v4l2(videoIn);
    free(videoIn);
    freeLut();
    printf(" Clean Up done Quit \n");
    return 0;
}
/******************************************************************************
Description.: this thread worker grabs a frame and copies it to the global buffer
Input Value.: unused
Return Value: unused, always NULL
******************************************************************************/
void *cam_thread(void *arg)
{
    context *pcontext = arg;
    pglobal = pcontext->pglobal;

    /* set cleanup handler to cleanup allocated resources */
    pthread_cleanup_push(cam_cleanup, pcontext);

    while (!pglobal->stop) {
        while (pcontext->videoIn->streamingState == STREAMING_PAUSED) {
            usleep(1); // maybe not the best way so FIXME
        }

        if (stop_camera == 1) {
            /* check active outputs */
            pthread_mutex_lock(&pglobal->in[pcontext->id].out);
            if (pglobal->in[pcontext->id].num_outs == 0) {
                /* stop camera */
                uvcStopGrab(pcontext->videoIn);
                /* wait for active outputs */
                pthread_cond_wait(&pglobal->in[pcontext->id].out_update,
                                  &pglobal->in[pcontext->id].out);
            }
            /* allow others to access the global buffer again */
            pthread_mutex_unlock(&pglobal->in[pcontext->id].out);
        }

        /* grab a frame */
        if (uvcGrab(pcontext->videoIn) < 0) {
            IPRINT("Error grabbing frames\n");
            exit(EXIT_FAILURE);
        }

        DBG("received frame of size: %d from plugin: %d\n",
            pcontext->videoIn->buf.bytesused, pcontext->id);

        /*
         * Workaround for broken, corrupted frames:
         * Under low light conditions corrupted frames may get captured.
         * The good thing is such frames are quite small compared to the regular pictures.
         * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
         * corrupted frames are smaller.
         */
        if (pcontext->videoIn->buf.bytesused < minimum_size) {
            DBG("dropping too small frame, assuming it as broken\n");
            continue;
        }

        /* copy JPG picture to global buffer */
        pthread_mutex_lock(&pglobal->in[pcontext->id].db);

        /*
         * If capturing in YUV mode convert to JPEG now.
         * This compression requires many CPU cycles, so try to avoid YUV format.
         * Getting JPEGs straight from the webcam is one of the major advantages of
         * Linux-UVC compatible devices.
         */
        if (pcontext->videoIn->formatIn == V4L2_PIX_FMT_YUYV) {
            DBG("compressing frame from input: %d\n", (int) pcontext->id);
            pglobal->in[pcontext->id].size = compress_yuyv_to_jpeg(pcontext->videoIn,
                    pglobal->in[pcontext->id].buf, pcontext->videoIn->framesizeIn, gquality);
        } else {
            DBG("copying frame from input: %d\n", (int) pcontext->id);
            pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf,
                    pcontext->videoIn->tmpbuffer, pcontext->videoIn->buf.bytesused);
        }

#if 0
        /* motion detection can be done just by comparing the picture size,
           but it is not very accurate!! */
        if ((prev_size - global->size) * (prev_size - global->size) > 4 * 1024 * 1024) {
            DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
        }
        prev_size = global->size;
#endif

        /* copy this frame's timestamp to user space */
        pglobal->in[pcontext->id].timestamp = pcontext->videoIn->buf.timestamp;

        /* signal fresh_frame */
        pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
        pthread_mutex_unlock(&pglobal->in[pcontext->id].db);

        /* only use usleep if the fps is below 5, otherwise the overhead is too long */
        if (pcontext->videoIn->fps < 5) {
            DBG("waiting for next frame for %d us\n", 1000 * 1000 / pcontext->videoIn->fps);
            usleep(1000 * 1000 / pcontext->videoIn->fps);
        } else {
            DBG("waiting for next frame\n");
        }
    }

    DBG("leaving input thread, calling cleanup function now\n");
    pthread_cleanup_pop(1);

    return NULL;
}
/******************************************************************************
Description.: this thread worker grabs a frame and copies it to the global buffer
Input Value.: unused
Return Value: unused, always NULL
******************************************************************************/
void *cam_thread(void *arg)
{
    int cid = 0, id = (intptr_t) arg; /* thread argument carries the camera index */
    context *activecam, *pcontext = &cams[id];
    pglobal = pcontext->pglobal;
    activecam = pcontext;

    /* set cleanup handler to cleanup allocated resources */
    pthread_cleanup_push(cam_cleanup, pcontext);

    while (!pglobal->stop) {
        if (cid != camera) {
            video_pause(activecam->videoIn);
            cid = camera;
            activecam = &cams[id + cid];
            IPRINT("Switch to camera..: %s\n", mdev[cid]);
            video_unpause(activecam->videoIn);
        }
        while (activecam->videoIn->streamingState == STREAMING_PAUSED) {
            usleep(1); // maybe not the best way so FIXME
        }

        /* grab a frame */
        if (uvcGrab(activecam->videoIn) < 0) {
            IPRINT("Error grabbing frames\n");
            exit(EXIT_FAILURE);
        }

        DBG("received frame of size: %d from plugin: %d\n",
            activecam->videoIn->buf.bytesused, activecam->id);

        /*
         * Workaround for broken, corrupted frames:
         * Under low light conditions corrupted frames may get captured.
         * The good thing is such frames are quite small compared to the regular pictures.
         * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
         * corrupted frames are smaller.
         */
        if (activecam->videoIn->buf.bytesused < minimum_size) {
            DBG("dropping too small frame, assuming it as broken\n");
            continue;
        }

        /* copy JPG picture to global buffer */
        pthread_mutex_lock(&pglobal->in[pcontext->id].db);

        /*
         * If capturing in YUV mode convert to JPEG now.
         * This compression requires many CPU cycles, so try to avoid YUV format.
         * Getting JPEGs straight from the webcam is one of the major advantages of
         * Linux-UVC compatible devices.
         */
        if (activecam->videoIn->formatIn == V4L2_PIX_FMT_YUYV) {
            DBG("compressing frame from input: %d\n", (int) activecam->id);
            pglobal->in[pcontext->id].size = compress_yuyv_to_jpeg(activecam->videoIn,
                    pglobal->in[pcontext->id].buf, activecam->videoIn->framesizeIn, gquality);
        } else {
            DBG("copying frame from input: %d\n", (int) activecam->id);
            pglobal->in[pcontext->id].size = memcpy_picture(pglobal->in[pcontext->id].buf,
                    activecam->videoIn->tmpbuffer, activecam->videoIn->buf.bytesused);
        }

        /* copy this frame's timestamp to user space */
        pglobal->in[pcontext->id].timestamp = activecam->videoIn->buf.timestamp;

        /* signal fresh_frame */
        pthread_cond_broadcast(&pglobal->in[pcontext->id].db_update);
        pthread_mutex_unlock(&pglobal->in[pcontext->id].db);

        /* only use usleep if the fps is below 5, otherwise the overhead is too long */
        if (activecam->videoIn->fps < 5) {
            DBG("waiting for next frame for %d us\n", 1000 * 1000 / activecam->videoIn->fps);
            usleep(1000 * 1000 / activecam->videoIn->fps);
        } else {
            DBG("waiting for next frame\n");
        }
    }

    DBG("leaving input thread, calling cleanup function now\n");
    pthread_cleanup_pop(1);

    return NULL;
}
/* run in a thread (SDL overlay)*/
void *main_loop(void *data)
{
    struct ALL_DATA *all_data = (struct ALL_DATA *) data;
    struct VidState *s = all_data->s;
    struct paRecordData *pdata = all_data->pdata;
    struct GLOBAL *global = all_data->global;
    struct focusData *AFdata = all_data->AFdata;
    struct vdIn *videoIn = all_data->videoIn;

    struct particle* particles = NULL; //for the particles video effect

    SDL_Event event;
    /*the main SDL surface*/
    SDL_Surface *pscreen = NULL;
    SDL_Overlay *overlay = NULL;
    SDL_Rect drect;

    int width = global->width;
    int height = global->height;
    int format = global->format;

    SAMPLE vuPeak[2];    // The maximum vuLevel seen recently
    int vuPeakFreeze[2]; // The vuPeak values will be frozen for this many frames.
    vuPeak[0] = vuPeak[1] = 0;
    vuPeakFreeze[0] = vuPeakFreeze[1] = 0;

    BYTE *p = NULL;

    Control *focus_control = NULL;
    int last_focus = 0;

    if (global->AFcontrol)
    {
        focus_control = get_ctrl_by_id(s->control_list, AFdata->id);
        get_ctrl(videoIn->fd, s->control_list, AFdata->id, all_data);
        last_focus = focus_control->value;
        /*make sure we wait for focus to settle on first check*/
        if (last_focus < 0) last_focus = AFdata->f_max;
    }

    gboolean capVid = FALSE;
    gboolean signalquit = FALSE;

    /*------------------------------ SDL init video ---------------------*/
    if(!global->no_display)
    {
        overlay = video_init(data, &(pscreen));
        if(overlay == NULL)
        {
            g_print("FATAL: Couldn't create yuv overlay - please disable hardware acceleration\n");
            signalquit = TRUE; /*exit video thread*/
        }
        else
        {
            p = (unsigned char *) overlay->pixels[0];
            drect.x = 0;
            drect.y = 0;
            drect.w = pscreen->w;
            drect.h = pscreen->h;
        }
    }

    while (!signalquit)
    {
        __LOCK_MUTEX(__VMUTEX);
            capVid = videoIn->capVid;
            signalquit = videoIn->signalquit;
        __UNLOCK_MUTEX(__VMUTEX);

        /*-------------------------- Grab Frame ----------------------------------*/
        if (uvcGrab(videoIn, format, width, height, &global->fps, &global->fps_num) < 0)
        {
            g_printerr("Error grabbing image\n");
            continue;
        }
        else
        {
            if(!videoIn->timestamp)
            {
                global->skip_n++; //skip this frame
            }

            if(capVid)
            {
                if(global->framecount < 1)
                {
                    /*reset video start time to first frame capture time */
                    global->Vidstarttime = videoIn->timestamp;
                    /** set current time for audio ts(0) reference (MONOTONIC)
                     * only used if we have no audio capture before video
                     */
                    __LOCK_MUTEX(__AMUTEX);
                        pdata->ts_ref = ns_time_monotonic();
                    __UNLOCK_MUTEX(__AMUTEX);
                    //printf("video ts ref: %llu audio ts_ ref: %llu\n",global->Vidstarttime, pdata->ts_ref);
                    global->v_ts = 0;
                }
                else
                {
                    global->v_ts = videoIn->timestamp - global->Vidstarttime;
                    /*always use the last frame time stamp for video stop time*/
                    global->Vidstoptime = videoIn->timestamp;
                }
            }

            if (global->FpsCount && !global->no_display)
            {   /* sets fps count in window title bar */
                global->frmCount++;
                if (global->DispFps > 0)
                {   /*set every 2 sec*/
                    g_snprintf(global->WVcaption, 24, "GUVCVideo - %3.2f fps", global->DispFps);
                    SDL_WM_SetCaption(global->WVcaption, NULL);
                    global->frmCount = 0; /*resets*/
                    global->DispFps = 0;
                }
            }

            /*---------------- autofocus control ------------------*/
            if (global->AFcontrol && (global->autofocus || AFdata->setFocus))
            {   /*AFdata = NULL if no focus control*/
                if (AFdata->focus < 0)
                {
                    /*starting autofocus*/
                    AFdata->focus = AFdata->left; /*start left*/
                    focus_control->value = AFdata->focus;
                    if (set_ctrl(videoIn->fd, s->control_list, AFdata->id) != 0)
                        g_printerr("ERROR: couldn't set focus to %d\n", AFdata->focus);
                    /*number of frames until focus is stable*/
                    /*1.4 ms focus time - every 1 step*/
                    AFdata->focus_wait = (int) abs(AFdata->focus-last_focus)*1.4/(1000/global->fps)+1;
                    last_focus = AFdata->focus;
                }
                else
                {
                    if (AFdata->focus_wait == 0)
                    {
                        AFdata->sharpness = getSharpness(videoIn->framebuffer, width, height, 5);
                        if (global->debug)
                            g_print("sharp=%d focus_sharp=%d foc=%d right=%d left=%d ind=%d flag=%d\n",
                                AFdata->sharpness, AFdata->focus_sharpness,
                                AFdata->focus, AFdata->right, AFdata->left, AFdata->ind, AFdata->flag);
                        AFdata->focus = getFocusVal(AFdata);
                        if (AFdata->focus != last_focus)
                        {
                            focus_control->value = AFdata->focus;
                            if (set_ctrl(videoIn->fd, s->control_list, AFdata->id) != 0)
                                g_printerr("ERROR: couldn't set focus to %d\n", AFdata->focus);
                            /*number of frames until focus is stable*/
                            /*1.4 ms focus time - every 1 step*/
                            AFdata->focus_wait = (int) abs(AFdata->focus-last_focus)*1.4/(1000/global->fps)+1;
                        }
                        last_focus = AFdata->focus;
                    }
                    else
                    {
                        AFdata->focus_wait--;
                        if (global->debug) g_print("Wait Frame: %d\n", AFdata->focus_wait);
                    }
                }
            }
        }

        /*------------------------- Filter Frame ---------------------------------*/
        __LOCK_MUTEX(__GMUTEX);
        if(global->Frame_Flags > 0)
        {
            if((global->Frame_Flags & YUV_PARTICLES) == YUV_PARTICLES)
                particles = particles_effect(videoIn->framebuffer, width, height, 20, 4, particles);
            if((global->Frame_Flags & YUV_MIRROR) == YUV_MIRROR)
                yuyv_mirror(videoIn->framebuffer, width, height);
            if((global->Frame_Flags & YUV_UPTURN) == YUV_UPTURN)
                yuyv_upturn(videoIn->framebuffer, width, height);
            if((global->Frame_Flags & YUV_NEGATE) == YUV_NEGATE)
                yuyv_negative(videoIn->framebuffer, width, height);
            if((global->Frame_Flags & YUV_MONOCR) == YUV_MONOCR)
                yuyv_monochrome(videoIn->framebuffer, width, height);
            if((global->Frame_Flags & YUV_PIECES) == YUV_PIECES)
                pieces(videoIn->framebuffer, width, height, 16);
        }
        __UNLOCK_MUTEX(__GMUTEX);

        /*------------------------- capture Image --------------------------------*/
        if (videoIn->capImage)
        {
            /*
             * format and resolution can change (enabled) while capturing the frame,
             * but you would need to be speedy gonzalez to press two buttons
             * at almost the same time :D
             */
            int ret = 0;
            if((ret = store_picture(all_data)) < 0)
                g_printerr("saved image to:%s ...Failed\n", videoIn->ImageFName);
            else if (!ret && global->debug)
                g_print("saved image to:%s ...OK\n", videoIn->ImageFName);
            videoIn->capImage = FALSE;
        }

        /*------------------------- capture Video --------------------------------*/
        if (capVid && !(global->skip_n))
        {
            __LOCK_MUTEX(__VMUTEX);
                if(videoIn->VidCapStop) videoIn->VidCapStop = FALSE;
            __UNLOCK_MUTEX(__VMUTEX);
            int res = 0;
            /* format and resolution don't change (disabled) while capturing video;
             * store_video_frame may sleep if needed to avoid a buffer overrun */
            if((res = store_video_frame(all_data)) < 0)
                g_printerr("WARNING: dropped frame (%i)\n", res);
        }
        else /*video and audio capture have stopped */
        {
            __LOCK_MUTEX(__VMUTEX);
                if(!(videoIn->VidCapStop)) videoIn->VidCapStop = TRUE;
            __UNLOCK_MUTEX(__VMUTEX);
        }

        /* decrease skip frame count */
        if (global->skip_n > 0)
        {
            if (global->debug && capVid) g_print("skipping frame %d...\n", global->skip_n);
            global->skip_n--;
        }

        __LOCK_MUTEX(__AMUTEX);
            if (global->Sound_enable && capVid) pdata->skip_n = global->skip_n;
        __UNLOCK_MUTEX(__AMUTEX);

        /*------------------------- Display Frame --------------------------------*/
        if(!global->no_display)
        {
            if (global->osdFlags && pdata->audio_buff[0])
            {
                draw_vu_meter(width, height, vuPeak, vuPeakFreeze, data);
            }
            SDL_LockYUVOverlay(overlay);
            memcpy(p, videoIn->framebuffer, width * height * 2);
            SDL_UnlockYUVOverlay(overlay);
            SDL_DisplayYUVOverlay(overlay, &drect);

            /*------------------------- Read Key events ------------------------------*/
            /* Poll for events */
            while( SDL_PollEvent(&event) )
            {
                //printf("event type:%i event key:%i\n", event.type, event.key.keysym.scancode);
                if(event.type == SDL_KEYDOWN)
                {
                    if (videoIn->PanTilt)
                    {
                        switch( event.key.keysym.sym )
                        {
                            /* Keyboard event */
                            /* Pass the event data onto PrintKeyInfo() */
                            case SDLK_DOWN: /*Tilt Down*/
                                uvcPanTilt(videoIn->fd, s->control_list, 0, 1);
                                break;
                            case SDLK_UP: /*Tilt UP*/
                                uvcPanTilt(videoIn->fd, s->control_list, 0, -1);
                                break;
                            case SDLK_LEFT: /*Pan Left*/
                                uvcPanTilt(videoIn->fd, s->control_list, 1, 1);
                                break;
                            case SDLK_RIGHT: /*Pan Right*/
                                uvcPanTilt(videoIn->fd, s->control_list, 1, -1);
                                break;
                            default:
                                break;
                        }
                    }

                    switch( event.key.keysym.scancode )
                    {
                        case 220: /*webcam button*/
                            //gdk_threads_enter();
                            if (all_data->global->default_action == 0)
                                g_main_context_invoke(NULL, image_capture_callback, (gpointer) all_data);
                            else
                                g_main_context_invoke(NULL, video_capture_callback, (gpointer) all_data);
                            break;
                        default:
                            break;
                    }

                    switch( event.key.keysym.sym )
                    {
                        case SDLK_q:
                            //shutDown
                            g_timeout_add(200, shutd_timer, all_data);
                            g_print("q pressed - Quitting...\n");
                            break;
                        case SDLK_SPACE:
                            if(global->AFcontrol > 0)
                                setfocus_clicked(NULL, all_data);
                            break;
                        case SDLK_i:
                            g_main_context_invoke(NULL, image_capture_callback, (gpointer) all_data);
                            break;
                        case SDLK_v:
                            g_main_context_invoke(NULL, video_capture_callback, (gpointer) all_data);
                            break;
                        default:
                            break;
                    }
                }

                if(event.type == SDL_VIDEORESIZE)
                {
                    pscreen = SDL_SetVideoMode(event.resize.w, event.resize.h, global->bpp, SDL_VIDEO_Flags);
                    drect.w = event.resize.w;
                    drect.h = event.resize.h;
                }

                if(event.type == SDL_QUIT)
                {
                    //shutDown
                    g_timeout_add(200, shutd_timer, all_data);
                }
            }
        }

        /* if set make the thread sleep - default no sleep (full throttle)*/
        if(global->vid_sleep) sleep_ms(global->vid_sleep);

        /*------------------------------------------*/
        /* restart video (new resolution/format)    */
        /*------------------------------------------*/
        if (global->change_res)
        {
            g_print("setting new resolution (%d x %d)\n", global->width, global->height);
            /*clean up */
            if(particles) g_free(particles);
            particles = NULL;

            if (global->debug) g_print("cleaning buffer allocations\n");
            fflush(NULL); //flush all output buffers

            if(!global->no_display)
            {
                SDL_FreeYUVOverlay(overlay);
                overlay = NULL;
            }

            /*init device*/
            restart_v4l2(videoIn, global);
            /*set new resolution for video thread*/
            width = global->width;
            height = global->height;
            format = global->format;

            /* restart SDL with new values*/
            if(!global->no_display)
            {
                overlay = video_init(data, &(pscreen));
                if(overlay == NULL)
                {
                    g_print("FATAL: Couldn't create yuv overlay - please disable hardware acceleration\n");
                    signalquit = TRUE; /*exit video thread*/
                }
                else
                {
                    if (global->debug)
                        g_print("yuv overlay created (%ix%i).\n", overlay->w, overlay->h);
                    p = (unsigned char *) overlay->pixels[0];
                    drect.x = 0;
                    drect.y = 0;
                    drect.w = pscreen->w;
                    drect.h = pscreen->h;
                    global->change_res = FALSE;
                }
            }
            else global->change_res = FALSE;
        }
    } /*loop end*/

    __LOCK_MUTEX(__VMUTEX);
        capVid = videoIn->capVid;
    __UNLOCK_MUTEX(__VMUTEX);

    /*check if thread exited while in video capture mode*/
    if (capVid)
    {
        /*stop capture*/
        if (global->debug) g_print("stopping video capture\n");
        //global->Vidstoptime = ns_time_monotonic(); /*this is set in IO thread*/
        videoIn->VidCapStop = TRUE;
        capVid = FALSE;
        __LOCK_MUTEX(__VMUTEX);
            videoIn->capVid = capVid;
        __UNLOCK_MUTEX(__VMUTEX);
        __LOCK_MUTEX(__AMUTEX);
            pdata->capVid = capVid;
        __UNLOCK_MUTEX(__AMUTEX);
        /*join IO thread*/
        if (global->debug) g_print("Shutting down IO thread\n");
        __THREAD_JOIN( all_data->IO_thread );
        if (global->debug) g_print("IO thread finished\n");
    }

    if (global->debug) g_print("Thread terminated...\n");
    p = NULL;
    if(particles) g_free(particles);
    particles = NULL;

    if (global->debug) g_print("cleaning thread allocations: 100%%\n");
    fflush(NULL); //flush all output buffers

    if(!global->no_display)
    {
        if(overlay) SDL_FreeYUVOverlay(overlay);
        //SDL_FreeSurface(pscreen);
        SDL_Quit();
    }

    if (global->debug) g_print("Video thread completed\n");

    global = NULL;
    AFdata = NULL;
    videoIn = NULL;
    return ((void *) 0);
}
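The focus_wait expression above is terse, and its integer division (1000/global->fps) silently truncates the frame period. A minimal standalone sketch of the same estimate, assuming (as the original comments state) roughly 1.4 ms of lens travel per focus step; settle_frames() is a hypothetical helper, not part of guvcview:

/* Sketch of the settle-wait estimate, assuming ~1.4 ms per focus step.
 * Uses floating-point division to avoid the truncation in the original. */
#include <stdlib.h>

static int settle_frames(int new_focus, int old_focus, int fps)
{
    double travel_ms = abs(new_focus - old_focus) * 1.4; /* total lens travel time */
    double frame_ms  = 1000.0 / fps;                     /* one frame period in ms */
    return (int)(travel_ms / frame_ms) + 1;              /* round up, wait at least 1 frame */
}

For example, a 50-step jump at 25 fps gives 70 ms of travel against a 40 ms frame period, so the sharpness check is deferred for 2 frames.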
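The display copy of width * height * 2 bytes follows from the pixel format, not from any SDL requirement: YUYV (packed YUV 4:2:2) stores Y0 U Y1 V for every pair of pixels, i.e. 2 bytes per pixel. A purely illustrative helper:

/* YUYV frame size: 2 bytes per pixel (compare planar YUV 4:2:0,
 * which needs width * height * 3 / 2 bytes). Illustrative only. */
static size_t yuyv_frame_bytes(int width, int height)
{
    return (size_t)width * height * 2;
}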
/******************************************************************************
Description.: this thread worker grabs a frame and copies it to the global buffer
Input Value.: unused
Return Value: unused, always NULL
******************************************************************************/
void *cam_thread(void *arg)
{
    input *in = (input*)arg;
    context *pcontext = in->context;
    unsigned int every_count = 0;

    DBG("Enter\n");

    /* set cleanup handler to cleanup allocated resources */
    pthread_cleanup_push(cam_cleanup, in);

    while(!stop) {
        while(pcontext->videoIn->streamingState == STREAMING_PAUSED) {
            usleep(1); // maybe not the best way so FIXME
        }

        /* grab a frame */
        if(uvcGrab(pcontext->videoIn) < 0) {
            LOG("Error grabbing frames\n");
            exit(EXIT_FAILURE);
        }

        if (every_count < every - 1) {
            DBG("dropping %d frame for every=%d\n", every_count + 1, every);
            ++every_count;
            continue;
        } else {
            every_count = 0;
        }

        /*
         * Workaround for broken, corrupted frames:
         * Under low light conditions corrupted frames may get captured.
         * The good thing is such frames are quite small compared to the regular pictures.
         * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
         * corrupted frames are smaller.
         */
        if(pcontext->videoIn->tmpbytesused < minimum_size) {
            DBG("dropping too small frame, assuming it as broken\n");
            continue;
        }

        // use software frame dropping on low fps
        if (pcontext->videoIn->soft_framedrop == 1) {
            unsigned long last = in->timestamp.tv_sec * 1000 +
                                 (in->timestamp.tv_usec / 1000); // convert to ms
            unsigned long current = pcontext->videoIn->buf.timestamp.tv_sec * 1000 +
                                    pcontext->videoIn->buf.timestamp.tv_usec / 1000; // convert to ms

            // if the requested frame period has not elapsed yet, skip the frame
            if ((current - last) < pcontext->videoIn->frame_period_time) {
                //DBG("Last frame taken %d ms ago so drop it\n", (current - last));
                continue;
            }
            DBG("Lag: %ld\n", (current - last) - pcontext->videoIn->frame_period_time);
        }

        /* copy JPG picture to global buffer */
        pthread_mutex_lock(&in->db);

        /*
         * If capturing in YUV mode convert to JPEG now.
         * This compression requires many CPU cycles, so try to avoid YUV format.
         * Getting JPEGs straight from the webcam is one of the major advantages of
         * Linux-UVC compatible devices.
         */
        DBG("copying frame from input:\n");
        in->size = memcpy_picture(in->buf, pcontext->videoIn->tmpbuffer, pcontext->videoIn->tmpbytesused);

        /* copy this frame's timestamp to user space */
        in->timestamp = pcontext->videoIn->tmptimestamp;

        /* signal fresh_frame */
        pthread_cond_broadcast(&in->db_update);
        pthread_mutex_unlock(&in->db);
    }

    DBG("leaving input thread, calling cleanup function now\n");
    pthread_cleanup_pop(1);

    return NULL;
}
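The millisecond bookkeeping in the soft frame-drop above is easy to get wrong. A sketch of the same test with the conversion factored out, assuming frame_period_time holds the requested period in milliseconds (roughly 1000 / target fps); tv_to_ms() and keep_frame() are hypothetical helpers:

/* Sketch of the soft frame-drop test; assumes frame_period_time is the
 * requested period in ms. Unsigned subtraction keeps the delta correct
 * even if the millisecond counters wrap. */
#include <sys/time.h>

static unsigned long tv_to_ms(struct timeval tv)
{
    return (unsigned long)tv.tv_sec * 1000UL + tv.tv_usec / 1000UL;
}

/* returns 1 when the frame should be kept, 0 when it arrived too early */
static int keep_frame(struct timeval last, struct timeval now, unsigned long period_ms)
{
    return (tv_to_ms(now) - tv_to_ms(last)) >= period_ms;
}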
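cam_thread is the producer half of a classic condition-variable hand-off: it fills in->buf under in->db and broadcasts in->db_update. For completeness, a minimal sketch of the consumer side, assuming the same input fields (db, db_update, buf, size); frame_consumer() is a hypothetical reader and dest is a caller-supplied buffer:

/* Minimal consumer sketch for the hand-off above; not part of the
 * original source. */
#include <pthread.h>
#include <string.h>

static size_t frame_consumer(input *in, unsigned char *dest, size_t dest_size)
{
    pthread_mutex_lock(&in->db);

    /* releases the mutex while blocked, re-acquires it on wakeup */
    pthread_cond_wait(&in->db_update, &in->db);

    /* copy the frame out while still holding the lock */
    size_t n = (size_t)in->size;
    if (n > dest_size) n = dest_size;
    memcpy(dest, in->buf, n);

    pthread_mutex_unlock(&in->db);
    return n;
}

Production code would wait in a loop on a predicate (e.g. a frame sequence counter), since pthread_cond_wait may wake spuriously.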