/**************************************************************************** * @func : JPEG save Thread main funtion * @arg1 : void * @return : void ***************************************************************************/ void *jpegsaveThread(void) { unsigned char* rgb; char outfile[100] = {0}; FILE *fp; SERVER_CONFIG *serverConfig = GetServerConfig(); while(!KillJpegsaveThread) { while(serverConfig->jpeg.framebuff == NULL) { usleep(100); } get_image_filename(outfile,serverConfig->image.type); fp = fopen(outfile, "w"); switch(serverConfig->image.type) { case 0: apply_algo((char *)serverConfig->jpeg.framebuff,serverConfig->algo_type); rgb = yuyv2rgb(serverConfig->jpeg.framebuff, serverConfig->capture.width, serverConfig->capture.height); jpeg(fp, rgb, serverConfig->capture.width, serverConfig->capture.height, serverConfig->jpeg.quality); free(rgb); break; case 1: fwrite(serverConfig->jpeg.framebuff,serverConfig->capture.framesize,1,fp); break; } free(serverConfig->jpeg.framebuff); serverConfig->jpeg.framebuff = NULL; fclose(fp); } return 0; }
// Block until one frame is captured from the V4L2 device, convert it to RGB
// into a double buffer, publish it via buffer swap + semaphore, and re-queue
// the driver buffer.
void V4LIn::_readFrame_async() {
    struct v4l2_buffer buf;
    for(;;) {
        fd_set fds;
        FD_ZERO(&fds);
        FD_SET(fd, &fds);
        struct timeval tv = {2,0};  // 2-second timeout per select() attempt
        int r = select(fd + 1, &fds, NULL, NULL, &tv);
        if (r == -1) {
            if (EINTR == errno) continue;  // interrupted by a signal: retry
            errno_exit("select");
        } else if (r == 0) {
            // No frame within the timeout: treated as fatal.
            fprintf(stderr, "select timeout\n");
            exit(EXIT_FAILURE);
        }
        memset(&buf, 0, sizeof(buf));
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
            if(errno == EAGAIN) continue;            // no buffer ready yet
            if(errno != EIO) errno_exit("VIDIOC_DQBUF");
            // EIO: transient error — fall through and retry the loop.
        }
        else break;  // got a filled buffer
    }
    assert(buf.index < n_buffers);
    pthread_mutex_lock(&mut);
    // Lazily allocate both RGB buffers (3 bytes per pixel) on first frame.
    if(!data) {data = new unsigned char[w*h*3]; tmpdata = new unsigned char[w*h*3];}
    yuyv2rgb(buffers[buf.index].start, buf.bytesused, tmpdata);
    // Swap: consumers read `data`; the next frame decodes into `tmpdata`.
    unsigned char* tmp = tmpdata;
    tmpdata = data;
    data = tmp;
    pthread_mutex_unlock(&mut);
    sem_post(&sem);  // announce that a new frame is available
    if (-1 == xioctl(fd, VIDIOC_QBUF, &buf)) errno_exit("VIDIOC_QBUF");
}
/* Convert one captured frame from the configured camera pixel format into
 * the RGB image buffer of `dest`. Frames in an unrecognized format are
 * silently ignored. */
static void process_image(const void * src, int len, usb_cam_camera_image_t *dest)
{
    const int pixel_count = dest->width * dest->height;

    switch (pixelformat) {
    case V4L2_PIX_FMT_YUYV:
        yuyv2rgb((char*)src, dest->image, pixel_count);
        break;
    case V4L2_PIX_FMT_UYVY:
        uyvy2rgb((char*)src, dest->image, pixel_count);
        break;
    case V4L2_PIX_FMT_MJPEG:
        mjpeg2rgb((char*)src, len, dest->image, pixel_count);
        break;
    default:
        break;
    }
}
// Convert image `im` from color space `from` to color space `to`.
// Direct converters are used where available; any other pair of non-rgb
// spaces is routed through an intermediate rgb image. Panics on unsupported
// destinations (yuyv/uyvy) and on unknown color-space names.
Image ColorConvert::apply(Image im, string from, string to) {
    // check for the trivial case
    assert(from != to, "color conversion from %s to %s is pointless\n", from.c_str(), to.c_str());

    // unsupported destination color spaces
    if (to == "yuyv" || to == "uyvy") {
        panic("Unsupported destination color space: %s\n", to.c_str());
    }

    // direct conversions that don't have to go via rgb
    if (from == "yuyv" && to == "yuv") {
        return yuyv2yuv(im);
    } else if (from == "uyvy" && to == "yuv") {
        return uyvy2yuv(im);
    } else if (from == "xyz" && to == "lab") {
        return xyz2lab(im);
    } else if (from == "lab" && to == "xyz") {
        return lab2xyz(im);
    } else if (from == "argb" && to == "xyz") {
        return argb2xyz(im);
    } else if (from == "xyz" && to == "argb") {
        return xyz2argb(im);
    } else if (from != "rgb" && to != "rgb") {
        // conversions that go through rgb: from -> rgb -> to
        Image halfway = apply(im, from, "rgb");
        return apply(halfway, "rgb", to);
    } else if (from == "rgb") {
        // from rgb to anything else
        if (to == "hsv" || to == "hsl" || to == "hsb") {
            return rgb2hsv(im);
        } else if (to == "yuv") {
            return rgb2yuv(im);
        } else if (to == "xyz") {
            return rgb2xyz(im);
        } else if (to == "y" || to == "gray" || to == "grayscale" || to == "luminance") {
            return rgb2y(im);
        } else if (to == "lab") {
            return rgb2lab(im);
        } else if (to == "argb") {
            return rgb2argb(im);
        } else {
            panic("Unknown color space %s\n", to.c_str());
        }
    } else { //(to == "rgb")
        // anything else to rgb
        if (from == "hsv" || from == "hsl" || from == "hsb") {
            return hsv2rgb(im);
        } else if (from == "yuv") {
            return yuv2rgb(im);
        } else if (from == "xyz") {
            return xyz2rgb(im);
        } else if (from == "y" || from == "gray" || from == "grayscale" || from == "luminance") {
            return y2rgb(im);
        } else if (from == "lab") {
            return lab2rgb(im);
        } else if (from == "uyvy") {
            return uyvy2rgb(im);
        } else if (from == "yuyv") {
            return yuyv2rgb(im);
        } else if (from == "argb") {
            return argb2rgb(im);
        } else {
            panic("Unknown color space %s\n", from.c_str());
        }
    }

    // keep the compiler happy — panic() above never returns here
    return Image();
}
void startVideoSrvr() { pthread_t videoSocketThread; /* alloc mameory for the videoIn struct & initialize */ videoIn = (struct vdIn *) calloc (1, sizeof (struct vdIn)); if (init_videoIn (videoIn, (char *) videodevice, width, height, format, grabmethod) < 0) exit (1); /* alloc memory for the control struct & video out array & initialize */ ctrlStruct ctrl, *pc; if ((ctrl.imgArray = malloc(3 * width * height)) < 0) // enough space for rgb exit(-1); ctrl.doCapture = 0; //Reset all camera controls if (verbose >= 1) fprintf (stderr, "Resetting camera settings\n"); v4l2ResetControl (videoIn, V4L2_CID_BRIGHTNESS); v4l2ResetControl (videoIn, V4L2_CID_CONTRAST); v4l2ResetControl (videoIn, V4L2_CID_SATURATION); v4l2ResetControl (videoIn, V4L2_CID_GAIN); //Setup Camera Parameters if (brightness != 0) { if (verbose >= 1) fprintf (stderr, "Setting camera brightness to %d\n", brightness); v4l2SetControl (videoIn, V4L2_CID_BRIGHTNESS, brightness); } else if (verbose >= 1) { fprintf (stderr, "Camera brightness level is %d\n", v4l2GetControl (videoIn, V4L2_CID_BRIGHTNESS)); } if (contrast != 0) { if (verbose >= 1) fprintf (stderr, "Setting camera contrast to %d\n", contrast); v4l2SetControl (videoIn, V4L2_CID_CONTRAST, contrast); } else if (verbose >= 1) { fprintf (stderr, "Camera contrast level is %d\n", v4l2GetControl (videoIn, V4L2_CID_CONTRAST)); } if (saturation != 0) { if (verbose >= 1) fprintf (stderr, "Setting camera saturation to %d\n", saturation); v4l2SetControl (videoIn, V4L2_CID_SATURATION, saturation); } else if (verbose >= 1) { fprintf (stderr, "Camera saturation level is %d\n", v4l2GetControl (videoIn, V4L2_CID_SATURATION)); } if (gain != 0) { if (verbose >= 1) fprintf (stderr, "Setting camera gain to %d\n", gain); v4l2SetControl (videoIn, V4L2_CID_GAIN, gain); } else if (verbose >= 1) { fprintf (stderr, "Camera gain level is %d\n", v4l2GetControl (videoIn, V4L2_CID_GAIN)); } // wait for a video client to connect before proceeding fprintf (stderr, "waiting for video 
client connection on port %d\n", port); if ((ctrl.videoSocket = wait4client(port)) <= 0) { fprintf (stderr, "error connecting to client: %d\n", ctrl.videoSocket); exit(-1); } // start the thread that handles the video client requests pthread_create(&videoSocketThread, NULL, (void *)cmdHandler, (void *)&ctrl); while (run) { if (verbose >= 2) fprintf (stderr, "."); if (uvcGrab (videoIn) < 0) { fprintf (stderr, "Error grabbing\n"); close_v4l2 (videoIn); free (videoIn); exit (1); } if (ctrl.doCapture == 1) { if (verbose >= 1) { fprintf (stderr, "captured %d byte image at 0x%x %dx%d\n", videoIn->framesizeIn, videoIn->framebuffer, videoIn->width, videoIn->height); } else { fprintf (stderr, "."); } if (outputType == 0) yuyv2Y(videoIn, &ctrl); else yuyv2rgb(videoIn, &ctrl); if (verbose >=1) fprintf (stderr, "converted image to luminance in buffer at 0x%x\n", ctrl.imgArray); videoIn->getPict = 0; ctrl.doCapture = 0; } } close_v4l2 (videoIn); free (videoIn); return; }
/* Convert one captured YUYV frame into the RGB image buffer of `dest`. */
static void process_image(const void * src, int len, usb_cam_camera_image_t *dest)
{
    const int num_pixels = dest->width * dest->height;
    yuyv2rgb((char*)src, dest->image, num_pixels);
}
/* Camera test program: open a V4L2 device, configure the capture format,
   mmap the driver buffers, grab a single frame, convert it from YUYV to
   RGB, write it out as a BMP file, then release all resources. */
int main()
{
    int i, ret;

    /* Open the capture device. */
    fd=open_device();

    /* Query driver capabilities. */
    //struct v4l2_capability cap;
    get_capability();

    /* Enumerate the video formats supported by the device. */
    //struct v4l2_fmtdesc fmtdesc;
    memset(&fmtdesc,0,sizeof(fmtdesc));
    get_format();

    /* Set the capture format. memset() zero-fills the structure first —
       the fastest way to clear a large struct or array. */
    //struct v4l2_format fmt;
    memset(&fmt, 0, sizeof(fmt));
    set_format();

    /* Request driver buffer allocation. */
    //struct v4l2_requestbuffers reqbuf;
    request_buf();

    /* Query the buffers, mmap them into user space, and queue them on the
       capture queue. */
    //struct v4l2_buffer buf;
    query_map_qbuf();

    /* Start streaming.
       VIDIOC_STREAMON tells the driver to begin capturing frames into its
       buffer queue. The argument is an enum v4l2_buf_type (here
       V4L2_BUF_TYPE_VIDEO_CAPTURE). Returns 0 on success; afterwards the
       application would normally use select() to wait until a complete
       frame has been captured before reading it. */
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    ret = ioctl(fd, VIDIOC_STREAMON, &type);
    if (ret < 0) {
        printf("VIDIOC_STREAMON failed (%d)\n", ret);
        return ret;
    }

    /* Get a frame.
       VIDIOC_DQBUF dequeues a buffer that already holds one captured
       frame; on success `buf` describes that buffer (index, length, ...)
       and the pixel data is accessible through the mmap()ed region set up
       earlier. The v4l2_buffer struct acts as a proxy for the actual
       frame buffer. */
    ret = ioctl(fd, VIDIOC_DQBUF, &buf);
    if (ret < 0) {
        printf("VIDIOC_DQBUF failed (%d)\n", ret);
        return ret;
    }

    /* Process the frame — convert the pixel format:
       (1) allocate a buffer for the converted data,
       (2) convert the captured YUYV data to RGB24 into it,
       (3) write the converted buffer out to a file. */
    store_yuyv();
    printf("********************************************\n");
    int n_len;
    n_len=framebuf[buf.index].length*3/2;   /* YUYV (2 B/px) -> RGB24 (3 B/px) */
    newBuf=calloc((unsigned int)n_len,sizeof(unsigned char));
    if(!newBuf)
    {
        printf("cannot assign the memory !\n");
        exit(0);
    }
    /* NOTE(review): casting a pointer to unsigned int truncates on 64-bit
       platforms; %p would be the portable way to print newBuf. */
    printf("the information about the new buffer:\n start Address:0x%x,length=%d\n\n",(unsigned int)newBuf,n_len);
    printf("----------------------------------\n");

    /* YUYV to RGB conversion. */
    starter=(unsigned char *)framebuf[buf.index].start;
    yuyv2rgb();      /* this converter gives the better-looking picture */
    move_noise();
    //yuyv2rgb1();

    /* Build the BMP header/info block and store the bitmap. */
    create_bmp_header();
    store_bmp(n_len);

    /* Re-queue the buffer so the driver can reuse it. */
    ret = ioctl(fd, VIDIOC_QBUF, &buf);
    if (ret < 0) {
        printf("VIDIOC_QBUF failed (%d)\n", ret);
        return ret;
    }
    printf("re-queen buffer end\n");

    /* Release resources.
       munmap(start, length) removes the mapping created earlier. Mappings
       are dropped automatically at process exit, but closing the file
       descriptor alone does NOT unmap them; returns 0 on success, -1 on
       failure. */
    for (i=0; i< 4; i++) {
        munmap(framebuf[i].start, framebuf[i].length);
    }
    //free(starter);
    printf("free starter end\n");
    //free(newBuf);
    printf("free newBuf end\n");
    close(fd);
    printf("Camera test Done.\n");
    return 0;
}
/** Serialize the current snapshot frame into a newly allocated buffer in
 *  the configured image format (0=jpg, 1=bmp, 2=png, 3=raw).
 *  On success, *data points to the encoded image (caller frees with
 *  free()) and *data_len holds its size. Returns 0 on success, 1 when raw
 *  capture was merely scheduled (videoIn->cap_raw set). */
int store_picture_to_buffer(guint8 **data, guint *data_len)
{
    struct GLOBAL *global = all_data.global;
    struct vdIn *videoIn = all_data.videoIn;
    struct JPEG_ENCODER_STRUCTURE *jpeg_struct = NULL;
    BYTE *pim = NULL;
    BYTE *jpeg = NULL;
    int jpeg_size = 0;
    /* TODO snapshot by rock.
       I save raw (no filter, no image processing) frame in snapshot.
       So here should save picture from snapshot, not framebuffer. */
    switch(global->imgFormat)
    {
    case 0:/*jpg*/
        /* Save directly from MJPG frame */
#if 0
        // TODO #1432, some webcam's mjpeg frame could not directly save as jpeg.
        if((global->Frame_Flags==0) && (global->format==V4L2_PIX_FMT_MJPEG))
        {
            if(SaveJPG(videoIn->ImageFName,videoIn->buf.bytesused,videoIn->tmpbuffer))
            {
                g_printerr ("Error: Couldn't capture Image to %s \n", videoIn->ImageFName);
                return(-1);
            }
        }
        else
#endif
        /* Plain JPEG frames from the camera can be copied out verbatim. */
        if ((global->Frame_Flags==0) && (global->format==V4L2_PIX_FMT_JPEG))
        {
            *data_len = videoIn->buf.bytesused;
            *data = malloc(videoIn->buf.bytesused);
            memmove(*data, videoIn->tmpbuffer, videoIn->buf.bytesused);
#if 0
            if (SaveBuff(videoIn->ImageFName,videoIn->buf.bytesused,videoIn->tmpbuffer))
            {
                g_printerr ("Error: Couldn't capture Image to %s \n", videoIn->ImageFName);
                return(-1);
            }
#endif
        }
        else
        {
            /* use built in encoder */
            jpeg = g_new0(BYTE, ((global->width)*(global->height))>>1);
            jpeg_struct = g_new0(struct JPEG_ENCODER_STRUCTURE, 1);
            /* Initialization of JPEG control structure */
            initialization (jpeg_struct,global->width,global->height);
            /* Initialization of Quantization Tables */
            initialize_quantization_tables (jpeg_struct);
            //jpeg_size = encode_image(videoIn->framebuffer, jpeg,
            jpeg_size = encode_image(videoIn->snapshot, jpeg, jpeg_struct, 1, global->width, global->height);
            *data_len = jpeg_size;
            *data = malloc(jpeg_size);
            memmove(*data, jpeg, jpeg_size);
#if 0
            if(SaveBuff(videoIn->ImageFName, jpeg_size, jpeg))
            {
                g_printerr ("Error: Couldn't capture Image to %s \n", videoIn->ImageFName);
                return(-1);
            }
#endif
        }
        break;
    case 1:/*bmp*/
        /*24 bits -> 3bytes 32 bits ->4 bytes*/
        pim = g_new0(BYTE, (global->width)*(global->height)*3);
        //yuyv2bgr(videoIn->framebuffer,pim,global->width,global->height);
        yuyv2bgr(videoIn->snapshot,pim,global->width,global->height);
        _store_picture_to_buffer_bmp(global->width, global->height, 24, pim, data, data_len);
#if 0
        if(SaveBPM(videoIn->ImageFName, global->width, global->height, 24, pim))
        {
            g_printerr ("Error: Couldn't capture Image to %s \n", videoIn->ImageFName);
            return(-1);
        }
#endif
        break;
    case 2:/*png*/
        /*24 bits -> 3bytes 32 bits ->4 bytes*/
        pim = g_new0(BYTE, (global->width)*(global->height)*3);
        //yuyv2rgb(videoIn->framebuffer,pim,global->width,global->height);
        yuyv2rgb(videoIn->snapshot,pim,global->width,global->height);
        _store_picture_to_buffer_png(global->width, global->height, pim, data, data_len);
        //write_png(videoIn->ImageFName, global->width, global->height, pim);
        break;
    case 3:/*raw*/
        /* Defer: flag the capture loop to dump the next raw frame.
           NOTE(review): returns before the cleanup below, but no scratch
           buffers have been allocated on this path. */
        videoIn->cap_raw = 1;
        return 1;
    }

    /* Free per-format scratch buffers (g_free(NULL) is a no-op). */
    if(jpeg_struct) g_free(jpeg_struct);
    jpeg_struct=NULL;
    if(jpeg) g_free(jpeg);
    jpeg = NULL;
    if(pim) g_free(pim);
    pim=NULL;
    return 0;
}
/* Thin wrapper: convert one full frame (global w x h) from YUYV into the
 * caller-supplied RGB buffer. */
void yuyv_to_rgb(unsigned char* rgb, unsigned char* yuyv)
{
    unsigned char* src = yuyv;
    unsigned char* dst = rgb;
    yuyv2rgb(src, dst, w, h);
}
/* Save the current snapshot frame to disk in the configured image format
 * (jpg / bmp / png / raw). `data` is a struct ALL_DATA* passed opaquely.
 * Returns 0 on success, -1 if a save failed, 1 when raw capture was
 * merely scheduled (videoIn->cap_raw set). */
int store_picture(void *data)
{
    struct ALL_DATA *all_data = (struct ALL_DATA *) data;
    struct GLOBAL *global = all_data->global;
    struct vdIn *videoIn = all_data->videoIn;
    struct JPEG_ENCODER_STRUCTURE *jpeg_struct = NULL;
    BYTE *pim = NULL;
    BYTE *jpeg = NULL;
    int jpeg_size = 0;
    int rc = 0;
    /* TODO snapshot by rock.
       I save raw (no filter, no image processing) frame in snapshot.
       So here should save picture from snapshot, not framebuffer. */
    switch(global->imgFormat)
    {
    case IMG_FORMAT_JPG:/*jpg*/
        /* Save directly from MJPG frame */
#if 0
        // TODO #1432, some webcam's mjpeg frame could not directly save as jpeg.
        if((global->Frame_Flags==0) && (global->format==V4L2_PIX_FMT_MJPEG))
        {
            if(SaveJPG(videoIn->ImageFName,videoIn->buf.bytesused,videoIn->tmpbuffer))
            {
                g_printerr ("Error: Couldn't capture Image to %s \n", videoIn->ImageFName);
                return(-1);
            }
        }
        else
#endif
        /* Plain JPEG frames from the camera can be written out verbatim. */
        if ((global->Frame_Flags==0) && (global->format==V4L2_PIX_FMT_JPEG))
        {
            if (SaveBuff(videoIn->ImageFName,videoIn->buf.bytesused,videoIn->tmpbuffer))
            {
                g_printerr ("Error: Couldn't capture Image to %s \n", videoIn->ImageFName);
                //return(-1);
                rc = -1;
                goto end_func;  /* route through cleanup instead of early return */
            }
        }
        else
        {
            /* use built in encoder */
            jpeg = g_new0(BYTE, ((global->width)*(global->height))>>1);
            jpeg_struct = g_new0(struct JPEG_ENCODER_STRUCTURE, 1);
            /* Initialization of JPEG control structure */
            initialization (jpeg_struct,global->width,global->height);
            /* Initialization of Quantization Tables */
            initialize_quantization_tables (jpeg_struct);
            //jpeg_size = encode_image(videoIn->framebuffer, jpeg,
            jpeg_size = encode_image(videoIn->snapshot, jpeg, jpeg_struct, 1, global->width, global->height);
            if(SaveBuff(videoIn->ImageFName, jpeg_size, jpeg))
            {
                g_printerr ("Error: Couldn't capture Image to %s \n", videoIn->ImageFName);
                //return(-1);
                rc = -1;
                goto end_func;
            }
        }
        break;
    case IMG_FORMAT_BMP:/*bmp*/
        /*24 bits -> 3bytes 32 bits ->4 bytes*/
        pim = g_new0(BYTE, (global->width)*(global->height)*3);
        //yuyv2bgr(videoIn->framebuffer,pim,global->width,global->height);
        yuyv2bgr(videoIn->snapshot,pim,global->width,global->height);
        if(SaveBPM(videoIn->ImageFName, global->width, global->height, 24, pim))
        {
            g_printerr ("Error: Couldn't capture Image to %s \n", videoIn->ImageFName);
            //return(-1);
            rc = -1;
            goto end_func;
        }
        break;
    case IMG_FORMAT_PNG:/*png*/
        /*24 bits -> 3bytes 32 bits ->4 bytes*/
        pim = g_new0(BYTE, (global->width)*(global->height)*3);
        //yuyv2rgb(videoIn->framebuffer,pim,global->width,global->height);
        yuyv2rgb(videoIn->snapshot,pim,global->width,global->height);
        write_png(videoIn->ImageFName, global->width, global->height, pim);
        break;
    case IMG_FORMAT_RAW:/*raw*/
        /* Defer: flag the capture loop to dump the next raw frame. */
        videoIn->cap_raw = 1;
        //return 1;
        rc = 1;  /* last case: falls straight into cleanup */
    }

end_func:
    /* goto-based cleanup: free whatever was allocated on the taken path.
       NOTE(review): g_free(NULL) is a no-op, so the guards are redundant. */
    if(jpeg_struct) g_free(jpeg_struct);
    jpeg_struct=NULL;
    if(jpeg) g_free(jpeg);
    jpeg = NULL;
    if(pim) g_free(pim);
    pim=NULL;
    return rc;
}
//将采集好的数据放到文件中 int process_image(void *addr,int length) { FILE *fp; static int num = 0; char picture_name[20]; printf("process-image len=%d\n",length); //#define YUYV_2_JPG_FILE #ifdef YUYV_2_JPG_FILE //jpg压缩文件 sprintf(picture_name,"picture%d.jpg",num++); u8 s[640*480*3]; int i=0;int j=0;int k=0; u8 y1,u,y2,v; //依次读取4字节/2像素 for(i=0;i<c_hight;i++) //行 for(j=0;j<c_width*2;){ //列 y1=*(int*)(addr+i*c_width*2+j+0); u=*(int*)(addr+i*c_width*2+j+1); y2=*(int*)(addr+i*c_width*2+j+2); v=*(int*)(addr+i*c_width*2+j+3); j+=4;//source :move to next 2 pixels (4byte) yuyv2rgb(y1,u,v ,&s[k+0],&s[k+1],&s[k+2]); yuyv2rgb(y1,u,v ,&s[k+3],&s[k+4],&s[k+5]); k+=6;//detct :move to next 2 pixels (6byte) } // char *d[c_width*c_hight*3]; // int i=0;int j=0; // //YUYV ->YUV YUV(yuv422) // for(i=0;i<length;i+=4,j+=6){ // d[j+0]=*((char *)addr+i+0); //Y1 // d[j+1]=*((char *)addr+i+1); //U1 // d[j+2]=*((char *)addr+i+3); //V1 // d[j+3]=*((char *)addr+i+2); //Y2 // d[j+4]=*((char *)addr+i+1); //U1 // d[j+5]=*((char *)addr+i+3); //V1 // } //数据格式RGB24 write_JPEG_file(s,c_width,c_hight,picture_name,100); usleep(500); #else #define CHANGE_PIC_FORMAT_TO_BMP //转换图像格式YUYV RGB888 bmpfile #ifdef CHANGE_PIC_FORMAT_TO_BMP //bmp位图格式文件 sprintf(picture_name,"picture%d.bmp",num++); if((fp = fopen(picture_name,"w")) == NULL){ perror("Fail to fopen"); exit(EXIT_FAILURE); } //每次读取4字节(2像素)的YUYV格式:Y0 U0 Y1 V0 //写入6字节(2像素) BGR BGR u8 s[640*480*3]; int i=0;int j=0;int k=0; u8 y1,u,y2,v; //依次读取4字节/2像素 //printf("size of head=%d",sizeof(head));// write_file_head(&fp,c_width,c_hight); //文件头 for(i=c_hight-1;i>=0;i--) //行 从最 底行->顶行 *** bottom -> top for(j=0;j<c_width*2;){ //列 2byte/pix lift -> right y1=*(int*)(addr+i*c_width*2+j+0); u=*(int*)(addr+i*c_width*2+j+1); y2=*(int*)(addr+i*c_width*2+j+2); v=*(int*)(addr+i*c_width*2+j+3); j+=4;//source :move to next 2 pixs (4byte) //RGB->BGR(windows bmp file format!) 
yuyv2rgb(y1,u,v ,&s[k+2],&s[k+1],&s[k+0]); yuyv2rgb(y1,u,v ,&s[k+5],&s[k+4],&s[k+3]); k+=6;//detct :move to next 2 pixs (6byte) } if(fwrite(s,sizeof(s),1,fp)<=0){ perror("write data "); } #else //原始格式图片 sprintf(picture_name,"picture%d.raw",num++); if((fp = fopen(picture_name,"w")) == NULL){ perror("Fail to fopen"); exit(EXIT_FAILURE); } fwrite(addr,length,1,fp); #endif usleep(500); fclose(fp); #endif printf("end of process-image\n"); return 0; }
/** Convert a captured YUYV frame into the RGB image buffer of `dest`. */
void UsbCam::process_image(const void * src, int len, camera_image_t *dest)
{
    const int num_pixels = dest->width * dest->height;
    yuyv2rgb((char*)src, dest->image, num_pixels);
}
static struct data *_get( const void *buf, size_t len ) { struct data *object_interface = & _interface; FILE *fp = NULL; long sizeof_png = 0; #ifdef HAVE_VIDEO const int W = VIDEO_FORMATS[0].width; const int H = VIDEO_FORMATS[0].height; #else const int W = DEFAULT_VIDEO_WIDTH; const int H = DEFAULT_VIDEO_HEIGHT; #endif #ifdef HAVE_TIMESTAMP static const char *TIME_FORMAT = "%y-%m-%d_%I:%M:%S%p"; // "2013-11-11 6:06:24PM" is 21 characters char tbuf[32]; const time_t ctime = time(NULL); struct tm ltime; localtime_r( &ctime, <ime ); if( strftime( tbuf, 32, TIME_FORMAT, <ime ) == 0 ) warnx( "strftime failed" ); #endif printf( "%s( %p, %ld )\n", __func__, buf, len ); #ifdef HAVE_CRYPTO crypto_init_iv( buf, len ); // currently entire Get payload is IV. #endif #ifdef HAVE_VIDEO if( _vci->snap( _vci, &_sizeof_vid_buffer, (uint8_t**)&_vid_buffer ) ) { warnx( "failed snapshot" ); return NULL; } yuyv2rgb( (const uint16_t *)_vid_buffer, W, H, _obj_buffer ); #else // Write "snow" into the raster line buffer. for(int i = 0; i < _sizeof_obj_buffer; i++ ) { _obj_buffer[i] = (uint8_t)( rand() % 256 ); } #endif #ifdef HAVE_TIMESTAMP _overlay_time_rgb( tbuf, _obj_buffer + ((H-charcell_height-2)*W + 2)*_samples_per_dcs_pixel, // assuming 8x17 W*_samples_per_dcs_pixel /* stride */); #endif fp = tmpfile(); if( NULL == fp ) { warn( "failed creating tmpfile" ); return NULL; } png_write( fp, _obj_buffer, W, H, "", _samples_per_dcs_pixel ); sizeof_png = ftell( fp ); _sizeof_obj // may be > sizeof_png... #ifdef HAVE_CRYPTO = crypto_sizeof_ciphertext( sizeof_png ); #else = sizeof_png;
// Dequeue one frame from the V4L2 device, convert it according to the
// configured color/crop mode into frm_buffer (cropped) or cam_buffer
// (full frame), re-queue the driver buffer, and return a pointer to the
// converted pixels. Returns NULL on dequeue failure (also clears
// `running`), on a NULL driver buffer, or on re-queue failure.
unsigned char* V4Linux2Camera::getFrame()
{
    if (dev_handle<0) return NULL;

    if (ioctl(dev_handle, VIDIOC_DQBUF, &v4l2_buf)<0) {
        running = false;  // dequeue failed: mark the camera as stopped
        return NULL;
    }

    unsigned char *raw_buffer = (unsigned char*)buffers[v4l2_buf.index].start;
    if (raw_buffer==NULL) return NULL;

    if(cfg->color) {
        if (cfg->frame) {
            // Color + crop: convert directly into the cropped frame buffer.
            if (pixelformat==V4L2_PIX_FMT_YUYV) crop_yuyv2rgb(cfg->cam_width,raw_buffer,frm_buffer);
            else if (pixelformat==V4L2_PIX_FMT_UYVY) crop_uyvy2rgb(cfg->cam_width,raw_buffer,frm_buffer);
            else if (pixelformat==V4L2_PIX_FMT_YUV420) {
                //TODO
            } else if (pixelformat==V4L2_PIX_FMT_YUV410) {
                //TODO
            } else if (pixelformat==V4L2_PIX_FMT_GREY) crop_gray2rgb(cfg->cam_width,raw_buffer, frm_buffer);
            else if ((pixelformat == V4L2_PIX_FMT_MJPEG) || (pixelformat == V4L2_PIX_FMT_JPEG)) {
                // JPEG path: decompress full frame to RGB, then crop.
                int jpegSubsamp;
                tjDecompressHeader2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, &cfg->cam_width, &cfg->cam_height, &jpegSubsamp);
                tjDecompress2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, cam_buffer, cfg->cam_width, 0, cfg->cam_height, TJPF_RGB, TJFLAG_FASTDCT);
                crop(cfg->cam_width, cfg->cam_height,cam_buffer,frm_buffer,3);
            }
        } else {
            // Color, full frame: convert into cam_buffer.
            if (pixelformat==V4L2_PIX_FMT_YUYV) yuyv2rgb(cfg->cam_width,cfg->cam_height,raw_buffer,cam_buffer);
            else if (pixelformat==V4L2_PIX_FMT_UYVY) uyvy2rgb(cfg->cam_width,cfg->cam_height,raw_buffer,cam_buffer);
            else if (pixelformat==V4L2_PIX_FMT_YUV420) {
                //TODO
            } else if (pixelformat==V4L2_PIX_FMT_YUV410) {
                //TODO
            } else if (pixelformat==V4L2_PIX_FMT_GREY) gray2rgb(cfg->cam_width,cfg->cam_height,raw_buffer,cam_buffer);
            else if ((pixelformat == V4L2_PIX_FMT_MJPEG) || (pixelformat == V4L2_PIX_FMT_JPEG)) {
                int jpegSubsamp;
                tjDecompressHeader2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, &cfg->cam_width, &cfg->cam_height, &jpegSubsamp);
                tjDecompress2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, cam_buffer, cfg->cam_width, 0, cfg->cam_height, TJPF_RGB, TJFLAG_FASTDCT);
            }
        }
    } else {
        if (cfg->frame) {
            // Grayscale + crop.
            if (pixelformat==V4L2_PIX_FMT_YUYV) crop_yuyv2gray(cfg->cam_width,raw_buffer,frm_buffer);
            else if (pixelformat==V4L2_PIX_FMT_UYVY) crop_uyvy2gray(cfg->cam_width,raw_buffer,frm_buffer);
            // Planar / already-gray formats: luma plane is first, crop it directly.
            else if (pixelformat==V4L2_PIX_FMT_YUV420) crop(cfg->cam_width, cfg->cam_height,raw_buffer,frm_buffer,1);
            else if (pixelformat==V4L2_PIX_FMT_YUV410) crop(cfg->cam_width, cfg->cam_height,raw_buffer,frm_buffer,1);
            else if (pixelformat==V4L2_PIX_FMT_GREY) crop(cfg->cam_width, cfg->cam_height,raw_buffer,frm_buffer,1);
            else if ((pixelformat == V4L2_PIX_FMT_MJPEG) || (pixelformat == V4L2_PIX_FMT_JPEG)) {
                int jpegSubsamp;
                tjDecompressHeader2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, &cfg->cam_width, &cfg->cam_height, &jpegSubsamp);
                tjDecompress2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, cam_buffer, cfg->cam_width, 0, cfg->cam_height, TJPF_GRAY, TJFLAG_FASTDCT);
                crop(cfg->cam_width, cfg->cam_height,cam_buffer,frm_buffer,1);
            }
        } else {
            // Grayscale, full frame.
            if (pixelformat==V4L2_PIX_FMT_YUYV) yuyv2gray(cfg->cam_width, cfg->cam_height, raw_buffer, cam_buffer);
            else if (pixelformat==V4L2_PIX_FMT_UYVY) uyvy2gray(cfg->cam_width, cfg->cam_height, raw_buffer, cam_buffer);
            // Planar formats: copy just the leading luma plane.
            else if (pixelformat==V4L2_PIX_FMT_YUV420) memcpy(cam_buffer,raw_buffer,cfg->cam_width*cfg->cam_height);
            else if (pixelformat==V4L2_PIX_FMT_YUV410) memcpy(cam_buffer,raw_buffer,cfg->cam_width*cfg->cam_height);
            //else if (pixelformat==V4L2_PIX_FMT_GREY) memcpy(cam_buffer,raw_buffer,cam_width*cam_height);
            else if ((pixelformat == V4L2_PIX_FMT_MJPEG) || (pixelformat == V4L2_PIX_FMT_JPEG)) {
                int jpegSubsamp;
                tjDecompressHeader2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, &cfg->cam_width, &cfg->cam_height, &jpegSubsamp);
                tjDecompress2(_jpegDecompressor, raw_buffer, v4l2_buf.bytesused, cam_buffer, cfg->cam_width, 0, cfg->cam_height, TJPF_GRAY, TJFLAG_FASTDCT);
            }
        }
    }

    // Hand the buffer back to the driver for the next capture.
    if (-1 == ioctl (dev_handle, VIDIOC_QBUF, &v4l2_buf)) {
        printf("cannot unqueue buffer: %s\n", strerror(errno));
        return NULL;
    }

    // Pick the buffer to hand back: cropped frame, raw gray (no conversion
    // was needed), or the full converted frame.
    if (cfg->frame) return frm_buffer;
    else if ((!cfg->color) && (pixelformat==V4L2_PIX_FMT_GREY)) return raw_buffer;
    else return cam_buffer;
}