int main(int argc, char **argv)
{
    int i, c, interlace, frames, err;
    int ywidth, yheight, uvwidth, uvheight, ylen, uvlen;
    int verbose = 0, fdin;
    int NlumaX = 4, NlumaY = 4, NchromaX = 4, NchromaY = 4;
    float BWlumaX = 0.8, BWlumaY = 0.8, BWchromaX = 0.7, BWchromaY = 0.7;
    struct filter *lumaXtaps, *lumaYtaps, *chromaXtaps, *chromaYtaps;
    u_char *yuvinout[3];
    float *yuvtmp1, *yuvtmp2;
    y4m_stream_info_t istream, ostream;
    y4m_frame_info_t iframe;

    fdin = fileno(stdin);

    y4m_accept_extensions(1);

    /* read command line */
    opterr = 0;
    while ((c = getopt(argc, argv, "hvL:C:x:X:y:Y:")) != EOF) {
        switch (c) {
        case 'L':
            sscanf(optarg, "%d,%f,%d,%f", &NlumaX, &BWlumaX, &NlumaY, &BWlumaY);
            break;
        case 'C':
            sscanf(optarg, "%d,%f,%d,%f", &NchromaX, &BWchromaX, &NchromaY, &BWchromaY);
            break;
        case 'x':
            sscanf(optarg, "%d,%f", &NchromaX, &BWchromaX);
            break;
        case 'X':
            sscanf(optarg, "%d,%f", &NlumaX, &BWlumaX);
            break;
        case 'y':
            sscanf(optarg, "%d,%f", &NchromaY, &BWchromaY);
            break;
        case 'Y':
            sscanf(optarg, "%d,%f", &NlumaY, &BWlumaY);
            break;
        case 'v':
            verbose++;
            break;
        case '?':
        case 'h':
        default:
            usage();
        }
    }

    if (BWlumaX <= 0.0 || BWlumaX > 1.0)
        mjpeg_error_exit1("Horizontal luma bandwidth '%f' not >0 and <=1.0", BWlumaX);
    if (BWlumaY <= 0.0 || BWlumaY > 1.0)
        mjpeg_error_exit1("Vertical luma bandwidth '%f' not >0 and <=1.0", BWlumaY);
    if (BWchromaX <= 0.0 || BWchromaX > 1.0)
        mjpeg_error_exit1("Horizontal chroma bandwidth '%f' not >0 and <=1.0", BWchromaX);
    if (BWchromaY <= 0.0 || BWchromaY > 1.0)
        mjpeg_error_exit1("Vertical chroma bandwidth '%f' not >0 and <=1.0", BWchromaY);

    /* initialize input stream and check chroma subsampling and interlacing */
    y4m_init_stream_info(&istream);
    y4m_init_frame_info(&iframe);
    err = y4m_read_stream_header(fdin, &istream);
    if (err != Y4M_OK)
        mjpeg_error_exit1("Input stream error: %s\n", y4m_strerr(err));

    if (y4m_si_get_plane_count(&istream) != 3)
        mjpeg_error_exit1("Only the 3 plane formats supported");

    i = y4m_si_get_interlace(&istream);
    switch (i) {
    case Y4M_ILACE_NONE:
        interlace = 0;
        break;
    case Y4M_ILACE_BOTTOM_FIRST:
    case Y4M_ILACE_TOP_FIRST:
        interlace = 1;
        break;
    default:
        mjpeg_warn("Unknown interlacing '%d', assuming non-interlaced", i);
        interlace = 0;
        break;
    }

    ywidth = y4m_si_get_width(&istream);    /* plane 0 = Y */
    yheight = y4m_si_get_height(&istream);
    ylen = ywidth * yheight;
    uvwidth = y4m_si_get_plane_width(&istream, 1);   /* planes 1&2 = U+V */
    uvheight = y4m_si_get_plane_height(&istream, 1);
    uvlen = y4m_si_get_plane_length(&istream, 1);

    /* initialize output stream */
    y4m_init_stream_info(&ostream);
    y4m_copy_stream_info(&ostream, &istream);
    y4m_write_stream_header(fileno(stdout), &ostream);

    /* allocate input and output buffers */
    yuvinout[0] = my_malloc(ylen * sizeof(u_char));
    yuvinout[1] = my_malloc(uvlen * sizeof(u_char));
    yuvinout[2] = my_malloc(uvlen * sizeof(u_char));
    yuvtmp1 = my_malloc(MAX(ylen, uvlen) * sizeof(float));
    yuvtmp2 = my_malloc(MAX(ylen, uvlen) * sizeof(float));

    /* get filter taps */
    lumaXtaps = get_coeff(NlumaX, BWlumaX);
    lumaYtaps = get_coeff(NlumaY, BWlumaY);
    chromaXtaps = get_coeff(NchromaX, BWchromaX);
    chromaYtaps = get_coeff(NchromaY, BWchromaY);

    set_accel(uvwidth, uvheight);

    if (verbose)
        y4m_log_stream_info(mjpeg_loglev_t("info"), "", &istream);

    /* main processing loop */
    for (frames = 0; y4m_read_frame(fdin, &istream, &iframe, yuvinout) == Y4M_OK; frames++) {
        if (verbose && ((frames % 100) == 0))
            mjpeg_info("Frame %d\n", frames);
        convolveFrame(yuvinout[0], ywidth, yheight, interlace, lumaXtaps, lumaYtaps, yuvtmp1, yuvtmp2);
        convolveFrame(yuvinout[1], uvwidth, uvheight, interlace, chromaXtaps, chromaYtaps, yuvtmp1, yuvtmp2);
        convolveFrame(yuvinout[2], uvwidth, uvheight, interlace, chromaXtaps, chromaYtaps, yuvtmp1, yuvtmp2);
        y4m_write_frame(fileno(stdout), &ostream, &iframe, yuvinout);
    }

    /* clean up */
    y4m_fini_frame_info(&iframe);
    y4m_fini_stream_info(&istream);
    y4m_fini_stream_info(&ostream);
    exit(0);
}
int main(int argc, char *argv[])
{
    int verbosity = 1;
    double time_between_frames = 0.0;
    double frame_rate = 0.0;
    struct timeval time_now;
    int n, frame;
    unsigned char *yuv[3];
    int in_fd = 0;
    int screenwidth = 0, screenheight = 0;
    y4m_stream_info_t streaminfo;
    y4m_frame_info_t frameinfo;
    int frame_width;
    int frame_height;
    int wait_for_sync = 1;
    char *window_title = NULL;

    while ((n = getopt(argc, argv, "hs:t:f:cv:")) != EOF) {
        switch (n) {
        case 'c':
            wait_for_sync = 0;
            break;
        case 's':
            if (sscanf(optarg, "%dx%d", &screenwidth, &screenheight) != 2) {
                mjpeg_error_exit1("-s option needs two arguments: -s 10x10");
                exit(1);
            }
            break;
        case 't':
            window_title = optarg;
            break;
        case 'f':
            frame_rate = atof(optarg);
            if (frame_rate <= 0.0 || frame_rate > 200.0)
                mjpeg_error_exit1("-f option needs argument > 0.0 and < 200.0");
            break;
        case 'v':
            verbosity = atoi(optarg);
            if ((verbosity < 0) || (verbosity > 2))
                mjpeg_error_exit1("-v needs argument from {0, 1, 2} (not %d)", verbosity);
            break;
        case 'h':
        case '?':
            usage();
            exit(1);
            break;
        default:
            usage();
            exit(1);
        }
    }

    mjpeg_default_handler_verbosity(verbosity);

    y4m_accept_extensions(1);
    y4m_init_stream_info(&streaminfo);
    y4m_init_frame_info(&frameinfo);

    if ((n = y4m_read_stream_header(in_fd, &streaminfo)) != Y4M_OK) {
        mjpeg_error("Couldn't read YUV4MPEG2 header: %s!", y4m_strerr(n));
        exit(1);
    }

    switch (y4m_si_get_chroma(&streaminfo)) {
    case Y4M_CHROMA_420JPEG:
    case Y4M_CHROMA_420MPEG2:
    case Y4M_CHROMA_420PALDV:
        break;
    default:
        mjpeg_error_exit1("Cannot handle non-4:2:0 streams yet!");
    }

    frame_width = y4m_si_get_width(&streaminfo);
    frame_height = y4m_si_get_height(&streaminfo);

    if ((screenwidth <= 0) || (screenheight <= 0)) {
        /* no user supplied screen size, so let's use the stream info */
        y4m_ratio_t aspect = y4m_si_get_sampleaspect(&streaminfo);

        if (!(Y4M_RATIO_EQL(aspect, y4m_sar_UNKNOWN))) {
            /* if pixel aspect ratio present, use it */
#if 1
            /* scale width, but maintain height (line count) */
            screenheight = frame_height;
            screenwidth = frame_width * aspect.n / aspect.d;
#else
            if ((frame_width * aspect.d) < (frame_height * aspect.n)) {
                screenwidth = frame_width;
                screenheight = frame_width * aspect.d / aspect.n;
            } else {
                screenheight = frame_height;
                screenwidth = frame_height * aspect.n / aspect.d;
            }
#endif
        } else {
            /* unknown aspect ratio -- assume square pixels */
            screenwidth = frame_width;
            screenheight = frame_height;
        }
    }

    /* Initialize the SDL library */
    if (SDL_Init(SDL_INIT_VIDEO) < 0) {
        mjpeg_error("Couldn't initialize SDL: %s", SDL_GetError());
        exit(1);
    }

    /* set window title */
    SDL_WM_SetCaption(window_title, NULL);

    /* yuv params */
    yuv[0] = malloc(frame_width * frame_height * sizeof(unsigned char));
    yuv[1] = malloc(frame_width * frame_height / 4 * sizeof(unsigned char));
    yuv[2] = malloc(frame_width * frame_height / 4 * sizeof(unsigned char));

    screen = SDL_SetVideoMode(screenwidth, screenheight, 0, SDL_SWSURFACE);
    if (screen == NULL) {
        mjpeg_error("SDL: Couldn't set %dx%d: %s", screenwidth, screenheight, SDL_GetError());
        exit(1);
    } else {
        mjpeg_debug("SDL: Set %dx%d @ %d bpp", screenwidth, screenheight,
                    screen->format->BitsPerPixel);
    }

    /* since IYUV ordering is not supported by Xv accel on maddog's system
     * (Matrox G400 --- although, the alias I420 is, but this is not
     * recognized by SDL), we use YV12 instead, which is identical,
     * except for ordering of Cb and Cr planes...
     * we swap those when we copy the data to the display buffer...
     */
    yuv_overlay = SDL_CreateYUVOverlay(frame_width, frame_height, SDL_YV12_OVERLAY, screen);
    if (yuv_overlay == NULL) {
        mjpeg_error("SDL: Couldn't create SDL_yuv_overlay: %s", SDL_GetError());
        exit(1);
    }
    if (yuv_overlay->hw_overlay)
        mjpeg_debug("SDL: Using hardware overlay.");

    rect.x = 0;
    rect.y = 0;
    rect.w = screenwidth;
    rect.h = screenheight;

    SDL_DisplayYUVOverlay(yuv_overlay, &rect);

    signal(SIGINT, sigint_handler);

    frame = 0;
    if (frame_rate == 0.0) {
        /* frame rate has not been set from command-line... */
        if (Y4M_RATIO_EQL(y4m_fps_UNKNOWN, y4m_si_get_framerate(&streaminfo))) {
            mjpeg_info("Frame-rate undefined in stream... assuming 25Hz!");
            frame_rate = 25.0;
        } else {
            frame_rate = Y4M_RATIO_DBL(y4m_si_get_framerate(&streaminfo));
        }
    }
    time_between_frames = 1.e6 / frame_rate;

    gettimeofday(&time_now, 0);

    while ((n = y4m_read_frame(in_fd, &streaminfo, &frameinfo, yuv)) == Y4M_OK && (!got_sigint)) {
        /* Lock SDL_yuv_overlay */
        if (SDL_MUSTLOCK(screen)) {
            if (SDL_LockSurface(screen) < 0)
                break;
        }
        if (SDL_LockYUVOverlay(yuv_overlay) < 0)
            break;

        /* let's draw the data (*yuv[3]) on a SDL screen (*screen) */
        memcpy(yuv_overlay->pixels[0], yuv[0], frame_width * frame_height);
        memcpy(yuv_overlay->pixels[1], yuv[2], frame_width * frame_height / 4);
        memcpy(yuv_overlay->pixels[2], yuv[1], frame_width * frame_height / 4);

        /* Unlock SDL_yuv_overlay */
        if (SDL_MUSTLOCK(screen)) {
            SDL_UnlockSurface(screen);
        }
        SDL_UnlockYUVOverlay(yuv_overlay);

        /* Show, baby, show! */
        SDL_DisplayYUVOverlay(yuv_overlay, &rect);
        mjpeg_info("Playing frame %4.4d - %s", frame, print_status(frame, frame_rate));

        if (wait_for_sync)
            while (get_time_diff(time_now) < time_between_frames) {
                usleep(1000);
            }
        frame++;

        gettimeofday(&time_now, 0);
    }

    if ((n != Y4M_OK) && (n != Y4M_ERR_EOF))
        mjpeg_error("Couldn't read frame: %s", y4m_strerr(n));

    for (n = 0; n < 3; n++) {
        free(yuv[n]);
    }

    mjpeg_info("Played %4.4d frames (%s)", frame, print_status(frame, frame_rate));

    SDL_FreeYUVOverlay(yuv_overlay);
    SDL_Quit();

    y4m_fini_frame_info(&frameinfo);
    y4m_fini_stream_info(&streaminfo);
    return 0;
}
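/* get_time_diff() and print_status() are defined elsewhere in the player source
 * and are not shown in this excerpt.  A minimal sketch of get_time_diff(),
 * assuming (as the pacing loop above implies) that it returns the elapsed
 * wall-clock time in microseconds since the supplied timestamp:
 */
static double get_time_diff(struct timeval time_now)
{
    struct timeval now;

    gettimeofday(&now, 0);
    /* seconds difference scaled to microseconds, plus the microsecond remainder */
    return (now.tv_sec - time_now.tv_sec) * 1.e6 + (now.tv_usec - time_now.tv_usec);
}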
static void process(int fdIn, y4m_stream_info_t *inStrInfo,
                    int fdOut, y4m_stream_info_t *outStrInfo,
                    int max, int search, int noshift)
{
    y4m_frame_info_t in_frame;
    uint8_t *yuv_data[3];
    // int result[720]; // will change to malloc based on max shift
    int *lineresult;
    int read_error_code = Y4M_OK;
    int write_error_code = Y4M_OK;
    int src_frame_counter = 0;
    int x, y, h;

    h = y4m_si_get_plane_height(inStrInfo, 0);

    lineresult = (int *) malloc(sizeof(int) * h);

    chromalloc(yuv_data, inStrInfo);

    // initialise and read the first number of frames
    y4m_init_frame_info(&in_frame);
    read_error_code = y4m_read_frame(fdIn, inStrInfo, &in_frame, yuv_data);

    while (Y4M_ERR_EOF != read_error_code && write_error_code == Y4M_OK) {
        for (y = 0; y < h - 1; y++)
            lineresult[y] = search_video(max, search, y, yuv_data, inStrInfo);

        if (noshift) {
            /* graphing this would be nice; only h-1 results are computed above */
            for (x = 0; x < h - 1; x++) {
                if (x != 0)
                    printf(", ");
                printf("%d", lineresult[x]);
            }
            printf("\n");
        } else {
            int shifter = 0;
            for (y = 0; y < h - 1; y++) {
                // shifter += lineresult[y];
                shifter = -lineresult[y];
                shift_video(shifter, y, yuv_data, inStrInfo);
            }
            write_error_code = y4m_write_frame(fdOut, outStrInfo, &in_frame, yuv_data);
        }

        y4m_fini_frame_info(&in_frame);
        y4m_init_frame_info(&in_frame);
        read_error_code = y4m_read_frame(fdIn, inStrInfo, &in_frame, yuv_data);
        ++src_frame_counter;
    }

    // Clean up regardless of whether an error happened
    y4m_fini_frame_info(&in_frame);
    free(lineresult);
    chromafree(yuv_data);

    if (read_error_code != Y4M_ERR_EOF)
        mjpeg_error_exit1("Error reading from input stream!");
    if (write_error_code != Y4M_OK)
        mjpeg_error_exit1("Error writing output stream!");
}
static void filter(int fdIn, y4m_stream_info_t *inStrInfo, int qual, char *format)
{
    y4m_frame_info_t in_frame;
    uint8_t *yuv_data[3];
    int read_error_code;
    int write_error_code;
    FILE *fh;
    char filename[1024];
    int frame_count = 1;

    // to be moved to command line parameters
    //char *format = "frame%03d.jpg";
    //int qual = 95;

    // Allocate memory for the YUV channels
    if (chromalloc(yuv_data, inStrInfo))
        mjpeg_error_exit1("Couldn't allocate memory for the YUV4MPEG data!");

    /* Initialize counters */
    write_error_code = Y4M_OK;

    y4m_init_frame_info(&in_frame);
    read_error_code = y4m_read_frame(fdIn, inStrInfo, &in_frame, yuv_data);

    while (Y4M_ERR_EOF != read_error_code && write_error_code == Y4M_OK) {
        // do work
        if (read_error_code == Y4M_OK) {
            // snprintf avoids overflowing the filename buffer
            snprintf(filename, sizeof(filename), format, frame_count);
            fh = fopen(filename, "w");
            if (fh != NULL) {
                put_jpeg_yuv420p_file(fh, yuv_data, inStrInfo, qual);
                fclose(fh);
            } else {
                perror("fopen jpeg file");
            }
        }

        y4m_fini_frame_info(&in_frame);
        y4m_init_frame_info(&in_frame);
        read_error_code = y4m_read_frame(fdIn, inStrInfo, &in_frame, yuv_data);
        frame_count++;
    }

    // Clean up regardless of whether an error happened
    y4m_fini_frame_info(&in_frame);
    free(yuv_data[0]);
    free(yuv_data[1]);
    free(yuv_data[2]);

    if (read_error_code != Y4M_ERR_EOF)
        mjpeg_error_exit1("Error reading from input stream!");
}
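/* chromalloc() and chromafree() are shared helpers in this tool set and are
 * not shown in this excerpt.  A minimal sketch of what they are assumed to do,
 * based on how they are called above: allocate the three planes of one frame
 * from the stream info (returning non-zero on failure) and free them again.
 */
static int chromalloc(uint8_t *m[3], y4m_stream_info_t *sinfo)
{
    int i;

    for (i = 0; i < 3; i++) {
        m[i] = (uint8_t *) malloc(y4m_si_get_plane_length(sinfo, i));
        if (m[i] == NULL)
            return 1;   /* callers treat non-zero as an allocation failure */
    }
    return 0;
}

static void chromafree(uint8_t *m[3])
{
    int i;

    for (i = 0; i < 3; i++)
        free(m[i]);
}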
int main(int argc, char **argv)
{
    int c, err, ilace;
    int fd_in = fileno(stdin), fd_out = fileno(stdout);
    y4m_ratio_t rate;
    y4m_stream_info_t si, so;
    y4m_frame_info_t fi;
    uint8_t *top1[3], *bot1[3], *top2[3], *bot2[3];

    opterr = 0;
    while ((c = getopt(argc, argv, "h")) != EOF) {
        switch (c) {
        case 'h':
        case '?':
        default:
            usage();
        }
    }

    y4m_accept_extensions(1);
    y4m_init_stream_info(&si);
    y4m_init_stream_info(&so);
    y4m_init_frame_info(&fi);

    err = y4m_read_stream_header(fd_in, &si);
    if (err != Y4M_OK)
        mjpeg_error_exit1("Input stream error: %s\n", y4m_strerr(err));

    if (y4m_si_get_plane_count(&si) != 3)
        mjpeg_error_exit1("only 3 plane formats supported");

    rate = y4m_si_get_framerate(&si);
    if (!Y4M_RATIO_EQL(rate, y4m_fps_NTSC))
        mjpeg_error_exit1("input stream not NTSC 30000:1001");

    ilace = y4m_si_get_interlace(&si);
    if (ilace != Y4M_ILACE_BOTTOM_FIRST && ilace != Y4M_ILACE_TOP_FIRST)
        mjpeg_error_exit1("input stream not interlaced");

    top1[0] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 0) / 2);
    top1[1] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 1) / 2);
    top1[2] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 2) / 2);
    bot1[0] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 0) / 2);
    bot1[1] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 1) / 2);
    bot1[2] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 2) / 2);
    top2[0] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 0) / 2);
    top2[1] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 1) / 2);
    top2[2] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 2) / 2);
    bot2[0] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 0) / 2);
    bot2[1] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 1) / 2);
    bot2[2] = (uint8_t *) malloc(y4m_si_get_plane_length(&si, 2) / 2);

    y4m_copy_stream_info(&so, &si);
    y4m_si_set_framerate(&so, y4m_fps_NTSC_FILM);
    y4m_si_set_interlace(&so, Y4M_ILACE_NONE);

    /*
     * At this point the input stream has been verified to be interlaced NTSC,
     * the output stream rate set to NTSC_FILM, the interlacing tag changed to
     * progressive, and the field buffers allocated.
     *
     * Time to write the output stream header and commence processing input.
     */
    y4m_write_stream_header(fd_out, &so);

    while (1) {
        err = y4m_read_fields(fd_in, &si, &fi, top1, bot1);
        if (err != Y4M_OK)
            goto done;
        y4m_write_fields(fd_out, &so, &fi, top1, bot1);   /* A */

        err = y4m_read_fields(fd_in, &si, &fi, top1, bot1);
        if (err != Y4M_OK)
            goto done;
        y4m_write_fields(fd_out, &so, &fi, top1, bot1);   /* B */

        err = y4m_read_fields(fd_in, &si, &fi, top1, bot1);
        if (err != Y4M_OK)
            goto done;
        err = y4m_read_fields(fd_in, &si, &fi, top2, bot2);
        if (err != Y4M_OK) {
            /*
             * End of input when reading the 2nd "mixed field" frame (C+D). The previous
             * frame was the first "mixed field" frame (B+C). Rather than emit a mixed
             * interlaced frame, duplicate a field and output the previous frame.
             */
            if (ilace == Y4M_ILACE_BOTTOM_FIRST)
                y4m_write_fields(fd_out, &so, &fi, bot1, bot1);
            else
                y4m_write_fields(fd_out, &so, &fi, top1, top1);
            goto done;
        }

        /*
         * Now the key part of the processing - effectively discarding the first mixed
         * frame with fields from frames B + C and creating the C frame from the two
         * mixed frames. For a BOTTOM FIELD FIRST stream use the 'top' field from
         * frame 3 and the 'bottom' field from frame 4. With a TOP FIELD FIRST stream
         * it's the other way around - use the 'bottom' field from frame 3 and the
         * 'top' field from frame 4.
         */
        if (ilace == Y4M_ILACE_BOTTOM_FIRST)
            y4m_write_fields(fd_out, &so, &fi, top1, bot2);   /* C */
        else
            y4m_write_fields(fd_out, &so, &fi, top2, bot1);   /* C */

        err = y4m_read_fields(fd_in, &si, &fi, top1, bot1);
        if (err != Y4M_OK)
            goto done;
        y4m_write_fields(fd_out, &so, &fi, top1, bot1);   /* D */
    }
done:
    y4m_fini_frame_info(&fi);
    y4m_fini_stream_info(&si);
    y4m_fini_stream_info(&so);
    exit(0);
}
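/*
 * For reference, a summary (not part of the original source) of the 2:3
 * pulldown cadence the loop above undoes: every five interlaced 30000:1001
 * frames carry four progressive 24000:1001 film frames,
 *
 *   input frame:   1     2     3     4     5
 *   fields:        A/A   B/B   B+C   C+D   D/D
 *   output frame:  A     B     -     C     D
 *
 * Frames 1, 2 and 5 pass straight through; frame C is rebuilt from one field
 * of each of the two mixed frames (3 and 4), and the B+C mix is discarded.
 */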
static void convolve(int fdIn, y4m_stream_info_t *inStrInfo,
                     int fdOut, y4m_stream_info_t *outStrInfo,
                     int *mat, int div, int mlen)
{
    y4m_frame_info_t in_frame;
    uint8_t *yuv_data[3], *yuv_odata[3];
    int read_error_code;
    int write_error_code;
    int src_frame_counter;
    float vy, vu, vv;
    int x, y, w, h, cw, ch, mx, my, count;

    w = y4m_si_get_plane_width(inStrInfo, 0);
    h = y4m_si_get_plane_height(inStrInfo, 0);
    cw = y4m_si_get_plane_width(inStrInfo, 1);
    ch = y4m_si_get_plane_height(inStrInfo, 1);

    if (chromalloc(yuv_data, inStrInfo))
        mjpeg_error_exit1("Couldn't allocate memory for the YUV4MPEG data!");
    if (chromalloc(yuv_odata, inStrInfo))
        mjpeg_error_exit1("Couldn't allocate memory for the YUV4MPEG data!");

    write_error_code = Y4M_OK;
    src_frame_counter = 0;

    // initialise and read the first number of frames
    y4m_init_frame_info(&in_frame);
    read_error_code = y4m_read_frame(fdIn, inStrInfo, &in_frame, yuv_data);

    while (Y4M_ERR_EOF != read_error_code && write_error_code == Y4M_OK) {
        for (x = 0; x < w; x++) {
            for (y = 0; y < h; y++) {
                // perform magic
                vy = 0;
                count = 0;
                // need to be handled differently for interlace
                for (my = -mlen / 2; my <= mlen / 2; my++) {
                    for (mx = -mlen / 2; mx <= mlen / 2; mx++) {
                        if ((x + mx >= 0) && (x + mx < w) && (y + my >= 0) && (y + my < h)) {
                            vy += *(yuv_data[0] + x + mx + (y + my) * w) * mat[count];
                        }
                        count++;
                    }
                }
                vy /= div;
                if (vy < 16) vy = 16;
                if (vy > 240) vy = 240;
                *(yuv_odata[0] + x + y * w) = vy;

                if ((x < cw) && (y < ch)) {
                    vu = 0;
                    vv = 0;
                    count = 0;
                    // may need to be handled differently for interlace
                    for (my = -mlen / 2; my <= mlen / 2; my++) {
                        for (mx = -mlen / 2; mx <= mlen / 2; mx++) {
                            if ((x + mx >= 0) && (x + mx < cw) && (y + my >= 0) && (y + my < ch)) {
                                vu += (*(yuv_data[1] + x + mx + (y + my) * cw) - 128) * mat[count];
                                vv += (*(yuv_data[2] + x + mx + (y + my) * cw) - 128) * mat[count];
                            }
                            count++;
                        }
                    }
                    vu /= div;
                    vv /= div;
                    if (vu < -112) vu = -112;
                    if (vu > 112) vu = 112;
                    if (vv < -112) vv = -112;
                    if (vv > 112) vv = 112;
                    *(yuv_odata[1] + x + y * cw) = vu + 128;
                    *(yuv_odata[2] + x + y * cw) = vv + 128;
                }
            }
        }

        write_error_code = y4m_write_frame(fdOut, outStrInfo, &in_frame, yuv_odata);

        y4m_fini_frame_info(&in_frame);
        y4m_init_frame_info(&in_frame);
        read_error_code = y4m_read_frame(fdIn, inStrInfo, &in_frame, yuv_data);
        ++src_frame_counter;
    }

    // Clean up regardless of whether an error happened
    y4m_fini_frame_info(&in_frame);
    free(yuv_data[0]);
    free(yuv_data[1]);
    free(yuv_data[2]);
    free(yuv_odata[0]);
    free(yuv_odata[1]);
    free(yuv_odata[2]);

    if (read_error_code != Y4M_ERR_EOF)
        mjpeg_error_exit1("Error reading from input stream!");
    if (write_error_code != Y4M_OK)
        mjpeg_error_exit1("Error writing output stream!");
}
static int yw_open_video(YWPrivateData *pd, vob_t *vob)
{
    int errnum = Y4M_OK;
    int ch_mode = 0;

    /* initialize stream-information */
    y4m_accept_extensions(1);
    y4m_init_stream_info(&pd->streaminfo);
    y4m_init_frame_info(&pd->frameinfo);

    if (vob->im_v_codec == CODEC_YUV) {
        pd->dstfmt = IMG_YUV_DEFAULT;
    } else if (vob->im_v_codec == CODEC_RGB) {
        pd->dstfmt = IMG_RGB_DEFAULT;
    } else {
        tc_log_error(MOD_NAME, "unsupported video format %d", vob->im_v_codec);
        return (TC_EXPORT_ERROR);
    }

    /* we trust autoprobing */
    pd->width = vob->im_v_width;
    pd->height = vob->im_v_height;

    pd->fd_vid = open(vob->video_in_file, O_RDONLY);
    if (pd->fd_vid == -1) {
        tc_log_error(MOD_NAME, "can't open video source '%s'"
                     " (reason: %s)", vob->video_in_file, strerror(errno));
    } else {
        if (verbose >= TC_DEBUG) {
            tc_log_info(MOD_NAME, "using video source: %s", vob->video_in_file);
        }
    }

    pd->tcvhandle = tcv_init();
    if (!pd->tcvhandle) {
        tc_log_error(MOD_NAME, "image conversion init failed");
        return (TC_EXPORT_ERROR);
    }

    errnum = y4m_read_stream_header(pd->fd_vid, &pd->streaminfo);
    if (errnum != Y4M_OK) {
        tc_log_error(MOD_NAME, "Couldn't read YUV4MPEG header: %s!", y4m_strerr(errnum));
        tcv_free(pd->tcvhandle);
        close(pd->fd_vid);
        return (TC_IMPORT_ERROR);
    }

    if (y4m_si_get_plane_count(&pd->streaminfo) != 3) {
        tc_log_error(MOD_NAME, "Only 3-plane formats supported");
        close(pd->fd_vid);
        return (TC_IMPORT_ERROR);
    }

    ch_mode = y4m_si_get_chroma(&pd->streaminfo);
    if (ch_mode != Y4M_CHROMA_420JPEG && ch_mode != Y4M_CHROMA_420MPEG2 &&
        ch_mode != Y4M_CHROMA_420PALDV) {
        tc_log_error(MOD_NAME, "sorry, chroma mode `%s' (%i) not supported",
                     y4m_chroma_description(ch_mode), ch_mode);
        tcv_free(pd->tcvhandle);
        close(pd->fd_vid);
        return (TC_IMPORT_ERROR);
    }
    if (verbose) {
        tc_log_info(MOD_NAME, "chroma mode: %s", y4m_chroma_description(ch_mode));
    }

    return (TC_IMPORT_OK);
}
int main(int argc, char **argv)
{
    int sts, c, width = 640, height = 480, noheader = 0;
    int Y = 16, U = 128, V = 128, chroma_mode = Y4M_CHROMA_420MPEG2;
    int numframes = 1, force = 0;
    y4m_ratio_t rate_ratio = y4m_fps_NTSC;
    y4m_ratio_t aspect_ratio = y4m_sar_SQUARE;
    int plane_length[3];
    u_char *yuv[3];
    y4m_stream_info_t ostream;
    y4m_frame_info_t oframe;
    char interlace = Y4M_ILACE_NONE;

    opterr = 0;
    y4m_accept_extensions(1);

    while ((c = getopt(argc, argv, "Hfx:w:h:r:i:a:Y:U:V:n:")) != EOF) {
        switch (c) {
        case 'H':
            noheader = 1;
            break;
        case 'a':
            sts = y4m_parse_ratio(&aspect_ratio, optarg);
            if (sts != Y4M_OK)
                mjpeg_error_exit1("Invalid aspect: %s", optarg);
            break;
        case 'w':
            width = atoi(optarg);
            break;
        case 'h':
            height = atoi(optarg);
            break;
        case 'r':
            sts = y4m_parse_ratio(&rate_ratio, optarg);
            if (sts != Y4M_OK)
                mjpeg_error_exit1("Invalid rate: %s", optarg);
            break;
        case 'Y':
            Y = atoi(optarg);
            break;
        case 'U':
            U = atoi(optarg);
            break;
        case 'V':
            V = atoi(optarg);
            break;
        case 'i':
            switch (optarg[0]) {
            case 'p': interlace = Y4M_ILACE_NONE; break;
            case 't': interlace = Y4M_ILACE_TOP_FIRST; break;
            case 'b': interlace = Y4M_ILACE_BOTTOM_FIRST; break;
            default: usage();
            }
            break;
        case 'x':
            chroma_mode = y4m_chroma_parse_keyword(optarg);
            if (chroma_mode == Y4M_UNKNOWN) {
                if (strcmp(optarg, "help") != 0)
                    mjpeg_error("Invalid -x arg '%s'", optarg);
                chroma_usage();
            }
            break;
        case 'f':
            force = 1;
            break;
        case 'n':
            numframes = atoi(optarg);
            break;
        case '?':
        default:
            usage();
        }
    }

    if (width <= 0)
        mjpeg_error_exit1("Invalid Width: %d", width);
    if (height <= 0)
        mjpeg_error_exit1("Invalid Height: %d", height);

    if (!force && (Y < 16 || Y > 235))
        mjpeg_error_exit1("Y must be in the range 16..235 (use -f to override)");
    if (!force && (U < 16 || U > 240))
        mjpeg_error_exit1("U must be in the range 16..240 (use -f to override)");
    if (!force && (V < 16 || V > 240))
        mjpeg_error_exit1("V must be in the range 16..240 (use -f to override)");

    y4m_init_stream_info(&ostream);
    y4m_init_frame_info(&oframe);
    y4m_si_set_width(&ostream, width);
    y4m_si_set_height(&ostream, height);
    y4m_si_set_interlace(&ostream, interlace);
    y4m_si_set_framerate(&ostream, rate_ratio);
    y4m_si_set_sampleaspect(&ostream, aspect_ratio);
    y4m_si_set_chroma(&ostream, chroma_mode);

    if (y4m_si_get_plane_count(&ostream) != 3)
        mjpeg_error_exit1("Only the 3 plane formats supported");

    plane_length[0] = y4m_si_get_plane_length(&ostream, 0);
    plane_length[1] = y4m_si_get_plane_length(&ostream, 1);
    plane_length[2] = y4m_si_get_plane_length(&ostream, 2);

    yuv[0] = malloc(plane_length[0]);
    yuv[1] = malloc(plane_length[1]);
    yuv[2] = malloc(plane_length[2]);

    /*
     * Now fill the array once with black but use the provided Y, U and V values
     */
    memset(yuv[0], Y, plane_length[0]);
    memset(yuv[1], U, plane_length[1]);
    memset(yuv[2], V, plane_length[2]);

    if (noheader == 0)
        y4m_write_stream_header(fileno(stdout), &ostream);
    while (numframes--)
        y4m_write_frame(fileno(stdout), &ostream, &oframe, yuv);

    free(yuv[0]);
    free(yuv[1]);
    free(yuv[2]);
    y4m_fini_stream_info(&ostream);
    y4m_fini_frame_info(&oframe);
    exit(0);
}
int main(int argc, char **argv)
{
    int fdin, fdout, err, c, i, verbose = 1;
    y4m_stream_info_t istream, ostream;
    y4m_frame_info_t iframe;

    fdin = fileno(stdin);
    fdout = fileno(stdout);

    y4m_accept_extensions(1);
    y4m_init_stream_info(&istream);
    y4m_init_frame_info(&iframe);

    while ((c = getopt(argc, argv, "L:C:hv:N")) != EOF) {
        switch (c) {
        case 'N':
            /* disable clipping: open up both the luma and chroma ranges */
            lowuv = lowy = 0;
            highuv = highy = 255;
            break;
        case 'L':
            i = sscanf(optarg, "%lf,%lf,%d", &y_radius, &y_amount, &y_threshold);
            if (i != 3) {
                mjpeg_error("-L r,a,t");
                usage();
            }
            break;
        case 'C':
            i = sscanf(optarg, "%lf,%lf,%d", &uv_radius, &uv_amount, &uv_threshold);
            if (i != 3) {
                mjpeg_error("-C r,a,t");
                usage();
            }
            break;
        case 'v':
            verbose = atoi(optarg);
            if (verbose < 0 || verbose > 2)
                mjpeg_error_exit1("-v 0|1|2");
            break;
        case 'h':
        default:
            usage();
            break;
        }
    }

    if (isatty(fdout))
        mjpeg_error_exit1("stdout must not be a terminal");

    mjpeg_default_handler_verbosity(verbose);

    err = y4m_read_stream_header(fdin, &istream);
    if (err != Y4M_OK)
        mjpeg_error_exit1("Couldn't read input stream header");

    switch (y4m_si_get_interlace(&istream)) {
    case Y4M_ILACE_NONE:
        interlaced = 0;
        break;
    case Y4M_ILACE_BOTTOM_FIRST:
    case Y4M_ILACE_TOP_FIRST:
        interlaced = 1;
        break;
    default:
        mjpeg_error_exit1("Unsupported/unknown interlacing");
    }

    if (y4m_si_get_plane_count(&istream) != 3)
        mjpeg_error_exit1("Only 3 plane formats supported");

    yheight = y4m_si_get_plane_height(&istream, 0);
    uvheight = y4m_si_get_plane_height(&istream, 1);
    ywidth = y4m_si_get_plane_width(&istream, 0);
    uvwidth = y4m_si_get_plane_width(&istream, 1);
    ylen = y4m_si_get_plane_length(&istream, 0);
    uvlen = y4m_si_get_plane_length(&istream, 1);

    /* Input and output frame buffers */
    i_yuv[0] = (u_char *) malloc(ylen);
    i_yuv[1] = (u_char *) malloc(uvlen);
    i_yuv[2] = (u_char *) malloc(uvlen);
    o_yuv[0] = (u_char *) malloc(ylen);
    o_yuv[1] = (u_char *) malloc(uvlen);
    o_yuv[2] = (u_char *) malloc(uvlen);

    /*
     * general purpose row/column scratch buffers. Slightly over allocated to
     * simplify life.
     */
    cur_col = (u_char *) malloc(MAX(ywidth, yheight));
    dest_col = (u_char *) malloc(MAX(ywidth, yheight));
    cur_row = (u_char *) malloc(MAX(ywidth, yheight));
    dest_row = (u_char *) malloc(MAX(ywidth, yheight));

    /*
     * Generate the convolution matrices. The generation routine allocates the
     * memory and returns the length.
     */
    cmatrix_y_len = gen_convolve_matrix(y_radius, &cmatrix_y);
    cmatrix_uv_len = gen_convolve_matrix(uv_radius, &cmatrix_uv);
    ctable_y = gen_lookup_table(cmatrix_y, cmatrix_y_len);
    ctable_uv = gen_lookup_table(cmatrix_uv, cmatrix_uv_len);

    y4m_init_stream_info(&ostream);
    y4m_copy_stream_info(&ostream, &istream);
    y4m_write_stream_header(fileno(stdout), &ostream);

    mjpeg_info("Luma radius: %f", y_radius);
    mjpeg_info("Luma amount: %f", y_amount);
    mjpeg_info("Luma threshold: %d", y_threshold);
    if (uv_radius != -1.0) {
        mjpeg_info("Chroma radius: %f", uv_radius);
        mjpeg_info("Chroma amount: %f", uv_amount);
        mjpeg_info("Chroma threshold: %d", uv_threshold);
    }

    for (frameno = 0; y4m_read_frame(fdin, &istream, &iframe, i_yuv) == Y4M_OK; frameno++) {
        y4munsharp();
        err = y4m_write_frame(fdout, &ostream, &iframe, o_yuv);
        if (err != Y4M_OK) {
            mjpeg_error("y4m_write_frame err at frame %d", frameno);
            break;
        }
    }

    y4m_fini_frame_info(&iframe);
    y4m_fini_stream_info(&istream);
    y4m_fini_stream_info(&ostream);
    exit(0);
}
static int generate_YUV4MPEG(parameters_t *param)
{
    uint32_t frame;
    //size_t pngsize;
    char pngname[FILENAME_MAX];
    uint8_t *yuv[3];   /* buffer for Y/U/V planes of decoded PNG */
    y4m_stream_info_t streaminfo;
    y4m_frame_info_t frameinfo;

    if ((param->width % 2) == 0)
        param->new_width = param->width;
    else {
        param->new_width = ((param->width >> 1) + 1) << 1;
        /* use mjpeg_info (stderr) here; printf would corrupt the YUV4MPEG stream on stdout */
        mjpeg_info("Setting new, even image width %d", param->new_width);
    }

    mjpeg_info("Now generating YUV4MPEG stream.");
    y4m_init_stream_info(&streaminfo);
    y4m_init_frame_info(&frameinfo);

    y4m_si_set_width(&streaminfo, param->new_width);
    y4m_si_set_height(&streaminfo, param->height);
    y4m_si_set_interlace(&streaminfo, param->interlace);
    y4m_si_set_framerate(&streaminfo, param->framerate);
    y4m_si_set_chroma(&streaminfo, param->ss_mode);

    yuv[0] = (uint8_t *) malloc(param->new_width * param->height * sizeof(yuv[0][0]));
    yuv[1] = (uint8_t *) malloc(param->new_width * param->height * sizeof(yuv[1][0]));
    yuv[2] = (uint8_t *) malloc(param->new_width * param->height * sizeof(yuv[2][0]));

    y4m_write_stream_header(STDOUT_FILENO, &streaminfo);

    for (frame = param->begin;
         (frame < param->numframes + param->begin) || (param->numframes == -1);
         frame++) {
        // if (frame < 25)
        // else
        //snprintf(pngname, sizeof(pngname), param->pngformatstr, frame - 25);
        snprintf(pngname, sizeof(pngname), param->pngformatstr, frame);

        raw0 = yuv[0];
        raw1 = yuv[1];
        raw2 = yuv[2];

        if (decode_png(pngname, 1, param) == -1) {
            mjpeg_info("Read from '%s' failed: %s", pngname, strerror(errno));
            if (param->numframes == -1) {
                mjpeg_info("No more frames. Stopping.");
                break;   /* we are done; leave 'while' loop */
            } else {
                mjpeg_info("Rewriting latest frame instead.");
            }
        } else {
#if 0
            mjpeg_debug("Preparing frame");

            /* Now open this PNG file, and examine its header to retrieve the
               YUV4MPEG info that shall be written */
            if ((param->interlace == Y4M_ILACE_NONE) || (param->interleave == 1)) {
                mjpeg_info("Processing non-interlaced/interleaved %s.", pngname, pngsize);
                decode_png(imagedata, 0, 420, yuv[0], yuv[1], yuv[2],
                           param->width, param->height, param->new_width);
#if 0
                if (param->make_z_alpha) {
                    mjpeg_info("Writing Z/Alpha data.\n");
                    za_write(real_z_imagemap, param->width, param->height, z_alpha_fp, frame);
                }
#endif
            } else {
                mjpeg_error_exit1("Can't handle interlaced PNG information (yet) since there is no standard for it.\n"
                                  "Use interleaved mode (-L option) to create interlaced material.");

                switch (param->interlace) {
                case Y4M_ILACE_TOP_FIRST:
                    mjpeg_info("Processing interlaced, top-first %s", pngname);
#if 0
                    decode_jpeg_raw(jpegdata, jpegsize, Y4M_ILACE_TOP_FIRST, 420,
                                    param->width, param->height, yuv[0], yuv[1], yuv[2]);
#endif
                    break;
                case Y4M_ILACE_BOTTOM_FIRST:
                    mjpeg_info("Processing interlaced, bottom-first %s", pngname);
#if 0
                    decode_jpeg_raw(jpegdata, jpegsize, Y4M_ILACE_BOTTOM_FIRST, 420,
                                    param->width, param->height, yuv[0], yuv[1], yuv[2]);
#endif
                    break;
                default:
                    mjpeg_error_exit1("FATAL logic error?!?");
                    break;
                }
            }
#endif
            mjpeg_debug("Converting frame to YUV format.");
            /* Transform colorspace, then subsample (in place) */
            convert_RGB_to_YCbCr(yuv, param->height * param->new_width);
            chroma_subsample(param->ss_mode, yuv, param->new_width, param->height);
        }

        mjpeg_debug("Frame decoded, now writing to output stream.");
        y4m_write_frame(STDOUT_FILENO, &streaminfo, &frameinfo, yuv);
    }

#if 0
    if (param->make_z_alpha) {
        za_write_end(z_alpha_fp);
        fclose(z_alpha_fp);
    }
#endif

    y4m_fini_stream_info(&streaminfo);
    y4m_fini_frame_info(&frameinfo);
    free(yuv[0]);
    free(yuv[1]);
    free(yuv[2]);

    return 0;
}
int main(int argc, char *argv[])
{
    int i;
    long long avg, total;
    int input_fd = 0;
    int output_fd = 1;
    int horz;
    int vert;
    int c;
    int frame_count;

    y4m_stream_info_t istream, ostream;
    y4m_frame_info_t iframe;

    y4m_accept_extensions(1);

    while ((c = getopt(argc, argv, "r:R:t:T:v:S:hI:w:fc:")) != EOF) {
        switch (c) {
        case 'r':
            radius_luma = atoi(optarg);
            break;
        case 'R':
            radius_chroma = atoi(optarg);
            break;
        case 't':
            threshold_luma = atoi(optarg);
            break;
        case 'T':
            threshold_chroma = atoi(optarg);
            break;
        case 'I':
            interlace = atoi(optarg);
            if (interlace != 0 && interlace != 1) {
                Usage(argv[0]);
                exit(1);
            }
            break;
        case 'S':
            param_skip = atoi(optarg);
            break;
        case 'f':
            param_fast = 1;
            break;
        case 'w':
            if (strcmp(optarg, "8") == 0)
                param_weight_type = 1;
            else if (strcmp(optarg, "2.667") == 0)
                param_weight_type = 2;
            else if (strcmp(optarg, "13.333") == 0)
                param_weight_type = 3;
            else if (strcmp(optarg, "24") == 0)
                param_weight_type = 4;
            else
                param_weight_type = 0;
            param_weight = atof(optarg);
            break;
        case 'c':
            cutoff = atof(optarg);
            break;
        case 'v':
            verbose = atoi(optarg);
            if (verbose < 0 || verbose > 2) {
                Usage(argv[0]);
                exit(1);
            }
            break;
        case 'h':
            Usage(argv[0]);
        default:
            exit(0);
        }
    }

    if (param_weight < 0) {
        if (param_fast)
            param_weight = 8.0;
        else
            param_weight = 1.0;
    }

    for (i = 1; i < NUMAVG; i++) {
        avg_replace[i] = 0;
        divisor[i] = ((1 << DIVISORBITS) + (i >> 1)) / i;
        divoffset[i] = divisor[i] * (i >> 1) + (divisor[i] >> 1);
    }

#ifdef HAVE_ASM_MMX
    if (cpu_accel() & ACCEL_X86_MMXEXT)
        domean8 = 1;
#endif

    mjpeg_info("fast %d, weight type %d\n", param_fast, param_weight_type);

    if (radius_luma <= 0 || radius_chroma <= 0)
        mjpeg_error_exit1("radius values must be > 0!");
    if (threshold_luma < 0 || threshold_chroma < 0)
        mjpeg_error_exit1("threshold values must be >= 0!");

    (void) mjpeg_default_handler_verbosity(verbose);

    y4m_init_stream_info(&istream);
    y4m_init_stream_info(&ostream);
    y4m_init_frame_info(&iframe);

    i = y4m_read_stream_header(input_fd, &istream);
    if (i != Y4M_OK)
        mjpeg_error_exit1("Input stream error: %s", y4m_strerr(i));

    if (y4m_si_get_plane_count(&istream) != 3)
        mjpeg_error_exit1("Only 3 plane formats supported");

    chroma_mode = y4m_si_get_chroma(&istream);
    SS_H = y4m_chroma_ss_x_ratio(chroma_mode).d;
    SS_V = y4m_chroma_ss_y_ratio(chroma_mode).d;

    mjpeg_debug("chroma subsampling: %dH %dV\n", SS_H, SS_V);

    if (interlace == -1) {
        i = y4m_si_get_interlace(&istream);
        switch (i) {
        case Y4M_ILACE_NONE:
            interlace = 0;
            break;
        case Y4M_ILACE_BOTTOM_FIRST:
        case Y4M_ILACE_TOP_FIRST:
            interlace = 1;
            break;
        default:
            mjpeg_warn("Unknown interlacing '%d', assuming non-interlaced", i);
            interlace = 0;
            break;
        }
    }

    if (interlace && y4m_si_get_height(&istream) % 2 != 0)
        mjpeg_error_exit1("Input images have an odd number of lines - can't treat as interlaced!");

    horz = y4m_si_get_width(&istream);
    vert = y4m_si_get_height(&istream);
    mjpeg_debug("width=%d height=%d luma_r=%d chroma_r=%d luma_t=%d chroma_t=%d",
                horz, vert, radius_luma, radius_chroma, threshold_luma, threshold_chroma);

    y4m_copy_stream_info(&ostream, &istream);

    input_frame[0] = malloc(horz * vert);
    input_frame[1] = malloc((horz / SS_H) * (vert / SS_V));
    input_frame[2] = malloc((horz / SS_H) * (vert / SS_V));

    output_frame[0] = malloc(horz * vert);
    output_frame[1] = malloc((horz / SS_H) * (vert / SS_V));
    output_frame[2] = malloc((horz / SS_H) * (vert / SS_V));

    y4m_write_stream_header(output_fd, &ostream);

    frame_count = 0;
    while (y4m_read_frame(input_fd, &istream, &iframe, input_frame) == Y4M_OK) {
        frame_count++;
        if (frame_count > param_skip) {
            filter(horz, vert, input_frame, output_frame);
            y4m_write_frame(output_fd, &ostream, &iframe, output_frame);
        } else
            y4m_write_frame(output_fd, &ostream, &iframe, input_frame);
    }

    for (total = 0, avg = 0, i = 0; i < NUMAVG; i++) {
        total += avg_replace[i];
        avg += avg_replace[i] * i;
    }
    mjpeg_info("frames=%d avg=%3.1f", frame_count, ((double) avg) / ((double) total));

    for (i = 0; i < NUMAVG; i++) {
        mjpeg_debug("%02d: %6.2f", i, (((double) avg_replace[i]) * 100.0) / (double) (total));
    }

    y4m_fini_stream_info(&istream);
    y4m_fini_stream_info(&ostream);
    y4m_fini_frame_info(&iframe);
    exit(0);
}
int main(int argc, char *argv[])
{
    int verbose = 1;
    int in_fd = 0;               /* stdin */
    int out_fd = 1;              /* stdout */
    unsigned char *yuv0[3];      /* input 0 */
    unsigned char *yuv1[3];      /* input 1 */
    unsigned char *yuv[3];       /* output */
    int w, h, len, lensr2;
    int i, j, opacity, opacity_range, frame, numframes, r = 0;
    unsigned int param_opacity0 = 0;     /* opacity of input1 at the beginning */
    unsigned int param_opacity1 = 255;   /* opacity of input1 at the end */
    unsigned int param_duration = 0;     /* duration of transition effect */
    unsigned int param_skipframes = 0;   /* # of frames to skip */
    unsigned int param_numframes = 0;    /* # of frames to (process - skip+num) * framerepeat <= duration */
    unsigned int param_framerep = 1;     /* # of repetitions per frame */
    y4m_stream_info_t streaminfo;
    y4m_frame_info_t frameinfo;

    y4m_init_stream_info(&streaminfo);
    y4m_init_frame_info(&frameinfo);

    while ((i = getopt(argc, argv, "v:o:O:d:s:n:r:")) != -1) {
        switch (i) {
        case 'v':
            verbose = atoi(optarg);
            if (verbose < 0 || verbose > 2) {
                usage();
                exit(1);
            }
            break;
        case 'o':
            param_opacity0 = atoi(optarg);
            if (param_opacity0 > 255) {
                mjpeg_warn("start opacity > 255");
                param_opacity0 = 255;
            }
            break;
        case 'O':
            param_opacity1 = atoi(optarg);
            if (param_opacity1 > 255) {
                mjpeg_warn("end opacity > 255");
                param_opacity1 = 255;
            }
            break;
        case 'd':
            param_duration = atoi(optarg);
            if (param_duration == 0) {
                mjpeg_error_exit1("error: duration = 0 frames");
            }
            break;
        case 's':
            param_skipframes = atoi(optarg);
            break;
        case 'n':
            param_numframes = atoi(optarg);
            break;
        case 'r':
            param_framerep = atoi(optarg);
            break;
        }
    }

    if (param_numframes == 0)
        param_numframes = (param_duration - param_skipframes) / param_framerep;

    if (param_duration == 0) {
        usage();
        exit(1);
    }

    numframes = (param_skipframes + param_numframes) * param_framerep;
    if (numframes > param_duration) {
        mjpeg_error_exit1("skip + num > duration");
    }

    (void) mjpeg_default_handler_verbosity(verbose);

    i = y4m_read_stream_header(in_fd, &streaminfo);
    if (i != Y4M_OK) {
        fprintf(stderr, "%s: input stream error - %s\n", argv[0], y4m_strerr(i));
        exit(1);
    }
    w = y4m_si_get_width(&streaminfo);
    h = y4m_si_get_height(&streaminfo);

    len = w * h;
    lensr2 = len >> 2;
    yuv[0] = malloc(len);
    yuv0[0] = malloc(len);
    yuv1[0] = malloc(len);
    yuv[1] = malloc(lensr2);
    yuv0[1] = malloc(lensr2);
    yuv1[1] = malloc(lensr2);
    yuv[2] = malloc(lensr2);
    yuv0[2] = malloc(lensr2);
    yuv1[2] = malloc(lensr2);

    y4m_write_stream_header(out_fd, &streaminfo);

    frame = param_skipframes;
    param_duration--;
    opacity_range = param_opacity1 - param_opacity0;

    while (1) {
        if (!r) {
            r = param_framerep;

            i = y4m_read_frame(in_fd, &streaminfo, &frameinfo, yuv0);
            if (i != Y4M_OK)
                exit(frame < numframes);

            j = y4m_read_frame(in_fd, &streaminfo, &frameinfo, yuv1);
            if (j != Y4M_OK)
                exit(frame < numframes);
        }
        r--;

        opacity = param_opacity0 + ((frame * opacity_range) / param_duration);

        blend(yuv0, yuv1, opacity, len, yuv);

        y4m_write_frame(out_fd, &streaminfo, &frameinfo, yuv);

        if (++frame == numframes)
            exit(0);
    }
}
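/* blend() is defined elsewhere in this tool and is not shown in this excerpt.
 * A minimal sketch of what it is assumed to do, given how it is called above:
 * mix two 4:2:0 frames of luma length 'len', weighting the second input by an
 * 8-bit opacity (0 = only input 0, 255 = only input 1).
 */
static void blend(unsigned char *src0[3], unsigned char *src1[3],
                  int opacity, int len, unsigned char *dst[3])
{
    int plane, i, n;

    for (plane = 0; plane < 3; plane++) {
        n = (plane == 0) ? len : len >> 2;   /* chroma planes are len/4 in 4:2:0 */
        for (i = 0; i < n; i++)
            dst[plane][i] = (src0[plane][i] * (255 - opacity) +
                             src1[plane][i] * opacity) / 255;
    }
}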
/** MAIN */
int main(int argc, char **argv)
{
    int i, frame_count;
    int horz, vert;              /* width and height of the frame */
    uint8_t *frame[3];           /* pointer to the 3 color planes of the input frame */
    struct area_s inarea;
    struct color_yuv coloryuv;
    int input_fd = 0;            /* std in */
    int output_fd = 1;           /* std out */
    int darker = 0;              /* how much darker should the image be */
    int copy_pixel = 0;          /* how many pixels to use for filling up the area */
    int average_pixel = 0;       /* how many pixels to use for the average */
    y4m_stream_info_t istream, ostream;
    y4m_frame_info_t iframe;

    inarea.width = 0;
    inarea.height = 0;
    inarea.voffset = 0;
    inarea.hoffset = 0;

    coloryuv.luma = LUMA;         /* Setting the luma to black */
    coloryuv.chroma_b = CHROMA;   /* Setting the chroma to the center value (neutral) */
    coloryuv.chroma_r = CHROMA;   /* Setting the chroma to the center value (neutral) */

    (void) mjpeg_default_handler_verbosity(verbose);

    /* processing commandline */
    process_commandline(argc, argv, &inarea, &darker, &copy_pixel, &coloryuv, &average_pixel);

    y4m_init_stream_info(&istream);
    y4m_init_stream_info(&ostream);
    y4m_init_frame_info(&iframe);

    /* First read the header of the y4m stream */
    i = y4m_read_stream_header(input_fd, &istream);

    if (i != Y4M_OK) {   /* a basic check that we really have a y4m stream */
        mjpeg_error_exit1("Input stream error: %s", y4m_strerr(i));
    } else {
        /* Here we copy the input stream info to the output stream info header */
        y4m_copy_stream_info(&ostream, &istream);

        /* Here we write the new output header to the output fd */
        y4m_write_stream_header(output_fd, &ostream);

        horz = y4m_si_get_width(&istream);    /* get the width of the frame */
        vert = y4m_si_get_height(&istream);   /* get the height of the frame */

        if ((inarea.width + inarea.hoffset) > horz)
            mjpeg_error_exit1("Input width and offset larger than frame width, exit");
        if ((inarea.height + inarea.voffset) > vert)
            mjpeg_error_exit1("Input height and offset larger than frame height, exit");

        /* Here we allocate the memory for one frame */
        frame[0] = malloc(horz * vert);
        frame[1] = malloc((horz / 2) * (vert / 2));
        frame[2] = malloc((horz / 2) * (vert / 2));

        /* Here we set the initial number of frames.
           We do not strictly need it; it just shows that something is happening. */
        frame_count = 0;

        /* This is the main loop. Here filters, effects, scaling and so on can
           be applied to the video frames. It is up to you. */
        /* We read a single frame with its header and check that it caused no
           problems and that we have not already processed the last frame */
        while (y4m_read_frame(input_fd, &istream, &iframe, frame) == Y4M_OK) {
            frame_count++;

            /* You can do something useful here */
            if (darker != 0)
                set_darker(inarea, horz, vert, frame, darker);
            else if (copy_pixel != 0)
                copy_area(inarea, horz, vert, frame, copy_pixel);
            else if (average_pixel != 0)
                average_area(inarea, horz, vert, frame, average_pixel);
            else
                set_inactive(inarea, horz, vert, frame, &coloryuv);

            /* Now we put out the read frame */
            y4m_write_frame(output_fd, &ostream, &iframe, frame);
        }

        /* Cleaning up the data structures */
        y4m_fini_stream_info(&istream);
        y4m_fini_stream_info(&ostream);
        y4m_fini_frame_info(&iframe);
    }

    /* giving back the memory to the system */
    free(frame[0]);
    frame[0] = 0;
    free(frame[1]);
    frame[1] = 0;
    free(frame[2]);
    frame[2] = 0;

    exit(0);   /* exiting */
}