/*
 * yw_stop: stop this module instance, closing the video descriptor and
 * the WAV audio writer if open.
 *
 * Returns TC_OK on success, TC_ERROR if closing either output failed.
 *
 * Fix: the y4m info structures are now finalized and fd_vid invalidated
 * even when close() fails — POSIX leaves the descriptor state
 * unspecified after a failed close(), so a later retry must not close
 * it again.  errno is saved before the fini calls so the logged message
 * reflects the close() failure.
 */
static int yw_stop(TCModuleInstance *self)
{
    YWPrivateData *pd = NULL;
    int verr, aerr;

    TC_MODULE_SELF_CHECK(self, "stop");

    pd = self->userdata;

    if (pd->fd_vid != -1) {
        int saved_errno;

        verr = close(pd->fd_vid);
        saved_errno = errno;
        /* release y4m state and mark the handle invalid regardless of
         * the close() outcome (see note above) */
        y4m_fini_frame_info(&pd->frameinfo);
        y4m_fini_stream_info(&(pd->streaminfo));
        pd->fd_vid = -1;
        if (verr) {
            tc_log_error(MOD_NAME, "closing video file: %s",
                         strerror(saved_errno));
            return TC_ERROR;
        }
    }

    if (pd->wav != NULL) {
        aerr = wav_close(pd->wav);
        if (aerr != 0) {
            tc_log_error(MOD_NAME, "closing audio file: %s",
                         wav_strerror(wav_last_error(pd->wav)));
            return TC_ERROR;
        }
        pd->wav = NULL;
    }

    return TC_OK;
}
/*
 * Re-parse a stream header line and check that it matches the stream
 * info 'si' captured earlier.  Returns Y4M_OK when the header parses
 * and is identical, Y4M_ERR_HEADER when it parses but differs, or the
 * underlying read/parse error otherwise.
 */
static int y4m_reread_stream_header_line_cb(y4m_cb_reader_t *fd,
                                            const y4m_stream_info_t *si,
                                            char *line, int n)
{
    y4m_stream_info_t reread;
    int rc = y4m_read_stream_header_line_cb(fd, &reread, line, n);

    if (rc == Y4M_OK && y4m_compare_stream_info(si, &reread))
        rc = Y4M_ERR_HEADER;
    y4m_fini_stream_info(&reread);
    return rc;
}
/* Close the video input stream.
 * Errors are not fatal and are silently ignored; always returns
 * TC_IMPORT_OK. */
static int yw_close_video(YWPrivateData *pd)
{
    if (pd->fd_vid == -1)
        return (TC_IMPORT_OK);

    y4m_fini_frame_info(&pd->frameinfo);
    y4m_fini_stream_info(&pd->streaminfo);
    close(pd->fd_vid);
    pd->fd_vid = -1;
    return (TC_IMPORT_OK);
}
/* Tear down the y4m demuxer private state.  No-op when priv was never
 * allocated. */
static void demux_close_y4m(demuxer_t *demuxer)
{
    y4m_priv_t *priv = demuxer->priv;

    if (!priv)
        return;
    /* newer-style streams carry a y4m_stream_info_t that must be
     * finalized before its storage is released */
    if (!priv->is_older)
        y4m_fini_stream_info(priv->si);
    free(priv->si);
    free(priv);
}
/* Stop reading a yuv4mpeg source: release library state, reset aspect
 * defaults, close the descriptor, remove the on-disk stream file, free
 * the name strings, and deregister the device from its card list. */
void lives_yuv_stream_stop_read(lives_yuv4m_t *yuv4mpeg) {
  y4m_fini_stream_info(&(yuv4mpeg->streaminfo));
  y4m_fini_frame_info(&(yuv4mpeg->frameinfo));

  yuv4mpeg->sar = y4m_sar_UNKNOWN;
  yuv4mpeg->dar = y4m_dar_4_3;

  if (yuv4mpeg->fd != -1) {
    close(yuv4mpeg->fd);
  }

  if (yuv4mpeg->filename != NULL) {
    // the stream was fed through an on-disk file/fifo; remove it
    unlink(yuv4mpeg->filename);
    lives_free(yuv4mpeg->filename);
  }
  if (yuv4mpeg->name != NULL) {
    lives_free(yuv4mpeg->name);
  }

  // deregister the card from whichever device list it belongs to
  if (yuv4mpeg->type == YUV4_TYPE_FW) {
    fw_cards = lives_list_remove(fw_cards, LIVES_INT_TO_POINTER(yuv4mpeg->cardno));
  } else if (yuv4mpeg->type == YUV4_TYPE_TV) {
    mainw->videodevs = lives_list_remove(mainw->videodevs, LIVES_INT_TO_POINTER(yuv4mpeg->cardno));
  }
}
/*
 * tc_y4m_stop: stop this module instance, closing the video output.
 *
 * Returns TC_OK on success, TC_ERROR if close() failed.
 *
 * Fix: the y4m info structures are finalized and fd_vid invalidated
 * even when close() fails — POSIX leaves the descriptor state
 * unspecified after a failed close(), so a retry must not close it
 * again.  errno is saved before the fini calls so the logged message
 * reflects the close() failure.
 */
static int tc_y4m_stop(TCModuleInstance *self)
{
    Y4MPrivateData *pd = NULL;

    TC_MODULE_SELF_CHECK(self, "stop");

    pd = self->userdata;

    if (pd->fd_vid != -1) {
        int err = close(pd->fd_vid);
        int saved_errno = errno;

        /* release y4m state and mark the handle invalid regardless of
         * the close() outcome (see note above) */
        y4m_fini_frame_info(&pd->frameinfo);
        y4m_fini_stream_info(&(pd->streaminfo));
        pd->fd_vid = -1;
        if (err) {
            tc_log_error(MOD_NAME, "closing video file: %s",
                         strerror(saved_errno));
            return TC_ERROR;
        }
    }
    return TC_OK;
}
int main(int argc, char **argv) { int fdin, fdout, err, c, i, verbose = 1; y4m_stream_info_t istream, ostream; y4m_frame_info_t iframe; fdin = fileno(stdin); fdout = fileno(stdout); y4m_accept_extensions(1); y4m_init_stream_info(&istream); y4m_init_frame_info(&iframe); while ((c = getopt(argc, argv, "L:C:hv:N")) != EOF) { switch (c) { case 'N': lowuv = lowy = 0; lowuv = highy = 255; break; case 'L': i = sscanf(optarg, "%lf,%lf,%d", &y_radius, &y_amount, &y_threshold); if (i != 3) { mjpeg_error("-L r,a,t"); usage(argv[0]); } break; case 'C': i = sscanf(optarg, "%lf,%lf,%d", &uv_radius, &uv_amount, &uv_threshold); if (i != 3) { mjpeg_error("-C r,a,t"); usage(argv[0]); } break; case 'v': verbose = atoi(optarg); if (verbose < 0 || verbose > 2) mjpeg_error_exit1("-v 0|1|2"); break; case 'h': default: usage(argv[0]); break; } } if (isatty(fdout)) mjpeg_error_exit1("stdout must not be a terminal"); mjpeg_default_handler_verbosity(verbose); err = y4m_read_stream_header(fdin, &istream); if (err != Y4M_OK) mjpeg_error_exit1("Couldn't read input stream header"); switch (y4m_si_get_interlace(&istream)) { case Y4M_ILACE_NONE: interlaced = 0; break; case Y4M_ILACE_BOTTOM_FIRST: case Y4M_ILACE_TOP_FIRST: interlaced = 1; break; default: mjpeg_error_exit1("Unsupported/unknown interlacing"); } if (y4m_si_get_plane_count(&istream) != 3) mjpeg_error_exit1("Only 3 plane formats supported"); yheight = y4m_si_get_plane_height(&istream, 0); uvheight = y4m_si_get_plane_height(&istream, 1); ywidth = y4m_si_get_plane_width(&istream, 0); uvwidth = y4m_si_get_plane_width(&istream, 1); ylen = y4m_si_get_plane_length(&istream, 0); uvlen = y4m_si_get_plane_length(&istream, 1); /* Input and output frame buffers */ i_yuv[0] = (u_char *)malloc(ylen); i_yuv[1] = (u_char *)malloc(uvlen); i_yuv[2] = (u_char *)malloc(uvlen); o_yuv[0] = (u_char *)malloc(ylen); o_yuv[1] = (u_char *)malloc(uvlen); o_yuv[2] = (u_char *)malloc(uvlen); /* * general purpose row/column scratch buffers. 
Slightly over allocated to * simplify life. */ cur_col = (u_char *)malloc(MAX(ywidth, yheight)); dest_col = (u_char *)malloc(MAX(ywidth, yheight)); cur_row = (u_char *)malloc(MAX(ywidth, yheight)); dest_row = (u_char *)malloc(MAX(ywidth, yheight)); /* * Generate the convolution matrices. The generation routine allocates the * memory and returns the length. */ cmatrix_y_len = gen_convolve_matrix(y_radius, &cmatrix_y); cmatrix_uv_len = gen_convolve_matrix(uv_radius, &cmatrix_uv); ctable_y = gen_lookup_table(cmatrix_y, cmatrix_y_len); ctable_uv = gen_lookup_table(cmatrix_uv, cmatrix_uv_len); y4m_init_stream_info(&ostream); y4m_copy_stream_info(&ostream, &istream); y4m_write_stream_header(fileno(stdout), &ostream); mjpeg_info("Luma radius: %f", y_radius); mjpeg_info("Luma amount: %f", y_amount); mjpeg_info("Luma threshold: %d", y_threshold); if (uv_radius != -1.0) { mjpeg_info("Chroma radius: %f", uv_radius); mjpeg_info("Chroma amount: %f", uv_amount); mjpeg_info("Chroma threshold: %d", uv_threshold); } for (frameno = 0; y4m_read_frame(fdin, &istream, &iframe, i_yuv) == Y4M_OK; frameno++) { y4munsharp(); err = y4m_write_frame(fdout, &ostream, &iframe, o_yuv); if (err != Y4M_OK) { mjpeg_error("y4m_write_frame err at frame %d", frameno); break; } } y4m_fini_frame_info(&iframe); y4m_fini_stream_info(&istream); y4m_fini_stream_info(&ostream); exit(0); }
/* Stop writing the yuv4mpeg output stream: release the frame and
 * stream info structures and close the (global) output descriptor. */
void lives_yuv_stream_stop_write(lives_yuv4m_t *yuv4mpeg) {
  y4m_fini_frame_info(&(yuv4mpeg->frameinfo));
  y4m_fini_stream_info(&(yuv4mpeg->streaminfo));
  close(yuvout);
}
/*
 * Frame-averaging noise filter main program: reads a YUV4MPEG stream on
 * stdin, runs filter() (defined elsewhere in this file) over each frame
 * and writes the result to stdout.  The first -S frames are passed
 * through unfiltered.  Per-frame replacement statistics are accumulated
 * in the global avg_replace[] table and reported at the end.
 */
int main(int argc, char *argv[])
{
	int i;
	long long avg, total;   /* statistics accumulators */
	int input_fd = 0;       /* stdin */
	int output_fd = 1;      /* stdout */
	int horz;
	int vert;
	int c;
	int frame_count;

	y4m_stream_info_t istream, ostream;
	y4m_frame_info_t iframe;

	y4m_accept_extensions(1);

	/* option parsing; most options set file-scope globals */
	while ((c = getopt(argc, argv, "r:R:t:T:v:S:hI:w:fc:")) != EOF) {
		switch (c) {
		case 'r':
			radius_luma = atoi(optarg);
			break;
		case 'R':
			radius_chroma = atoi(optarg);
			break;
		case 't':
			threshold_luma = atoi(optarg);
			break;
		case 'T':
			threshold_chroma = atoi(optarg);
			break;
		case 'I':
			interlace = atoi (optarg);
			if (interlace != 0 && interlace != 1) {
				Usage (argv[0]);
				exit (1);
			}
			break;
		case 'S':
			param_skip = atoi (optarg);
			break;
		case 'f':
			param_fast = 1;
			break;
		case 'w':
			/* the magic strings select a precomputed weight type;
			 * any other value falls back to type 0 with the raw
			 * float weight */
			if (strcmp (optarg, "8") == 0)
				param_weight_type = 1;
			else if (strcmp (optarg, "2.667") == 0)
				param_weight_type = 2;
			else if (strcmp (optarg, "13.333") == 0)
				param_weight_type = 3;
			else if (strcmp (optarg, "24") == 0)
				param_weight_type = 4;
			else
				param_weight_type = 0;
			param_weight = atof (optarg);
			break;
		case 'c':
			cutoff = atof(optarg);
			break;
		case 'v':
			verbose = atoi (optarg);
			if (verbose < 0 || verbose >2) {
				Usage (argv[0]);
				exit (1);
			}
			break;
		case 'h':
			Usage (argv[0]);
			/* fallthrough: Usage() apparently does not exit here */
		default:
			exit(0);
		}
	}

	/* default weight depends on fast mode when not given via -w */
	if( param_weight < 0 ) {
		if( param_fast )
			param_weight = 8.0;
		else
			param_weight = 1.0;
	}

	/* precompute fixed-point reciprocal tables used when averaging
	 * i samples (avoids per-pixel division) */
	for( i=1; i<NUMAVG; i++ ) {
		avg_replace[i]=0;
		divisor[i]=((1<<DIVISORBITS)+(i>>1))/i;
		divoffset[i]=divisor[i]*(i>>1)+(divisor[i]>>1);
	}

#ifdef HAVE_ASM_MMX
	if( cpu_accel() & ACCEL_X86_MMXEXT )
		domean8=1;
#endif

	mjpeg_info ("fast %d, weight type %d\n", param_fast, param_weight_type);

	if (radius_luma <= 0 || radius_chroma <= 0)
		mjpeg_error_exit1("radius values must be > 0!");

	if (threshold_luma < 0 || threshold_chroma < 0)
		mjpeg_error_exit1("threshold values must be >= 0!");

	(void)mjpeg_default_handler_verbosity(verbose);

	y4m_init_stream_info(&istream);
	y4m_init_stream_info(&ostream);
	y4m_init_frame_info(&iframe);

	i = y4m_read_stream_header(input_fd, &istream);
	if (i != Y4M_OK)
		mjpeg_error_exit1("Input stream error: %s", y4m_strerr(i));

	if (y4m_si_get_plane_count(&istream) != 3)
		mjpeg_error_exit1("Only 3 plane formats supported");

	/* chroma subsampling denominators (e.g. 2,2 for 4:2:0) */
	chroma_mode = y4m_si_get_chroma(&istream);
	SS_H = y4m_chroma_ss_x_ratio(chroma_mode).d;
	SS_V = y4m_chroma_ss_y_ratio(chroma_mode).d;

	mjpeg_debug("chroma subsampling: %dH %dV\n",SS_H,SS_V);

	/* -I not given: derive interlacing from the stream header */
	if (interlace == -1) {
		i = y4m_si_get_interlace(&istream);
		switch (i) {
		case Y4M_ILACE_NONE:
			interlace = 0;
			break;
		case Y4M_ILACE_BOTTOM_FIRST:
		case Y4M_ILACE_TOP_FIRST:
			interlace = 1;
			break;
		default:
			mjpeg_warn("Unknown interlacing '%d', assuming non-interlaced", i);
			interlace = 0;
			break;
		}
	}

	if( interlace && y4m_si_get_height(&istream) % 2 != 0 )
		mjpeg_error_exit1("Input images have odd number of lines - can't treats as interlaced!" );

	horz = y4m_si_get_width(&istream);
	vert = y4m_si_get_height(&istream);
	mjpeg_debug("width=%d height=%d luma_r=%d chroma_r=%d luma_t=%d chroma_t=%d",
		horz, vert, radius_luma, radius_chroma, threshold_luma, threshold_chroma);

	y4m_copy_stream_info(&ostream, &istream);

	/* allocate one full set of input and output planes
	 * NOTE(review): malloc results are not checked here */
	input_frame[0] = malloc(horz * vert);
	input_frame[1] = malloc((horz / SS_H) * (vert / SS_V));
	input_frame[2] = malloc((horz / SS_H) * (vert / SS_V));

	output_frame[0] = malloc(horz * vert);
	output_frame[1] = malloc((horz / SS_H) * (vert / SS_V));
	output_frame[2] = malloc((horz / SS_H) * (vert / SS_V));

	y4m_write_stream_header(output_fd, &ostream);

	frame_count = 0;
	while (y4m_read_frame(input_fd, &istream, &iframe, input_frame) == Y4M_OK) {
		frame_count++;
		if (frame_count > param_skip) {
			filter(horz, vert, input_frame, output_frame);
			y4m_write_frame(output_fd, &ostream, &iframe, output_frame);
		} else
			/* skipped frames are copied through untouched */
			y4m_write_frame(output_fd, &ostream, &iframe, input_frame);
	}

	/* summarize the replacement histogram
	 * NOTE(review): if no pixels were ever counted, total stays 0 and
	 * avg/total below divides by zero (prints nan) — confirm intent */
	for (total=0, avg=0, i=0; i < NUMAVG; i++) {
		total += avg_replace[i];
		avg   += avg_replace[i] * i;
	}
	mjpeg_info("frames=%d avg=%3.1f", frame_count,
		((double)avg)/((double)total));

	for (i=0; i < NUMAVG; i++) {
		mjpeg_debug( "%02d: %6.2f", i,
			(((double)avg_replace[i]) * 100.0)/(double)(total));
	}

	y4m_fini_stream_info(&istream);
	y4m_fini_stream_info(&ostream);
	y4m_fini_frame_info(&iframe);
	exit(0);
}
/*
 * SDL-based YUV4MPEG player: reads a 4:2:0 stream from stdin and shows
 * it in an SDL YUV overlay, optionally pacing playback to the stream
 * (or user-supplied) frame rate.
 *
 * Options:
 *   -c        free-run (don't wait for frame sync)
 *   -s WxH    force the display size
 *   -t title  window title
 *   -f fps    override the frame rate (0 < fps <= 200)
 *   -v n      verbosity 0..2
 */
int main(int argc, char *argv[])
{
	int verbosity = 1;
	double time_between_frames = 0.0;  /* microseconds per frame */
	double frame_rate = 0.0;
	struct timeval time_now;
	int n, frame;
	unsigned char *yuv[3];
	int in_fd = 0;                     /* stdin */
	int screenwidth=0, screenheight=0;
	y4m_stream_info_t streaminfo;
	y4m_frame_info_t frameinfo;
	int frame_width;
	int frame_height;
	int wait_for_sync = 1;
	char *window_title = NULL;

	while ((n = getopt(argc, argv, "hs:t:f:cv:")) != EOF) {
		switch (n) {
		case 'c':
			wait_for_sync = 0;
			break;
		case 's':
			if (sscanf(optarg, "%dx%d", &screenwidth, &screenheight) != 2) {
				mjpeg_error_exit1( "-s option needs two arguments: -s 10x10");
				exit(1); /* presumably unreachable: mjpeg_error_exit1 terminates */
			}
			break;
		case 't':
			window_title = optarg;
			break;
		case 'f':
			frame_rate = atof(optarg);
			if( frame_rate <= 0.0 || frame_rate > 200.0 )
				mjpeg_error_exit1( "-f option needs argument > 0.0 and < 200.0");
			break;
		case 'v':
			verbosity = atoi(optarg);
			if ((verbosity < 0) || (verbosity > 2))
				mjpeg_error_exit1("-v needs argument from {0, 1, 2} (not %d)", verbosity);
			break;
		case 'h':
		case '?':
			usage();
			exit(1);
			break;
		default:
			usage();
			exit(1);
		}
	}

	mjpeg_default_handler_verbosity(verbosity);

	y4m_accept_extensions(1);
	y4m_init_stream_info(&streaminfo);
	y4m_init_frame_info(&frameinfo);

	if ((n = y4m_read_stream_header(in_fd, &streaminfo)) != Y4M_OK) {
		mjpeg_error("Couldn't read YUV4MPEG2 header: %s!", y4m_strerr(n));
		exit (1);
	}

	/* only the 4:2:0 chroma variants are supported */
	switch (y4m_si_get_chroma(&streaminfo)) {
	case Y4M_CHROMA_420JPEG:
	case Y4M_CHROMA_420MPEG2:
	case Y4M_CHROMA_420PALDV:
		break;
	default:
		mjpeg_error_exit1("Cannot handle non-4:2:0 streams yet!");
	}

	frame_width = y4m_si_get_width(&streaminfo);
	frame_height = y4m_si_get_height(&streaminfo);

	if ((screenwidth <= 0) || (screenheight <= 0)) {
		/* no user supplied screen size, so let's use the stream info */
		y4m_ratio_t aspect = y4m_si_get_sampleaspect(&streaminfo);

		if (!(Y4M_RATIO_EQL(aspect, y4m_sar_UNKNOWN))) {
			/* if pixel aspect ratio present, use it */
#if 1
			/* scale width, but maintain height (line count) */
			screenheight = frame_height;
			screenwidth = frame_width * aspect.n / aspect.d;
#else
			if ((frame_width * aspect.d) < (frame_height * aspect.n)) {
				screenwidth = frame_width;
				screenheight = frame_width * aspect.d / aspect.n;
			} else {
				screenheight = frame_height;
				screenwidth = frame_height * aspect.n / aspect.d;
			}
#endif
		} else {
			/* unknown aspect ratio -- assume square pixels */
			screenwidth = frame_width;
			screenheight = frame_height;
		}
	}

	/* Initialize the SDL library */
	if( SDL_Init(SDL_INIT_VIDEO) < 0 ) {
		mjpeg_error("Couldn't initialize SDL: %s", SDL_GetError());
		exit(1);
	}

	/* set window title */
	SDL_WM_SetCaption(window_title, NULL);

	/* yuv params: full-size Y plane, quarter-size U and V (4:2:0)
	 * NOTE(review): malloc results are not checked */
	yuv[0] = malloc(frame_width * frame_height * sizeof(unsigned char));
	yuv[1] = malloc(frame_width * frame_height / 4 * sizeof(unsigned char));
	yuv[2] = malloc(frame_width * frame_height / 4 * sizeof(unsigned char));

	screen = SDL_SetVideoMode(screenwidth, screenheight, 0, SDL_SWSURFACE);
	if ( screen == NULL ) {
		mjpeg_error("SDL: Couldn't set %dx%d: %s",
			screenwidth, screenheight, SDL_GetError());
		exit(1);
	} else {
		mjpeg_debug("SDL: Set %dx%d @ %d bpp",
			screenwidth, screenheight, screen->format->BitsPerPixel);
	}

	/* since IYUV ordering is not supported by Xv accel on maddog's system
	 * (Matrox G400 --- although, the alias I420 is, but this is not
	 * recognized by SDL), we use YV12 instead, which is identical,
	 * except for ordering of Cb and Cr planes...
	 * we swap those when we copy the data to the display buffer...
	 */
	yuv_overlay = SDL_CreateYUVOverlay(frame_width, frame_height,
					   SDL_YV12_OVERLAY, screen);
	if ( yuv_overlay == NULL ) {
		mjpeg_error("SDL: Couldn't create SDL_yuv_overlay: %s", SDL_GetError());
		exit(1);
	}
	if ( yuv_overlay->hw_overlay )
		mjpeg_debug("SDL: Using hardware overlay.");

	rect.x = 0;
	rect.y = 0;
	rect.w = screenwidth;
	rect.h = screenheight;

	SDL_DisplayYUVOverlay(yuv_overlay, &rect);

	signal (SIGINT, sigint_handler);

	frame = 0;
	if ( frame_rate == 0.0 ) {
		/* frame rate has not been set from command-line... */
		if (Y4M_RATIO_EQL(y4m_fps_UNKNOWN, y4m_si_get_framerate(&streaminfo))) {
			mjpeg_info("Frame-rate undefined in stream... assuming 25Hz!" );
			frame_rate = 25.0;
		} else {
			frame_rate = Y4M_RATIO_DBL(y4m_si_get_framerate(&streaminfo));
		}
	}
	time_between_frames = 1.e6 / frame_rate;

	gettimeofday(&time_now,0);

	while ((n = y4m_read_frame(in_fd, &streaminfo, &frameinfo, yuv)) == Y4M_OK && (!got_sigint)) {
		/* Lock SDL_yuv_overlay */
		if ( SDL_MUSTLOCK(screen) ) {
			if ( SDL_LockSurface(screen) < 0 ) break;
		}
		if (SDL_LockYUVOverlay(yuv_overlay) < 0) break;

		/* let's draw the data (*yuv[3]) on a SDL screen (*screen);
		 * U and V are swapped because the overlay is YV12 (see note above) */
		memcpy(yuv_overlay->pixels[0], yuv[0], frame_width * frame_height);
		memcpy(yuv_overlay->pixels[1], yuv[2], frame_width * frame_height / 4);
		memcpy(yuv_overlay->pixels[2], yuv[1], frame_width * frame_height / 4);

		/* Unlock SDL_yuv_overlay */
		if ( SDL_MUSTLOCK(screen) ) {
			SDL_UnlockSurface(screen);
		}
		SDL_UnlockYUVOverlay(yuv_overlay);

		/* Show, baby, show! */
		SDL_DisplayYUVOverlay(yuv_overlay, &rect);
		mjpeg_info("Playing frame %4.4d - %s",
			frame, print_status(frame, frame_rate));

		/* busy-wait (1ms sleeps) until it is time for the next frame */
		if (wait_for_sync)
			while(get_time_diff(time_now) < time_between_frames) {
				usleep(1000);
			}
		frame++;

		gettimeofday(&time_now,0);
	}

	if ((n != Y4M_OK) && (n != Y4M_ERR_EOF))
		mjpeg_error("Couldn't read frame: %s", y4m_strerr(n));

	for (n=0; n<3; n++) {
		free(yuv[n]);
	}

	mjpeg_info("Played %4.4d frames (%s)",
		frame, print_status(frame, frame_rate));

	SDL_FreeYUVOverlay(yuv_overlay);
	SDL_Quit();

	y4m_fini_frame_info(&frameinfo);
	y4m_fini_stream_info(&streaminfo);
	return 0;
}
/*
 * Spatial low-pass filter main program: reads a YUV4MPEG stream from
 * stdin, convolves each plane with separable horizontal/vertical filter
 * taps (designed from the -L/-C/-x/-X/-y/-Y order and bandwidth
 * parameters) and writes the result to stdout.
 */
int main(int argc, char **argv)
{
    int i, c, interlace, frames, err;
    int ywidth, yheight, uvwidth, uvheight, ylen, uvlen;
    int verbose = 0, fdin;
    /* filter order (number of taps) per direction/plane */
    int NlumaX = 4, NlumaY = 4, NchromaX = 4, NchromaY = 4;
    /* normalized bandwidths, fraction of the sampling rate (0..1] */
    float BWlumaX = 0.8, BWlumaY = 0.8, BWchromaX = 0.7, BWchromaY = 0.7;
    struct filter *lumaXtaps, *lumaYtaps, *chromaXtaps, *chromaYtaps;
    u_char *yuvinout[3];
    float *yuvtmp1,*yuvtmp2;
    y4m_stream_info_t istream, ostream;
    y4m_frame_info_t iframe;

    fdin = fileno(stdin);

    y4m_accept_extensions(1);

    /* read command line */
    opterr = 0;
    while ((c = getopt(argc, argv, "hvL:C:x:X:y:Y:")) != EOF) {
        switch (c) {
        case 'L':
            sscanf(optarg,"%d,%f,%d,%f",&NlumaX,&BWlumaX,&NlumaY,&BWlumaY);
            break;
        case 'C':
            sscanf(optarg,"%d,%f,%d,%f",&NchromaX,&BWchromaX,&NchromaY,&BWchromaY);
            break;
        case 'x':
            sscanf(optarg,"%d,%f",&NchromaX,&BWchromaX);
            break;
        case 'X':
            sscanf(optarg,"%d,%f",&NlumaX,&BWlumaX);
            break;
        case 'y':
            sscanf(optarg,"%d,%f",&NchromaY,&BWchromaY);
            break;
        case 'Y':
            sscanf(optarg,"%d,%f",&NlumaY,&BWlumaY);
            break;
        case 'v':
            verbose++;
            break;
        case '?':
        case 'h':
        default:
            usage();
        }
    }

    /* validate the normalized bandwidths */
    if (BWlumaX <= 0.0 || BWlumaX > 1.0)
        mjpeg_error_exit1("Horizontal luma bandwidth '%f' not >0 and <=1.0", BWlumaX);
    if (BWlumaY <= 0.0 || BWlumaY > 1.0)
        mjpeg_error_exit1("Vertical luma bandwidth '%f' not >0 and <=1.0", BWlumaY);
    if (BWchromaX <= 0.0 || BWchromaX > 1.0)
        mjpeg_error_exit1("Horizontal chroma bandwidth '%f' not >0 and <=1.0", BWchromaX);
    if (BWchromaY <= 0.0 || BWchromaY > 1.0)
        mjpeg_error_exit1("Vertical chroma bandwidth '%f' not >0 and <=1.0", BWchromaY);

    /* initialize input stream and check chroma subsampling and interlacing */
    y4m_init_stream_info(&istream);
    y4m_init_frame_info(&iframe);
    err = y4m_read_stream_header(fdin, &istream);
    if (err != Y4M_OK)
        mjpeg_error_exit1("Input stream error: %s\n", y4m_strerr(err));

    if (y4m_si_get_plane_count(&istream) != 3)
        mjpeg_error_exit1("Only the 3 plane formats supported");

    i = y4m_si_get_interlace(&istream);
    switch (i) {
    case Y4M_ILACE_NONE:
        interlace = 0;
        break;
    case Y4M_ILACE_BOTTOM_FIRST:
    case Y4M_ILACE_TOP_FIRST:
        interlace = 1;
        break;
    default:
        mjpeg_warn("Unknown interlacing '%d', assuming non-interlaced", i);
        interlace = 0;
        break;
    }

    ywidth = y4m_si_get_width(&istream);	/* plane 0 = Y */
    yheight = y4m_si_get_height(&istream);
    ylen = ywidth * yheight;
    uvwidth = y4m_si_get_plane_width(&istream, 1);	/* planes 1&2 = U+V */
    uvheight = y4m_si_get_plane_height(&istream, 1);
    uvlen = y4m_si_get_plane_length(&istream, 1);

    /* initialize output stream */
    y4m_init_stream_info(&ostream);
    y4m_copy_stream_info(&ostream, &istream);
    y4m_write_stream_header(fileno(stdout), &ostream);

    /* allocate input and output buffers; the float scratch buffers are
     * shared between luma and chroma passes, hence MAX() */
    yuvinout[0] = my_malloc(ylen*sizeof(u_char));
    yuvinout[1] = my_malloc(uvlen*sizeof(u_char));
    yuvinout[2] = my_malloc(uvlen*sizeof(u_char));
    yuvtmp1 = my_malloc(MAX(ylen,uvlen)*sizeof(float));
    yuvtmp2 = my_malloc(MAX(ylen,uvlen)*sizeof(float));

    /* get filter taps */
    lumaXtaps = get_coeff(NlumaX, BWlumaX);
    lumaYtaps = get_coeff(NlumaY, BWlumaY);
    chromaXtaps = get_coeff(NchromaX, BWchromaX);
    chromaYtaps = get_coeff(NchromaY, BWchromaY);

    set_accel(uvwidth,uvheight);

    if (verbose)
        y4m_log_stream_info(mjpeg_loglev_t("info"), "", &istream);

    /* main processing loop: convolve each plane, then emit the frame */
    for (frames=0; y4m_read_frame(fdin,&istream,&iframe,yuvinout) == Y4M_OK; frames++) {
        if (verbose && ((frames % 100) == 0))
            mjpeg_info("Frame %d\n", frames);
        convolveFrame(yuvinout[0],ywidth,yheight,interlace,lumaXtaps,lumaYtaps,yuvtmp1,yuvtmp2);
        convolveFrame(yuvinout[1],uvwidth,uvheight,interlace,chromaXtaps,chromaYtaps,yuvtmp1,yuvtmp2);
        convolveFrame(yuvinout[2],uvwidth,uvheight,interlace,chromaXtaps,chromaYtaps,yuvtmp1,yuvtmp2);
        y4m_write_frame(fileno(stdout), &ostream, &iframe, yuvinout);
    }

    /* clean up */
    y4m_fini_frame_info(&iframe);
    y4m_fini_stream_info(&istream);
    y4m_fini_stream_info(&ostream);
    exit(0);
}
// ************************************************************************************* // MAIN // ************************************************************************************* int main (int argc, char *argv[]) { int verbose = 1 ; // LOG_ERROR ? int drop_frames = 0; int fdIn = 0 ; int fdOut = 1 ; y4m_stream_info_t in_streaminfo,out_streaminfo; int src_interlacing = Y4M_UNKNOWN; y4m_ratio_t src_frame_rate; const static char *legal_flags = "v:m:s:n"; int max_shift = 0, search = 0; int noshift=0; int c; while ((c = getopt (argc, argv, legal_flags)) != -1) { switch (c) { case 'v': verbose = atoi (optarg); if (verbose < 0 || verbose > 2) mjpeg_error_exit1 ("Verbose level must be [0..2]"); break; case 'm': max_shift = atof(optarg); break; case 's': search = atof(optarg); break; case 'n': noshift=1; break; case '?': print_usage (argv); return 0 ; break; } } // mjpeg tools global initialisations mjpeg_default_handler_verbosity (verbose); // Initialize input streams y4m_init_stream_info (&in_streaminfo); y4m_init_stream_info (&out_streaminfo); // *************************************************************** // Get video stream informations (size, framerate, interlacing, aspect ratio). // The streaminfo structure is filled in // *************************************************************** // INPUT comes from stdin, we check for a correct file header if (y4m_read_stream_header (fdIn, &in_streaminfo) != Y4M_OK) mjpeg_error_exit1 ("Could'nt read YUV4MPEG header!"); src_frame_rate = y4m_si_get_framerate( &in_streaminfo ); y4m_copy_stream_info( &out_streaminfo, &in_streaminfo ); // Information output /* in that function we do all the important work */ if (!noshift) y4m_write_stream_header(fdOut,&out_streaminfo); process( fdIn,&in_streaminfo,fdOut,&out_streaminfo,max_shift,search,noshift); y4m_fini_stream_info (&in_streaminfo); y4m_fini_stream_info (&out_streaminfo); return 0; }
// ************************************************************************************* // MAIN // ************************************************************************************* int main (int argc, char *argv[]) { int verbose = 4; // LOG_ERROR ; int fdIn = 0 ; int fdOut = 1 ; y4m_stream_info_t in_streaminfo,out_streaminfo; const static char *legal_flags = "d:m:V:"; int c, *matrix,matlen; float divisor=0; while ((c = getopt (argc, argv, legal_flags)) != -1) { switch (c) { case 'V': verbose = atoi (optarg); if (verbose < 0 || verbose > 2) mjpeg_error_exit1 ("Verbose level must be [0..2]"); break; case 'd': divisor = atof(optarg); if (divisor == 0) { mjpeg_error_exit1 ("Divisor must not be 0"); } break; case 'm': // strlen should be longer than the matrix = (int *) malloc (sizeof(int) * strlen(optarg)); matlen = parse_matrix(optarg,matrix); if (matlen == 0) { mjpeg_error_exit1 ("Invalid matrix"); } break; case '?': print_usage (argv); return 0 ; break; } } if (divisor == 0) { divisor = sum_matrix(matrix,matlen); } if (divisor == 0) { mjpeg_warn("divisor defaulting to 1\n"); divisor = 1; } // mjpeg tools global initialisations mjpeg_default_handler_verbosity (verbose); // Initialize input streams y4m_init_stream_info (&in_streaminfo); y4m_init_stream_info (&out_streaminfo); // *************************************************************** // Get video stream informations (size, framerate, interlacing, aspect ratio). 
// The streaminfo structure is filled in // *************************************************************** // INPUT comes from stdin, we check for a correct file header if (y4m_read_stream_header (fdIn, &in_streaminfo) != Y4M_OK) mjpeg_error_exit1 ("Could'nt read YUV4MPEG header!"); y4m_ratio_t src_frame_rate = y4m_si_get_framerate( &in_streaminfo ); y4m_copy_stream_info( &out_streaminfo, &in_streaminfo ); // Information output mjpeg_info ("yuvconvolve (version " YUVRFPS_VERSION ") performs a convolution matrix on yuv streams"); mjpeg_info ("yuvconvolve -? for help"); y4m_write_stream_header(fdOut,&out_streaminfo); /* in that function we do all the important work */ fprintf (stderr,"matrix square: %d\n",matlen); convolve( fdIn,&in_streaminfo,fdOut,&out_streaminfo,matrix,divisor,matlen); y4m_fini_stream_info (&in_streaminfo); y4m_fini_stream_info (&out_streaminfo); return 0; }
int main(int argc, char **argv) { int c, err, ilace; int fd_in = fileno(stdin), fd_out = fileno(stdout); y4m_ratio_t rate; y4m_stream_info_t si, so; y4m_frame_info_t fi; uint8_t *top1[3], *bot1[3], *top2[3], *bot2[3]; opterr = 0; while ((c = getopt(argc, argv, "h")) != EOF) { switch (c) { case 'h': case '?': default: usage(); } } y4m_accept_extensions(1); y4m_init_stream_info(&si); y4m_init_stream_info(&so); y4m_init_frame_info(&fi); err = y4m_read_stream_header(fd_in, &si); if (err != Y4M_OK) mjpeg_error_exit1("Input stream error: %s\n", y4m_strerr(err)); if (y4m_si_get_plane_count(&si) != 3) mjpeg_error_exit1("only 3 plane formats supported"); rate = y4m_si_get_framerate(&si); if (!Y4M_RATIO_EQL(rate, y4m_fps_NTSC)) mjpeg_error_exit1("input stream not NTSC 30000:1001"); ilace = y4m_si_get_interlace(&si); if (ilace != Y4M_ILACE_BOTTOM_FIRST && ilace != Y4M_ILACE_TOP_FIRST) mjpeg_error_exit1("input stream not interlaced"); top1[0] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,0) / 2); top1[1] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,1) / 2); top1[2] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,2) / 2); bot1[0] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,0) / 2); bot1[1] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,1) / 2); bot1[2] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,2) / 2); top2[0] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,0) / 2); top2[1] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,1) / 2); top2[2] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,2) / 2); bot2[0] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,0) / 2); bot2[1] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,1) / 2); bot2[2] = (uint8_t *) malloc(y4m_si_get_plane_length(&si,2) / 2); y4m_copy_stream_info(&so, &si); y4m_si_set_framerate(&so, y4m_fps_NTSC_FILM); y4m_si_set_interlace(&so, Y4M_ILACE_NONE); /* * At this point the input stream has been verified to be interlaced NTSC, * the output stream rate set to NTSC_FILM, interlacing tag changed to * 
progressive, and the field buffers allocated. * * Time to write the output stream header and commence processing input. */ y4m_write_stream_header(fd_out, &so); while (1) { err = y4m_read_fields(fd_in, &si, &fi, top1, bot1); if (err != Y4M_OK) goto done; y4m_write_fields(fd_out, &so, &fi, top1, bot1); /* A */ err = y4m_read_fields(fd_in, &si, &fi, top1, bot1); if (err != Y4M_OK) goto done; y4m_write_fields(fd_out, &so, &fi, top1, bot1); /* B */ err = y4m_read_fields(fd_in, &si, &fi, top1, bot1); if (err != Y4M_OK) goto done; err = y4m_read_fields(fd_in, &si, &fi, top2, bot2); if (err != Y4M_OK) { /* * End of input when reading the 2nd "mixed field" frame (C+D). The previous * frame was the first "mixed field" frame (B+C). Rather than emit a mixed * interlaced frame duplicate a field and output the previous frame. */ if (ilace == Y4M_ILACE_BOTTOM_FIRST) y4m_write_fields(fd_out, &so, &fi, bot1,bot1); else y4m_write_fields(fd_out, &so, &fi, top1,top1); goto done; } /* * Now the key part of the processing - effectively discarding the first mixed * frame with fields from frames B + C and creating the C frame from the two * mixed frames. For a BOTTOM FIELD FIRST stream use the 'top' field from * frame 3 and the 'bottom' fields from frame 4. With a TOP FIELD FIRST stream * it's the other way around - use the 'bottom' field from frame 3 and the * 'top' field from frame 4. */ if (ilace == Y4M_ILACE_BOTTOM_FIRST) y4m_write_fields(fd_out, &so, &fi, top1, bot2); /* C */ else y4m_write_fields(fd_out, &so, &fi, top2, bot1); /* C */ err = y4m_read_fields(fd_in, &si, &fi, top1, bot1); y4m_write_fields(fd_out, &so, &fi, top1, bot1); /* D */ } done: y4m_fini_frame_info(&fi); y4m_fini_stream_info(&si); y4m_fini_stream_info(&so); exit(0); }
/*
 * Solid-color YUV4MPEG generator: emit -n frames of a constant
 * Y/U/V color (default black: 16/128/128) to stdout with the requested
 * geometry, rate, aspect, chroma mode and interlacing.
 */
int main(int argc, char **argv)
{
    int sts, c, width = 640, height = 480, noheader = 0;
    int Y = 16, U = 128, V = 128, chroma_mode = Y4M_CHROMA_420MPEG2;
    int numframes = 1, force = 0;
    y4m_ratio_t rate_ratio = y4m_fps_NTSC;
    y4m_ratio_t aspect_ratio = y4m_sar_SQUARE;
    int plane_length[3];
    u_char *yuv[3];
    y4m_stream_info_t ostream;
    y4m_frame_info_t oframe;
    char interlace = Y4M_ILACE_NONE;

    opterr = 0;
    y4m_accept_extensions(1);

    while ((c = getopt(argc, argv, "Hfx:w:h:r:i:a:Y:U:V:n:")) != EOF) {
        switch (c) {
        case 'H':
            noheader = 1;   /* suppress the stream header */
            break;
        case 'a':
            sts = y4m_parse_ratio(&aspect_ratio, optarg);
            if (sts != Y4M_OK)
                mjpeg_error_exit1("Invalid aspect: %s", optarg);
            break;
        case 'w':
            width = atoi(optarg);
            break;
        case 'h':
            height = atoi(optarg);
            break;
        case 'r':
            sts = y4m_parse_ratio(&rate_ratio, optarg);
            if (sts != Y4M_OK)
                mjpeg_error_exit1("Invalid rate: %s", optarg);
            break;
        case 'Y':
            Y = atoi(optarg);
            break;
        case 'U':
            U = atoi(optarg);
            break;
        case 'V':
            V = atoi(optarg);
            break;
        case 'i':
            /* p = progressive, t = top-field-first, b = bottom-field-first */
            switch (optarg[0]) {
            case 'p':
                interlace = Y4M_ILACE_NONE;
                break;
            case 't':
                interlace = Y4M_ILACE_TOP_FIRST;
                break;
            case 'b':
                interlace = Y4M_ILACE_BOTTOM_FIRST;
                break;
            default:
                usage();
            }
            break;
        case 'x':
            chroma_mode = y4m_chroma_parse_keyword(optarg);
            if (chroma_mode == Y4M_UNKNOWN) {
                if (strcmp(optarg, "help") != 0)
                    mjpeg_error("Invalid -x arg '%s'", optarg);
                chroma_usage();
            }
            break;
        case 'f':
            force = 1;   /* skip the legal-range checks below */
            break;
        case 'n':
            numframes = atoi(optarg);
            break;
        case '?':
        default:
            usage();
        }
    }

    if (width <= 0)
        mjpeg_error_exit1("Invalid Width: %d", width);
    if (height <= 0)
        mjpeg_error_exit1("Invalid Height: %d", height);

    /* CCIR-601 legal-range checks.  NOTE(review): the checks are
     * inclusive of the bounds even though the message text suggests a
     * strict inequality. */
    if (!force && (Y < 16 || Y > 235))
        mjpeg_error_exit1("16 < Y < 235");
    if (!force && (U < 16 || U > 240))
        mjpeg_error_exit1("16 < U < 240");
    if (!force && (V < 16 || V > 240))
        mjpeg_error_exit1("16 < V < 240");

    y4m_init_stream_info(&ostream);
    y4m_init_frame_info(&oframe);
    y4m_si_set_width(&ostream, width);
    y4m_si_set_height(&ostream, height);
    y4m_si_set_interlace(&ostream, interlace);
    y4m_si_set_framerate(&ostream, rate_ratio);
    y4m_si_set_sampleaspect(&ostream, aspect_ratio);
    y4m_si_set_chroma(&ostream, chroma_mode);

    if (y4m_si_get_plane_count(&ostream) != 3)
        mjpeg_error_exit1("Only the 3 plane formats supported");

    plane_length[0] = y4m_si_get_plane_length(&ostream, 0);
    plane_length[1] = y4m_si_get_plane_length(&ostream, 1);
    plane_length[2] = y4m_si_get_plane_length(&ostream, 2);

    /* NOTE(review): malloc results are not checked before memset */
    yuv[0] = malloc(plane_length[0]);
    yuv[1] = malloc(plane_length[1]);
    yuv[2] = malloc(plane_length[2]);

    /*
     * Now fill the array once with black but use the provided Y, U and V values
     */
    memset(yuv[0], Y, plane_length[0]);
    memset(yuv[1], U, plane_length[1]);
    memset(yuv[2], V, plane_length[2]);

    if (noheader == 0)
        y4m_write_stream_header(fileno(stdout), &ostream);
    /* the same constant frame buffer is emitted numframes times */
    while (numframes--)
        y4m_write_frame(fileno(stdout), &ostream, &oframe, yuv);

    free(yuv[0]);
    free(yuv[1]);
    free(yuv[2]);
    y4m_fini_stream_info(&ostream);
    y4m_fini_frame_info(&oframe);
    exit(0);
}
/*
 * generate_YUV4MPEG: decode a numbered sequence of PNG files (named via
 * param->pngformatstr) and emit them as a YUV4MPEG stream on stdout.
 * Returns 0 when done.
 */
static int generate_YUV4MPEG(parameters_t *param)
{
  uint32_t frame;
  //size_t pngsize;
  char pngname[FILENAME_MAX];
  uint8_t *yuv[3];  /* buffer for Y/U/V planes of decoded PNG */
  y4m_stream_info_t streaminfo;
  y4m_frame_info_t frameinfo;

  /* force an even output width (required for chroma subsampling) */
  if ((param->width % 2) == 0)
    param->new_width = param->width;
  else
    {
      param->new_width = ((param->width >> 1) + 1) << 1;
      printf("Setting new, even image width %d", param->new_width);
    }

  mjpeg_info("Now generating YUV4MPEG stream.");
  y4m_init_stream_info(&streaminfo);
  y4m_init_frame_info(&frameinfo);

  y4m_si_set_width(&streaminfo, param->new_width);
  y4m_si_set_height(&streaminfo, param->height);
  y4m_si_set_interlace(&streaminfo, param->interlace);
  y4m_si_set_framerate(&streaminfo, param->framerate);
  y4m_si_set_chroma(&streaminfo, param->ss_mode);

  /* full-size planes; chroma is subsampled in place after decoding
   * NOTE(review): malloc results are not checked */
  yuv[0] = (uint8_t *)malloc(param->new_width * param->height * sizeof(yuv[0][0]));
  yuv[1] = (uint8_t *)malloc(param->new_width * param->height * sizeof(yuv[1][0]));
  yuv[2] = (uint8_t *)malloc(param->new_width * param->height * sizeof(yuv[2][0]));

  y4m_write_stream_header(STDOUT_FILENO, &streaminfo);

  /* NOTE(review): numframes == -1 acts as an "all frames" sentinel; if
   * param->numframes is an unsigned type this comparison relies on
   * wrap-around — confirm its declared type. */
  for (frame = param->begin;
       (frame < param->numframes + param->begin) || (param->numframes == -1);
       frame++)
    {
      //      if (frame < 25)
      //	else
      //snprintf(pngname, sizeof(pngname), param->pngformatstr, frame - 25);
      snprintf(pngname, sizeof(pngname), param->pngformatstr, frame);

      /* decode_png() writes through these file-scope raw-plane pointers */
      raw0 = yuv[0];
      raw1 = yuv[1];
      raw2 = yuv[2];

      if (decode_png(pngname, 1, param) == -1)
	{
	  mjpeg_info("Read from '%s' failed: %s", pngname, strerror(errno));
	  if (param->numframes == -1)
	    {
	      mjpeg_info("No more frames. Stopping.");
	      break; /* we are done; leave 'while' loop */
	    }
	  else
	    {
	      /* fixed frame count: re-emit the last decoded frame */
	      mjpeg_info("Rewriting latest frame instead.");
	    }
	}
      else
	{
#if 0
	  mjpeg_debug("Preparing frame");
	  /* Now open this PNG file, and examine its header to retrieve the
	     YUV4MPEG info that shall be written */
	  if ((param->interlace == Y4M_ILACE_NONE) || (param->interleave == 1))
	    {
	      mjpeg_info("Processing non-interlaced/interleaved %s.", pngname, pngsize);
	      decode_png(imagedata, 0, 420, yuv[0], yuv[1], yuv[2], param->width, param->height, param->new_width);
#if 0
	      if (param->make_z_alpha)
		{
		  mjpeg_info("Writing Z/Alpha data.\n");
		  za_write(real_z_imagemap, param->width, param->height,z_alpha_fp,frame);
		}
#endif
	    }
	  else
	    {
	      mjpeg_error_exit1("Can't handle interlaced PNG information (yet) since there is no standard for it.\n"
				"Use interleaved mode (-L option) to create interlaced material.");

	      switch (param->interlace)
		{
		case Y4M_ILACE_TOP_FIRST:
		  mjpeg_info("Processing interlaced, top-first %s", pngname);
#if 0
		  decode_jpeg_raw(jpegdata, jpegsize, Y4M_ILACE_TOP_FIRST, 420, param->width, param->height, yuv[0], yuv[1], yuv[2]);
#endif
		  break;
		case Y4M_ILACE_BOTTOM_FIRST:
		  mjpeg_info("Processing interlaced, bottom-first %s", pngname);
#if 0
		  decode_jpeg_raw(jpegdata, jpegsize, Y4M_ILACE_BOTTOM_FIRST, 420, param->width, param->height, yuv[0], yuv[1], yuv[2]);
#endif
		  break;
		default:
		  mjpeg_error_exit1("FATAL logic error?!?");
		  break;
		}
	    }
#endif
	  mjpeg_debug("Converting frame to YUV format.");
	  /* Transform colorspace, then subsample (in place) */
	  convert_RGB_to_YCbCr(yuv, param->height * param->new_width);
	  chroma_subsample(param->ss_mode, yuv, param->new_width, param->height);

	  mjpeg_debug("Frame decoded, now writing to output stream.");
	}

      /* NOTE(review): this debug line duplicates the one just above */
      mjpeg_debug("Frame decoded, now writing to output stream.");
      y4m_write_frame(STDOUT_FILENO, &streaminfo, &frameinfo, yuv);
    }

#if 0
  if (param->make_z_alpha)
    {
      za_write_end(z_alpha_fp);
      fclose(z_alpha_fp);
    }
#endif

  y4m_fini_stream_info(&streaminfo);
  y4m_fini_frame_info(&frameinfo);
  free(yuv[0]);
  free(yuv[1]);
  free(yuv[2]);

  return 0;
}
int main(int argc, char **argv) { int i, fdin, ss_v, ss_h, chroma_ss, textout; int do_vectorscope; int pwidth, pheight; /* Needed for the vectorscope */ int plane0_l, plane1_l, plane2_l; u_char *yuv[3], *cp; #ifdef HAVE_SDLgfx int j; int temp_x, temp_y; u_char *cpx, *cpy; #endif y4m_stream_info_t istream; y4m_frame_info_t iframe; do_vectorscope = 0; scalepercent = 0; #ifdef HAVE_SDLgfx textout = 0; #else textout = 1; #endif while ((i = getopt(argc, argv, "tps:")) != EOF) { switch (i) { case 't': textout = 1; break; case 'p': scalepercent = 1; break; case 's': do_vectorscope = atoi(optarg); break; default: usage(); } } #ifdef HAVE_SDLgfx if ( (do_vectorscope < 0) || (do_vectorscope >16) ) usage(); /* Initialize SDL */ desired_bpp = 8; video_flags = 0; video_flags |= SDL_DOUBLEBUF; number_of_frames = 1; memset(fy_stats, '\0', sizeof (fy_stats)); memset(ly_stats, '\0', sizeof (ly_stats)); if ( SDL_Init(SDL_INIT_VIDEO) < 0 ) mjpeg_error_exit1("Couldn't initialize SDL:%s",SDL_GetError()); atexit(SDL_Quit); /* Clean up on exit */ /* Initialize the display */ if (do_vectorscope == 0) screen = SDL_SetVideoMode(width,heigth,desired_bpp,video_flags); else screen=SDL_SetVideoMode(width_v,heigth,desired_bpp,video_flags); if (screen == NULL) mjpeg_error_exit1("Couldn't set %dx%dx%d video mode: %s", width, heigth, desired_bpp, SDL_GetError()); SDL_WM_SetCaption("y4mhistogram", "y4mhistogram"); y4m_init_area(screen); /* Here we draw the basic layout */ #endif /* HAVE_SDLgfx */ fdin = fileno(stdin); y4m_accept_extensions(1); y4m_init_stream_info(&istream); y4m_init_frame_info(&iframe); if (y4m_read_stream_header(fdin, &istream) != Y4M_OK) mjpeg_error_exit1("stream header error"); if (y4m_si_get_plane_count(&istream) != 3) mjpeg_error_exit1("Only 3 plane formats supported"); pwidth = y4m_si_get_width(&istream); pheight = y4m_si_get_height(&istream); chroma_ss = y4m_si_get_chroma(&istream); ss_h = y4m_chroma_ss_x_ratio(chroma_ss).d; ss_v = y4m_chroma_ss_y_ratio(chroma_ss).d; 
plane0_l = y4m_si_get_plane_length(&istream, 0); plane1_l = y4m_si_get_plane_length(&istream, 1); plane2_l = y4m_si_get_plane_length(&istream, 2); yuv[0] = malloc(plane0_l); if (yuv[0] == NULL) mjpeg_error_exit1("malloc(%d) plane 0", plane0_l); yuv[1] = malloc(plane1_l); if (yuv[1] == NULL) mjpeg_error_exit1(" malloc(%d) plane 1", plane1_l); yuv[2] = malloc(plane2_l); if (yuv[2] == NULL) mjpeg_error_exit1(" malloc(%d) plane 2\n", plane2_l); while (y4m_read_frame(fdin,&istream,&iframe,yuv) == Y4M_OK) { for (i = 0, cp = yuv[0]; i < plane0_l; i++, cp++) y_stats[*cp]++; /* Y' */ for (i = 0, cp = yuv[1]; i < plane1_l; i++, cp++) u_stats[*cp]++; /* U */ for (i = 0, cp = yuv[2]; i < plane2_l; i++, cp++) v_stats[*cp]++; /* V */ #ifdef HAVE_SDLgfx if (do_vectorscope >= 1 ) { for (i=0; i<260; i++) /* Resetting the vectorfield */ for (j=0;j<260;j++) vectorfield[i][j]=0; cpx = yuv[1]; cpy = yuv[2]; for (i=0; i < (pheight/ss_h); i++) { for (j = 0; j < (pwidth/ss_v); j++) { cpx++; cpy++; /* Have no idea why I have to multiply it with that values But than the vectorsscope works correct. If someone has a explantion or better fix tell me. Bernhard */ temp_x = round( 128+ ((*cpx-128) * 0.7857) ); temp_y = round( 128+ ((*cpy-128) * 1.1143) ); vectorfield[temp_x][temp_y*-1]=1; } /* Here we got to the n'th next line if needed */ i = i + (do_vectorscope-1); cpy = cpy + (pwidth/ss_v) * (do_vectorscope-1); cpx = cpx + (pwidth/ss_v) * (do_vectorscope-1); } } make_stat(); /* showing the sats */ SDL_UpdateRect(screen,0,0,0,0); /* updating all */ /* Events for SDL */ HandleEvent(); #endif } y4m_fini_frame_info(&iframe); y4m_fini_stream_info(&istream); if (textout) { for (i = 0; i < 255; i++) printf("Y %d %lld\n", i, y_stats[i]); for (i = 0; i < 255; i++) printf("U %d %lld\n", i, u_stats[i]); for (i = 0; i < 255; i++) printf("V %d %lld\n", i, v_stats[i]); } exit(0); }
int main(int argc, char *argv[]) { AVFormatContext *pFormatCtx; AVInputFormat *avif = NULL; int i, videoStream; AVCodecContext *pCodecCtx; AVCodec *pCodec; AVFrame *pFrame; AVFrame *pFrame444; AVPacket packet; int frameFinished; int numBytes; uint8_t *buffer; int fdOut = 1 ; int yuv_interlacing = Y4M_UNKNOWN; int yuv_ss_mode = Y4M_UNKNOWN; y4m_ratio_t yuv_frame_rate; y4m_ratio_t yuv_aspect; // need something for chroma subsampling type. int write_error_code; int header_written = 0; int convert = 0; int stream = 0; enum PixelFormat convert_mode; const static char *legal_flags = "chI:F:A:S:o:s:f:"; int y; int frame_data_size ; uint8_t *yuv_data[3] ; y4m_stream_info_t streaminfo; y4m_frame_info_t frameinfo; y4m_init_stream_info(&streaminfo); y4m_init_frame_info(&frameinfo); yuv_frame_rate.d = 0; yuv_aspect.d = 0; // Register all formats and codecs av_register_all(); while ((i = getopt (argc, argv, legal_flags)) != -1) { switch (i) { case 'I': switch (optarg[0]) { case 'p': yuv_interlacing = Y4M_ILACE_NONE; break; case 't': yuv_interlacing = Y4M_ILACE_TOP_FIRST; break; case 'b': yuv_interlacing = Y4M_ILACE_BOTTOM_FIRST; break; default: mjpeg_error("Unknown value for interlace: '%c'", optarg[0]); return -1; break; } break; case 'F': if( Y4M_OK != y4m_parse_ratio(&yuv_frame_rate, optarg) ) mjpeg_error_exit1 ("Syntax for frame rate should be Numerator:Denominator"); break; case 'A': if( Y4M_OK != y4m_parse_ratio(&yuv_aspect, optarg) ) { if (!strcmp(optarg,PAL)) { y4m_parse_ratio(&yuv_aspect, "128:117"); } else if (!strcmp(optarg,PAL_WIDE)) { y4m_parse_ratio(&yuv_aspect, "640:351"); } else if (!strcmp(optarg,NTSC)) { y4m_parse_ratio(&yuv_aspect, "4320:4739"); } else if (!strcmp(optarg,NTSC_WIDE)) { y4m_parse_ratio(&yuv_aspect, "5760:4739"); } else { mjpeg_error_exit1 ("Syntax for aspect ratio should be Numerator:Denominator"); } } break; case 'S': yuv_ss_mode = y4m_chroma_parse_keyword(optarg); if (yuv_ss_mode == Y4M_UNKNOWN) { mjpeg_error("Unknown subsampling mode option: 
%s", optarg); mjpeg_error("Try: 420mpeg2 444 422 411"); return -1; } break; case 'o': fdOut = open (optarg,O_CREAT|O_WRONLY,0644); if (fdOut == -1) { mjpeg_error_exit1 ("Cannot open file for writing"); } break; case 'c': convert = 1; break; case 's': stream = atoi(optarg); break; case 'f': avif = av_find_input_format (optarg); break; case 'h': case '?': print_usage (argv); return 0 ; break; } } //fprintf (stderr,"optind: %d\n",optind); optind--; argc -= optind; argv += optind; if (argc == 1) { print_usage (argv); return 0 ; } // Open video file if(av_open_input_file(&pFormatCtx, argv[1], avif, 0, NULL)!=0) return -1; // Couldn't open file // Retrieve stream information if(av_find_stream_info(pFormatCtx)<0) return -1; // Couldn't find stream information // Dump information about file onto standard error dump_format(pFormatCtx, 0, argv[1], 0); // Find the first video stream videoStream=-1; for(i=0; i<pFormatCtx->nb_streams; i++) if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) { // mark debug //fprintf (stderr,"Video Codec ID: %d (%s)\n",pFormatCtx->streams[i]->codec->codec_id ,pFormatCtx->streams[i]->codec->codec_name); if (videoStream == -1 && stream == 0) { // May still be overridden by the -s option videoStream=i; } if (stream == i) { videoStream=i; break; } } if(videoStream==-1) return -1; // Didn't find a video stream // Get a pointer to the codec context for the video stream pCodecCtx=pFormatCtx->streams[videoStream]->codec; // Find the decoder for the video stream pCodec=avcodec_find_decoder(pCodecCtx->codec_id); if(pCodec==NULL) return -1; // Codec not found // Open codec if(avcodec_open(pCodecCtx, pCodec)<0) return -1; // Could not open codec // Read framerate, aspect ratio and chroma subsampling from Codec if (yuv_frame_rate.d == 0) { yuv_frame_rate.n = pFormatCtx->streams[videoStream]->r_frame_rate.num; yuv_frame_rate.d = pFormatCtx->streams[videoStream]->r_frame_rate.den; } if (yuv_aspect.d == 0) { yuv_aspect.n = pCodecCtx-> 
sample_aspect_ratio.num; yuv_aspect.d = pCodecCtx-> sample_aspect_ratio.den; } // 0:0 is an invalid aspect ratio default to 1:1 if (yuv_aspect.d == 0 || yuv_aspect.n == 0 ) { yuv_aspect.n=1; yuv_aspect.d=1; } if (convert) { if (yuv_ss_mode == Y4M_UNKNOWN) { print_usage(); return 0; } else { y4m_accept_extensions(1); switch (yuv_ss_mode) { case Y4M_CHROMA_420MPEG2: convert_mode = PIX_FMT_YUV420P; break; case Y4M_CHROMA_422: convert_mode = PIX_FMT_YUV422P; break; case Y4M_CHROMA_444: convert_mode = PIX_FMT_YUV444P; break; case Y4M_CHROMA_411: convert_mode = PIX_FMT_YUV411P; break; case Y4M_CHROMA_420JPEG: convert_mode = PIX_FMT_YUVJ420P; break; default: mjpeg_error_exit1("Cannot convert to this chroma mode"); break; } } } else if (yuv_ss_mode == Y4M_UNKNOWN) { switch (pCodecCtx->pix_fmt) { case PIX_FMT_YUV420P: yuv_ss_mode=Y4M_CHROMA_420MPEG2; break; case PIX_FMT_YUV422P: yuv_ss_mode=Y4M_CHROMA_422; break; case PIX_FMT_YUV444P: yuv_ss_mode=Y4M_CHROMA_444; break; case PIX_FMT_YUV411P: yuv_ss_mode=Y4M_CHROMA_411; break; case PIX_FMT_YUVJ420P: yuv_ss_mode=Y4M_CHROMA_420JPEG; break; default: yuv_ss_mode=Y4M_CHROMA_444; convert_mode = PIX_FMT_YUV444P; // is there a warning function mjpeg_error("Unsupported Chroma mode. Upsampling to YUV444\n"); // enable advanced yuv stream y4m_accept_extensions(1); convert = 1; break; } } // Allocate video frame pFrame=avcodec_alloc_frame(); // Output YUV format details // is there some mjpeg_info functions? fprintf (stderr,"YUV Aspect Ratio: %d:%d\n",yuv_aspect.n,yuv_aspect.d); fprintf (stderr,"YUV frame rate: %d:%d\n",yuv_frame_rate.n,yuv_frame_rate.d); fprintf (stderr,"YUV Chroma Subsampling: %d\n",yuv_ss_mode); // Set the YUV stream details // Interlace is handled when the first frame is read. 
y4m_si_set_sampleaspect(&streaminfo, yuv_aspect); y4m_si_set_framerate(&streaminfo, yuv_frame_rate); y4m_si_set_chroma(&streaminfo, yuv_ss_mode); // Loop until nothing read while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size); // Did we get a video frame? if(frameFinished) { // Save the frame to disk // As we don't know interlacing until the first frame // we wait until the first frame is read before setting the interlace flag // and outputting the YUV header // It also appears that some codecs don't set width or height until the first frame either if (!header_written) { if (yuv_interlacing == Y4M_UNKNOWN) { if (pFrame->interlaced_frame) { if (pFrame->top_field_first) { yuv_interlacing = Y4M_ILACE_TOP_FIRST; } else { yuv_interlacing = Y4M_ILACE_BOTTOM_FIRST; } } else { yuv_interlacing = Y4M_ILACE_NONE; } } if (convert) { // initialise conversion to different chroma subsampling pFrame444=avcodec_alloc_frame(); numBytes=avpicture_get_size(convert_mode, pCodecCtx->width, pCodecCtx->height); buffer=(uint8_t *)malloc(numBytes); avpicture_fill((AVPicture *)pFrame444, buffer, convert_mode, pCodecCtx->width, pCodecCtx->height); } y4m_si_set_interlace(&streaminfo, yuv_interlacing); y4m_si_set_width(&streaminfo, pCodecCtx->width); y4m_si_set_height(&streaminfo, pCodecCtx->height); chromalloc(yuv_data,&streaminfo); fprintf (stderr,"YUV interlace: %d\n",yuv_interlacing); fprintf (stderr,"YUV Output Resolution: %dx%d\n",pCodecCtx->width, pCodecCtx->height); if ((write_error_code = y4m_write_stream_header(fdOut, &streaminfo)) != Y4M_OK) { mjpeg_error("Write header failed: %s", y4m_strerr(write_error_code)); } header_written = 1; } if (convert) { // convert to 444 /* +#ifdef HAVE_LIBSWSCALE + struct SwsContext* img_convert_ctx = + sws_getContext(context->width, context->height, PIX_FMT_RGB24, + 
context->width, context->height, context->pix_fmt, + SWS_BICUBIC, NULL, NULL, NULL); + + sws_scale(img_convert_ctx, pict->data, pict->linesize, + 0, context->height, encodable->data, + encodable->linesize); + + sws_freeContext (img_convert_ctx); +#else img_convert((AVPicture *)encodable, context->pix_fmt, (AVPicture *)pict, PIX_FMT_RGB24, context->width, context->height); - + (AVPicture *)pict, PIX_FMT_RGB24, + context->width, context->height); +#endif */ struct SwsContext* img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL); sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrame444->data, pFrame444->linesize); sws_freeContext (img_convert_ctx); //img_convert((AVPicture *)pFrame444, convert_mode, (AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height); chromacpy(yuv_data,pFrame444,&streaminfo); } else { chromacpy(yuv_data,pFrame,&streaminfo); } write_error_code = y4m_write_frame( fdOut, &streaminfo, &frameinfo, yuv_data); } } // Free the packet that was allocated by av_read_frame av_free_packet(&packet); } y4m_fini_stream_info(&streaminfo); y4m_fini_frame_info(&frameinfo); free(yuv_data[0]); free(yuv_data[1]); free(yuv_data[2]); // Free the YUV frame av_free(pFrame); // Close the codec avcodec_close(pCodecCtx); // Close the video file av_close_input_file(pFormatCtx); return 0; }
/*
 * ppmtoy4m: read PPM frames (or field pairs, when interlaced) from the
 * input fd, convert R'G'B' to Y'CbCr, subsample the chroma, and write
 * the result to stdout as a YUV4MPEG2 stream.
 *
 * Frame accounting: 'cl.offset' leading frames are read but not written;
 * 'cl.framecount' frames are then emitted (0 = until input EOF).  With
 * 'cl.repeatlast' the final input frame is re-emitted to pad the output
 * up to the requested count.
 */
int main(int argc, char **argv)
{
  cl_info_t cl;
  y4m_stream_info_t sinfo;
  y4m_frame_info_t finfo;
  uint8_t *buffers[Y4M_MAX_NUM_PLANES];  /* R'G'B' or Y'CbCr */
  uint8_t *buffers2[Y4M_MAX_NUM_PLANES]; /* R'G'B' or Y'CbCr */
  ppm_info_t ppm;
  int field_height;
  int fdout = 1;                         /* output goes to stdout */
  int err, i, count, repeating_last;

  y4m_accept_extensions(1);
  y4m_init_stream_info(&sinfo);
  y4m_init_frame_info(&finfo);

  parse_args(&cl, argc, argv);

  /* width/height 0 lets read_ppm_frame discover (and later enforce)
     the frame dimensions; NULL buffers make it allocate them. */
  ppm.width = 0;
  ppm.height = 0;
  for (i = 0; i < 3; i++) {
    buffers[i] = NULL;
    buffers2[i] = NULL;
  }

  /* Read first PPM frame/field-pair, to get dimensions */
  if (read_ppm_frame(cl.fdin, &ppm, buffers, buffers2,
                     cl.interlace, cl.interleave, cl.bgr))
    mjpeg_error_exit1("Failed to read first frame.");

  /* Setup streaminfo and write output header */
  setup_output_stream(fdout, &cl, &sinfo, &ppm, &field_height);

  /* Loop 'framecount' times, or possibly forever... */
  for (count = 0, repeating_last = 0;
       (count < (cl.offset + cl.framecount)) || (cl.framecount == 0);
       count++) {

    /* Once input is exhausted in repeatlast mode, keep re-writing the
       already-converted buffers — skip reading and transforming. */
    if (repeating_last)
      goto WRITE_FRAME;

    /* Read PPM frame/field */
    /* ...but skip reading very first frame, already read prior to loop */
    if (count > 0) {
      err = read_ppm_frame(cl.fdin, &ppm, buffers, buffers2,
                           cl.interlace, cl.interleave, cl.bgr);
      if (err == 1) {
        /* clean input EOF */
        if (cl.repeatlast) {
          repeating_last = 1;
          goto WRITE_FRAME;
        } else if (cl.framecount != 0) {
          mjpeg_error_exit1("Input frame shortfall (only %d converted).",
                            count - cl.offset);
        } else {
          break; /* input is exhausted; we are done! time to go home! */
        }
      } else if (err)
        mjpeg_error_exit1("Error reading ppm frame");
    }

    /* ...skip transforms if we are just going to skip this frame anyway.
       BUT, if 'cl.repeatlast' is on, we must process/buffer every frame,
       because we don't know when we will see the last one. */
    if ((count >= cl.offset) || (cl.repeatlast)) {
      /* Transform colorspace, then subsample (in place) */
      convert_RGB_to_YCbCr(buffers, ppm.width * field_height);
      chroma_subsample(cl.ss_mode, buffers, ppm.width, field_height);
      if (cl.interlace != Y4M_ILACE_NONE) {
        /* second field of the interlaced pair */
        convert_RGB_to_YCbCr(buffers2, ppm.width * field_height);
        chroma_subsample(cl.ss_mode, buffers2, ppm.width, field_height);
      }
    }

  WRITE_FRAME:
    /* Write converted frame to output */
    if (count >= cl.offset) {
      switch (cl.interlace) {
      case Y4M_ILACE_NONE:
        if ((err = y4m_write_frame(fdout, &sinfo, &finfo, buffers)) != Y4M_OK)
          mjpeg_error_exit1("Write frame failed: %s", y4m_strerr(err));
        break;
      case Y4M_ILACE_TOP_FIRST:
        /* buffers holds the top field, buffers2 the bottom */
        if ((err = y4m_write_fields(fdout, &sinfo, &finfo,
                                    buffers, buffers2)) != Y4M_OK)
          mjpeg_error_exit1("Write fields failed: %s", y4m_strerr(err));
        break;
      case Y4M_ILACE_BOTTOM_FIRST:
        /* field order swapped relative to the TOP_FIRST case */
        if ((err = y4m_write_fields(fdout, &sinfo, &finfo,
                                    buffers2, buffers)) != Y4M_OK)
          mjpeg_error_exit1("Write fields failed: %s", y4m_strerr(err));
        break;
      default:
        mjpeg_error_exit1("Unknown ilace type! %d", cl.interlace);
        break;
      }
    }
  }

  /* free(NULL) is a no-op, so this is safe for never-filled planes */
  for (i = 0; i < 3; i++) {
    free(buffers[i]);
    free(buffers2[i]);
  }
  y4m_fini_stream_info(&sinfo);
  y4m_fini_frame_info(&finfo);

  mjpeg_debug("Done.");
  return 0;
}
/** MAIN */ int main( int argc, char **argv) { int i, frame_count; int horz, vert; /* width and height of the frame */ uint8_t *frame[3]; /*pointer to the 3 color planes of the input frame */ struct area_s inarea; struct color_yuv coloryuv; int input_fd = 0; /* std in */ int output_fd = 1; /* std out */ int darker = 0; /* how much darker should the image be */ int copy_pixel = 0; /* how much pixels we should use for filling up the area */ int average_pixel = 0; /* how much pixel to use for average */ y4m_stream_info_t istream, ostream; y4m_frame_info_t iframe; inarea.width=0; inarea.height=0; inarea.voffset=0; inarea.hoffset=0; coloryuv.luma = LUMA; /*Setting the luma to black */ coloryuv.chroma_b = CHROMA; /*Setting the chroma to center, means white */ coloryuv.chroma_r = CHROMA; /*Setting the chroma to center, means white */ (void)mjpeg_default_handler_verbosity(verbose); /* processing commandline */ process_commandline(argc, argv, &inarea, &darker, ©_pixel, &coloryuv, &average_pixel); y4m_init_stream_info(&istream); y4m_init_stream_info(&ostream); y4m_init_frame_info(&iframe); /* First read the header of the y4m stream */ i = y4m_read_stream_header(input_fd, &istream); if ( i != Y4M_OK) /* a basic check if we really have y4m stream */ mjpeg_error_exit1("Input stream error: %s", y4m_strerr(i)); else { /* Here we copy the input stream info to the output stream info header */ y4m_copy_stream_info(&ostream, &istream); /* Here we write the new output header to the output fd */ y4m_write_stream_header(output_fd, &ostream); horz = y4m_si_get_width(&istream); /* get the width of the frame */ vert = y4m_si_get_height(&istream); /* get the height of the frame */ if ( (inarea.width + inarea.hoffset) > horz) mjpeg_error_exit1("Input width and offset larger than framewidth,exit"); if ( (inarea.height + inarea.voffset) > vert) mjpeg_error_exit1("Input height and offset larger than frameheight,exit"); /* Here we allocate the memory for on frame */ frame[0] = malloc( horz * 
vert ); frame[1] = malloc( (horz/2) * (vert/2) ); frame[2] = malloc( (horz/2) * (vert/2) ); /* Here we set the initial number of of frames */ /* We do not need it. Just for showing that is does something */ frame_count = 0 ; /* This is the main loop here can filters effects, scaling and so on be done with the video frames. Just up to your mind */ /* We read now a single frame with the header and check if it does not have any problems or we have alreaddy processed the last without data */ while(y4m_read_frame(input_fd, &istream, &iframe, frame) == Y4M_OK) { frame_count++; /* You can do something usefull here */ if (darker != 0) set_darker(inarea, horz, vert, frame, darker); else if (copy_pixel != 0) copy_area(inarea, horz, vert, frame, copy_pixel); else if (average_pixel != 0) average_area(inarea, horz, vert, frame, average_pixel); else set_inactive(inarea, horz, vert, frame, &coloryuv); /* Now we put out the read frame */ y4m_write_frame(output_fd, &ostream, &iframe, frame); } /* Cleaning up the data structures */ y4m_fini_stream_info(&istream); y4m_fini_stream_info(&ostream); y4m_fini_frame_info(&iframe); } /* giving back the memory to the system */ free(frame[0]); frame[0] = 0; free(frame[1]); frame[1] = 0; free(frame[2]); frame[2] = 0; exit(0); /* exiting */ }