status_t MediaPlayer::initFFmpegEngine() { //FFMPEG INIT code avcodec_register_all(); avfilter_register_all(); av_register_all(); avformat_network_init(); ffmpegEngineInitialized = true; return NO_ERROR; }
/* global initialization * * XXX: in theory we can select the filters/formats we want to support, but * this does not work in practise. */ void BarPlayerInit () { ao_initialize (); av_log_set_level (AV_LOG_FATAL); av_register_all (); avfilter_register_all (); avformat_network_init (); }
/* JNI entry point: one-time FFmpeg initialization for the SDL activity.
 * Registers optional device/filter support when compiled in, then the
 * demuxers/decoders, and finally the network layer.
 *
 * Bug fix: the function is declared to return jint but had no return
 * statement (undefined behavior in C); report success with 0. */
jint Java_org_libsdl_app_SDLActivity_ffInit(JNIEnv* env, jobject thiz )
{
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();
    avformat_network_init();
    return 0;
}
int main(int argc, char *argv[]) { av_register_all(); avcodec_register_all(); avfilter_register_all(); QApplication a(argc, argv); GUI::MainWindow mw; mw.show(); return a.exec(); }
// Private state for the libavfilter-based filter wrapper.
// All members start in an empty/unconfigured state; presumably the filter
// graph is built lazily once input parameters are known (confirm in the
// owning class).
LibAVFilterPrivate()
    : filter_graph(0)        // AVFilterGraph*, not yet allocated
    , in_filter_ctx(0)       // input-side filter context — likely buffersrc; verify
    , out_filter_ctx(0)      // output-side filter context — likely buffersink; verify
    , pixfmt(QTAV_PIX_FMT_C(NONE))
    , width(0)
    , height(0)
    , avframe(0)
    , options_changed(false) // set when filter options require a rebuild
{
    // Make sure every libavfilter filter is registered before any graph use.
    avfilter_register_all();
}
// @todo - add a rx_shutdown_libav() to deinitialize e.g. the network (see libav/tools/aviocat.c) void rx_init_libav() { if(!rx_did_register_all) { av_log_set_level(AV_LOG_DEBUG); av_register_all(); avcodec_register_all(); avformat_network_init(); avfilter_register_all(); rx_did_register_all = true; } }
void init_ffmpeg(void) { // register all common ffmpeg things av_register_all(); // register all the codec avcodec_register_all(); // register all the devices avdevice_register_all(); // register all filters avfilter_register_all(); //register network avformat_network_init(); }
// Open an X11 screen-grab input and prepare YUV4MPEG2 output.
//
// Registers FFmpeg, sizes the webcam RGBA buffer, picks a hard-coded X11
// capture offset (a different one for 1080p input — presumably to match a
// specific desktop layout; confirm), opens the "x11grab" input with the
// requested framerate/video_size, reads stream info, emits the YUV4MPEG2
// header on stdout, allocates the 4:2:0 output buffer (w*h*3/2 bytes) and
// starts the webcam grabber thread. Throws small negative codes (-0x10,
// -0x11) on open/probe failure; exits the process if x11grab is missing.
// NOTE(review): format_opts is freed, but entries FFmpeg did not consume
// are silently ignored — confirm that is intended.
ScreenGrabber::ScreenGrabber(int inputWidth, int inputHeight) { avcodec_register_all(); avdevice_register_all(); #if CONFIG_AVFILTER avfilter_register_all(); #endif av_register_all(); webcamCapture.resize(WebcamWidth * WebcamHeight * 4); auto fileName = ":0.0+65,126"; if (inputHeight == 1080) fileName = ":0.0+0,74"; auto format = "x11grab"; auto inputFormat = av_find_input_format(format); if (!inputFormat) { std::cerr << "Unknown input format: '" << format << "'" << std::endl; exit(1); } AVDictionary *format_opts = NULL; av_dict_set(&format_opts, "framerate", std::to_string(OutputFrameRate).c_str(), 0); std::string resolution = std::to_string(inputWidth) + "x" + std::to_string(inputHeight); av_dict_set(&format_opts, "video_size", resolution.c_str(), 0); int len = avformat_open_input(&formatContext, fileName, inputFormat, &format_opts); if (len != 0) { std::cerr << "Could not open input " << fileName << std::endl; throw - 0x10; } if (avformat_find_stream_info(formatContext, NULL) < 0) { std::cerr << "Could not read stream information from " << fileName << std::endl; throw - 0x11; } av_dump_format(formatContext, 0, fileName, 0); av_dict_free(&format_opts); width = formatContext->streams[0]->codecpar->width; height = formatContext->streams[0]->codecpar->height; std::cout << "YUV4MPEG2 W" << width << " H" << height << " F" << OutputFrameRate << ":1 Ip A0:0 C420jpeg XYSCSS=420JPEG\n"; yuv.resize(width * height * 3 / 2); memset(&packet, 0, sizeof(packet)); webcamThread = std::make_unique<std::thread>(webcamGrabber, std::ref(webcamCapture), std::ref(done)); }
// One-time global initialization for the library.
//
// Installs a lock manager (must precede any threaded FFmpeg use), seeds
// rand(), registers codecs/formats/filters, brings up the network layer
// and silences av_log. Returns 0 (currently cannot fail).
int groove_init(void) {
    av_lockmgr_register(&my_lockmgr_cb);

    srand(time(NULL)); // seed once for the whole library

    // register all codecs, demux and protocols
    avcodec_register_all();
    av_register_all();
    avformat_network_init();
    avfilter_register_all();

    // remember that avformat_network_deinit() is owed at shutdown
    should_deinit_network = 1;

    av_log_set_level(AV_LOG_QUIET); // library stays silent by default
    return 0;
}
/*
 * One-time process init for the audio filter module.
 *
 * Registers FFmpeg codecs/filters, then resolves everything this module
 * needs: the buffer source/sink filters, an AAC decoder and the named AAC
 * encoder, and verifies the encoder supports the required input sample
 * format. On any failure a warning is logged and `initialized` stays
 * FALSE, which disables audio filtering instead of failing hard (the
 * libfdk_aac hint in the last message documents the usual cause).
 */
void audio_filter_process_init(vod_log_t* log) { avcodec_register_all(); avfilter_register_all(); buffersrc_filter = avfilter_get_by_name(BUFFERSRC_FILTER_NAME); if (buffersrc_filter == NULL) { vod_log_error(VOD_LOG_WARN, log, 0, "audio_filter_process_init: failed to get buffer source filter, audio filtering is disabled"); return; } buffersink_filter = avfilter_get_by_name(BUFFERSINK_FILTER_NAME); if (buffersink_filter == NULL) { vod_log_error(VOD_LOG_WARN, log, 0, "audio_filter_process_init: failed to get buffer sink filter, audio filtering is disabled"); return; } decoder_codec = avcodec_find_decoder(AV_CODEC_ID_AAC); if (decoder_codec == NULL) { vod_log_error(VOD_LOG_WARN, log, 0, "audio_filter_process_init: failed to get AAC decoder, audio filtering is disabled"); return; } encoder_codec = avcodec_find_encoder_by_name(AAC_ENCODER_NAME); if (encoder_codec == NULL) { vod_log_error(VOD_LOG_WARN, log, 0, "audio_filter_process_init: failed to get AAC encoder, audio filtering is disabled. recompile libavcodec with libfdk_aac to enable it"); return; } if (!audio_filter_is_format_supported(encoder_codec, ENCODER_INPUT_SAMPLE_FORMAT)) { vod_log_error(VOD_LOG_WARN, log, 0, "audio_filter_process_init: encoder does not support the required input format, audio filtering is disabled"); return; } initialized = TRUE; }
// One-time FFmpeg setup guarded by `initDone`.
//
// Installs the lock manager, registers codecs/formats/filters, then walks
// the codec list collecting the names of available H.264 and HEVC/H.265
// video *encoders* into h264CodecNames / hevcCodecNames (both seeded with
// a "default" entry). The commented-out blocks are leftover debugging
// helpers for dumping codec descriptors and output formats. Returns the
// init flag (always true after the first call).
bool FFmpegCommon::initFFmpeg() { if ( !initDone ) { av_lockmgr_register( lockManager ); avcodec_register_all(); av_register_all(); avfilter_register_all(); initDone = true; /*const AVCodecDescriptor *desc = NULL; desc = avcodec_descriptor_next( desc ); while ( desc ) { qDebug() << desc->name; //qDebug() << " " << desc->long_name; desc = avcodec_descriptor_next( desc ); }*/ /*const AVOutputFormat *format = NULL; format = av_oformat_next( format ); while ( format ) { qDebug() << format->name << avcodec_get_name( format->video_codec ) << avcodec_get_name( format->audio_codec ); format = av_oformat_next( format ); }*/ h264CodecNames << "default"; hevcCodecNames << "default"; AVCodec * codec = av_codec_next(NULL); while (codec != NULL) { if (av_codec_is_encoder(codec) && codec->type == AVMEDIA_TYPE_VIDEO) { QString cn = codec->name; if (cn.contains("264")) { qDebug() << codec->name; h264CodecNames << codec->name; } else if (cn.contains("hevc") || cn.contains("265")) { qDebug() << codec->name; hevcCodecNames << codec->name; } } codec = av_codec_next(codec); } } return initDone; }
// Idempotent libav initialization; on Android it additionally registers
// the project's custom input format before first use.
void av_ff_init() {
    if( _ff_init )return; // already initialized

    avdevice_register_all();
    avfilter_register_all();
    av_register_all();
    avformat_network_init();

#ifdef __ANDROID__
    {
        // Custom demuxer implemented elsewhere in the project.
        extern AVInputFormat ff_android_demuxer;
        av_register_input_format(&ff_android_demuxer);
    }
#endif

    _ff_init = 1; // later calls become no-ops
}
void pyav_register_all(void)
{
    /*
     * Setup base library. While the docs and experience may lead us to
     * believe we don't need to call all of these (e.g. avcodec_register_all
     * is usually unnecessary), some users have found contexts in which they
     * are required.
     *
     * Each deprecated register call is guarded by the library version that
     * made registration automatic, so this also builds against FFmpeg
     * versions where the functions were removed.
     */
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(58, 9, 100)
    av_register_all();
#endif
    avformat_network_init();
    avdevice_register_all(); /* never deprecated; always required */
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 10, 100)
    avcodec_register_all();
#endif
#if LIBAVFILTER_VERSION_INT < AV_VERSION_INT(7, 14, 100)
    avfilter_register_all();
#endif
}
// Allocate and initialize a demuxer instance.
//
// Performs global FFmpeg registration (idempotent), then returns a
// zero-initialized ff_demuxer carrying the default clock sync type and
// frame/packet queue sizes, or NULL on allocation failure. Hardware
// decoding is disabled by default.
struct ff_demuxer *ff_demuxer_init()
{
    struct ff_demuxer *demuxer;

    av_register_all();
    avdevice_register_all();
    avfilter_register_all();
    avformat_network_init();

    demuxer = av_mallocz(sizeof(struct ff_demuxer));
    if (demuxer == NULL)
        return NULL;

    demuxer->clock.sync_type = DEFAULT_AV_SYNC_TYPE;
    demuxer->options.audio_frame_queue_size = AUDIO_FRAME_QUEUE_SIZE;
    demuxer->options.video_frame_queue_size = VIDEO_FRAME_QUEUE_SIZE;
    demuxer->options.audio_packet_queue_size = AUDIO_PACKET_QUEUE_SIZE;
    demuxer->options.video_packet_queue_size = VIDEO_PACKET_QUEUE_SIZE;
    demuxer->options.is_hw_decoding = false;
    return demuxer;
}
// Global libav initialization for mpv.
//
// The first caller (decided under log_lock) installs the logging bridge:
// per-subsystem mp_log instances are created and av_log is redirected to
// mp_msg_av_log_callback. The registration calls afterwards are safe to
// repeat on subsequent calls.
void init_libav(struct mpv_global *global)
{
    pthread_mutex_lock(&log_lock);
    if (!log_mpv_instance) {
        log_mpv_instance = global;
        log_root = mp_log_new(NULL, global->log, LIB_PREFIX);
        log_decaudio = mp_log_new(log_root, log_root, "audio");
        log_decvideo = mp_log_new(log_root, log_root, "video");
        log_demuxer = mp_log_new(log_root, log_root, "demuxer");
        // Route all FFmpeg log output through mpv's logger.
        av_log_set_callback(mp_msg_av_log_callback);
    }
    pthread_mutex_unlock(&log_lock);

    avcodec_register_all();
    av_register_all();
    avformat_network_init();
    avfilter_register_all();
#if HAVE_LIBAVDEVICE
    avdevice_register_all();
#endif
}
int main(int argc, char** argv) { QApplication app(argc, argv); av_register_all(); avfilter_register_all(); // i18n QTranslator qtTranslator; qtTranslator.load("qt_" + QLocale::system().name(), QLibraryInfo::location(QLibraryInfo::TranslationsPath)); app.installTranslator(&qtTranslator); QTranslator appTranslator; appTranslator.load("dvdcp_" + QLocale::system().name()); app.installTranslator(&appTranslator); MainWindow win; win.show(); return app.exec(); }
// One-time FFmpeg initialization for all stream classes, guarded by
// `isInitialized`: registration, logging redirection, and the lock manager
// needed for multithreaded use.
void FormatBaseStream::initialize() {
    if (!isInitialized) {
        LOGDEBUG("Init ffmpeg Libraries");
        /* register all codecs, demux and protocols */
        avcodec_register_all();
        av_register_all();
        avfilter_register_all();
        avformat_network_init();
        /* setup own logging callback */
        av_log_set_callback(FormatBaseStream::mhive_log_default_callback);
        av_log_set_level(AV_LOG_INFO);
        /* install my own lock manager
         * this is needed for multithreaded environment */
        av_lockmgr_register(lockmgr);
        isInitialized = true;
    }
}
// Open `filename`, probe its streams, set up the video decode path and
// allocate the working frames.
//
// Returns 0 on success; on any failure all partially-initialized state is
// released via destroy() and -1 is returned.
int VideoDecoder::init(const string& filename) {
    // Global FFmpeg setup (idempotent).
    av_register_all();
    avcodec_register_all();
    avformat_network_init();
    avfilter_register_all();

    if (avformat_open_input(&mFmtCtxPtr, filename.c_str(), nullptr, nullptr) != 0) {
        LOGE("Could not open input file: %s", filename.c_str());
        goto failed;
    }
    if (avformat_find_stream_info(mFmtCtxPtr, nullptr) < 0) {
        LOGE("Could not find stream information");
        goto failed;
    }
    if (initializeVideo() < 0) {
        LOGE("Init video failed");
        goto failed;
    }
    // One frame for decoded output, one for filtered output.
    mFramePtr = av_frame_alloc();
    if (!mFramePtr) {
        LOGE("Not enough memory");
        goto failed;
    }
    mFilterFramePtr = av_frame_alloc();
    if (!mFilterFramePtr) {
        LOGE("Not enough memory");
        goto failed;
    }
    return 0;

failed:
    destroy();
    return -1;
}
/*
 * Video filtering example entry point (legacy FFmpeg decode API).
 *
 * Flow: allocate frames, open the input file given as argv[1], build the
 * filter graph from filter_descr, then loop — read packets, decode video
 * packets, push decoded frames into the buffersrc (keeping a reference),
 * and drain filtered frames from the buffersink for display until EOF.
 * Cleanup at `end` frees the graph, decoder, input and frames; any error
 * other than EOF is reported and the process exits with status 1.
 */
int main(int argc, char **argv) { int ret; AVPacket packet; AVFrame *frame = av_frame_alloc(); AVFrame *filt_frame = av_frame_alloc(); int got_frame; if (!frame || !filt_frame) { perror("Could not allocate frame"); exit(1); } if (argc != 2) { fprintf(stderr, "Usage: %s file\n", argv[0]); exit(1); } avcodec_register_all(); av_register_all(); avfilter_register_all(); if ((ret = open_input_file(argv[1])) < 0) goto end; if ((ret = init_filters(filter_descr)) < 0) goto end; /* read all packets */ while (1) { if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) break; if (packet.stream_index == video_stream_index) { avcodec_get_frame_defaults(frame); got_frame = 0; ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error decoding video\n"); break; } if (got_frame) { frame->pts = av_frame_get_best_effort_timestamp(frame); /* push the decoded frame into the filtergraph */ if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) { av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); break; } /* pull filtered frames from the filtergraph */ while (1) { ret = av_buffersink_get_frame(buffersink_ctx, filt_frame); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break; if (ret < 0) goto end; display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base); av_frame_unref(filt_frame); } av_frame_unref(frame); } } av_free_packet(&packet); } end: avfilter_graph_free(&filter_graph); if (dec_ctx) avcodec_close(dec_ctx); avformat_close_input(&fmt_ctx); av_frame_free(&frame); av_frame_free(&filt_frame); if (ret < 0 && ret != AVERROR_EOF) { char buf[1024]; av_strerror(ret, buf, sizeof(buf)); fprintf(stderr, "Error occurred: %s\n", buf); exit(1); } exit(0); }
/*
 * Curses-based video player: renders scaled video frames as colored
 * character cells.
 *
 * Sets up ncurses (color support is mandatory), installs SIGINT/SIGQUIT
 * handlers, builds 255 color pairs, then runs the classic FFmpeg
 * decode -> buffersrc -> buffersink loop with a "scale=80:25" filter so
 * each filtered picture maps onto the terminal grid (display_picref).
 * The `end` label restores the terminal (endwin) before normal FFmpeg
 * cleanup; errors other than EOF exit with status 1.
 */
int main(int argc, char **argv) { //curses
int color_pair; if(initscr() == NULL){ fprintf(stderr, "init failure\n"); exit(EXIT_FAILURE); } /* start_color must be called before any color attribute is used;
   the convention is to call it right after initscr. */ if(has_colors() == FALSE || start_color() == ERR){ endwin(); fprintf(stderr, "This term seems not to having Color\n"); exit(EXIT_FAILURE); } if(signal(SIGINT, sig_handler) == SIG_ERR || signal(SIGQUIT, sig_handler) == SIG_ERR){ fprintf(stderr, "signal failure\n"); exit(EXIT_FAILURE); } curs_set(0); /* build the color pairs */ color_pair = 1; for(color_pair = 1; color_pair < 256; color_pair++){ init_pair(color_pair, color_pair, color_pair); } refresh(); char filter_descr[10000]; int w, h; w = 80;//COLS;
h = 25;//LINES;
bak = malloc(sizeof (int) * LINES*COLS+1); sprintf(filter_descr, "scale=%d:%d", w, h); int ret; AVPacket packet; AVFrame frame; int got_frame; if (argc != 2) { fprintf(stderr, "Usage: %s file\n", argv[0]); exit(1); } avcodec_register_all(); av_register_all(); avfilter_register_all(); if ((ret = open_input_file(argv[1])) < 0) goto end; if ((ret = init_filters(filter_descr)) < 0) goto end; /* read all packets */ while (1) { AVFilterBufferRef *picref; if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) break; if (packet.stream_index == video_stream_index) { avcodec_get_frame_defaults(&frame); got_frame = 0; ret = avcodec_decode_video2(dec_ctx, &frame, &got_frame, &packet); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error decoding video\n"); break; } if (got_frame) { frame.pts = av_frame_get_best_effort_timestamp(&frame); /* push the decoded frame into the filtergraph */ if (av_buffersrc_add_frame(buffersrc_ctx, &frame, 0) < 0) { av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n"); break; } /* pull filtered pictures from the filtergraph */ while (repeat_flag) { ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break; if (ret < 0) goto end; if (picref) { display_picref(picref, 
buffersink_ctx->inputs[0]->time_base); avfilter_unref_bufferp(&picref); } } } } av_free_packet(&packet); } end: endwin(); avfilter_graph_free(&filter_graph); if (dec_ctx) avcodec_close(dec_ctx); avformat_close_input(&fmt_ctx); if (ret < 0 && ret != AVERROR_EOF) { char buf[1024]; av_strerror(ret, buf, sizeof(buf)); fprintf(stderr, "Error occurred: %s\n", buf); exit(1); } exit(0); }
void preloadCodecsAndFormats() { av_register_all(); avfilter_register_all(); }
/*
 * Graph visualization tool: read a filter-graph description (-i file or
 * stdin), parse and configure it, and print a digraph representation
 * (print_digraph) to -o file or stdout.
 *
 * The input is slurped line-by-line into a singly linked list, sized by
 * accumulating strlen, then joined into one NUL-terminated string before
 * avfilter_graph_parse(). NOTE(review): the list nodes, graph_string and
 * the open FILE handles are never released — acceptable for a short-lived
 * command-line tool, but worth confirming.
 */
int main(int argc, char **argv) { const char *outfilename = NULL; const char *infilename = NULL; FILE *outfile = NULL; FILE *infile = NULL; char *graph_string = NULL; AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph)); char c; av_log_set_level(AV_LOG_DEBUG); while ((c = getopt(argc, argv, "hi:o:")) != -1) { switch (c) { case 'h': usage(); return 0; case 'i': infilename = optarg; break; case 'o': outfilename = optarg; break; case '?': return 1; } } if (!infilename || !strcmp(infilename, "-")) infilename = "/dev/stdin"; infile = fopen(infilename, "r"); if (!infile) { fprintf(stderr, "Impossible to open input file '%s': %s\n", infilename, strerror(errno)); return 1; } if (!outfilename || !strcmp(outfilename, "-")) outfilename = "/dev/stdout"; outfile = fopen(outfilename, "w"); if (!outfile) { fprintf(stderr, "Impossible to open output file '%s': %s\n", outfilename, strerror(errno)); return 1; } /* read from infile and put it in a buffer */ { unsigned int count = 0; struct line *line, *last_line, *first_line; char *p; last_line = first_line = av_malloc(sizeof(struct line)); while (fgets(last_line->data, sizeof(last_line->data), infile)) { struct line *new_line = av_malloc(sizeof(struct line)); count += strlen(last_line->data); last_line->next = new_line; last_line = new_line; } last_line->next = NULL; graph_string = av_malloc(count + 1); p = graph_string; for (line = first_line; line->next; line = line->next) { unsigned int l = strlen(line->data); memcpy(p, line->data, l); p += l; } *p = '\0'; } avfilter_register_all(); if (avfilter_graph_parse(graph, graph_string, NULL, NULL, NULL) < 0) { fprintf(stderr, "Impossible to parse the graph description\n"); return 1; } if (avfilter_graph_config(graph, NULL) < 0) return 1; print_digraph(outfile, graph); fflush(outfile); return 0; }
/*
 * Play the outputs of a filter graph to one or more output devices as
 * uncoded (raw) frames.
 *
 * Steps: parse the graph given as argv[1]; reject graphs with unconnected
 * inputs; count free output pads and match them against the device
 * arguments (either one device for all streams, or one per stream); create
 * a sink per free output pad; configure the graph; allocate one muxer per
 * device (extra streams reuse the first mux); create a raw-video/PCM-audio
 * stream mirroring each sink link's parameters; write headers; verify
 * uncoded-frame support; then pump frames from the sinks into
 * av_interleaved_write_uncoded_frame until the graph reports EOF.
 * `fail` releases the frame, graph, muxers and the stream array; the exit
 * status is 1 on any error.
 */
int main(int argc, char **argv) { char *in_graph_desc, **out_dev_name; int nb_out_dev = 0, nb_streams = 0; AVFilterGraph *in_graph = NULL; Stream *streams = NULL, *st; AVFrame *frame = NULL; int i, j, run = 1, ret; //av_log_set_level(AV_LOG_DEBUG);
if (argc < 3) { av_log(NULL, AV_LOG_ERROR, "Usage: %s filter_graph dev:out [dev2:out2...]\n\n" "Examples:\n" "%s movie=file.nut:s=v+a xv:- alsa:default\n" "%s movie=file.nut:s=v+a uncodedframecrc:pipe:0\n", argv[0], argv[0], argv[0]); exit(1); } in_graph_desc = argv[1]; out_dev_name = argv + 2; nb_out_dev = argc - 2; av_register_all(); avdevice_register_all(); avfilter_register_all(); /* Create input graph */ if (!(in_graph = avfilter_graph_alloc())) { ret = AVERROR(ENOMEM); av_log(NULL, AV_LOG_ERROR, "Unable to alloc graph graph: %s\n", av_err2str(ret)); goto fail; } ret = avfilter_graph_parse_ptr(in_graph, in_graph_desc, NULL, NULL, NULL); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Unable to parse graph: %s\n", av_err2str(ret)); goto fail; } nb_streams = 0; for (i = 0; i < in_graph->nb_filters; i++) { AVFilterContext *f = in_graph->filters[i]; for (j = 0; j < f->nb_inputs; j++) { if (!f->inputs[j]) { av_log(NULL, AV_LOG_ERROR, "Graph has unconnected inputs\n"); ret = AVERROR(EINVAL); goto fail; } } for (j = 0; j < f->nb_outputs; j++) if (!f->outputs[j]) nb_streams++; } if (!nb_streams) { av_log(NULL, AV_LOG_ERROR, "Graph has no output stream\n"); ret = AVERROR(EINVAL); goto fail; } if (nb_out_dev != 1 && nb_out_dev != nb_streams) { av_log(NULL, AV_LOG_ERROR, "Graph has %d output streams, %d devices given\n", nb_streams, nb_out_dev); ret = AVERROR(EINVAL); goto fail; } if (!(streams = av_calloc(nb_streams, sizeof(*streams)))) { ret = AVERROR(ENOMEM); av_log(NULL, AV_LOG_ERROR, "Could not allocate streams\n"); } st = streams; for (i = 0; i < in_graph->nb_filters; i++) { AVFilterContext *f = in_graph->filters[i]; for (j = 0; j < f->nb_outputs; j++) { if (!f->outputs[j]) { if ((ret = create_sink(st++, in_graph, f, j)) < 0) 
goto fail; } } } av_assert0(st - streams == nb_streams); if ((ret = avfilter_graph_config(in_graph, NULL)) < 0) { av_log(NULL, AV_LOG_ERROR, "Failed to configure graph\n"); goto fail; } /* Create output devices */ for (i = 0; i < nb_out_dev; i++) { char *fmt = NULL, *dev = out_dev_name[i]; st = &streams[i]; if ((dev = strchr(dev, ':'))) { *(dev++) = 0; fmt = out_dev_name[i]; } ret = avformat_alloc_output_context2(&st->mux, NULL, fmt, dev); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Failed to allocate output: %s\n", av_err2str(ret)); goto fail; } if (!(st->mux->oformat->flags & AVFMT_NOFILE)) { ret = avio_open2(&st->mux->pb, st->mux->filename, AVIO_FLAG_WRITE, NULL, NULL); if (ret < 0) { av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n", av_err2str(ret)); goto fail; } } } for (; i < nb_streams; i++) streams[i].mux = streams[0].mux; /* Create output device streams */ for (i = 0; i < nb_streams; i++) { st = &streams[i]; if (!(st->stream = avformat_new_stream(st->mux, NULL))) { ret = AVERROR(ENOMEM); av_log(NULL, AV_LOG_ERROR, "Failed to create output stream\n"); goto fail; } st->stream->codec->codec_type = st->link->type; st->stream->time_base = st->stream->codec->time_base = st->link->time_base; switch (st->link->type) { case AVMEDIA_TYPE_VIDEO: st->stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO; st->stream->avg_frame_rate = st->stream-> r_frame_rate = av_buffersink_get_frame_rate(st->sink); st->stream->codec->width = st->link->w; st->stream->codec->height = st->link->h; st->stream->codec->sample_aspect_ratio = st->link->sample_aspect_ratio; st->stream->codec->pix_fmt = st->link->format; break; case AVMEDIA_TYPE_AUDIO: st->stream->codec->channel_layout = st->link->channel_layout; st->stream->codec->channels = avfilter_link_get_channels(st->link); st->stream->codec->sample_rate = st->link->sample_rate; st->stream->codec->sample_fmt = st->link->format; st->stream->codec->codec_id = av_get_pcm_codec(st->stream->codec->sample_fmt, -1); break; default: 
av_assert0(!"reached"); } } /* Init output devices */ for (i = 0; i < nb_out_dev; i++) { st = &streams[i]; if ((ret = avformat_write_header(st->mux, NULL)) < 0) { av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n", av_err2str(ret)); goto fail; } } /* Check output devices */ for (i = 0; i < nb_streams; i++) { st = &streams[i]; ret = av_write_uncoded_frame_query(st->mux, st->stream->index); if (ret < 0) { av_log(st->mux, AV_LOG_ERROR, "Uncoded frames not supported on stream #%d: %s\n", i, av_err2str(ret)); goto fail; } } while (run) { ret = avfilter_graph_request_oldest(in_graph); if (ret < 0) { if (ret == AVERROR_EOF) { run = 0; } else { av_log(NULL, AV_LOG_ERROR, "Error filtering: %s\n", av_err2str(ret)); break; } } for (i = 0; i < nb_streams; i++) { st = &streams[i]; while (1) { if (!frame && !(frame = av_frame_alloc())) { ret = AVERROR(ENOMEM); av_log(NULL, AV_LOG_ERROR, "Could not allocate frame\n"); goto fail; } ret = av_buffersink_get_frame_flags(st->sink, frame, AV_BUFFERSINK_FLAG_NO_REQUEST); if (ret < 0) { if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) av_log(NULL, AV_LOG_WARNING, "Error in sink: %s\n", av_err2str(ret)); break; } if (frame->pts != AV_NOPTS_VALUE) frame->pts = av_rescale_q(frame->pts, st->link ->time_base, st->stream->time_base); ret = av_interleaved_write_uncoded_frame(st->mux, st->stream->index, frame); frame = NULL; if (ret < 0) { av_log(st->stream->codec, AV_LOG_ERROR, "Error writing frame: %s\n", av_err2str(ret)); goto fail; } } } } ret = 0; for (i = 0; i < nb_out_dev; i++) { st = &streams[i]; av_write_trailer(st->mux); } fail: av_frame_free(&frame); avfilter_graph_free(&in_graph); if (streams) { for (i = 0; i < nb_out_dev; i++) { st = &streams[i]; if (st->mux) { if (st->mux->pb) avio_closep(&st->mux->pb); avformat_free_context(st->mux); } } } av_freep(&streams); return ret < 0; }
/*
 * Debug tool: instantiate a single filter by name (argv[1], optional args
 * in argv[2]), fabricate bare input/output links for each pad, run its
 * query_formats callback (or the library default), and print the formats
 * it supports. Uses the legacy avfilter_open()/input_count API of old
 * libavfilter. Exits 1 on any lookup/open/init failure.
 */
int main(int argc, char **argv) { AVFilter *filter; AVFilterContext *filter_ctx; const char *filter_name; const char *filter_args = NULL; int i; av_log_set_level(AV_LOG_DEBUG); if (!argv[1]) { fprintf(stderr, "Missing filter name as argument\n"); return 1; } filter_name = argv[1]; if (argv[2]) filter_args = argv[2]; avfilter_register_all(); /* get a corresponding filter and open it */ if (!(filter = avfilter_get_by_name(filter_name))) { fprintf(stderr, "Unrecognized filter with name '%s'\n", filter_name); return 1; } if (avfilter_open(&filter_ctx, filter, NULL) < 0) { fprintf(stderr, "Impossible to open filter with name '%s'\n", filter_name); return 1; } if (avfilter_init_filter(filter_ctx, filter_args, NULL) < 0) { fprintf(stderr, "Impossible to init filter '%s' with arguments '%s'\n", filter_name, filter_args); return 1; } /* create a link for each of the input pads */ for (i = 0; i < filter_ctx->input_count; i++) { AVFilterLink *link = av_mallocz(sizeof(AVFilterLink)); link->type = filter_ctx->filter->inputs[i].type; filter_ctx->inputs[i] = link; } for (i = 0; i < filter_ctx->output_count; i++) { AVFilterLink *link = av_mallocz(sizeof(AVFilterLink)); link->type = filter_ctx->filter->outputs[i].type; filter_ctx->outputs[i] = link; } if (filter->query_formats) filter->query_formats(filter_ctx); else avfilter_default_query_formats(filter_ctx); print_formats(filter_ctx); avfilter_free(filter_ctx); fflush(stdout); return 0; }
/*
 * Class:     com_jpou_meditor_ffmpeg_trans
 * Method:    startTrans
 * Signature: (Ljava/lang/String;Ljava/lang/String;)Z
 *
 * JNI transcoding entry point (modeled on FFmpeg's transcoding example).
 *
 * Converts the Java input/output path strings, opens both files, builds the
 * per-stream filter graphs, then loops over input packets: streams with a
 * filter graph are decoded (video or audio decode function chosen by the
 * stream's codec type), filtered and re-encoded via
 * filter_encode_write_frame(); other streams are remuxed with rescaled
 * timestamps. After EOF the filters and encoders are flushed and the
 * trailer is written; returns JNI true on success, while the `end` label
 * performs full cleanup and returns JNI false.
 * NOTE(review): the success path returns without running the cleanup done
 * at `end`, and the GetStringUTFChars buffers are never released — confirm
 * whether these leaks are intentional.
 */
JNIEXPORT jboolean JNICALL Java_com_jpou_ffmpeg_Transcoding_startTrans (JNIEnv *env, jobject clazz, jstring input, jstring output) { int ret; AVPacket packet = { .data = NULL, .size = 0 }; AVFrame *frame = NULL; enum AVMediaType type; unsigned int stream_index; unsigned int i; int got_frame; int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *); char *input_str, *output_str; input_str = (*env)->GetStringUTFChars(env, input, 0); output_str = (*env)->GetStringUTFChars(env, output, 0); LOGI("input_str ~ : %s -------------------", input_str); LOGI("output_str ~ : %s -------------------", output_str); if ((input == NULL) || (output == NULL)) { LOGI("input_str or output_str is null"); return (jboolean)0; } av_register_all(); avfilter_register_all(); if ((ret = open_input_file(input_str)) < 0) { LOGI("open_input_file error"); goto end; } if ((ret = open_output_file(output_str)) < 0) { LOGI("open_output_file error"); goto end; } LOGI("init_filters ----------------"); if ((ret = init_filters()) < 0) { LOGI("init_filters error"); goto end; } /* read all packets */ LOGI("start av_read_frame ----------------"); while (1) { if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0) break; stream_index = packet.stream_index; type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type; LOGI("Demuxer gave frame of stream_index %u\n", stream_index); if (filter_ctx[stream_index].filter_graph) { av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n"); frame = av_frame_alloc(); if (!frame) { ret = AVERROR(ENOMEM); break; } av_packet_rescale_ts(&packet, ifmt_ctx->streams[stream_index]->time_base, ifmt_ctx->streams[stream_index]->codec->time_base); dec_func = (type == AVMEDIA_TYPE_VIDEO) ? 
avcodec_decode_video2 : avcodec_decode_audio4; ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame, &got_frame, &packet); if (ret < 0) { av_frame_free(&frame); LOGI("Decoding failed\n"); break; } if (got_frame) { frame->pts = av_frame_get_best_effort_timestamp(frame); ret = filter_encode_write_frame(frame, stream_index); av_frame_free(&frame); if (ret < 0) goto end; } else { av_frame_free(&frame); } } else { /* remux this frame without reencoding */ av_packet_rescale_ts(&packet, ifmt_ctx->streams[stream_index]->time_base, ofmt_ctx->streams[stream_index]->time_base); ret = av_interleaved_write_frame(ofmt_ctx, &packet); if (ret < 0) goto end; } av_free_packet(&packet); } /* flush filters and encoders */ for (i = 0; i < ifmt_ctx->nb_streams; i++) { /* flush filter */ if (!filter_ctx[i].filter_graph) continue; ret = filter_encode_write_frame(NULL, i); if (ret < 0) { LOGI("Flushing filter failed\n"); goto end; } /* flush encoder */ ret = flush_encoder(i); if (ret < 0) { LOGI("Flushing encoder failed\n"); goto end; } } av_write_trailer(ofmt_ctx); return (jboolean)1; end: av_free_packet(&packet); av_frame_free(&frame); for (i = 0; i < ifmt_ctx->nb_streams; i++) { avcodec_close(ifmt_ctx->streams[i]->codec); if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec) avcodec_close(ofmt_ctx->streams[i]->codec); if (filter_ctx && filter_ctx[i].filter_graph) avfilter_graph_free(&filter_ctx[i].filter_graph); } av_free(filter_ctx); avformat_close_input(&ifmt_ctx); if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) avio_closep(&ofmt_ctx->pb); avformat_free_context(ofmt_ctx); /** if (ret < 0) av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret)); */ return (jboolean)0; } #ifdef __cplusplus }
// Minimal test driver: register all filters, then exercise
// vf_next_query_format() once (defined elsewhere in the project).
int main(void)
{
    avfilter_register_all();
    vf_next_query_format();
    return 0;
}
/*
 * Filter format-query tool (modern libavfilter API variant): allocate a
 * graph, instantiate the filter named in argv[1] (optional init string in
 * argv[2]), attach bare AVFilterLinks to every input/output pad, run the
 * filter's query_formats callback (or ff_default_query_formats) and print
 * the result. The `fail` path frees the filter and graph; returns 1 on
 * any allocation/lookup/init failure.
 */
int main(int argc, char **argv) { const AVFilter *filter; AVFilterContext *filter_ctx; AVFilterGraph *graph_ctx; const char *filter_name; const char *filter_args = NULL; int i; int ret = 0; av_log_set_level(AV_LOG_DEBUG); if (argc < 2) { fprintf(stderr, "Missing filter name as argument\n"); return 1; } filter_name = argv[1]; if (argc > 2) filter_args = argv[2]; /* allocate graph */ graph_ctx = avfilter_graph_alloc(); if (!graph_ctx) return 1; avfilter_register_all(); /* get a corresponding filter and open it */ if (!(filter = avfilter_get_by_name(filter_name))) { fprintf(stderr, "Unrecognized filter with name '%s'\n", filter_name); return 1; } /* open filter and add it to the graph */ if (!(filter_ctx = avfilter_graph_alloc_filter(graph_ctx, filter, filter_name))) { fprintf(stderr, "Impossible to open filter with name '%s'\n", filter_name); return 1; } if (avfilter_init_str(filter_ctx, filter_args) < 0) { fprintf(stderr, "Impossible to init filter '%s' with arguments '%s'\n", filter_name, filter_args); return 1; } /* create a link for each of the input pads */ for (i = 0; i < filter_ctx->nb_inputs; i++) { AVFilterLink *link = av_mallocz(sizeof(AVFilterLink)); if (!link) { fprintf(stderr, "Unable to allocate memory for filter input link\n"); ret = 1; goto fail; } link->type = avfilter_pad_get_type(filter_ctx->input_pads, i); filter_ctx->inputs[i] = link; } for (i = 0; i < filter_ctx->nb_outputs; i++) { AVFilterLink *link = av_mallocz(sizeof(AVFilterLink)); if (!link) { fprintf(stderr, "Unable to allocate memory for filter output link\n"); ret = 1; goto fail; } link->type = avfilter_pad_get_type(filter_ctx->output_pads, i); filter_ctx->outputs[i] = link; } if (filter->query_formats) filter->query_formats(filter_ctx); else ff_default_query_formats(filter_ctx); print_formats(filter_ctx); fail: avfilter_free(filter_ctx); avfilter_graph_free(&graph_ctx); fflush(stdout); return ret; }
/*
 * Audio filtering example: decode the audio stream of argv[1], run it
 * through the graph described by filter_descr and print the filtered
 * frames (print_frame).
 *
 * Packet handling keeps `packet0` as the owner of the buffer while
 * `packet` advances through partially-consumed audio packets —
 * avcodec_decode_audio4 may consume fewer bytes than the whole packet, so
 * size/data are adjusted and the packet is only freed once fully consumed.
 * Cleanup at `end` frees the graph, decoder, input and frames; any error
 * other than EOF exits with status 1.
 */
int main(int argc, char **argv) { int ret; AVPacket packet0, packet; AVFrame *frame = av_frame_alloc(); AVFrame *filt_frame = av_frame_alloc(); int got_frame; if (!frame || !filt_frame) { perror("Could not allocate frame"); exit(1); } if (argc != 2) { fprintf(stderr, "Usage: %s file | %s\n", argv[0], player); exit(1); } avcodec_register_all(); av_register_all(); avfilter_register_all(); if ((ret = open_input_file(argv[1])) < 0) goto end; if ((ret = init_filters(filter_descr)) < 0) goto end; /* read all packets */ packet0.data = NULL; packet.data = NULL; while (1) { if (!packet0.data) { if ((ret = av_read_frame(fmt_ctx, &packet)) < 0) break; packet0 = packet; } if (packet.stream_index == audio_stream_index) { got_frame = 0; ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n"); continue; } packet.size -= ret; packet.data += ret; if (got_frame) { /* push the audio data from decoded frame into the filtergraph */ if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) { av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n"); break; } /* pull filtered audio from the filtergraph */ while (1) { ret = av_buffersink_get_frame(buffersink_ctx, filt_frame); if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) break; if (ret < 0) goto end; print_frame(filt_frame); av_frame_unref(filt_frame); } } if (packet.size <= 0) av_free_packet(&packet0); } else { /* discard non-wanted packets */ av_free_packet(&packet0); } } end: avfilter_graph_free(&filter_graph); avcodec_close(dec_ctx); avformat_close_input(&fmt_ctx); av_frame_free(&frame); av_frame_free(&filt_frame); if (ret < 0 && ret != AVERROR_EOF) { fprintf(stderr, "Error occurred: %s\n", av_err2str(ret)); exit(1); } exit(0); }
/*
 * Transcoding example entry point.
 *
 * Opens input argv[1] and output argv[2], builds per-stream filter graphs,
 * then loops: packets on filtered streams are decoded (video/audio decode
 * function picked by the stream's codec type), pushed through
 * filter_encode_write_frame() and re-encoded; other packets are remuxed
 * with rescaled timestamps. At EOF all filters and encoders are flushed
 * and the trailer is written; `end` releases every per-stream codec,
 * filter graph, the filter context array and both format contexts.
 * Returns 1 on error, 0 otherwise.
 */
int main(int argc, char **argv) { int ret; AVPacket packet = { .data = NULL, .size = 0 }; AVFrame *frame = NULL; enum AVMediaType type; unsigned int stream_index; unsigned int i; int got_frame; int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *); if (argc != 3) { av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]); return 1; } av_register_all(); avfilter_register_all(); if ((ret = open_input_file(argv[1])) < 0) goto end; if ((ret = open_output_file(argv[2])) < 0) goto end; if ((ret = init_filters()) < 0) goto end; /* read all packets */ while (1) { if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0) break; stream_index = packet.stream_index; type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type; av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", stream_index); if (filter_ctx[stream_index].filter_graph) { av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n"); frame = av_frame_alloc(); if (!frame) { ret = AVERROR(ENOMEM); break; } av_packet_rescale_ts(&packet, ifmt_ctx->streams[stream_index]->time_base, ifmt_ctx->streams[stream_index]->codec->time_base); dec_func = (type == AVMEDIA_TYPE_VIDEO) ? 
avcodec_decode_video2 : avcodec_decode_audio4; ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame, &got_frame, &packet); if (ret < 0) { av_frame_free(&frame); av_log(NULL, AV_LOG_ERROR, "Decoding failed\n"); break; } if (got_frame) { frame->pts = av_frame_get_best_effort_timestamp(frame); ret = filter_encode_write_frame(frame, stream_index); av_frame_free(&frame); if (ret < 0) goto end; } else { av_frame_free(&frame); } } else { /* remux this frame without reencoding */ av_packet_rescale_ts(&packet, ifmt_ctx->streams[stream_index]->time_base, ofmt_ctx->streams[stream_index]->time_base); ret = av_interleaved_write_frame(ofmt_ctx, &packet); if (ret < 0) goto end; } av_packet_unref(&packet); } /* flush filters and encoders */ for (i = 0; i < ifmt_ctx->nb_streams; i++) { /* flush filter */ if (!filter_ctx[i].filter_graph) continue; ret = filter_encode_write_frame(NULL, i); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n"); goto end; } /* flush encoder */ ret = flush_encoder(i); if (ret < 0) { av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n"); goto end; } } av_write_trailer(ofmt_ctx); end: av_packet_unref(&packet); av_frame_free(&frame); for (i = 0; i < ifmt_ctx->nb_streams; i++) { avcodec_close(ifmt_ctx->streams[i]->codec); if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec) avcodec_close(ofmt_ctx->streams[i]->codec); if (filter_ctx && filter_ctx[i].filter_graph) avfilter_graph_free(&filter_ctx[i].filter_graph); } av_free(filter_ctx); avformat_close_input(&ifmt_ctx); if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) avio_closep(&ofmt_ctx->pb); avformat_free_context(ofmt_ctx); if (ret < 0) av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret)); return ret ? 1 : 0; }
// // Initilaise all the software codecs // int an_init_codecs( void ) { sys_config info; av_register_all(); avfilter_register_all(); dvb_config_get( &info ); av_init_packet(&m_avpkt[ENVC]); m_avpkt[ENVC].data = NULL; m_avpkt[ENVC].size = 0; // 25 frames per sec, every 40 ms m_video_timestamp_delta = ((0.04*27000000.0)/300.0); // New audio packet sent every 24 ms m_audio_timestamp_delta = ((0.024*27000000.0)/300.0); AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MPEG2VIDEO); if(codec != NULL) { m_pC[ENVC] = avcodec_alloc_context3(codec); m_pC[ENVC]->bit_rate = info.video_bitrate; m_pC[ENVC]->bit_rate_tolerance = info.video_bitrate/10; m_pC[ENVC]->width = m_width; m_pC[ENVC]->height = m_height; m_pC[ENVC]->gop_size = 12; m_pC[ENVC]->max_b_frames = 0; m_pC[ENVC]->me_method = 5; m_pC[ENVC]->pix_fmt = AV_PIX_FMT_YUV420P; m_pC[ENVC]->time_base = (AVRational){1,25}; m_pC[ENVC]->ticks_per_frame = 2;// MPEG2 & 4 m_pC[ENVC]->profile = FF_PROFILE_MPEG2_MAIN; m_pC[ENVC]->thread_count = 1; if(avcodec_open2(m_pC[ENVC], codec, NULL)<0) { loggerf("Unable to open MPEG2 Codec"); return -1; } } else { loggerf("MPEG2 Codec not found"); return -1; } // // Audio // av_init_packet( &m_avpkt[ENAC] ); // // Must be set to 48000, 2 chan // // Size in bytes 2 channels, 16 bits 1/25 sec codec = avcodec_find_encoder(AV_CODEC_ID_MP2); if( codec != NULL ) { m_pC[ENAC] = avcodec_alloc_context3(codec); m_pC[ENAC]->bit_rate = info.audio_bitrate; m_pC[ENAC]->bit_rate_tolerance = 0; m_pC[ENAC]->bits_per_raw_sample = 16; m_pC[ENAC]->sample_rate = 48000; m_pC[ENAC]->channels = 2; m_pC[ENAC]->sample_fmt = AV_SAMPLE_FMT_S16; m_pC[ENAC]->channel_layout = AV_CH_LAYOUT_STEREO; m_pC[ENAC]->thread_count = 1; if(avcodec_open2(m_pC[ENAC], codec, NULL)<0 ) { loggerf("Unable to open MPEG1 codec"); return -1; } // 16 bit samples & stereo so multiply by 4 m_sound_capture_buf_size = m_pC[ENAC]->frame_size*4; an_set_audio_size(); } return 0; }