/**
 * Collect the private AVOptions of every registered muxer that can carry
 * a video stream.
 * @return map keyed by the muxer's short name; each value holds the
 *         options loaded from that muxer's priv_class.
 */
OptionArrayMap getOutputFormatOptions()
{
    OptionArrayMap result;
    for( AVOutputFormat* fmt = av_oformat_next( NULL ); fmt != NULL; fmt = av_oformat_next( fmt ) )
    {
        // keep only muxers able to hold a video track
        if( fmt->video_codec == AV_CODEC_ID_NONE )
            continue;
        // muxers without a priv_class expose no private options
        if( ! fmt->priv_class )
            continue;
        OptionArray formatOptions;
        // &fmt->priv_class acts as the fake AVClass** object av_opt expects
        loadOptions( formatOptions, (void*)&fmt->priv_class, 0 );
        result.insert( std::make_pair( std::string( fmt->name ), formatOptions ) );
    }
    return result;
}
/* Legacy libavformat helper used by av_opt to recurse into per-format
 * private options: given the AVClass returned by the previous call,
 * return the priv_class of the next registered (de)muxer that has one;
 * NULL when the lists are exhausted. First call (prev == NULL) yields
 * the avio/URL class on builds where it exists. */
static const AVClass *format_child_class_next(const AVClass *prev)
{
    AVInputFormat *ifmt = NULL;
    AVOutputFormat *ofmt = NULL;
    if (!prev)
#if !FF_API_OLD_AVIO
        return &ffio_url_class;
#else
        prev = (void *)&ifmt; // Dummy pointer;
#endif
    /* locate 'prev' among the demuxer classes... */
    while ((ifmt = av_iformat_next(ifmt)))
        if (ifmt->priv_class == prev)
            break;
    /* ...and, if not found there, among the muxer classes */
    if (!ifmt)
        while ((ofmt = av_oformat_next(ofmt)))
            if (ofmt->priv_class == prev)
                break;
    /* resume iteration right after the match: remaining demuxers first,
     * then remaining muxers, returning the next non-NULL priv_class */
    if (!ofmt)
        while (ifmt = av_iformat_next(ifmt))
            if (ifmt->priv_class)
                return ifmt->priv_class;
    while (ofmt = av_oformat_next(ofmt))
        if (ofmt->priv_class)
            return ofmt->priv_class;
    return NULL;
}
/* Variant of the av_opt child-class iterator: walk all demuxer then all
 * muxer priv_classes. 'prev' doubles as state — once the previously
 * returned class is located, prev is cleared to mark "found, resume
 * here", and the scan continues from that point. The avio/URL class is
 * emitted last on builds where it exists. Returns NULL when done. */
static const AVClass *format_child_class_next(const AVClass *prev)
{
    AVInputFormat *ifmt = NULL;
    AVOutputFormat *ofmt = NULL;
    /* find prev among demuxers; clearing prev signals a match */
    while (prev && (ifmt = av_iformat_next(ifmt)))
        if (ifmt->priv_class == prev){
            prev = NULL;
            break;
        }
    if (!prev)
        while (ifmt = av_iformat_next(ifmt))
            if (ifmt->priv_class)
                return ifmt->priv_class;
    /* same dance over the muxers */
    while (prev && (ofmt = av_oformat_next(ofmt)))
        if (ofmt->priv_class == prev){
            prev = NULL;
            break;
        }
    if (!prev)
        while (ofmt = av_oformat_next(ofmt))
            if (ofmt->priv_class)
                return ofmt->priv_class;
#if !FF_API_OLD_AVIO
    if (prev != &ffio_url_class)
        return &ffio_url_class;
#endif
    return NULL;
}
/* av_opt child-class iterator: on the first call (prev == NULL) return
 * the avio/URL class; afterwards locate 'prev' among the demuxer, then
 * the muxer priv_classes, and return the next non-NULL priv_class in
 * registration order (remaining demuxers first, then muxers). NULL when
 * the lists are exhausted. */
static const AVClass *format_child_class_next(const AVClass *prev)
{
    AVInputFormat *ifmt = NULL;
    AVOutputFormat *ofmt = NULL;
    if (!prev)
        return &ffio_url_class;
    /* find prev among demuxer classes */
    while ((ifmt = av_iformat_next(ifmt)))
        if (ifmt->priv_class == prev)
            break;
    /* not a demuxer class? look among the muxers */
    if (!ifmt)
        while ((ofmt = av_oformat_next(ofmt)))
            if (ofmt->priv_class == prev)
                break;
    /* resume right after the match */
    if (!ofmt)
        while (ifmt = av_iformat_next(ifmt))
            if (ifmt->priv_class)
                return ifmt->priv_class;
    while (ofmt = av_oformat_next(ofmt))
        if (ofmt->priv_class)
            return ofmt->priv_class;
    return NULL;
}
// Constructor: initialises all members to safe defaults (no stream/codec
// yet, YUV420P output, 25 fps, 400 kbps), registers all FFmpeg components,
// then caches the display/short names of every muxer that can carry video
// and of every video encoder so callers can enumerate them.
VideoFFmpegWriter::VideoFFmpegWriter()
	: FFmpeg()
	, FFmpegPreset()
	, _avformatOptions( NULL )
	, _sws_context( NULL )
	, _stream( NULL )
	, _codec( NULL )
	, _ofmt( NULL )
	, _error( IGNORE_FINISH )
	, _filename( "" )
	, _width( 0 )
	, _height( 0 )
	, _aspectRatio( 1 )
	, _out_pixelFormat( PIX_FMT_YUV420P )
	, _fps( 25.0f )
	, _formatName( "" )
	, _codecName( "" )
	, _videoPresetName( "" )
	, _bitRate( 400000 )
	, _bitRateTolerance( 4000 * 10000 )
	, _gopSize( 12 )
	, _bFrames( 0 )
	, _mbDecision( FF_MB_DECISION_SIMPLE )
{
	av_log_set_level( AV_LOG_WARNING );
	av_register_all();

	// cache "Long name (short)" / short-name pairs for every muxer that
	// supports a video codec
	AVOutputFormat* fmt = av_oformat_next( NULL );
	while( fmt )
	{
		if( fmt->video_codec != CODEC_ID_NONE )
		{
			if( fmt->long_name )
			{
				_formatsLongNames.push_back( std::string( fmt->long_name ) + std::string( " (" ) + std::string( fmt->name ) + std::string( ")" ) );
				_formatsShortNames.push_back( std::string( fmt->name ) );
			}
		}
		fmt = av_oformat_next( fmt );
	}

	// cache names of all video encoders; the encoder callback moved from
	// 'encode' to 'encode2' in lavc 53.34, hence the version switch
	AVCodec* c = av_codec_next( NULL );
	while( c )
	{
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 53, 34, 0 )
		if( c->type == AVMEDIA_TYPE_VIDEO && c->encode2 )
#else
		if( c->type == AVMEDIA_TYPE_VIDEO && c->encode )
#endif
		{
			if( c->long_name )
			{
				_codecsLongNames.push_back( std::string( c->long_name ) );
				_codecsShortNames.push_back( std::string( c->name ) );
			}
		}
		c = av_codec_next( c );
	}
}
// Older variant of the writer constructor (pre-AVMEDIA_TYPE API):
// initialises members to defaults, allocates one legacy codec context
// per media type, and builds format/codec name lists with a leading
// "default" placeholder entry at index 0.
VideoFFmpegWriter::VideoFFmpegWriter()
	: _avformatOptions( 0 )
	, _sws_context( NULL )
	, _stream( 0 )
	, _error( IGNORE_FINISH )
	, _filename( "" )
	, _width( 0 )
	, _height( 0 )
	, _aspectRatio( 1 )
	, _out_pixelFormat( PIX_FMT_YUV420P )
	, _fps( 25.0f )
	, _format( "default" )
	, _codec( "default" )
	, _bitRate( 400000 )
	, _bitRateTolerance( 4000 * 10000 )
	, _gopSize( 12 )
	, _bFrames( 0 )
	, _mbDecision( FF_MB_DECISION_SIMPLE )
{
	av_log_set_level( AV_LOG_WARNING );
	av_register_all();

	// one default codec context per media type (legacy lavc API)
	for( int i = 0; i < CODEC_TYPE_NB; ++i )
		_avctxOptions[i] = avcodec_alloc_context2( CodecType( i ) );

	// index 0 is the "default" placeholder selectable in the UI
	_formatsLongNames.push_back( std::string( "default" ) );
	_formatsShortNames.push_back( std::string( "default" ) );
	AVOutputFormat* fmt = av_oformat_next( NULL );
	while( fmt )
	{
		if( fmt->video_codec != CODEC_ID_NONE )
		{
			if( fmt->long_name )
			{
				_formatsLongNames.push_back( std::string( fmt->long_name ) + std::string( " (" ) + std::string( fmt->name ) + std::string( ")" ) );
				_formatsShortNames.push_back( std::string( fmt->name ) );
			}
		}
		fmt = av_oformat_next( fmt );
	}

	// same placeholder convention for the encoder lists
	_codecsLongNames.push_back( std::string( "default" ) );
	_codecsShortNames.push_back( std::string( "default" ) );
	AVCodec* c = av_codec_next( NULL );
	while( c )
	{
		if( c->type == CODEC_TYPE_VIDEO && c->encode )
		{
			if( c->long_name )
			{
				_codecsLongNames.push_back( std::string( c->long_name ) );
				_codecsShortNames.push_back( std::string( c->name ) );
			}
		}
		c = av_codec_next( c );
	}
}
/*
 * Dump every registered muxer as a {"name","extensions"} pair on stderr,
 * one per line (handy for generating a static lookup table).
 *
 * Fix: AVOutputFormat.extensions (and in principle .name) may be NULL
 * for formats without a default extension; passing NULL to printf's %s
 * is undefined behaviour. Substitute an empty string instead.
 */
int main(int argc, const char* argv[])
{
    AVOutputFormat *oformat = NULL;

    av_register_all();
    while ((oformat = av_oformat_next(oformat)) != NULL) {
        fprintf(stderr, "{\"%s\",\"%s\"},\n",
                oformat->name ? oformat->name : "",
                oformat->extensions ? oformat->extensions : "");
    }
    return 0;
}
// Returns a lazily built, cached list of MIME types gathered from the
// registered FFmpeg formats (comma-separated mime_type fields are split
// and trimmed). NOTE(review): despite the function's name, this walks
// the *output* (muxer) list via av_oformat_next — confirm whether
// av_iformat_next (demuxers) was intended here.
const QStringList& supportedInputMimeTypes()
{
    static QStringList mimes;
    if (!mimes.isEmpty())
        return mimes;
    av_register_all(); // MUST register all input/output formats
    AVOutputFormat *i = av_oformat_next(NULL);
    QStringList list;
    while (i) {
        // mime_type may hold several comma-separated entries
        list << QString(i->mime_type).split(QChar(','), QString::SkipEmptyParts);
        i = av_oformat_next(i);
    }
    foreach (const QString& v, list) {
        mimes.append(v.trimmed());
    }
static const AVOption *opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags) { AVFormatContext *s = obj; AVInputFormat *ifmt = NULL; AVOutputFormat *ofmt = NULL; if (s->priv_data) { if ((s->iformat && !s->iformat->priv_class) || (s->oformat && !s->oformat->priv_class)) return NULL; return av_opt_find(s->priv_data, name, unit, opt_flags, search_flags); } while ((ifmt = av_iformat_next(ifmt))) { const AVOption *o; if (ifmt->priv_class && (o = av_opt_find(&ifmt->priv_class, name, unit, opt_flags, search_flags))) return o; } while ((ofmt = av_oformat_next(ofmt))) { const AVOption *o; if (ofmt->priv_class && (o = av_opt_find(&ofmt->priv_class, name, unit, opt_flags, search_flags))) return o; } return NULL; }
/*
 * JNI bridge: given a jlong holding an AVOutputFormat pointer (or 0 for
 * the list head), return the next registered muxer as a jlong (0 at end).
 *
 * Fix: the original punned through incompatible pointer types
 * (*(AVOutputFormat**)&pointer and *(jlong*)&f), which violates strict
 * aliasing and reads/writes past the pointer object on 32-bit targets
 * where sizeof(void*) < sizeof(jlong). Plain integer<->pointer casts are
 * the well-defined JNI idiom and preserve the value exactly.
 */
JNIEXPORT jlong JNICALL Java_bits_jav_format_JavOutputFormat_nNextOutputFormat
(JNIEnv* env, jclass clazz, jlong pointer)
{
    AVOutputFormat* f = (AVOutputFormat*)pointer;
    f = av_oformat_next( f );
    return (jlong)f;
}
const struct ff_format_desc *ff_format_supported() { AVOutputFormat *output_format = NULL; struct ff_format_desc *desc = NULL; struct ff_format_desc *current = NULL; while ((output_format = av_oformat_next(output_format))) { struct ff_format_desc *d; if (is_output_device(output_format->priv_class)) continue; d = av_mallocz(sizeof(struct ff_format_desc)); d->audio_codec = output_format->audio_codec; d->video_codec = output_format->video_codec; d->name = output_format->name; d->long_name = output_format->long_name; d->mime_type = output_format->mime_type; d->extensions = output_format->extensions; d->codec_tags = output_format->codec_tag; if (current != NULL) { current->next = d; current = d; } else { desc = current = d; } } return desc; }
AVOutputFormat *av_guess_format(const char *short_name, const char *filename, const char *mime_type) { AVOutputFormat *fmt = NULL, *fmt_found; int score_max, score; /* specific test for image sequences */ #if CONFIG_IMAGE2_MUXER if (!short_name && filename && av_filename_number_test(filename) && ff_guess_image2_codec(filename) != AV_CODEC_ID_NONE) { return av_guess_format("image2", NULL, NULL); } #endif /* Find the proper file type. */ fmt_found = NULL; score_max = 0; while ((fmt = av_oformat_next(fmt))) { score = 0; if (fmt->name && short_name && av_match_name(short_name, fmt->name)) score += 100; if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type)) score += 10; if (filename && fmt->extensions && av_match_ext(filename, fmt->extensions)) { score += 5; } if (score > score_max) { score_max = score; fmt_found = fmt; } } return fmt_found; }
/* Legacy ffmpeg CLI fallback option handler: try to apply "-opt arg" to
 * every known option context in turn (per-media-type codec contexts,
 * format context, swscale context, then 'a'/'v'/'s'-prefixed shorthands).
 * If still unmatched, accept it silently when some codec or muxer
 * priv_class declares it (it will be applied once the codec/format is
 * known); otherwise abort. The name/value is finally recorded in the
 * opt_names/opt_values arrays for later re-application. Returns 0, or
 * exits on error. */
int opt_default(const char *opt, const char *arg){
    int type;
    int ret= 0;
    const AVOption *o= NULL;
    int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0};

    /* try the per-media-type codec contexts first */
    for(type=0; *avcodec_opts && type<AVMEDIA_TYPE_NB && ret>= 0; type++){
        const AVOption *o2 = av_find_opt(avcodec_opts[0], opt, NULL, opt_types[type], opt_types[type]);
        if(o2)
            ret = av_set_string3(avcodec_opts[type], opt, arg, 1, &o);
    }
    if(!o && avformat_opts)
        ret = av_set_string3(avformat_opts, opt, arg, 1, &o);
    if(!o && sws_opts)
        ret = av_set_string3(sws_opts, opt, arg, 1, &o);
    if(!o){
        /* 'a'/'v'/'s' prefix targets one media type, e.g. -vb == -b for video */
        if (opt[0] == 'a' && avcodec_opts[AVMEDIA_TYPE_AUDIO])
            ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_AUDIO], opt+1, arg, 1, &o);
        else if(opt[0] == 'v' && avcodec_opts[AVMEDIA_TYPE_VIDEO])
            ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_VIDEO], opt+1, arg, 1, &o);
        else if(opt[0] == 's' && avcodec_opts[AVMEDIA_TYPE_SUBTITLE])
            ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], opt+1, arg, 1, &o);
    }
    if (o && ret < 0) {
        fprintf(stderr, "Invalid value '%s' for option '%s'\n", arg, opt);
        exit(1);
    }
    if (!o) {
        /* unmatched: accept it if any codec or muxer private class knows it */
        AVCodec *p = NULL;
        AVOutputFormat *oformat = NULL;
        while ((p=av_codec_next(p))){
            AVClass *c= p->priv_class;
            if(c && av_find_opt(&c, opt, NULL, 0, 0))
                break;
        }
        if (!p) {
            while ((oformat = av_oformat_next(oformat))) {
                const AVClass *c = oformat->priv_class;
                if (c && av_find_opt(&c, opt, NULL, 0, 0))
                    break;
            }
        }
        if(!p && !oformat){
            fprintf(stderr, "Unrecognized option '%s'\n", opt);
            exit(1);
        }
    }

//    av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL));

    //FIXME we should always use avcodec_opts, ... for storing options so there will not be any need to keep track of what i set over this
    opt_values= av_realloc(opt_values, sizeof(void*)*(opt_name_count+1));
    opt_values[opt_name_count]= o ? NULL : arg;
    opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1));
    opt_names[opt_name_count++]= o ? o->name : opt;

    if ((*avcodec_opts && avcodec_opts[0]->debug) || (avformat_opts && avformat_opts->debug))
        av_log_set_level(AV_LOG_DEBUG);
    return 0;
}
/**
 * Counts the muxers currently registered with libavformat.
 * @return number of registered output formats.
 */
int32_t MuxerFormat::getNumFormats()
{
  Global::init();
  int32_t count = 0;
  AVOutputFormat* fmt = NULL;
  while ((fmt = av_oformat_next(fmt)) != 0)
    ++count;
  return count;
}
/* Another revision of the av_opt child-class iterator. Demuxer classes
 * are emitted first, then muxer classes. The demuxer-resume condition
 * "(prev && ifmt) || (!prev)" covers both entry cases: prev was found
 * among demuxers (continue from there), or prev was NULL (start from the
 * beginning). If prev belonged to a muxer, the first loop leaves ifmt
 * NULL and only the muxer list is resumed. Returns NULL when done. */
static const AVClass *format_child_class_next(const AVClass *prev)
{
    AVInputFormat *ifmt = NULL;
    AVOutputFormat *ofmt = NULL;
    /* locate prev among demuxer classes */
    while (prev && (ifmt = av_iformat_next(ifmt)))
        if (ifmt->priv_class == prev)
            break;
    /* resume demuxer iteration when prev was a demuxer class or NULL */
    if ((prev && ifmt) || (!prev))
        while (ifmt = av_iformat_next(ifmt))
            if (ifmt->priv_class)
                return ifmt->priv_class;
    /* locate prev among muxer classes, then emit the remaining ones */
    while (prev && (ofmt = av_oformat_next(ofmt)))
        if (ofmt->priv_class == prev)
            break;
    while (ofmt = av_oformat_next(ofmt))
        if (ofmt->priv_class)
            return ofmt->priv_class;
    return NULL;
}
/*! Returns the short names of all registered output formats,
    in registration order, without duplicates. */
QStringList MediaSink::supportedFormats()
{
    QStringList names;
    for (AVOutputFormat *fmt = av_oformat_next(NULL);
         fmt;
         fmt = av_oformat_next(fmt)) {
        const QString name(fmt->name);
        if (!names.contains(name))
            names.append(name);
    }
    return names;
}
/**
 * Fetches the muxer at the given position in libavformat's registration
 * order and wraps it in a MuxerFormat.
 * @param index zero-based position of the desired format.
 * @return the wrapped format, or 0 if index is out of range.
 */
MuxerFormat* MuxerFormat::getFormat(int32_t index)
{
  Global::init();
  int32_t pos = 0;
  AVOutputFormat* fmt = NULL;
  while ((fmt = av_oformat_next(fmt)) != 0)
  {
    if (pos == index)
      return MuxerFormat::make(fmt);
    ++pos;
  }
  return 0;
}
/* Print a table of all registered file formats with D/E flags for
 * demuxing/muxing support. Names are emitted in sorted order without an
 * explicit sort: each pass of the outer loop scans both lists for the
 * alphabetically smallest name strictly greater than the last one
 * printed (O(n^2) total, but allocation-free). Always returns 0. */
int opt_formats(const char *opt, const char *arg)
{
    AVInputFormat *ifmt=NULL;
    AVOutputFormat *ofmt=NULL;
    const char *last_name;
    printf(
        "File formats:\n"
        " D. = Demuxing supported\n"
        " .E = Muxing supported\n"
        " --\n");
    last_name= "000"; /* sorts before any real format name */
    for(;;){
        int decode=0;
        int encode=0;
        const char *name=NULL;
        const char *long_name=NULL;

        /* candidate among muxers */
        while((ofmt= av_oformat_next(ofmt))) {
            if((name == NULL || strcmp(ofmt->name, name)<0) &&
                strcmp(ofmt->name, last_name)>0){
                name= ofmt->name;
                long_name= ofmt->long_name;
                encode=1;
            }
        }
        /* a smaller demuxer name supersedes (and clears the E flag);
         * an equal name just marks demuxing support */
        while((ifmt= av_iformat_next(ifmt))) {
            if((name == NULL || strcmp(ifmt->name, name)<0) &&
                strcmp(ifmt->name, last_name)>0){
                name= ifmt->name;
                long_name= ifmt->long_name;
                encode=0;
            }
            if(name && strcmp(ifmt->name, name)==0)
                decode=1;
        }
        if(name==NULL)
            break;
        last_name= name;

        printf(
            " %s%s %-15s %s\n",
            decode ? "D":" ",
            encode ? "E":" ",
            name,
            long_name ? long_name:" ");
    }
    return 0;
}
// Lazily builds static lists of FFmpeg output-format names and file
// extensions (both fields may hold comma-separated values, hence the
// splits). The visible part of this function ends inside the first
// foreach; the remainder lies outside this chunk.
static void getFFmpegOutputFormats(QStringList* formats, QStringList* extensions)
{
    static QStringList exts;
    static QStringList fmts;
    if (exts.isEmpty() && fmts.isEmpty()) {
        av_register_all(); // MUST register all input/output formats
        AVOutputFormat *o = NULL;
        QStringList e, f;
        while ((o = av_oformat_next(o))) {
            if (o->extensions)
                e << QString::fromLatin1(o->extensions).split(QLatin1Char(','), QString::SkipEmptyParts);
            if (o->name)
                f << QString::fromLatin1(o->name).split(QLatin1Char(','), QString::SkipEmptyParts);
        }
        foreach (const QString& v, e) {
            exts.append(v.trimmed());
        }
/**
 * List the registered muxers able to carry a video stream.
 * @return (short name, long name) pairs, in registration order;
 *         entries lacking both names are dropped, a missing name
 *         becomes the empty string.
 */
NamesArray getFormatsNames()
{
    NamesArray names;
    for( AVOutputFormat* fmt = av_oformat_next( NULL ); fmt; fmt = av_oformat_next( fmt ) )
    {
        // skip muxers that cannot hold video
        if( fmt->video_codec == AV_CODEC_ID_NONE )
            continue;
        // skip entries with no usable name at all
        if( ! fmt->name && ! fmt->long_name )
            continue;
        const std::string shortName( fmt->name ? fmt->name : "" );
        const std::string longName( fmt->long_name ? fmt->long_name : "" );
        names.push_back( std::make_pair( shortName, longName ) );
    }
    return names;
}
/* Shared iterator: starting after 'prev', advance through the registered
 * muxers (output != 0) or demuxers and return the first whose priv_class
 * category equals c1 or c2; NULL when the list is exhausted. Formats
 * without a priv_class are skipped (category stays unmatched, so the
 * loop continues). Presumably used to enumerate libavdevice entries by
 * their device category — confirm against callers. */
static void *av_device_next(void *prev, int output,
                            AVClassCategory c1, AVClassCategory c2)
{
    const AVClass *pc;
    AVClassCategory category = AV_CLASS_CATEGORY_NA;
    do {
        if (output) {
            if (!(prev = av_oformat_next(prev)))
                break;
            pc = ((AVOutputFormat *)prev)->priv_class;
        } else {
            if (!(prev = av_iformat_next(prev)))
                break;
            pc = ((AVInputFormat *)prev)->priv_class;
        }
        if (!pc)
            continue;
        category = pc->category;
    } while (category != c1 && category != c2);
    return prev; /* NULL when the break above fired at end of list */
}
/**
 * com.leixiaohua1020.sffmpegandroidhelloworld.MainActivity.avformatinfo()
 * AVFormat Support Information
 *
 * Builds one "[In ][      name]" / "[Out][      name]" line per registered
 * demuxer/muxer and returns the result as a Java string.
 *
 * Fix: the original appended with sprintf(info, "%s...", info, ...) —
 * passing the destination buffer as its own source argument is undefined
 * behaviour (and O(n^2)). Append at a running offset with snprintf
 * instead, which also bounds-checks the fixed-size buffer.
 */
JNIEXPORT jstring Java_com_leixiaohua1020_sffmpegandroidhelloworld_MainActivity_avformatinfo(JNIEnv *env, jobject obj){
    char info[40000] = { 0 };
    size_t used = 0;
    AVInputFormat *if_temp;
    AVOutputFormat *of_temp;

    av_register_all();

    //Input
    for (if_temp = av_iformat_next(NULL); if_temp != NULL; if_temp = if_temp->next) {
        int n = snprintf(info + used, sizeof(info) - used, "[In ][%10s]\n", if_temp->name);
        if (n < 0 || (size_t)n >= sizeof(info) - used)
            break; /* buffer full: stop rather than overflow */
        used += (size_t)n;
    }
    //Output
    for (of_temp = av_oformat_next(NULL); of_temp != NULL; of_temp = of_temp->next) {
        int n = snprintf(info + used, sizeof(info) - used, "[Out][%10s]\n", of_temp->name);
        if (n < 0 || (size_t)n >= sizeof(info) - used)
            break;
        used += (size_t)n;
    }
    //LOGE("%s", info);
    return (*env)->NewStringUTF(env, info);
}
/*
 * JNI: list every registered demuxer and muxer, one "[In ][      name]"
 * or "[Out][      name]" line each, returned as a Java string.
 *
 * Fix: the original appended with sprintf(info, "%s...", info, ...) —
 * passing the destination buffer as its own source argument is undefined
 * behaviour (and O(n^2)). Append at a running offset with snprintf
 * instead, which also bounds-checks the fixed-size buffer.
 */
JNIEXPORT jstring JNICALL Java_com_pl_asndktest_jni_NdkTest_avformatinfo
  (JNIEnv * env, jobject thiz)
{
    char info[40000] = { 0 };
    size_t used = 0;
    AVInputFormat *if_temp;
    AVOutputFormat *of_temp;

    av_register_all();

    //Input
    for (if_temp = av_iformat_next(NULL); if_temp != NULL; if_temp = if_temp->next) {
        int n = snprintf(info + used, sizeof(info) - used, "[In ][%10s]\n", if_temp->name);
        if (n < 0 || (size_t)n >= sizeof(info) - used)
            break; /* buffer full: stop rather than overflow */
        used += (size_t)n;
    }
    //Output
    for (of_temp = av_oformat_next(NULL); of_temp != NULL; of_temp = of_temp->next) {
        int n = snprintf(info + used, sizeof(info) - used, "[Out][%10s]\n", of_temp->name);
        if (n < 0 || (size_t)n >= sizeof(info) - used)
            break;
        used += (size_t)n;
    }
    //LOGE("%s", info);
    return (*env)->NewStringUTF(env, info);
}
/* MLT metadata callback: load the YAML description for the given
 * avformat service and, for producers/consumers, append the relevant
 * AVOptions (format context, per-(de)muxer priv_classes, codec context,
 * per-codec priv_classes) to its "parameters" list. Returns the parsed
 * properties, or NULL for unsupported service types / missing files. */
static mlt_properties avformat_metadata( mlt_service_type type, const char *id, void *data )
{
	char file[ PATH_MAX ];
	const char *service_type = NULL;
	mlt_properties result = NULL;

	// Convert the service type to a string.
	switch ( type )
	{
		case consumer_type:
			service_type = "consumer";
			break;
		case filter_type:
			service_type = "filter";
			break;
		case producer_type:
			service_type = "producer";
			break;
		case transition_type:
			service_type = "transition";
			break;
		default:
			return NULL;
	}
	// Load the yaml file
	snprintf( file, PATH_MAX, "%s/avformat/%s_%s.yml", mlt_environment( "MLT_DATA" ), service_type, id );
	result = mlt_properties_parse_yaml( file );
	if ( result && ( type == consumer_type || type == producer_type ) )
	{
		// Annotate the yaml properties with AVOptions.
		mlt_properties params = (mlt_properties) mlt_properties_get_data( result, "parameters", NULL );
		AVFormatContext *avformat = avformat_alloc_context();
#if LIBAVCODEC_VERSION_INT > ((53<<16)+(8<<8)+0)
		AVCodecContext *avcodec = avcodec_alloc_context3( NULL );
#else
		AVCodecContext *avcodec = avcodec_alloc_context();
#endif
		// consumers encode, producers decode
		int flags = ( type == consumer_type )? AV_OPT_FLAG_ENCODING_PARAM : AV_OPT_FLAG_DECODING_PARAM;

		add_parameters( params, avformat, flags, NULL, NULL );
#if LIBAVFORMAT_VERSION_MAJOR >= 53
		avformat_init();
		// per-format private options; &f->priv_class is the fake object
		// form expected by the AVOption API
		if ( type == producer_type )
		{
			AVInputFormat *f = NULL;
			while ( ( f = av_iformat_next( f ) ) )
				if ( f->priv_class )
					add_parameters( params, &f->priv_class, flags, NULL, f->name );
		}
		else
		{
			AVOutputFormat *f = NULL;
			while ( ( f = av_oformat_next( f ) ) )
				if ( f->priv_class )
					add_parameters( params, &f->priv_class, flags, NULL, f->name );
		}
#endif
		add_parameters( params, avcodec, flags, NULL, NULL );
#if LIBAVCODEC_VERSION_MAJOR >= 53
		// per-codec private options
		AVCodec *c = NULL;
		while ( ( c = av_codec_next( c ) ) )
			if ( c->priv_class )
				add_parameters( params, &c->priv_class, flags, NULL, c->name );
#endif
		av_free( avformat );
		av_free( avcodec );
	}
	return result;
}
// ######################################################################
// Construct an encoder: resolve the output format named by 'codecname'
// (or list the available ones and abort when "List" is given), derive the
// output filename from 'fname' plus the format's first extension, set up
// the codec context (bitrate, size, framerate, GOP, B-frames), open the
// encoder, and either initialise a full AVFormatContext+stream or open a
// raw output file, depending on useFormatContext. Heavy #if usage keeps
// this building against many historical ffmpeg API revisions.
FfmpegEncoder::FfmpegEncoder(const std::string& fname,
                             const std::string& codecname,
                             const int bitrate,
                             const int framerate,
                             const int frameratebase,
                             const Dims& dims,
                             const int bufsz,
                             const bool useFormatContext)
  :
  itsFile(0),
  itsContext(),
  itsFormatContext(0),
  itsFrameNumber(0),
  itsOutbufSize(bufsz),
  itsFrameSizeRange(),
  itsUseFormatContext(useFormatContext)
{
  GVX_TRACE(__PRETTY_FUNCTION__);

  // no need to guard these functions for being called multiple times;
  // they all have internal guards
  av_register_all();
  avcodec_init();
  avcodec_register_all();

  AVOutputFormat* oformat = NULL;

#if LIBAVCODEC_VERSION_MAJOR >= 53 && LIBAVCODEC_VERSION_MINOR >= 21
  if (codecname.compare("List") == 0)
    { // list available codecs
      LINFO("##### Available output codecs (not all may work for video):");
      AVOutputFormat* f = av_oformat_next(NULL);
      while(f)
        {
          LINFO("%s: %s %d", f->name, f->long_name, f->flags);
          f = av_oformat_next(f);
        }
      LFATAL("Please select a codec from this list");
    }
  else
    { // format is given
      // no av_find_output_format()?? let's do it by hand...
      AVOutputFormat* f = av_oformat_next(NULL);
      while(f)
        {
          if (codecname.compare(f->name) == 0)
            { oformat = f; break; }
          f = av_oformat_next(f);
        }
    }
#else
  if (codecname.compare("List") == 0)
    { // list available codecs
      LINFO("##### Available output codecs (not all may work for video):");
      for(AVOutputFormat* f = first_oformat; f != NULL; f = f->next)
        LINFO("%s: %s %d", f->name, f->long_name, f->flags);
      LFATAL("Please select a codec from this list");
    }
  else
    { // format is given
      // no av_find_output_format()?? let's do it by hand...
      for(AVOutputFormat* f = first_oformat; f != NULL; f = f->next)
        if (codecname.compare(f->name) == 0)
          { oformat = f; break; }
    }
#endif

  if (oformat == 0)
    LFATAL("No such video codec '%s';\n"
           "try re-running with --output-codec=List to see a list\n"
           "of available codecs", codecname.c_str());

  // take the first comma-separated extension advertised by the format
  char ext[100];
  ext[0] = '.';
  uint i;
  for (i = 0; i < strlen(oformat->extensions); i ++)
    if (oformat->extensions[i] == ',') break;
    else ext[i+1] = oformat->extensions[i];
  ext[i+1] = '\0';
  LINFO("Using output format '%s' (%s), extension %s", oformat->name,
        oformat->long_name, ext);

  // strip any existing extension from fname and append the format's own
  std::string oname(fname);
  std::string::size_type idx1 = oname.rfind('/', oname.npos);
  std::string::size_type idx2 = oname.rfind('.', oname.npos);
  // must check that idx2 is valid; otherwise if we do
  // oname.erase(idx2) with e.g. idx2==npos then we will get a
  // std::out_of_range exception
  if (idx2 < oname.size() && idx2 > idx1)
    oname.erase(idx2, oname.npos);
  oname.append(ext);
  LINFO("Output file: %s", oname.c_str());

  if (itsUseFormatContext)
    {
#ifdef INVT_FFMPEG_HAS_FORMATCONTEXT_FUNCTIONS
      LINFO("Using FormatContext to output data");
#ifdef AVMEDIA_TYPE_VIDEO
      itsFormatContext = avformat_alloc_context();
#else
      itsFormatContext = av_alloc_format_context();
#endif
      if (!itsFormatContext)
        LFATAL("Cannot allocate format context");
      itsFormatContext->oformat = oformat;

      itsAVStream = av_new_stream(itsFormatContext, 0);
      if (!itsAVStream)
        LFATAL("Can not allocate AVStream");
#else
      LFATAL("Need a new version of ffmpeg libs for this option");
      itsFormatContext = NULL;
#endif
    }

  AVCodec* const codec = avcodec_find_encoder(oformat->video_codec);
  if (codec == NULL)
    LFATAL("codec not found");

#if defined(INVT_FFMPEG_HAS_DEFAULTS_FUNCTIONS)
  avcodec_get_context_defaults(&itsContext);
#else
  {
    AVCodecContext* const tmp = avcodec_alloc_context();
    memcpy(&itsContext, tmp, sizeof(AVCodecContext));
    free(tmp);
  }
#endif

  itsContext.bit_rate = bitrate;

  // Be sure to set itsContext.pix_fmt -- it may occasionally
  // appear to work to leave pix_fmt unset, because the value we want,
  // PIX_FMT_YUV420P, has the enum value of 0, so if the uninitialized
  // memory for pix_fmt happens to have the value 0, then we'll slip
  // through without setting it explicitly.
  itsContext.pix_fmt = PIX_FMT_YUV420P;

  /* resolution must be a multiple of two */
  itsContext.width = dims.w();
  itsContext.height = dims.h();

#if defined(INVT_FFMPEG_AVCODECCONTEXT_HAS_TIME_BASE)
  AVRational time_base = { frameratebase, framerate };
  itsContext.time_base = time_base;
  const int frb = frameratebase;
#elif LIBAVCODEC_VERSION_INT >= 0x000406 && LIBAVCODEC_BUILD > 4665
  itsContext.frame_rate = framerate;
  const int frb = frameratebase;
  itsContext.frame_rate_base = frb;
#else
  itsContext.frame_rate = framerate;
  const int frb = FRAME_RATE_BASE;
#endif

  itsContext.gop_size = 10; /* emit one intra frame every ten frames */

  // only the MPEG family gets B-frames here
  if(codec->id != CODEC_ID_MPEG4 &&
     codec->id != CODEC_ID_MPEG1VIDEO &&
     codec->id != CODEC_ID_MPEG2VIDEO)
    itsContext.max_b_frames = 0;
  else
    itsContext.max_b_frames = 1;

  itsFrameNumber = 0;

  LINFO("using max_b_frames=%i bitrate=%u width=%u height=%u framerate=%u frameratebase=%u",
        itsContext.max_b_frames, itsContext.bit_rate,
        itsContext.width, itsContext.height,
        framerate, frb);

  if (avcodec_open(&itsContext, codec) < 0)
    LFATAL("could not open codec\n");

  if (itsUseFormatContext)
    {
#ifdef INVT_FFMPEG_HAS_FORMATCONTEXT_FUNCTIONS
      // mirror the codec parameters into the muxer's stream context
      AVCodecContext *c = itsAVStream->codec;
      c->codec_id = itsContext.codec_id;
#ifdef CODEC_TYPE_VIDEO
      c->codec_type = CODEC_TYPE_VIDEO;
#else
#ifdef AVMEDIA_TYPE_VIDEO
      c->codec_type = AVMEDIA_TYPE_VIDEO;
#endif
#endif

      /* put sample parameters */
      c->bit_rate = itsContext.bit_rate;
      /* resolution must be a multiple of two */
      c->width = itsContext.width;
      c->height = itsContext.height;
      /* time base: this is the fundamental unit of time (in seconds) in
         terms of which frame timestamps are represented. for fixed-fps
         content, timebase should be 1/framerate and timestamp increments
         should be identically 1. */
#if defined(INVT_FFMPEG_AVCODECCONTEXT_HAS_TIME_BASE)
      c->time_base.den = itsContext.time_base.den;
      c->time_base.num = itsContext.time_base.num;
#endif
      c->gop_size = 12; /* emit one intra frame every twelve frames at most */
      c->pix_fmt = itsContext.pix_fmt;

      /* set the output parameters (must be done even if no parameters). */
      if (av_set_parameters(itsFormatContext, NULL) < 0)
        LFATAL("Invalid output format parameters");

#if defined(INVT_FFMPEG_URL_OPEN_FUNC_TAKES_SINGLE_POINTER)
#if defined(INVT_FFMPEG_AVFORMATCONTEXT_BYTEIO_ISPOINTER)
      if (url_fopen(itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
        LFATAL("Could not open '%s'", oname.c_str());
#else
      if (url_fopen(&itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
        LFATAL("Could not open '%s'", oname.c_str());
#endif
#else
#if defined(INVT_FFMPEG_AVFORMATCONTEXT_BYTEIO_ISPOINTER)
      if (url_fopen(&itsFormatContext->pb, oname.c_str(), URL_WRONLY) < 0)
        LFATAL("Could not open '%s'", oname.c_str());
#else
      LFATAL("Could not open '%s' ffmpeg version mismatch", oname.c_str());
#endif
#endif //INVT_FFMPEG_URL_OPEN_FUNC_TAKES_SINGLE_POINTER)

      /* write the stream header, if any */
      av_write_header(itsFormatContext);
#else
      LFATAL("Need a new version of FFMPEG for this option");
#endif
    }
  else
    {
      // raw elementary stream straight to a file
      itsFile = fopen(oname.c_str(), "w");
      if (itsFile==NULL)
        LFATAL("could not open file! %s", oname.c_str());
    }

  LINFO("EnCoder Inited");
}
PageOutput::PageOutput(MainWindow* main_window) : QWidget(main_window->centralWidget()) { m_main_window = main_window; m_old_container = (enum_container) 0; m_old_container_av = 0; // main codecs // (initializer lists should use explicit types for Clang) m_containers = { ContainerData({"Matroska (MKV)", "matroska", QStringList({"mkv"}), tr("%1 files", "This appears in the file dialog, e.g. 'MP4 files'").arg("Matroska") + " (*.mkv)", {VIDEO_CODEC_H264, VIDEO_CODEC_VP8, VIDEO_CODEC_THEORA}, {AUDIO_CODEC_VORBIS, AUDIO_CODEC_MP3, AUDIO_CODEC_AAC, AUDIO_CODEC_UNCOMPRESSED}}), ContainerData({"MP4", "mp4", QStringList({"mp4"}), tr("%1 files", "This appears in the file dialog, e.g. 'MP4 files'").arg("MP4") + " (*.mp4)", {VIDEO_CODEC_H264}, {AUDIO_CODEC_VORBIS, AUDIO_CODEC_MP3, AUDIO_CODEC_AAC}}), ContainerData({"WebM", "webm", QStringList({"webm"}), tr("%1 files", "This appears in the file dialog, e.g. 'MP4 files'").arg("WebM") + " (*.webm)", {VIDEO_CODEC_VP8}, {AUDIO_CODEC_VORBIS}}), ContainerData({"OGG", "ogg", QStringList({"ogg"}), tr("%1 files", "This appears in the file dialog, e.g. 
'MP4 files'").arg("OGG") + " (*.ogg)", {VIDEO_CODEC_THEORA}, {AUDIO_CODEC_VORBIS}}), ContainerData({tr("Other..."), "other", QStringList(), "", std::set<enum_video_codec>({}), std::set<enum_audio_codec>({})}), }; m_video_codecs = { {"H.264" , "libx264" }, {"VP8" , "libvpx" }, {"Theora" , "libtheora"}, {tr("Other..."), "other" }, }; m_audio_codecs = { {"Vorbis" , "libvorbis" }, {"MP3" , "libmp3lame" }, {"AAC" , "libvo_aacenc"}, {tr("Uncompressed"), "pcm_s16le" }, {tr("Other...") , "other" }, }; // alternative aac codec if(!AVCodecIsInstalled(m_audio_codecs[AUDIO_CODEC_AAC].avname)) { m_audio_codecs[AUDIO_CODEC_AAC].avname = "aac"; } // load AV container list m_containers_av.clear(); for(AVOutputFormat *format = av_oformat_next(NULL); format != NULL; format = av_oformat_next(format)) { if(format->video_codec == AV_CODEC_ID_NONE) continue; ContainerData c; c.name = format->long_name; c.avname = format->name; c.suffixes = QString(format->extensions).split(',', QString::SkipEmptyParts); if(c.suffixes.isEmpty()) { c.filter = ""; } else { c.filter = tr("%1 files", "This appears in the file dialog, e.g. 'MP4 files'").arg(c.avname) + " (*." + c.suffixes[0]; for(int i = 1; i < c.suffixes.size(); ++i) { c.suffixes[i] = c.suffixes[i].trimmed(); // needed because libav/ffmpeg isn't very consistent when they say 'comma-separated' c.filter += " *." 
+ c.suffixes[i]; } c.filter += ")"; } m_containers_av.push_back(c); } std::sort(m_containers_av.begin(), m_containers_av.end()); // load AV codec list m_video_codecs_av.clear(); m_audio_codecs_av.clear(); for(AVCodec *codec = av_codec_next(NULL); codec != NULL; codec = av_codec_next(codec)) { if(!av_codec_is_encoder(codec)) continue; if(codec->type == AVMEDIA_TYPE_VIDEO && VideoEncoder::AVCodecIsSupported(codec->name)) { VideoCodecData c; c.name = codec->long_name; c.avname = codec->name; m_video_codecs_av.push_back(c); } if(codec->type == AVMEDIA_TYPE_AUDIO && AudioEncoder::AVCodecIsSupported(codec->name)) { AudioCodecData c; c.name = codec->long_name; c.avname = codec->name; m_audio_codecs_av.push_back(c); } } std::sort(m_video_codecs_av.begin(), m_video_codecs_av.end()); std::sort(m_audio_codecs_av.begin(), m_audio_codecs_av.end()); if(m_containers_av.empty()) { Logger::LogError("[PageOutput::PageOutput] " + tr("Error: Could not find any suitable container in libavformat!")); throw LibavException(); } if(m_video_codecs_av.empty()) { Logger::LogError("[PageOutput::PageOutput] " + tr("Error: Could not find any suitable video codec in libavcodec!")); throw LibavException(); } if(m_audio_codecs_av.empty()) { Logger::LogError("[PageOutput::PageOutput] " + tr("Error: Could not find any suitable audio codec in libavcodec!")); throw LibavException(); } m_profile_box = new ProfileBox(this, "output-profiles", &LoadProfileSettingsCallback, &SaveProfileSettingsCallback, this); QGroupBox *groupbox_file = new QGroupBox(tr("File"), this); { QLabel *label_file = new QLabel(tr("Save as:"), groupbox_file); m_lineedit_file = new QLineEdit(groupbox_file); m_lineedit_file->setToolTip(tr("The recording will be saved to this location.")); QPushButton *button_browse = new QPushButton(tr("Browse..."), groupbox_file); m_checkbox_separate_files = new QCheckBox(tr("Separate file per segment"), groupbox_file); m_checkbox_separate_files->setToolTip(tr("If checked, a separate video file will 
be created every time you pause and resume the recording.\n" "If the original file name is 'test.mkv', the segments will be saved as 'test-YYYY-MM-DD_HH.MM.SS.mkv'.")); QLabel *label_container = new QLabel(tr("Container:"), groupbox_file); m_combobox_container = new QComboBox(groupbox_file); for(unsigned int i = 0; i < CONTAINER_COUNT; ++i) { QString name = "\u200e" + m_containers[i].name + "\u200e"; if(i != CONTAINER_OTHER && !AVFormatIsInstalled(m_containers[i].avname)) name += " \u200e" + tr("(not installed)") + "\u200e"; m_combobox_container->addItem(name); } m_combobox_container->setToolTip(tr("The container (file format) that will be used to save the recording.\n" "Note that not all codecs are supported by all containers, and that not all media players can read all file formats.\n" "- Matroska (MKV) supports all the codecs, but is less well-known.\n" "- MP4 is the most well-known format and will play on almost any modern media player, but supports only H.264 video\n" " (and many media players only support AAC audio).\n" "- WebM is intended for embedding video into websites (with the HTML5 <video> tag). The format was created by Google.\n" " WebM is supported by default in Firefox, Chrome and Opera, and plugins are available for Internet Explorer and Safari.\n" " It supports only VP8 and Vorbis.\n" "- OGG supports only Theora and Vorbis.")); m_label_container_av = new QLabel(tr("Container name:"), groupbox_file); m_combobox_container_av = new QComboBox(groupbox_file); for(unsigned int i = 0; i < m_containers_av.size(); ++i) { ContainerData &c = m_containers_av[i]; m_combobox_container_av->addItem(c.avname); } m_combobox_container_av->setToolTip(tr("For advanced users. 
You can use any libav/ffmpeg format, but many of them are not useful or may not work.")); connect(m_combobox_container, SIGNAL(activated(int)), this, SLOT(OnUpdateSuffixAndContainerFields())); connect(m_combobox_container_av, SIGNAL(activated(int)), this, SLOT(OnUpdateSuffixAndContainerFields())); connect(button_browse, SIGNAL(clicked()), this, SLOT(OnBrowse())); QGridLayout *layout = new QGridLayout(groupbox_file); layout->addWidget(label_file, 0, 0); layout->addWidget(m_lineedit_file, 0, 1); layout->addWidget(button_browse, 0, 2); layout->addWidget(m_checkbox_separate_files, 1, 0, 1, 3); layout->addWidget(label_container, 2, 0); layout->addWidget(m_combobox_container, 2, 1, 1, 2); layout->addWidget(m_label_container_av, 3, 0); layout->addWidget(m_combobox_container_av, 3, 1, 1, 2); } QGroupBox *groupbox_video = new QGroupBox(tr("Video"), this); { QLabel *label_video_codec = new QLabel(tr("Codec:"), groupbox_video); m_combobox_video_codec = new QComboBox(groupbox_video); for(unsigned int i = 0; i < VIDEO_CODEC_COUNT; ++i) { m_combobox_video_codec->addItem(m_video_codecs[i].name); } m_combobox_video_codec->setToolTip(tr("The codec that will be used to compress the video stream.\n" "- H.264 (libx264) is by far the best codec - high quality and very fast.\n" "- VP8 (libvpx) is quite good but also quite slow.\n" "- Theora (libtheora) isn't really recommended because the quality isn't very good.")); m_label_video_codec_av = new QLabel(tr("Codec name:"), groupbox_video); m_combobox_video_codec_av = new QComboBox(groupbox_video); for(unsigned int i = 0; i < m_video_codecs_av.size(); ++i) { VideoCodecData &c = m_video_codecs_av[i]; m_combobox_video_codec_av->addItem(c.avname); } m_combobox_video_codec_av->setToolTip(tr("For advanced users. 
You can use any libav/ffmpeg video codec, but many of them are not useful or may not work.")); m_label_video_kbit_rate = new QLabel(tr("Bit rate (in kbps):"), groupbox_video); m_lineedit_video_kbit_rate = new QLineEdit(groupbox_video); m_lineedit_video_kbit_rate->setToolTip(tr("The video bit rate (in kilobit per second). A higher value means a higher quality." "\nIf you have no idea where to start, try 5000 and change it if needed.")); m_label_h264_crf = new QLabel(tr("Constant rate factor:", "libx264 setting: don't translate this unless you can come up with something sensible"), groupbox_video); m_slider_h264_crf = new QSlider(Qt::Horizontal, groupbox_video); m_slider_h264_crf->setRange(0, 51); m_slider_h264_crf->setSingleStep(1); m_slider_h264_crf->setPageStep(5); m_slider_h264_crf->setToolTip(tr("This setting changes the video quality. A lower value means a higher quality.\n" "The allowed range is 0-51 (0 means lossless, the default is 23).")); m_label_h264_crf_value = new QLabel(groupbox_video); m_label_h264_crf_value->setNum(m_slider_h264_crf->value()); m_label_h264_crf_value->setAlignment(Qt::AlignRight | Qt::AlignVCenter); m_label_h264_crf_value->setMinimumWidth(QFontMetrics(m_label_h264_crf_value->font()).width("99") + 2); m_label_h264_preset = new QLabel(tr("Preset:", "libx264 setting: don't translate this unless you can come up with something sensible"), groupbox_video); m_combobox_h264_preset = new QComboBox(groupbox_video); for(unsigned int i = 0; i < H264_PRESET_COUNT; ++i) { m_combobox_h264_preset->addItem(EnumToString((enum_h264_preset) i)); } m_combobox_h264_preset->setToolTip(tr("The encoding speed. A higher speed uses less CPU (making higher recording frame rates possible),\n" "but results in larger files. 
The quality shouldn't be affected too much.")); m_label_vp8_cpu_used = new QLabel(tr("CPU used:", "libvpx setting: don't translate this unless you can come up with something sensible"), groupbox_video); m_combobox_vp8_cpu_used = new QComboBox(groupbox_video); m_combobox_vp8_cpu_used->addItem("5 (" + tr("fastest") + ")"); m_combobox_vp8_cpu_used->addItem("4"); m_combobox_vp8_cpu_used->addItem("3"); m_combobox_vp8_cpu_used->addItem("2"); m_combobox_vp8_cpu_used->addItem("1"); m_combobox_vp8_cpu_used->addItem("0 (" + tr("slowest") + ")"); m_combobox_vp8_cpu_used->setToolTip(tr("The encoding speed. A higher value uses *less* CPU time. (I didn't choose the name, this is the name\n" "used by the VP8 encoder). Higher values result in lower quality video, unless you increase the bit rate too.")); m_label_video_options = new QLabel(tr("Custom options:"), groupbox_video); m_lineedit_video_options = new QLineEdit(groupbox_video); m_lineedit_video_options->setToolTip(tr("Custom codec options separated by commas (e.g. option1=value1,option2=value2,option3=value3)")); m_checkbox_video_allow_frame_skipping = new QCheckBox(tr("Allow frame skipping"), groupbox_video); m_checkbox_video_allow_frame_skipping->setToolTip(tr("If checked, the video encoder will be allowed to skip frames if the input frame rate is\n" "lower than the output frame rate. 
If not checked, input frames will be duplicated to fill the holes.\n" "This increases the file size and CPU usage, but reduces the latency for live streams in some cases.\n" "It shouldn't affect the appearance of the video.")); connect(m_combobox_video_codec, SIGNAL(activated(int)), this, SLOT(OnUpdateVideoCodecFields())); connect(m_slider_h264_crf, SIGNAL(valueChanged(int)), m_label_h264_crf_value, SLOT(setNum(int))); QGridLayout *layout = new QGridLayout(groupbox_video); layout->addWidget(label_video_codec, 0, 0); layout->addWidget(m_combobox_video_codec, 0, 1, 1, 2); layout->addWidget(m_label_video_codec_av, 1, 0); layout->addWidget(m_combobox_video_codec_av, 1, 1, 1, 2); layout->addWidget(m_label_video_kbit_rate, 2, 0); layout->addWidget(m_lineedit_video_kbit_rate, 2, 1, 1, 2); layout->addWidget(m_label_h264_crf, 3, 0); layout->addWidget(m_slider_h264_crf, 3, 1); layout->addWidget(m_label_h264_crf_value, 3, 2); layout->addWidget(m_label_h264_preset, 4, 0); layout->addWidget(m_combobox_h264_preset, 4, 1, 1, 2); layout->addWidget(m_label_vp8_cpu_used, 5, 0); layout->addWidget(m_combobox_vp8_cpu_used, 5, 1, 1, 2); layout->addWidget(m_label_video_options, 6, 0); layout->addWidget(m_lineedit_video_options, 6, 1, 1, 2); layout->addWidget(m_checkbox_video_allow_frame_skipping, 7, 0, 1, 3); } m_groupbox_audio = new QGroupBox(tr("Audio")); { QLabel *label_audio_codec = new QLabel(tr("Codec:"), m_groupbox_audio); m_combobox_audio_codec = new QComboBox(m_groupbox_audio); for(unsigned int i = 0; i < AUDIO_CODEC_COUNT; ++i) { m_combobox_audio_codec->addItem(m_audio_codecs[i].name); } m_combobox_audio_codec->setToolTip(tr("The codec that will be used to compress the audio stream. You shouldn't worry too much about\n" "this, because the size of the audio data is usually negligible compared to the size of the video data.\n" "And if you're only recording your own voice (i.e. 
no music), the quality won't matter that much anyway.\n" "- Vorbis (libvorbis) is great, this is the recommended codec.\n" "- MP3 (libmp3lame) is reasonably good.\n" "- AAC is a good codec, but the implementations used here (libvo_aacenc or the experimental ffmpeg aac encoder)\n" " are pretty bad. Only use it if you have no other choice.\n" "- Uncompressed will simply store the sound data without compressing it. The file will be quite large, but it's very fast.")); m_label_audio_codec_av = new QLabel(tr("Codec name:"), m_groupbox_audio); m_combobox_audio_codec_av = new QComboBox(m_groupbox_audio); for(unsigned int i = 0; i < m_audio_codecs_av.size(); ++i) { AudioCodecData &c = m_audio_codecs_av[i]; m_combobox_audio_codec_av->addItem(c.avname); } m_combobox_audio_codec_av->setToolTip(tr("For advanced users. You can use any libav/ffmpeg audio codec, but many of them are not useful or may not work.")); m_label_audio_kbit_rate = new QLabel(tr("Bit rate (in kbps):"), m_groupbox_audio); m_lineedit_audio_kbit_rate = new QLineEdit(m_groupbox_audio); m_lineedit_audio_kbit_rate->setToolTip(tr("The audio bit rate (in kilobit per second). A higher value means a higher quality. The typical value is 128.")); m_label_audio_options = new QLabel(tr("Custom options:"), m_groupbox_audio); m_lineedit_audio_options = new QLineEdit(m_groupbox_audio); m_lineedit_audio_options->setToolTip(tr("Custom codec options separated by commas (e.g. 
option1=value1,option2=value2,option3=value3)")); connect(m_combobox_audio_codec, SIGNAL(activated(int)), this, SLOT(OnUpdateAudioCodecFields())); QGridLayout *layout = new QGridLayout(m_groupbox_audio); layout->addWidget(label_audio_codec, 0, 0); layout->addWidget(m_combobox_audio_codec, 0, 1); layout->addWidget(m_label_audio_codec_av, 1, 0); layout->addWidget(m_combobox_audio_codec_av, 1, 1); layout->addWidget(m_label_audio_kbit_rate, 2, 0); layout->addWidget(m_lineedit_audio_kbit_rate, 2, 1); layout->addWidget(m_label_audio_options, 3, 0); layout->addWidget(m_lineedit_audio_options, 3, 1); } QPushButton *button_back = new QPushButton(g_icon_go_previous, tr("Back"), this); QPushButton *button_continue = new QPushButton(g_icon_go_next, tr("Continue"), this); connect(button_back, SIGNAL(clicked()), m_main_window, SLOT(GoPageInput())); connect(button_continue, SIGNAL(clicked()), this, SLOT(OnContinue())); QVBoxLayout *layout = new QVBoxLayout(this); layout->addWidget(m_profile_box); layout->addWidget(groupbox_file); layout->addWidget(groupbox_video); layout->addWidget(m_groupbox_audio); layout->addStretch(); { QHBoxLayout *layout2 = new QHBoxLayout(); layout->addLayout(layout2); layout2->addWidget(button_back); layout2->addWidget(button_continue); } OnUpdateContainerFields(); OnUpdateVideoCodecFields(); OnUpdateAudioCodecFields(); }
/* Print the name and description of every lavf muxer registered so far. */
static void list_formats(void) {
    AVOutputFormat *ofmt = av_oformat_next(NULL);

    mp_msg(MSGT_DEMUX, MSGL_INFO, "Available lavf output formats:\n");
    while (ofmt) {
        mp_msg(MSGT_DEMUX, MSGL_INFO, "%15s : %s\n", ofmt->name, ofmt->long_name);
        ofmt = av_oformat_next(ofmt);
    }
}
int main(int argc, char *argv[]) { AVFormatContext *ic; char *iname; char *oname; char *size; int err; int vidindex; int i, j; OMX_ERRORTYPE oerr; OMX_HANDLETYPE m2 = NULL, m4 = NULL, resize = NULL; OMX_VIDEO_PARAM_PORTFORMATTYPE *pfmt; OMX_PORT_PARAM_TYPE *porttype; OMX_PARAM_PORTDEFINITIONTYPE *portdef; OMX_BUFFERHEADERTYPE *decbufs; OMX_VIDEO_PORTDEFINITIONTYPE *viddef; OMX_VIDEO_PARAM_PROFILELEVELTYPE *level; int decportidx = 200; int resizeportidx = 60; int encportidx = 130; int fd; time_t start, end; int offset; AVPacket *p, *rp; int ish264; int filtertest; int opt; ILCLIENT_T *client; if (argc < 3) usage(argv[0]); ctx.bitrate = 2*1024*1024; ctx.verbose = 0; ctx.width = 0; ctx.height = 0; while ((opt = getopt(argc, argv, ":b:vs:")) != -1) { switch (opt) { case 'b': //bitrate ctx.bitrate = atoi(optarg); printf("bitrate = %d\n", ctx.bitrate); break; case 'v': //verbose` ctx.verbose = 1; break; case 's': //WxH ctx.width = atoi(optarg); if ((atoi(optarg) % 16) != 0) { printf("W = %d is not a multiple of 16\n", ctx.width); usage(argv[0]); } if (ctx.width <16 || ctx.width > 1080) { printf("W = %d should be between 16 and 1080\n", ctx.width); usage(argv[0]); } printf("W = %d\n", ctx.width); if ((size = strchr(optarg, 'x')) == NULL) { printf("wrong size parameter (no 'x') exiting\n"); usage(argv[0]); } ctx.height = atoi(size+1); if ((atoi(size+1) % 16) != 0) { printf("H = %d is not a multiple of 16\n", ctx.height); usage(argv[0]); } if (ctx.height <16 || ctx.height > 1080) { printf("H = %d should be between 16 and 1080\n", ctx.height); usage(argv[0]); } printf("H = %d\n", ctx.height); break; case '?': usage(argv[0]); } } if ((client = ilclient_init()) == NULL) return -2; iname = argv[optind++]; oname = argv[optind++]; MAKEME(porttype, OMX_PORT_PARAM_TYPE); MAKEME(portdef, OMX_PARAM_PORTDEFINITIONTYPE); MAKEME(pfmt, OMX_VIDEO_PARAM_PORTFORMATTYPE); av_register_all(); ic = NULL; ish264 = 0; pthread_mutex_init(&ctx.lock, NULL); #if 0 fmt = av_oformat_next(fmt); while 
(fmt) { printf("Found '%s'\t\t'%s'\n", fmt->name, fmt->long_name); fmt = av_oformat_next(fmt); } #endif /* Input init: */ if ((err = avformat_open_input(&ic, iname, NULL, NULL) != 0)) { fprintf(stderr, "Failed to open '%s': %s\n", iname, strerror(err)); exit(1); } ctx.ic = ic; if (avformat_find_stream_info(ic, NULL) < 0) { fprintf(stderr, "Failed to find streams in '%s'\n", iname); exit(1); } av_dump_format(ic, 0, iname, 0); vidindex = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0); if (vidindex < 0) { fprintf(stderr, "Failed to find a video stream in '%s'\n", iname); exit(1); } printf("Found a video at index %d\n", vidindex); printf("Frame size: %dx%d\n", ic->streams[vidindex]->codec->width, ic->streams[vidindex]->codec->height); ish264 = (ic->streams[vidindex]->codec->codec_id == CODEC_ID_H264); /* Output init: */ ctx.fd = fd = open(oname, O_CREAT | O_LARGEFILE | O_WRONLY | O_TRUNC, 0666); printf("File descriptor %d\n", fd); #if 0 avformat_alloc_output_context(&oc, NULL, /*NULL,*/ oname); if (!oc) { printf("Couldn't determine output from '%s'; using MPEG.\n", oname); avformat_alloc_output_context(&oc, NULL, /*"matroska",*/ oname); } #endif // if (!oc) // exit(1); // fmt = oc->oformat; for (i = 0; i < ic->nb_streams; i++) { printf("Found stream %d, context %p\n", ic->streams[i]->index, ic->streams[i]->codec); } bcm_host_init(); OERR(OMX_Init(), ctx.verbose); OERR(OMX_GetHandle(&m2, DECNAME, &ctx, &decevents), ctx.verbose); OERR(OMX_GetHandle(&m4, ENCNAME, &ctx, &encevents), ctx.verbose); OERR(OMX_GetHandle(&resize, RESIZENAME, &ctx, &resizeevents), ctx.verbose); ctx.m2 = m2; ctx.m4 = m4; ctx.resize = resize; if (ctx.verbose) printf("Obtained handles. 
%p decode, %p resize, %p encode\n", m2, resize, m4); OERR(OMX_GetParameter(m2, OMX_IndexParamVideoInit, porttype), ctx.verbose); if (ctx.verbose) printf("Found %d ports, starting at %d (%x) on decoder\n", porttype->nPorts, porttype->nStartPortNumber, porttype->nStartPortNumber); ctx.decportidx = decportidx = porttype->nStartPortNumber; OERR(OMX_GetParameter(resize, OMX_IndexParamImageInit, porttype), ctx.verbose); if (ctx.verbose) printf("Found %d ports, starting at %d (%x) on resizer\n", porttype->nPorts, porttype->nStartPortNumber, porttype->nStartPortNumber); ctx.resizeportidx = resizeportidx = porttype->nStartPortNumber; OERR(OMX_GetParameter(m4, OMX_IndexParamVideoInit, porttype), ctx.verbose); if (ctx.verbose) printf("Found %d ports, starting at %d (%x) on encoder\n", porttype->nPorts, porttype->nStartPortNumber, porttype->nStartPortNumber); ctx.encportidx = encportidx = porttype->nStartPortNumber; free(porttype); OERR(OMX_SendCommand(m2, OMX_CommandPortDisable, decportidx, NULL), ctx.verbose); OERR(OMX_SendCommand(m2, OMX_CommandPortDisable, decportidx+1, NULL), ctx.verbose); OERR(OMX_SendCommand(resize, OMX_CommandPortDisable, resizeportidx, NULL), ctx.verbose); OERR(OMX_SendCommand(resize, OMX_CommandPortDisable, resizeportidx+1, NULL), ctx.verbose); OERR(OMX_SendCommand(m4, OMX_CommandPortDisable, encportidx, NULL), ctx.verbose); OERR(OMX_SendCommand(m4, OMX_CommandPortDisable, encportidx+1, NULL), ctx.verbose); portdef->nPortIndex = decportidx; OERR(OMX_GetParameter(m2, OMX_IndexParamPortDefinition, portdef), ctx.verbose); viddef = &portdef->format.video; viddef->nFrameWidth = ic->streams[vidindex]->codec->width; viddef->nFrameHeight = ic->streams[vidindex]->codec->height; printf("Mapping codec %d to %d\n", ic->streams[vidindex]->codec->codec_id, mapcodec(ic->streams[vidindex]->codec->codec_id)); viddef->eCompressionFormat = mapcodec(ic->streams[vidindex]->codec->codec_id); viddef->bFlagErrorConcealment = 0; // viddef->xFramerate = 25<<16; 
OERR(OMX_SetParameter(m2, OMX_IndexParamPortDefinition, portdef), ctx.verbose); free(portdef); #if 0 /* It appears these have limited effect: */ dataunit->nPortIndex = decportidx; dataunit->eUnitType = OMX_DataUnitCodedPicture; dataunit->eEncapsulationType = OMX_DataEncapsulationGenericPayload; OERR(OMX_SetParameter(m2, OMX_IndexParamBrcmDataUnit, dataunit), ctx.verbose); if (ish264) { naltype->nPortIndex = decportidx; naltype->eNaluFormat = OMX_NaluFormatStartCodes; OERR(OMX_SetParameter(m2, OMX_IndexParamNalStreamFormatSelect, naltype), ctx.verbose); } #endif MAKEME(level, OMX_VIDEO_PARAM_PROFILELEVELTYPE); level->nPortIndex = encportidx+1; /* Dump what the encoder is capable of: */ if (ctx.verbose) print_codecs(); if (ctx.verbose) { for (oerr = OMX_ErrorNone, i = 0; oerr == OMX_ErrorNone; i++) { pfmt->nIndex = i; oerr = OMX_GetParameter(m4, OMX_IndexParamVideoPortFormat, pfmt); if (oerr == OMX_ErrorNoMore) break; printf("Codecs supported:\n" "\tIndex:\t\t%d\n" "\tCodec:\t\t%d (%x)\n" "\tColour:\t\t%d\n" "\tFramerate:\t%x (%f)\n", pfmt->nIndex, pfmt->eCompressionFormat, pfmt->eCompressionFormat, pfmt->eColorFormat, pfmt->xFramerate, ((float)pfmt->xFramerate/(float)65536)); } for (oerr = OMX_ErrorNone, i = 0; oerr == OMX_ErrorNone; i++) { level->nProfileIndex = i; oerr = OMX_GetParameter(m4, OMX_IndexParamVideoProfileLevelQuerySupported, level); if (oerr == OMX_ErrorNoMore) break; printf("Levels supported:\n" "\tIndex:\t\t%d\n" "\tProfile:\t%d\n" "\tLevel:\t\t%d\n", level->nProfileIndex, level->eProfile, level->eLevel); } } free(pfmt); free(level); /* Dump current port states: */ if (ctx.verbose) { dumpport(m2, decportidx); dumpport(m2, decportidx+1); dumpport(resize, resizeportidx); dumpport(resize, resizeportidx+1); dumpport(m4, encportidx); dumpport(m4, encportidx+1); } OERR(OMX_SendCommand(m2, OMX_CommandStateSet, OMX_StateIdle, NULL), ctx.verbose); decbufs = allocbufs(m2, decportidx, 1); /* Start the initial loop. 
Process until we have a state change on port 131 */ ctx.decstate = DECINIT; ctx.encstate = ENCPREINIT; OERR(OMX_SendCommand(m2, OMX_CommandStateSet, OMX_StateExecuting, NULL), ctx.verbose); rp = calloc(sizeof(AVPacket), 1); filtertest = ish264; for (offset = i = j = 0; ctx.decstate != DECFAILED; i++, j++) { int rc; int k; int size, nsize; OMX_BUFFERHEADERTYPE *spare; if (offset == 0 && ctx.decstate != DECFLUSH) { rc = av_read_frame(ic, rp); if (rc != 0) { if (ic->pb->eof_reached) ctx.decstate = DECFLUSH; break; } if (rp->stream_index != vidindex) { i--; av_free_packet(rp); continue; } size = rp->size; ctx.fps++; ctx.framecount++; if (ish264 && filtertest) { filtertest = 0; ctx.bsfc = dofiltertest(rp); } if (ctx.bsfc) { p = filter(&ctx, rp); } else { p = rp; } } switch (ctx.decstate) { case DECTUNNELSETUP: start = time(NULL); //printf("NOW to CONFIGURE !!!!!!!!!!!!!!\n\n"); configure(&ctx); ctx.decstate = DECRUNNING; break; case DECFLUSH: size = 0; /* Add the flush code here */ printf("IN FLUSH NOW\n\n"); break; case DECINIT: if (i < 120) /* Bail; decoder doesn't like it */ break; ctx.decstate = DECFAILED; /* Drop through */ case DECFAILED: fprintf(stderr, "Failed to set the parameters after " "%d video frames. 
Giving up.\n", i); dumpport(m2, decportidx); dumpport(m2, decportidx+1); dumpport(resize, resizeportidx); dumpport(resize, resizeportidx+1); dumpport(m4, encportidx); dumpport(m4, encportidx+1); exit(1); break; default: break; /* Shuts the compiler up */ } for (spare = NULL; !spare; usleep(10)) { pthread_mutex_lock(&ctx.lock); spare = ctx.bufhead; ctx.bufhead = NULL; ctx.flags &= ~FLAGS_DECEMPTIEDBUF; pthread_mutex_unlock(&ctx.lock); while (spare) { write(fd, &spare->pBuffer[spare->nOffset], spare->nFilledLen); spare->nFilledLen = 0; spare->nOffset = 0; OERRq(OMX_FillThisBuffer(m4, spare)); spare = spare->pAppPrivate; } spare = decbufs; for (k = 0; spare && spare->nFilledLen != 0; k++) spare = spare->pAppPrivate; } if (size > spare->nAllocLen) { nsize = spare->nAllocLen; } else { nsize = size; } if (ctx.decstate != DECFLUSH) { memcpy(spare->pBuffer, &(p->data[offset]), nsize); spare->nFlags = i == 0 ? OMX_BUFFERFLAG_STARTTIME : 0; spare->nFlags |= size == nsize ? OMX_BUFFERFLAG_ENDOFFRAME : 0; } else { spare->nFlags = OMX_BUFFERFLAG_STARTTIME | OMX_BUFFERFLAG_EOS; } spare->nFilledLen = nsize; spare->nOffset = 0; OERRq(OMX_EmptyThisBuffer(m2, spare)); size -= nsize; if (size) { offset += nsize; } else { offset = 0; av_free_packet(p); } } close(fd); end = time(NULL); printf("Processed %d frames in %d seconds; %df/s\n", ctx.framecount, end-start, (ctx.framecount/(end-start))); // flush the encoder // OERR(OMX_SendCommand(m4, OMX_CommandFlush, encportidx, NULL), ctx.verbose); // OERR(OMX_SendCommand(m4, OMX_CommandFlush, encportidx+ctx.verbose, NULL), ctx.verbose); // tear down the tunnels OERR(OMX_SendCommand(m2, OMX_CommandStateSet, OMX_StateIdle, NULL), ctx.verbose); OERR(OMX_SendCommand(resize, OMX_CommandStateSet, OMX_StateIdle, NULL), ctx.verbose); OERR(OMX_SendCommand(m4, OMX_CommandStateSet, OMX_StateIdle, NULL), ctx.verbose); OERR(OMX_SendCommand(m2, OMX_CommandStateSet, OMX_StateLoaded, NULL), ctx.verbose); OERR(OMX_SendCommand(resize, OMX_CommandStateSet, 
OMX_StateLoaded, NULL), ctx.verbose); OERR(OMX_SendCommand(m4, OMX_CommandStateSet, OMX_StateLoaded, NULL), ctx.verbose); // free buffers vcos_free(decbufs); vcos_free(ctx.encbufs); // Apparantly the teardwon function is not implemented. Use setup function instead //OERR(OMX_TeardownTunnel(m2, decportidx+ctx.verbose, resize, resizeportidx), ctx.verbose); //OERR(OMX_TeardownTunnel(resize, resizeportidx+ctx.verbose, m4, encportidx), ctx.verbose); OERR(OMX_SendCommand(m2, OMX_CommandPortDisable, decportidx, NULL), ctx.verbose); OERR(OMX_SendCommand(m2, OMX_CommandPortDisable, decportidx+ctx.verbose, NULL), ctx.verbose); OERR(OMX_SendCommand(resize, OMX_CommandPortDisable, resizeportidx, NULL), ctx.verbose); OERR(OMX_SendCommand(resize, OMX_CommandPortDisable, resizeportidx+ctx.verbose, NULL), ctx.verbose); OERR(OMX_SendCommand(m4, OMX_CommandPortDisable, encportidx, NULL), ctx.verbose); OERR(OMX_SendCommand(m4, OMX_CommandPortDisable, encportidx+ctx.verbose, NULL), ctx.verbose); // ilclient_disable_port_buffers(m2, decportidx, NULL, NULL, NULL); // ilclient_disable_port_buffers(m4, encportidx, NULL, NULL, NULL); OERR(OMX_SetupTunnel(m2, decportidx+ctx.verbose, NULL, 0), ctx.verbose); OERR(OMX_SetupTunnel(resize, resizeportidx, NULL, 0), ctx.verbose); OERR(OMX_SetupTunnel(resize, resizeportidx+ctx.verbose, NULL, 0), ctx.verbose); OERR(OMX_SetupTunnel(m4, encportidx, NULL, 0), ctx.verbose); OERR(OMX_FreeHandle(m2), ctx.verbose); OERR(OMX_FreeHandle(resize), ctx.verbose); OERR(OMX_FreeHandle(m4), ctx.verbose); // free(porttype); // free(portdef); // free(pfmt); // free(level); return 0; }
/*
 * Legacy catch-all handler for "-opt arg" command line options: probe, in
 * order, codec private options, muxer private options, demuxer private
 * options, the global per-media-type codec contexts, the format context and
 * the swscale context; as a last resort strip a leading 'a'/'v'/'s' stream
 * qualifier and retry on the matching codec context.  On success the
 * name/value pair is also recorded in the global opt_names/opt_values
 * arrays.  Exits the process on an unknown option or invalid value;
 * otherwise always returns 0.
 */
int opt_default(const char *opt, const char *arg){
    int type;
    int ret= 0;
    const AVOption *o= NULL;
    /* One flag mask per AVMediaType slot; the 0 entries are media types
     * with no settable options here. */
    int opt_types[]={AV_OPT_FLAG_VIDEO_PARAM, AV_OPT_FLAG_AUDIO_PARAM, 0, AV_OPT_FLAG_SUBTITLE_PARAM, 0};
    AVCodec *p = NULL;
    AVOutputFormat *oformat = NULL;
    AVInputFormat *iformat = NULL;

    /* Does some registered codec declare this as a private option?  If so,
     * accept it here and just record it below. */
    while ((p = av_codec_next(p))) {
        AVClass *c = p->priv_class;
        if (c && av_find_opt(&c, opt, NULL, 0, 0))
            break;
    }
    if (p)
        goto out;
    /* Same probe over muxer private option classes... */
    while ((oformat = av_oformat_next(oformat))) {
        const AVClass *c = oformat->priv_class;
        if (c && av_find_opt(&c, opt, NULL, 0, 0))
            break;
    }
    if (oformat)
        goto out;
    /* ...and demuxer private option classes. */
    while ((iformat = av_iformat_next(iformat))) {
        const AVClass *c = iformat->priv_class;
        if (c && av_find_opt(&c, opt, NULL, 0, 0))
            break;
    }
    if (iformat)
        goto out;

    /* Try the generic codec options on each per-media-type context,
     * restricted by the matching flag mask. */
    for(type=0; *avcodec_opts && type<AVMEDIA_TYPE_NB && ret>= 0; type++){
        const AVOption *o2 = av_find_opt(avcodec_opts[0], opt, NULL, opt_types[type], opt_types[type]);
        if(o2)
            ret = av_set_string3(avcodec_opts[type], opt, arg, 1, &o);
    }
    /* Then the format context and the software-scaler context. */
    if(!o && avformat_opts)
        ret = av_set_string3(avformat_opts, opt, arg, 1, &o);
    if(!o && sws_opts)
        ret = av_set_string3(sws_opts, opt, arg, 1, &o);
    if(!o){
        /* Last chance: a leading 'a', 'v' or 's' selects the stream type
         * (e.g. "ab" becomes option "b" on the audio context). */
        if (opt[0] == 'a' && avcodec_opts[AVMEDIA_TYPE_AUDIO])
            ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_AUDIO], opt+1, arg, 1, &o);
        else if(opt[0] == 'v' && avcodec_opts[AVMEDIA_TYPE_VIDEO])
            ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_VIDEO], opt+1, arg, 1, &o);
        else if(opt[0] == 's' && avcodec_opts[AVMEDIA_TYPE_SUBTITLE])
            ret = av_set_string3(avcodec_opts[AVMEDIA_TYPE_SUBTITLE], opt+1, arg, 1, &o);
        if (ret >= 0)
            opt += 1; /* record the unprefixed name below */
    }
    if (o && ret < 0) {
        fprintf(stderr, "Invalid value '%s' for option '%s'\n", arg, opt);
        exit(1);
    }
    if (!o) {
        fprintf(stderr, "Unrecognized option '%s'\n", opt);
        exit(1);
    }

 out:
//    av_log(NULL, AV_LOG_ERROR, "%s:%s: %f 0x%0X\n", opt, arg, av_get_double(avcodec_opts, opt, NULL), (int)av_get_int(avcodec_opts, opt, NULL));
    /* Append the pair to the global option lists.  NOTE(review): the
     * av_realloc/av_strdup return values are not checked here. */
    opt_values= av_realloc(opt_values, sizeof(void*)*(opt_name_count+1));
    opt_values[opt_name_count] = av_strdup(arg);
    opt_names= av_realloc(opt_names, sizeof(void*)*(opt_name_count+1));
    opt_names[opt_name_count++] = av_strdup(opt);

    /* Switch to debug logging as soon as any context has its debug flag
     * set (e.g. via a "-debug" option handled above). */
    if ((*avcodec_opts && avcodec_opts[0]->debug) || (avformat_opts && avformat_opts->debug))
        av_log_set_level(AV_LOG_DEBUG);
    return 0;
}
/*
 * Print, on stdout, every registered format (muxer/demuxer), codec,
 * bitstream filter and protocol, plus the frame size/rate abbreviation
 * list.  The registration lists are unsorted, so names are emitted in
 * alphabetical order by repeatedly rescanning each list for the smallest
 * name strictly greater than the last one printed.
 */
void show_formats(void)
{
    AVInputFormat *ifmt=NULL;
    AVOutputFormat *ofmt=NULL;
    URLProtocol *up=NULL;
    AVCodec *p=NULL, *p2;
    AVBitStreamFilter *bsf=NULL;
    const char *last_name;

    printf(
        "File formats:\n"
        " D. = Demuxing supported\n"
        " .E = Muxing supported\n"
        " --\n");
    /* "000" sorts before every real format name, so the first pass picks
     * the alphabetically smallest entry. */
    last_name= "000";
    for(;;){
        int decode=0;
        int encode=0;
        const char *name=NULL;
        const char *long_name=NULL;

        /* Candidate from the muxer list: smallest name > last_name. */
        while((ofmt= av_oformat_next(ofmt))) {
            if((name == NULL || strcmp(ofmt->name, name)<0) &&
                strcmp(ofmt->name, last_name)>0){
                name= ofmt->name;
                long_name= ofmt->long_name;
                encode=1;
            }
        }
        /* A demuxer may beat it (then encode resets to 0) or share the
         * same name (then decode is set too). */
        while((ifmt= av_iformat_next(ifmt))) {
            if((name == NULL || strcmp(ifmt->name, name)<0) &&
                strcmp(ifmt->name, last_name)>0){
                name= ifmt->name;
                long_name= ifmt->long_name;
                encode=0;
            }
            if(name && strcmp(ifmt->name, name)==0)
                decode=1;
        }
        if(name==NULL)
            break; /* no name left above last_name: done */
        last_name= name;

        printf(
            " %s%s %-15s %s\n",
            decode ? "D":" ",
            encode ? "E":" ",
            name,
            long_name ? long_name:" ");
    }
    printf("\n");

    printf(
        "Codecs:\n"
        " D..... = Decoding supported\n"
        " .E.... = Encoding supported\n"
        " ..V... = Video codec\n"
        " ..A... = Audio codec\n"
        " ..S... = Subtitle codec\n"
        " ...S.. = Supports draw_horiz_band\n"
        " ....D. = Supports direct rendering method 1\n"
        " .....T = Supports weird frame truncation\n"
        " ------\n");
    last_name= "000";
    for(;;){
        int decode=0;
        int encode=0;
        int cap=0;
        const char *type_str;

        /* Same repeated-minimum scan over the codec list; entries that
         * share p2's name (separate encoder/decoder registrations) merge
         * their capabilities into one table row. */
        p2=NULL;
        while((p= av_codec_next(p))) {
            if((p2==NULL || strcmp(p->name, p2->name)<0) &&
                strcmp(p->name, last_name)>0){
                p2= p;
                decode= encode= cap=0;
            }
            if(p2 && strcmp(p->name, p2->name)==0){
                if(p->decode) decode=1;
                if(p->encode) encode=1;
                cap |= p->capabilities;
            }
        }
        if(p2==NULL)
            break;
        last_name= p2->name;

        switch(p2->type) {
        case CODEC_TYPE_VIDEO:
            type_str = "V";
            break;
        case CODEC_TYPE_AUDIO:
            type_str = "A";
            break;
        case CODEC_TYPE_SUBTITLE:
            type_str = "S";
            break;
        default:
            type_str = "?";
            break;
        }
        printf(
            " %s%s%s%s%s%s %-15s %s",
            decode ? "D": (/*p2->decoder ? "d":*/" "),
            encode ? "E":" ",
            type_str,
            cap & CODEC_CAP_DRAW_HORIZ_BAND ? "S":" ",
            cap & CODEC_CAP_DR1 ? "D":" ",
            cap & CODEC_CAP_TRUNCATED ? "T":" ",
            p2->name,
            p2->long_name ? p2->long_name : "");
        /* if(p2->decoder && decode==0)
            printf(" use %s for decoding", p2->decoder->name);*/
        printf("\n");
    }
    printf("\n");

    /* Bitstream filters and protocols are short lists: print in
     * registration order. */
    printf("Bitstream filters:\n");
    while((bsf = av_bitstream_filter_next(bsf)))
        printf(" %s", bsf->name);
    printf("\n");

    printf("Supported file protocols:\n");
    while((up = av_protocol_next(up)))
        printf(" %s:", up->name);
    printf("\n");

    printf("Frame size, frame rate abbreviations:\n ntsc pal qntsc qpal sntsc spal film ntsc-film sqcif qcif cif 4cif\n");
    printf("\n");
    printf(
        "Note, the names of encoders and decoders do not always match, so there are\n"
        "several cases where the above table shows encoder only or decoder only entries\n"
        "even though both encoding and decoding are supported. For example, the h263\n"
        "decoder corresponds to the h263 and h263p encoders, for file formats it is even\n"
        "worse.\n");
}