// One-time FFmpeg bootstrap for the player engine: registers codecs,
// filters, muxers/demuxers and the network layer, then marks the engine
// as initialized. Always reports success.
status_t MediaPlayer::initFFmpegEngine() {
    //FFMPEG INIT code
    avcodec_register_all();   // encoders/decoders
    avfilter_register_all();  // filter graph support
    av_register_all();        // muxers/demuxers/protocols
    avformat_network_init();  // network protocols (http, rtmp, ...)

    ffmpegEngineInitialized = true;
    return NO_ERROR;
}
// Look up the decoder for the given codec id, lazily performing the
// process-wide FFmpeg registration under sMonitor on first use.
/* static */ AVCodec*
FFmpegDataDecoder<LIBAV_VER>::FindAVCodec(AVCodecID aCodec)
{
  StaticMutexAutoLock mon(sMonitor);

  if (!sFFmpegInitDone) {
    // First caller registers every codec exactly once.
    avcodec_register_all();
#ifdef DEBUG
    // Verbose FFmpeg logging in debug builds only.
    av_log_set_level(AV_LOG_DEBUG);
#endif
    sFFmpegInitDone = true;
  }

  return avcodec_find_decoder(aCodec);
}
// Construct an idle AVFile (no contexts, no stream selected) and make
// sure the process-wide FFmpeg registration has run exactly once,
// serialized by ffmpeg_init_mutex.
AVFile::AVFile()
    : AVObject(),
      formatCtx(nullptr),
      codecCtx(nullptr),
      swrCtx(nullptr),
      audioStream(-1),
      decoding(false),
      _position(0),
      _seek_to(-1)
{
    ffmpeg_init_mutex.lock();
    if (!ffmpeg_init) {
        // First instance registers formats and codecs for everyone.
        av_register_all();
        avcodec_register_all();
        ffmpeg_init = true;
    }
    ffmpeg_init_mutex.unlock();
}
// @todo - add a rx_shutdown_libav() to deinitialize e.g. the network (see libav/tools/aviocat.c) void rx_init_libav() { if(!rx_did_register_all) { av_log_set_level(AV_LOG_DEBUG); av_register_all(); avcodec_register_all(); avformat_network_init(); avfilter_register_all(); rx_did_register_all = true; } }
int CFfmpegDecode::initFFMPEG() { //m_state = RC_STATE_INIT; avcodec_register_all(); av_register_all(); //avformat_network_init(); if (av_lockmgr_register(lockmgr)) { // m_state = RC_STATE_INIT_ERROR; // return -1; } return 0; }
// Initialize an H.264 decoder, cloning configuration from an existing
// codec context.
//
// NOTE(review): latent issues worth confirming with callers:
//  - returns 0 on BOTH success and avcodec_open2() failure, so callers
//    cannot detect a failed init (though m_bInitialized stays unset);
//  - the shallow struct copy (*m_pCodecCtx) = (*pCtx) duplicates internal
//    pointers owned by pCtx — fragile across FFmpeg versions;
//  - m_pCodecCtx from avcodec_alloc_context3() is leaked on the failure
//    path.
int H264Decoder::Init(AVCodecContext *pCtx){
    av_register_all();
    avcodec_register_all();
    m_pCodec = avcodec_find_decoder(CODEC_ID_H264);
    m_pCodecCtx = avcodec_alloc_context3(m_pCodec);
    // Shallow copy of the caller's context settings into our own context.
    (*m_pCodecCtx) = (*pCtx);
    if (avcodec_open2(m_pCodecCtx, m_pCodec,NULL) < 0)
    {
        return 0; // NOTE(review): same value as the success path
    }
    m_pFrame = avcodec_alloc_frame(); // deprecated API; av_frame_alloc() in newer FFmpeg
    m_pImgConvertCtx = NULL;
    m_bInitialized = TRUE;
    return 0;
}
/*
 * Entry point: register all codecs and run the audio encoding example,
 * writing an MP2 file to a fixed path.
 *
 * Fixed: removed the unused local `output_type`.
 */
int main(int argc, char **argv)
{
    /* register all the codecs */
    avcodec_register_all();

    audio_encode_example("c:/test.mp2");

    return 0;
}
// Test-fixture application scope: initializes GoogleTest, optionally
// registers FFmpeg (when built with __FFMPEGFILE__), then creates the
// singleton MixxxApplication and loads the sound-source plugins.
MixxxTest::ApplicationScope::ApplicationScope(int argc, char** argv) {
    DEBUG_ASSERT(!s_pApplication);

    testing::InitGoogleTest(&argc, argv);

#ifdef __FFMPEGFILE__
    // FFmpeg-backed SoundSources need the global registries populated.
    av_register_all();
    avcodec_register_all();
#endif

    s_pApplication.reset(new MixxxApplication(argc, argv));
    SoundSourceProxy::loadPlugins();
}
void SamplePlayer_SW::initCodec() { avcodec_register_all(); av_register_all(); AVDictionary* dict; int stream_index = -1; if (avformat_open_input(&m_ContainerCtx, m_SourceFile.c_str(), NULL, NULL) < 0) { LOG_ERROR("Cound not open file: " << m_SourceFile.c_str()); } avformat_find_stream_info(m_ContainerCtx, NULL); LOG_INFO("Loading audio file " << m_SourceFile); LOG_INFO("#Streams: " << m_ContainerCtx->nb_streams); for (unsigned int i = 0; i < m_ContainerCtx->nb_streams; i++) { if (m_ContainerCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) { stream_index = i; break; } } if(stream_index == -1){ LOG_ERROR("Could not find audio"); } m_CodecCtx = m_ContainerCtx->streams[stream_index]->codec; m_Codec = avcodec_find_decoder(m_CodecCtx->codec_id); if (!m_Codec) { LOG_ERROR("Codec not found"); avformat_close_input(&m_ContainerCtx); exit(1); } /* request internal sample format */ LOG_INFO("Codec tag: "<< m_Codec->name << ", Codec ID: " << m_Codec->id); LOG_DEBUG("#Channels: " << m_CodecCtx->channels); /* open it */ if (avcodec_open2(m_CodecCtx, m_Codec, NULL) < 0) { LOG_ERROR("Could not open codec"); exit(1); } }
/* Per-instance initialisation for this GEGL op: allocates the Priv state
 * on first use, performs process-wide FFmpeg registration exactly once,
 * and (unless audio is compiled out) resolves the oxide audio provider
 * and sizes a PCM buffer from its reported format.
 *
 * NOTE(review): `op` used below is not a visible parameter of this
 * function — presumably a macro or file-scope symbol; confirm against
 * the rest of the file. */
static void init (GeglProperties *o)
{
  /* process-wide guard: FFmpeg registration must only happen once */
  static gint inited = 0; /*< this is actually meant to be static, only to be done once */
  Priv *p = (Priv*)o->user_data;

  if (p == NULL)
    {
      p = g_new0 (Priv, 1);
      o->user_data = (void*) p;
    }

  if (!inited)
    {
      av_register_all ();
      avcodec_register_all ();
      inited = 1;
    }

#ifndef DISABLE_AUDIO
  /* resolve the oxide audio callbacks */
  p->oxide_audio_instance = gggl_op_sym (op, "oxide_audio_instance");
  p->oxide_audio_query = gggl_op_sym (op, "oxide_audio_query()");
  p->oxide_audio_get_fragment = gggl_op_sym (op, "oxide_audio_get_fragment()");

  if (p->oxide_audio_instance && p->oxide_audio_query)
    {
      /* query the audio format so the buffer below can be sized */
      p->oxide_audio_query (p->oxide_audio_instance,
                            &p->sample_rate,
                            &p->bits,
                            &p->channels,
                            &p->fragment_samples,
                            &p->fragment_size);

      /* FIXME: for now, the buffer is set to a size double that of a oxide
       * provided fragment,. should be enough no matter how things are handled,
       * but it should also be more than needed,. find out exact amount needed later */
      if (!p->buffer)
        {
          int size = (p->sample_rate / p->fps) * p->channels * (p->bits / 8) * 2;
          buffer_open (op, size);
        }

      if (!p->fragment)
        p->fragment = gggl_op_calloc (op, 1, p->fragment_size);
    }
#endif
}
// Enumerate the names of every audio encoder known to the linked FFmpeg
// build. The list is computed once and cached in a function-local static;
// subsequent calls return the cached copy.
QStringList AudioEncoder::supportedCodecs()
{
    static QStringList codecs;
    if (!codecs.isEmpty())
        return codecs; // already enumerated

    avcodec_register_all();

    AVCodec* codec = NULL;
    while ((codec = av_codec_next(codec))) {
        const bool isAudioEncoder =
            av_codec_is_encoder(codec) && codec->type == AVMEDIA_TYPE_AUDIO;
        if (isAudioEncoder)
            codecs.append(QString::fromLatin1(codec->name));
    }
    return codecs;
}
void xivss_highlevel_initialize() { //Register API lua_createtable(L,0,32); lua_setglobal(L,"IVSSAPI"); highlevel_addfunction("IVSSAPI","LoadModel",xivss_highlevel_loadmodel); highlevel_addfunction("IVSSAPI","ReloadModel",xivss_highlevel_reloadmodel); #ifdef FFMPEG_SUPPORT avcodec_register_all(); //av_log_set_level(AV_LOG_QUIET); #endif }
int CamReadThread::EncodeInit() { c= NULL; int yuv420_bytes = 0; m_pRGBFrame = new AVFrame[1]; //RGB帧数据 m_pYUVFrame = new AVFrame[1]; //YUV帧数据 av_register_all(); avcodec_register_all(); avformat_network_init(); fmtctx = avformat_alloc_context(); codec = avcodec_find_encoder(AV_CODEC_ID_H264); if (!codec) { dbgprint("%s(%d),%d CAM Codec not found!\n",DEBUGARGS,CameraID); return -1; } video_st = avformat_new_stream(fmtctx, codec); c = video_st->codec; avcodec_get_context_defaults3(c, codec); if (!c) { dbgprint("%s(%d),%d CAM Could not allocate video codec context!\n",DEBUGARGS,CameraID); return -1; } c->codec_id = AV_CODEC_ID_H264; c->width = m_cols; c->height = m_rows; c->time_base = (AVRational){1,m_fps}; c->gop_size = 10; c->max_b_frames = 1; c->pix_fmt = AV_PIX_FMT_YUV420P; if (avcodec_open2(c, codec, NULL) < 0) { dbgprint("%s(%d),%d CAM Could not open codec!\n",DEBUGARGS,CameraID); return -1; } m_pYUVFrame = av_frame_alloc(); yuv420_bytes = avpicture_get_size( AV_PIX_FMT_YUV420P, m_cols, m_rows); pYUV_buffer = (uint8_t *)av_malloc(yuv420_bytes*sizeof(uint8_t)); m_pYUVFrame->format = c->pix_fmt; m_pYUVFrame->width = c->width; m_pYUVFrame->height = c->height; avpicture_fill((AVPicture*)m_pYUVFrame, pYUV_buffer, AV_PIX_FMT_YUV420P, m_cols, m_rows); scxt = sws_getContext(m_cols, m_rows,AV_PIX_FMT_RGB24, m_cols, m_rows,AV_PIX_FMT_YUV420P, 0, 0, 0, 0); return 0; }
/* Create an FFmpeg-backed AAC encoder instance for OBS.
 *
 * Reads "bitrate" (kbit/s) from settings, finds FFmpeg's AAC encoder,
 * configures a codec context from the output audio parameters and opens
 * it via initialize_codec(). Returns the encoder state on success, or
 * NULL on any failure (aac_destroy() handles partial cleanup on the
 * fail path). */
static void *aac_create(obs_data_t settings, obs_encoder_t encoder)
{
    struct aac_encoder *enc;
    int bitrate = (int)obs_data_getint(settings, "bitrate");
    audio_t audio = obs_encoder_audio(encoder);

    if (!bitrate) {
        aac_warn("aac_create", "Invalid bitrate specified");
        return NULL;
    }

    avcodec_register_all();

    enc = bzalloc(sizeof(struct aac_encoder));
    enc->encoder = encoder;

    enc->aac = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!enc->aac) {
        aac_warn("aac_create", "Couldn't find encoder");
        goto fail;
    }

    blog(LOG_INFO, "Using ffmpeg \"%s\" aac encoder", enc->aac->name);

    enc->context = avcodec_alloc_context3(enc->aac);
    if (!enc->context) {
        aac_warn("aac_create", "Failed to create codec context");
        goto fail;
    }

    enc->context->bit_rate = bitrate * 1000;
    enc->context->channels = (int)audio_output_channels(audio);
    enc->context->sample_rate = audio_output_samplerate(audio);
    /* prefer the encoder's native sample format; fall back to planar float */
    enc->context->sample_fmt = enc->aac->sample_fmts ?
        enc->aac->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;

    init_sizes(enc, audio);

    /* enable experimental FFmpeg encoder if the only one available */
    enc->context->strict_std_compliance = -2;

    enc->context->flags = CODEC_FLAG_GLOBAL_HEADER;

    if (initialize_codec(enc))
        return enc;

fail:
    aac_destroy(enc);
    return NULL;
}
/* Module bootstrap: register codecs, capture devices and (on new enough
 * libavformat) the network layer, then register the "avformat" video
 * source with baresip. */
static int module_init(void)
{
    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();

#if LIBAVFORMAT_VERSION_INT >= ((53<<16) + (13<<8) + 0)
    avformat_network_init();
#endif

    av_register_all();

    return vidsrc_register(&mod_avf, baresip_vidsrcl(),
                           "avformat", alloc, NULL);
}
// Construct the writer with every buffer/stream/codec pointer cleared,
// register FFmpeg formats and codecs, and zero the output format
// description.
AVFormatWriter::AVFormatWriter()
    : FileWriterBase(),
      m_avfRingBuffer(NULL),
      m_ringBuffer(NULL),
      m_ctx(NULL),
      m_videoStream(NULL),
      m_avVideoCodec(NULL),
      m_audioStream(NULL),
      m_avAudioCodec(NULL),
      m_picture(NULL),
      m_audPicture(NULL),
      m_audioInBuf(NULL),
      m_audioInPBuf(NULL)
{
    av_register_all();
    avcodec_register_all();

    memset(&m_fmt, 0, sizeof(m_fmt));
}
// Register the cellAdec HLE module entry points (PS3 audio decoder API),
// keyed by their function-id hashes, then perform the FFmpeg
// format/codec registration the decoder backend relies on.
void cellAdec_init()
{
    cellAdec.AddFunc(0x7e4a4a49, cellAdecQueryAttr);
    cellAdec.AddFunc(0xd00a6988, cellAdecOpen);
    cellAdec.AddFunc(0x8b5551a4, cellAdecOpenEx);
    cellAdec.AddFunc(0x847d2380, cellAdecClose);
    cellAdec.AddFunc(0x487b613e, cellAdecStartSeq);
    cellAdec.AddFunc(0xe2ea549b, cellAdecEndSeq);
    cellAdec.AddFunc(0x1529e506, cellAdecDecodeAu);
    cellAdec.AddFunc(0x97ff2af1, cellAdecGetPcm);
    cellAdec.AddFunc(0xbd75f78b, cellAdecGetPcmItem);

    // FFmpeg backend used by the HLE decoder implementation.
    av_register_all();
    avcodec_register_all();
}
bool mythFFmpegEncoder::Init(){ //if it has been initialized before, we should do cleanup first Cleanup(); /* Initialize libavcodec, and register all codecs and formats. */ av_register_all(); avcodec_register_all(); video_codec = avcodec_find_encoder(AV_CODEC_ID_H264); if (!video_codec){ Cleanup(); return false; } c = avcodec_alloc_context3(video_codec); AVDictionary *opts = NULL; av_dict_set(&opts, "b", "2.5M", 0); c->width = mwidth; c->height = mheight; c->bit_rate = 100000; c->gop_size = 25; AVRational ration = { 1, 25 }; c->time_base = ration; c->pix_fmt = PIX_FMT_YUV420P; c->max_b_frames = 1; av_opt_set(c->priv_data, "preset", "ultrafast", 0); //ultrafast,superfast, veryfast, faster, fast, medium, slow, slower, veryslow,placebo. //av_opt_set(c->priv_data, "profile", "high", 0); //baseline main high //av_opt_set(c->priv_data, "level", "4.0", 0); av_opt_set(c->priv_data, "tune", "zerolatency", 0); // tune /* open the codec */ int ret = avcodec_open2(c, video_codec, &opts); if (ret < 0) { Cleanup(); return false; } /* allocate and init a re-usable frame */ frame = av_frame_alloc(); if (!frame) { Cleanup(); return false; } frame->format = c->pix_fmt; frame->width = c->width; frame->height = c->height; av_init_packet(&avpkt); return true; }
////////////////////////////////////////////////////////////////////// // Init - initialized or reiniyialized encoder SDK with given input // and output settings // // NOTE: these should all be replaced with calls to the API functions // lame_set_*(). Applications should not directly access the 'pgf' // data structure. // ////////////////////////////////////////////////////////////////////// HRESULT CEncoder2::Init() { CAutoLock l(this); /* Initialize avcodec lib */ avcodec_init(); /* Our "all" is only the mpeg 1 layer 2 audio anyway... */ avcodec_register_all(); codec = avcodec_find_encoder(CODEC_ID_MP2); if (!codec) { // fprintf(logfile,"Couldn't find codec\n"); // fclose(logfile); return E_OUTOFMEMORY; } // fprintf(logfile,"Allocating context\n"); c= avcodec_alloc_context(); /* put sample parameters */ if (m_mabsi.dwBitrate > 384) { m_mabsi.dwBitrate = 128; } c->bit_rate = m_mabsi.dwBitrate * 1000; c->sample_rate = m_wfex.nSamplesPerSec; c->channels = m_wfex.nChannels; DbgLog((LOG_TRACE, 1, TEXT("Using bitrate=%d sampling=%d"), (LONG)c->bit_rate, (LONG)c->sample_rate)); /* open it */ if (avcodec_open(c, codec) < 0) { // fprintf(logfile,"Could not open codec\n"); // fclose(logfile); return E_OUTOFMEMORY; } /* the codec gives us the frame size, in samples */ frame_size = c->frame_size; DbgLog((LOG_TRACE, 1, TEXT("FrameSize=%d\r\n"), (LONG)frame_size)); samples = (UINT8 *) malloc(frame_size * 2 * c->channels); outbuf_size = 10000; outbuf = (UINT8 *) malloc(outbuf_size); filled=0; return S_OK; }
/**
 * To debug ffmpeg, type this command on the console before starting playback:
 *   setprop debug.nam.ffmpeg 1
 * To disable the debug, type:
 *   setprop debug.nam.ffmpeg 0
 */
// Reference-counted, mutex-guarded FFmpeg initialisation. The first
// caller performs the one-time setup (log callback, registration,
// network init, android source, lock manager); later callers only bump
// the counter and refresh the log level. Returns OK, or NO_INIT when the
// lock manager could not be installed.
status_t initFFmpeg()
{
    status_t ret = OK;
    bool debug_enabled = false;
    char value[PROPERTY_VALUE_MAX];

    pthread_mutex_lock(&init_mutex);

    // The property accepts "1" or (case-insensitively) "true".
    if (property_get("debug.nam.ffmpeg", value, NULL)
        && (!strcmp(value, "1") || !av_strcasecmp(value, "true"))) {
        LOGI("set ffmpeg debug level to AV_LOG_DEBUG");
        debug_enabled = true;
    }
    if (debug_enabled)
        av_log_set_level(AV_LOG_DEBUG);
    else
        av_log_set_level(AV_LOG_INFO);

    if(ref_count == 0) {
        // One-time setup, performed only by the first initializer.
        nam_av_log_set_flags(AV_LOG_SKIP_REPEATED);
        av_log_set_callback(nam_av_log_callback);

        /* register all codecs, demux and protocols */
        avcodec_register_all();
#if CONFIG_AVDEVICE
        avdevice_register_all();
#endif
        av_register_all();
        avformat_network_init();

        /* register android source */
        ffmpeg_register_android_source();

        init_opts();

        if (av_lockmgr_register(lockmgr)) {
            LOGE("could not initialize lock manager!");
            ret = NO_INIT;
        }
    }

    // update counter
    ref_count++;

    pthread_mutex_unlock(&init_mutex);

    return ret;
}
// Configure X11 screen capture through FFmpeg's x11grab input device:
// registers FFmpeg, opens the display region at the requested
// resolution/framerate, prints a YUV4MPEG2 header to stdout, sizes the
// YUV working buffer and starts the webcam-grabbing thread.
//
// NOTE(review): `throw - 0x10` / `throw - 0x11` throw the plain int
// values -16 / -17 (unary minus on the literal) — confirm callers catch
// int. The hard-coded ":0.0+..." offsets and the 1080p special case look
// setup-specific; verify against the deployment environment.
ScreenGrabber::ScreenGrabber(int inputWidth, int inputHeight)
{
    avcodec_register_all();
    avdevice_register_all();
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    webcamCapture.resize(WebcamWidth * WebcamHeight * 4);

    // Display + capture offset; a different offset is used for 1080p input.
    auto fileName = ":0.0+65,126";
    if (inputHeight == 1080)
        fileName = ":0.0+0,74";

    auto format = "x11grab";
    auto inputFormat = av_find_input_format(format);
    if (!inputFormat)
    {
        std::cerr << "Unknown input format: '" << format << "'" << std::endl;
        exit(1);
    }

    // Capture options: framerate and geometry.
    AVDictionary *format_opts = NULL;
    av_dict_set(&format_opts, "framerate", std::to_string(OutputFrameRate).c_str(), 0);
    std::string resolution = std::to_string(inputWidth) + "x" + std::to_string(inputHeight);
    av_dict_set(&format_opts, "video_size", resolution.c_str(), 0);

    int len = avformat_open_input(&formatContext, fileName, inputFormat, &format_opts);
    if (len != 0)
    {
        std::cerr << "Could not open input " << fileName << std::endl;
        throw - 0x10; // NOTE(review): throws int -16
    }
    if (avformat_find_stream_info(formatContext, NULL) < 0)
    {
        std::cerr << "Could not read stream information from " << fileName << std::endl;
        throw - 0x11; // NOTE(review): throws int -17
    }
    av_dump_format(formatContext, 0, fileName, 0);
    av_dict_free(&format_opts);

    // Actual capture geometry as negotiated by the device.
    width = formatContext->streams[0]->codecpar->width;
    height = formatContext->streams[0]->codecpar->height;

    // Emit the YUV4MPEG2 stream header on stdout.
    std::cout << "YUV4MPEG2 W" << width << " H" << height << " F" << OutputFrameRate
              << ":1 Ip A0:0 C420jpeg XYSCSS=420JPEG\n";

    // One frame of planar 4:2:0 data.
    yuv.resize(width * height * 3 / 2);
    memset(&packet, 0, sizeof(packet));

    webcamThread = std::make_unique<std::thread>(webcamGrabber, std::ref(webcamCapture), std::ref(done));
}
// Initialise the library.
// Pass in the program arguments through argc, argv
int rtk_init(int *argc, char ***argv)
{
    // Initialise the gtk lib
    gtk_init(argc, argv);

    // Allow rgb image handling
    gdk_rgb_init();

#ifdef ENABLE_AVCODEC
    // Allow movie capture
    avcodec_init();
    avcodec_register_all();
#endif

    return 0;
}
// Construct an idle OutputProcessor: no coders or packagers attached,
// all working buffers unallocated, timestamp at zero. Also performs
// FFmpeg codec/format registration.
OutputProcessor::OutputProcessor()
{
    // Codec / packager slots start empty.
    audioCoder = 0;
    videoCoder = 0;
    audioPackager = 0;
    videoPackager = 0;
    timestamp_ = 0;

    // Working buffers are allocated lazily elsewhere.
    encodedBuffer_ = NULL;
    packagedBuffer_ = NULL;
    rtpBuffer_ = NULL;

    avcodec_register_all();
    av_register_all();
}
void FeMedia::init_av() { static bool do_init=true; if ( do_init ) { avcodec_register_all(); av_register_all(); #ifndef FE_DEBUG av_log_set_level(AV_LOG_FATAL); #endif do_init=false; } }
int groove_init(void) { av_lockmgr_register(&my_lockmgr_cb); srand(time(NULL)); // register all codecs, demux and protocols avcodec_register_all(); av_register_all(); avformat_network_init(); avfilter_register_all(); should_deinit_network = 1; av_log_set_level(AV_LOG_QUIET); return 0; }
// Factory for SimpleAT3 decoders. Returns NULL when FFmpeg support is
// compiled out or when the decoder fails to initialise.
SimpleAT3 *AT3Create() {
#ifdef USE_FFMPEG
    avcodec_register_all();
    av_register_all();
    InitFFmpeg();

    SimpleAT3 *at3 = new SimpleAT3();
    if (at3->IsOK())
        return at3;

    // Construction failed; release and signal failure.
    delete at3;
    return 0;
#else
    return 0;
#endif  // USE_FFMPEG
}
// Construct the writer with all stream/codec pointers and output buffers
// cleared, then register FFmpeg formats and codecs.
AVFormatWriter::AVFormatWriter()
    : FileWriterBase(),
      m_avfRingBuffer(NULL),
      m_ringBuffer(NULL),
      m_ctx(NULL),
      m_videoStream(NULL),
      m_avVideoCodec(NULL),
      m_audioStream(NULL),
      m_avAudioCodec(NULL),
      m_picture(NULL),
      m_tmpPicture(NULL),
      m_videoOutBuf(NULL),
      m_audioOutBuf(NULL),
      m_audioOutBufSize(0),
      m_audioFltBuf(NULL)
{
    av_register_all();
    avcodec_register_all();
}
// Idempotent registration entry point for this minimal FFmpeg build:
// wires up codecs, the AVI demuxer and the file protocol exactly once.
// (Original Chinese comments translated to English.)
void av_register_all(void)
{
    static int inited = 0;

    if (inited != 0)
        return ;
    inited = 1;

    // Initialize CPU-specific acceleration instructions.
    avcodec_init();

    // Append every codec to the list headed by first_avcodec.
    avcodec_register_all();

    // Append the supported file formats to the list headed by first_iformat.
    avidec_init();

    // Append the file read/write protocol to the list headed by first_protocol.
    register_protocol(&file_protocol);
}
// Build an external (recording) output for the given URL. Registers
// FFmpeg, creates the FEC receiver / stats / quality manager, indexes
// the supplied RTP mappings by payload type, and allocates the libav
// output context with a format guessed from the URL.
//
// Fixed: std::string::copy() does not null-terminate the destination, so
// a URL as long as context_->filename previously left the buffer
// unterminated. The copy is now bounded to sizeof-1 and explicitly
// terminated.
ExternalOutput::ExternalOutput(std::shared_ptr<Worker> worker, const std::string& output_url,
                               const std::vector<RtpMap> rtp_mappings)
    : worker_{worker}, pipeline_{Pipeline::create()}, audio_queue_{5.0, 10.0}, video_queue_{5.0, 10.0},
      inited_{false}, video_stream_{nullptr}, audio_stream_{nullptr}, video_source_ssrc_{0},
      first_video_timestamp_{-1}, first_audio_timestamp_{-1}, first_data_received_{},
      video_offset_ms_{-1}, audio_offset_ms_{-1}, need_to_send_fir_{true}, rtp_mappings_{rtp_mappings},
      video_codec_{AV_CODEC_ID_NONE}, audio_codec_{AV_CODEC_ID_NONE}, pipeline_initialized_{false} {
  ELOG_DEBUG("Creating output to %s", output_url.c_str());

  fb_sink_ = nullptr;
  sink_fb_source_ = this;

  // TODO(pedro): these should really only be called once per application run
  av_register_all();
  avcodec_register_all();

  fec_receiver_.reset(webrtc::UlpfecReceiver::Create(this));
  stats_ = std::make_shared<Stats>();
  quality_manager_ = std::make_shared<QualityManager>();

  // Index the RTP mappings by payload type, split by media kind.
  for (auto rtp_map : rtp_mappings_) {
    switch (rtp_map.media_type) {
      case AUDIO_TYPE:
        audio_maps_[rtp_map.payload_type] = rtp_map;
        break;
      case VIDEO_TYPE:
        video_maps_[rtp_map.payload_type] = rtp_map;
        break;
      case OTHER:
        break;
    }
  }

  context_ = avformat_alloc_context();
  if (context_ == nullptr) {
    ELOG_ERROR("Error allocating memory for IO context");
  } else {
    // std::string::copy() does NOT append '\0'; bound the copy and
    // terminate explicitly so the later %s / av_guess_format uses are safe.
    const size_t copied = output_url.copy(context_->filename, sizeof(context_->filename) - 1, 0);
    context_->filename[copied] = '\0';

    context_->oformat = av_guess_format(nullptr, context_->filename, nullptr);
    if (!context_->oformat) {
      ELOG_ERROR("Error guessing format %s", context_->filename);
    }
  }
}
/* Allocate resources and write header data to the output file. */
/* Opens the encoder identified by codec_id at the given fps/size,
 * opens `filename` for writing and allocates one reusable frame.
 * All state lands in file-scope globals (c, file, frame); any failure
 * prints to stderr and exits the process. */
void ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height)
{
    AVCodec *codec;
    int ret;

    avcodec_register_all();

    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
    c->bit_rate = 400000;
    c->width = width;
    c->height = height;
    /* time base = 1/fps */
    c->time_base.num = 1;
    c->time_base.den = fps;
    c->gop_size = 10;   /* at most one intra frame every 10 frames */
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;
    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }
    file = fopen(filename, "wb");
    if (!file) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;
    /* 32-byte alignment for the picture buffer (SIMD-friendly) */
    ret = av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, frame->format, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }
}