int VideoDecoderDXVAPrivate::aligned(int x)
{
    // from lavfilters
    int align = 16;
    // MPEG-2 needs higher alignment on Intel cards, and it doesn't seem to harm anything to do it for all cards.
    if (codec_ctx->codec_id == QTAV_CODEC_ID(MPEG2VIDEO))
        align <<= 1;
    else if (codec_ctx->codec_id == QTAV_CODEC_ID(HEVC))
        align = 128;
    return FFALIGN(x, align);
}
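A quick illustration (not part of QtAV) of what aligned() evaluates to: FFALIGN is FFmpeg's round-up macro, reproduced here from libavutil so the example is self-contained; the sample sizes are hypothetical.

// Illustration only: FFALIGN as defined by FFmpeg's libavutil (round x up to a multiple of a).
#define FFALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
// With the alignments chosen in aligned() above:
//   H.264:  align = 16  -> FFALIGN(1920, 16)  == 1920, FFALIGN(1080, 16) == 1088
//   MPEG-2: align = 32  -> FFALIGN(1080, 32)  == 1088
//   HEVC:   align = 128 -> FFALIGN(1080, 128) == 1152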
bool VideoDecoderD3DPrivate::setup(AVCodecContext *avctx)
{
    const int w = codedWidth(avctx);
    const int h = codedHeight(avctx);
    if (avctx->hwaccel_context && surface_width == aligned(w) && surface_height == aligned(h))
        return true;
    width = avctx->width; // not necessary. set in decode()
    height = avctx->height;
    codec_ctx->hwaccel_context = NULL;
    releaseUSWC();
    destroyDecoder();
    avctx->hwaccel_context = NULL;
    /* Allocate all surfaces needed for the decoder */
    if (surface_auto) {
        switch (codec_ctx->codec_id) {
        case QTAV_CODEC_ID(HEVC):
        case QTAV_CODEC_ID(H264):
            surface_count = 16 + 4;
            break;
        case QTAV_CODEC_ID(MPEG1VIDEO):
        case QTAV_CODEC_ID(MPEG2VIDEO):
            surface_count = 2 + 4;
            break;
        default:
            surface_count = 2 + 4;
            break;
        }
        if (avctx->active_thread_type & FF_THREAD_FRAME)
            surface_count += avctx->thread_count;
    }
    qDebug(">>>>>>>>>>>>>>>>>>>>>surfaces: %d, active_thread_type: %d, threads: %d, refs: %d", surface_count, avctx->active_thread_type, avctx->thread_count, avctx->refs);
    if (surface_count == 0) {
        qWarning("internal error: wrong surface count. %u auto=%d", surface_count, surface_auto);
        surface_count = 16 + 4;
    }
    qDeleteAll(surfaces);
    surfaces.clear();
    hw_surfaces.clear();
    surfaces.resize(surface_count);
    if (!createDecoder(codec_ctx->codec_id, w, h, surfaces))
        return false;
    hw_surfaces.resize(surface_count);
    for (int i = 0; i < surfaces.size(); ++i) {
        hw_surfaces[i] = surfaces[i]->getSurface();
    }
    surface_order = 0;
    surface_width = aligned(w);
    surface_height = aligned(h);
    setupAVVAContext(avctx); // can not use codec_ctx for threaded mode!
    initUSWC(surface_width);
    return true;
}
bool VideoDecoderFFmpegHWPrivate::prepare()
{
    //// From vlc begin
    codec_ctx->thread_safe_callbacks = true; //?
#ifdef _MSC_VER
#pragma warning(disable:4065) //vc: switch has default but no case
#endif //_MSC_VER
    switch (codec_ctx->codec_id) {
# if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55, 1, 0))
    /// tested with libav-9.x + va-api. If this code is removed: "Bug detected, please report the issue. Context scratch buffers could not be allocated due to unknown size"
    case QTAV_CODEC_ID(H264):
    case QTAV_CODEC_ID(VC1):
    case QTAV_CODEC_ID(WMV3):
        codec_ctx->thread_type &= ~FF_THREAD_FRAME;
# endif
    default:
        break;
    }
    //// From vlc end
    //TODO: necessary?
#if 0
    if (!setup(codec_ctx)) {
        qWarning("Setup va failed.");
        return false;
    }
#endif
    codec_ctx->opaque = this; //is it ok?
    pixfmt = codec_ctx->pix_fmt;
    get_format = codec_ctx->get_format;
#if QTAV_HAVE(AVBUFREF)
    get_buffer2 = codec_ctx->get_buffer2;
#else
    get_buffer = codec_ctx->get_buffer;
    reget_buffer = codec_ctx->reget_buffer;
    release_buffer = codec_ctx->release_buffer;
#endif //QTAV_HAVE(AVBUFREF)
    codec_ctx->get_format = ffmpeg_get_va_format;
#if QTAV_HAVE(AVBUFREF)
    codec_ctx->get_buffer2 = ffmpeg_get_va_buffer2;
#else
    // TODO: FF_API_GET_BUFFER
    codec_ctx->get_buffer = ffmpeg_get_va_buffer; //ffmpeg_GetFrameBuf;
    codec_ctx->reget_buffer = avcodec_default_reget_buffer;
    codec_ctx->release_buffer = ffmpeg_release_va_buffer; //ffmpeg_ReleaseFrameBuf;
#endif //QTAV_HAVE(AVBUFREF)
    return true;
}
const codec_profile_t* findProfileEntry(AVCodecID codec, int profile)
{
    if (codec == QTAV_CODEC_ID(NONE))
        return 0;
    for (int i = 0; va_profiles[i].codec != QTAV_CODEC_ID(NONE); ++i) {
        const codec_profile_t* p = &va_profiles[i];
        if (codec != p->codec)
            continue;
        // return the first profile entry if the given profile is unknown
        if (profile == FF_PROFILE_UNKNOWN
                || p->profile == FF_PROFILE_UNKNOWN // force the profile
                || profile == p->profile)
            return p;
    }
    return 0;
}
const codec_profile_t* findProfileEntry(AVCodecID codec, int profile, const codec_profile_t* p0 = NULL)
{
    if (codec == QTAV_CODEC_ID(NONE))
        return 0;
    if (p0 && p0->codec == QTAV_CODEC_ID(NONE)) // p0 already points at the end of the table
        return 0;
    const codec_profile_t* pe0 = p0 ? ++p0 : va_profiles;
    for (const codec_profile_t* p = pe0; p->codec != QTAV_CODEC_ID(NONE); ++p) {
        if (codec != p->codec)
            continue;
        // return the first profile entry if the given profile is unknown
        if (profile == FF_PROFILE_UNKNOWN
                || p->profile == FF_PROFILE_UNKNOWN // force the profile
                || profile == p->profile)
            return p;
    }
    return 0;
}
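The extra p0 parameter lets a caller resume the search right after a previous match. A minimal usage sketch (not from the QtAV source; codec and profile stand for the requested codec id and FFmpeg profile):

// Sketch only: the p0 parameter allows resuming the scan, so all table entries
// matching a codec/profile pair can be visited in order.
for (const codec_profile_t* e = findProfileEntry(codec, profile);
     e;
     e = findProfileEntry(codec, profile, e)) { // resume right after the previous match
    // ... try e->va_profile here, e.g. check it against vaQueryConfigProfiles() output ...
}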
bool VideoDecoderDXVAPrivate::open()
{
    if (!prepare())
        return false;
    if (codec_ctx->codec_id == QTAV_CODEC_ID(HEVC)) {
        // runtime hevc check
        if (isHEVCSupported()) {
            qWarning("HEVC DXVA2 is supported by current FFmpeg runtime.");
        } else {
            qWarning("HEVC DXVA2 is not supported by current FFmpeg runtime.");
            return false;
        }
    }
    if (!D3dCreateDevice()) {
        qWarning("Failed to create Direct3D device");
        goto error;
    }
    qDebug("D3dCreateDevice succeed");
    if (!D3dCreateDeviceManager()) {
        qWarning("D3dCreateDeviceManager failed");
        goto error;
    }
    if (!DxCreateVideoService()) {
        qWarning("DxCreateVideoService failed");
        goto error;
    }
    if (!DxFindVideoServiceConversion(&input, &render)) {
        qWarning("DxFindVideoServiceConversion failed");
        goto error;
    }
    IDirect3DDevice9Ex *devEx;
    d3ddev->QueryInterface(IID_IDirect3DDevice9Ex, (void**)&devEx);
    qDebug("using D3D9Ex: %d", !!devEx);
    // runtime check gles for dynamic gl
#if QTAV_HAVE(DXVA_EGL)
    if (OpenGLHelper::isOpenGLES()) {
        // d3d9ex is required to share d3d resources. It's available in Vista and later. d3d9 can not CreateTexture with a shared handle
        if (devEx)
            interop_res = dxva::InteropResourcePtr(new dxva::EGLInteropResource(d3ddev));
        else
            qDebug("D3D9Ex is not available. Disable 0-copy.");
    }
#endif
    SafeRelease(&devEx);
#if QTAV_HAVE(DXVA_GL)
    if (!OpenGLHelper::isOpenGLES())
        interop_res = dxva::InteropResourcePtr(new dxva::GLInteropResource(d3ddev));
#endif
    return true;
error:
    close();
    return false;
}
bool AVMuxer::Private::prepareStreams()
{
    audio_streams.clear();
    video_streams.clear();
    subtitle_streams.clear();
    AVOutputFormat* fmt = format_ctx->oformat;
    if (venc && !venc->codecName().isEmpty()) {
        AVCodec *codec = avcodec_find_encoder_by_name(venc->codecName().toUtf8().constData());
        if (!codec) {
            qWarning("Can not find encoder '%s'", venc->codecName().toUtf8().constData());
            return false;
        }
        addStream(format_ctx, codec->id);
    } else if (fmt->video_codec != QTAV_CODEC_ID(NONE)) {
        addStream(format_ctx, fmt->video_codec);
    }
    return true;
}
bool VideoEncoderFFmpegPrivate::open()
{
    if (codec_name.isEmpty()) {
        // copy ctx from muxer
        AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    qDebug("tbc: %f", av_q2d(avctx->time_base));
    avctx->width = width; // coded_width works, why?
    avctx->height = height;
    avctx->pix_fmt = QTAV_PIX_FMT_C(YUV420P);
    avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
    //avctx->max_b_frames = 3;//h264
    qDebug("2 tbc: %f=%d/%d", av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den);
    avctx->bit_rate = bit_rate;
    // Set Option
    AVDictionary *param = 0;
#if 0
    //H.264
    if (avctx->codec_id == QTAV_CODEC_ID(H264)) {
        av_dict_set(&param, "preset", "slow", 0);
        av_dict_set(&param, "tune", "zerolatency", 0);
        //av_dict_set(&param, "profile", "main", 0);
    }
    //H.265
    if (avctx->codec_id == AV_CODEC_ID_H265) {
        av_dict_set(&param, "preset", "ultrafast", 0);
        av_dict_set(&param, "tune", "zero-latency", 0);
    }
#endif
    applyOptionsForContext();
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture)); //??
    buffer.resize(buffer_size);
    return true;
}
bool VideoDecoderDXVAPrivate::open()
{
    if (codec_ctx->codec_id == QTAV_CODEC_ID(HEVC)) {
        // runtime hevc check
        if (isHEVCSupported()) {
            qWarning("HEVC DXVA2 is supported by current FFmpeg runtime.");
        } else {
            qWarning("HEVC DXVA2 is not supported by current FFmpeg runtime.");
            return false;
        }
    }
    if (!D3dCreateDevice()) {
        qWarning("Failed to create Direct3D device");
        goto error;
    }
    qDebug("D3dCreateDevice succeed");
    if (!D3dCreateDeviceManager()) {
        qWarning("D3dCreateDeviceManager failed");
        goto error;
    }
    if (!DxCreateVideoService()) {
        qWarning("DxCreateVideoService failed");
        goto error;
    }
    if (!DxFindVideoServiceConversion(&input, &render)) {
        qWarning("DxFindVideoServiceConversion failed");
        goto error;
    }
    IDirect3DDevice9Ex *devEx;
    d3ddev->QueryInterface(IID_IDirect3DDevice9Ex, (void**)&devEx);
    qDebug("using D3D9Ex: %d", !!devEx);
    SafeRelease(&devEx);
    /* TODO: print the hardware name/vendor for debugging purposes */
    return true;
error:
    close();
    return false;
}
bool VideoDecoderD3DPrivate::open()
{
    if (!prepare())
        return false;
    if (codec_ctx->codec_id == QTAV_CODEC_ID(HEVC)) {
        // runtime hevc check
        if (!isHEVCSupported()) {
            qWarning("HEVC DXVA2/D3D11VA is not supported by current FFmpeg runtime.");
            return false;
        }
    }
    if (!createDevice())
        return false;
    format_fcc = 0;
    QVector<GUID> codecs = getSupportedCodecs();
    const d3d_format_t *fmt = getFormat(codec_ctx, codecs, &codec_guid);
    if (!fmt)
        return false;
    format_fcc = fmt->fourcc;
    if (!setupSurfaceInterop())
        return false;
    return true;
}
bool VideoDecoderDXVAPrivate::DxCreateVideoDecoder(int codec_id, int w, int h)
{
    if (!vs || !d3ddev) {
        qWarning("d3d is not ready. IDirectXVideoService: %p, IDirect3DDevice9: %p", vs, d3ddev);
        return false;
    }
    qDebug("DxCreateVideoDecoder id %d %dx%d, surfaces: %u", codec_id, w, h, surface_count);
    /* Allocate all surfaces needed for the decoder */
    surface_width = aligned(w);
    surface_height = aligned(h);
    if (surface_auto) {
        switch (codec_id) {
        case QTAV_CODEC_ID(HEVC):
        case QTAV_CODEC_ID(H264):
            surface_count = 16 + 4;
            break;
        case QTAV_CODEC_ID(MPEG1VIDEO):
        case QTAV_CODEC_ID(MPEG2VIDEO):
            surface_count = 2 + 4;
            break;
        default:
            surface_count = 2 + 4;
            break;
        }
        if (codec_ctx->active_thread_type & FF_THREAD_FRAME)
            surface_count += codec_ctx->thread_count;
    }
    qDebug(">>>>>>>>>>>>>>>>>>>>>surfaces: %d, active_thread_type: %d, threads: %d, refs: %d", surface_count, codec_ctx->active_thread_type, codec_ctx->thread_count, codec_ctx->refs);
    if (surface_count == 0) {
        qWarning("internal error: wrong surface count. %u auto=%d", surface_count, surface_auto);
        surface_count = 16 + 4;
    }
    IDirect3DSurface9* surface_list[VA_DXVA2_MAX_SURFACE_COUNT];
    qDebug("%s @%d vs=%p surface_count=%d surface_width=%d surface_height=%d", __FUNCTION__, __LINE__, vs, surface_count, surface_width, surface_height);
    DX_ENSURE_OK(vs->CreateSurface(surface_width,
                                   surface_height,
                                   surface_count - 1,
                                   render,
                                   D3DPOOL_DEFAULT,
                                   0,
                                   DXVA2_VideoDecoderRenderTarget,
                                   surface_list,
                                   NULL)
                 , false);
    memset(surfaces, 0, sizeof(surfaces));
    for (unsigned i = 0; i < surface_count; i++) {
        va_surface_t *surface = &this->surfaces[i];
        surface->d3d = surface_list[i];
        surface->refcount = 0;
        surface->order = 0;
    }
    qDebug("IDirectXVideoAccelerationService_CreateSurface succeed with %d surfaces (%dx%d)", surface_count, w, h);
    /* */
    DXVA2_VideoDesc dsc;
    ZeroMemory(&dsc, sizeof(dsc));
    dsc.SampleWidth  = w; //coded_width
    dsc.SampleHeight = h; //coded_height
    dsc.Format       = render;
    dsc.InputSampleFreq.Numerator   = 0;
    dsc.InputSampleFreq.Denominator = 0;
    dsc.OutputFrameFreq = dsc.InputSampleFreq;
    dsc.UABProtectionLevel = FALSE;
    dsc.Reserved = 0;
    // see xbmc
    /* FIXME I am unsure we can leave everything as unknown */
    DXVA2_ExtendedFormat *ext = &dsc.SampleFormat;
    ext->SampleFormat = 0;            //DXVA2_SampleProgressiveFrame;//xbmc. DXVA2_SampleUnknown;
    ext->VideoChromaSubsampling = 0;  //DXVA2_VideoChromaSubsampling_Unknown;
    ext->NominalRange = 0;            //DXVA2_NominalRange_Unknown;
    ext->VideoTransferMatrix = 0;     //DXVA2_VideoTransferMatrix_Unknown;
    ext->VideoLighting = 0;           //DXVA2_VideoLighting_dim;//xbmc. DXVA2_VideoLighting_Unknown;
    ext->VideoPrimaries = 0;          //DXVA2_VideoPrimaries_Unknown;
    ext->VideoTransferFunction = 0;   //DXVA2_VideoTransFunc_Unknown;

    /* List all configurations available for the decoder */
    UINT cfg_count = 0;
    DXVA2_ConfigPictureDecode *cfg_list = NULL;
    DX_ENSURE_OK(vs->GetDecoderConfigurations(input,
                                              &dsc,
                                              NULL,
                                              &cfg_count,
                                              &cfg_list)
                 , false);
    qDebug("we got %d decoder configurations", cfg_count);
    /* Select the best decoder configuration */
    int cfg_score = 0;
    for (unsigned i = 0; i < cfg_count; i++) {
        const DXVA2_ConfigPictureDecode *cfg = &cfg_list[i];
        qDebug("configuration[%d] ConfigBitstreamRaw %d", i, cfg->ConfigBitstreamRaw);
        int score;
        if (cfg->ConfigBitstreamRaw == 1)
            score = 1;
        else if (codec_id == QTAV_CODEC_ID(H264) && cfg->ConfigBitstreamRaw == 2)
            score = 2;
        else
            continue;
        if (IsEqualGUID(cfg->guidConfigBitstreamEncryption, DXVA_NoEncrypt))
            score += 16;
        if (cfg_score < score) {
            this->cfg = *cfg;
            cfg_score = score;
        }
    }
    CoTaskMemFree(cfg_list);
    if (cfg_score <= 0) {
        qWarning("Failed to find a supported decoder configuration");
        return false;
    }
    /* Create the decoder */
    DX_ENSURE_OK(vs->CreateVideoDecoder(input, &dsc, &cfg, surface_list, surface_count, &decoder), false);
    qDebug("IDirectXVideoDecoderService_CreateVideoDecoder succeed. decoder=%p", decoder);
    return true;
}
bool AVDecoder::open()
{
    DPTR_D(AVDecoder);
    if (!d.codec_ctx) {
        qWarning("FFmpeg codec context not ready");
        return false;
    }
    AVCodec *codec = 0;
    if (!d.codec_name.isEmpty()) {
        codec = avcodec_find_decoder_by_name(d.codec_name.toUtf8().constData());
    } else {
        codec = avcodec_find_decoder(d.codec_ctx->codec_id);
    }
    if (!codec) {
        if (d.codec_name.isEmpty()) {
            qWarning("No codec could be found with id %d", d.codec_ctx->codec_id);
        } else {
            qWarning("No codec could be found with name %s", d.codec_name.toUtf8().constData());
        }
        return false;
    }
    // setup video codec context
    if (d.low_resolution > codec->max_lowres) {
        qWarning("Use the max value for lowres supported by the decoder (%d)", codec->max_lowres);
        d.low_resolution = codec->max_lowres;
    }
    d.codec_ctx->lowres = d.low_resolution;
    if (d.codec_ctx->lowres) {
        d.codec_ctx->flags |= CODEC_FLAG_EMU_EDGE;
    }
    if (d.fast) {
        d.codec_ctx->flags2 |= CODEC_FLAG2_FAST;
    } else {
        //d.codec_ctx->flags2 &= ~CODEC_FLAG2_FAST; // ffplay does not do this
    }
    if (codec->capabilities & CODEC_CAP_DR1) {
        d.codec_ctx->flags |= CODEC_FLAG_EMU_EDGE;
    }
    // set threading
    //d.codec_ctx->strict_std_compliance = FF_COMPLIANCE_STRICT; // from vlc
    //d.codec_ctx->slice_flags |= SLICE_FLAG_ALLOW_FIELD; // lavfilter
    //HAVE_AVCODEC_MT macro?
    if (d.threads == -1)
        d.threads = qMax(0, QThread::idealThreadCount());
    if (d.threads > 0)
        d.codec_ctx->thread_count = d.threads;
    d.codec_ctx->thread_safe_callbacks = true;
    switch (d.codec_ctx->codec_id) {
    case QTAV_CODEC_ID(MPEG4):
    case QTAV_CODEC_ID(H263):
        d.codec_ctx->thread_type = 0;
        break;
    case QTAV_CODEC_ID(MPEG1VIDEO):
    case QTAV_CODEC_ID(MPEG2VIDEO):
        d.codec_ctx->thread_type &= ~FF_THREAD_SLICE;
        /* fall through */
# if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55, 1, 0))
    case QTAV_CODEC_ID(H264):
    case QTAV_CODEC_ID(VC1):
    case QTAV_CODEC_ID(WMV3):
        d.codec_ctx->thread_type &= ~FF_THREAD_FRAME;
# endif
    default:
        break;
    }
    /*
    if (d.codec_ctx->thread_type & FF_THREAD_FRAME)
        p_dec->i_extra_picture_buffers = 2 * p_sys->p_context->thread_count;
    */
    // hwa extra init can be here
    if (!d.open()) {
        d.close();
        return false;
    }
    // set dict used by avcodec_open2(). see ffplay
    // AVDictionary *opts;
    int ret = avcodec_open2(d.codec_ctx, codec, d.options.isEmpty() ? NULL : &d.dict);
    if (ret < 0) {
        qWarning("open video codec failed: %s", av_err2str(ret));
        return false;
    }
    d.is_open = true;
    return true;
}
bool VideoEncoderFFmpegPrivate::open()
{
    nb_encoded = 0LL;
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    avctx->width = width; // coded_width works, why?
    avctx->height = height;
    // reset format_used to the user defined format. important to update the default format if format is invalid
    format_used = VideoFormat::Format_Invalid;
    AVPixelFormat fffmt = (AVPixelFormat)format.pixelFormatFFmpeg();
    if (codec->pix_fmts && format.isValid()) {
        for (int i = 0; codec->pix_fmts[i] != AVPixelFormat(-1); ++i) {
            if (fffmt == codec->pix_fmts[i]) {
                format_used = format.pixelFormat();
                break;
            }
        }
    }
    //avctx->sample_aspect_ratio =
    AVPixelFormat hwfmt = AVPixelFormat(-1);
    if (codec->pix_fmts && (av_pix_fmt_desc_get(codec->pix_fmts[0])->flags & AV_PIX_FMT_FLAG_HWACCEL))
        hwfmt = codec->pix_fmts[0];
    bool use_hwctx = false;
    if (hwfmt != AVPixelFormat(-1)) {
#ifdef HAVE_AVHWCTX
        const AVHWDeviceType dt = fromHWAName(codec_name.section(QChar('_'), -1).toUtf8().constData());
        if (dt != AVHWDeviceType(-1)) {
            use_hwctx = true;
            avctx->pix_fmt = hwfmt;
            hw_device_ctx = NULL;
            AV_ENSURE(av_hwdevice_ctx_create(&hw_device_ctx, dt, hwdev.toLatin1().constData(), NULL, 0), false);
            avctx->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
            if (!avctx->hw_frames_ctx) {
                qWarning("Failed to create hw frame context for '%s'", codec_name.toLatin1().constData());
                return false;
            }
            // get sw formats
            const void *hwcfg = NULL;
            AVHWFramesConstraints *constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwcfg);
            const AVPixelFormat* in_fmts = constraints->valid_sw_formats;
            AVPixelFormat sw_fmt = AVPixelFormat(-1);
            if (in_fmts) {
                sw_fmt = in_fmts[0];
                while (*in_fmts != AVPixelFormat(-1)) {
                    if (*in_fmts == fffmt)
                        sw_fmt = *in_fmts;
                    sw_fmts.append(*in_fmts);
                    ++in_fmts;
                }
            } else {
                sw_fmt = QTAV_PIX_FMT_C(YUV420P);
            }
            av_hwframe_constraints_free(&constraints);
            format_used = VideoFormat::pixelFormatFromFFmpeg(sw_fmt);
            // encoder surface pool parameters
            AVHWFramesContext* hwfs = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
            hwfs->format = hwfmt;     // must be the same as avctx->pix_fmt
            hwfs->sw_format = sw_fmt; // if it's not set, vaapi will choose the last valid_sw_formats, but that's wrong for vaGetImage/DeriveImage. nvenc always needs sw_format
            // hw upload parameters. The encoder's hwframes is just for parameter checking and will never be initialized, so we allocate an individual one.
            hwframes_ref = av_hwframe_ctx_alloc(hw_device_ctx);
            if (!hwframes_ref) {
                qWarning("Failed to create hw frame context for uploading '%s'", codec_name.toLatin1().constData());
            } else {
                hwframes = (AVHWFramesContext*)hwframes_ref->data;
                hwframes->format = hwfmt;
            }
        }
#endif //HAVE_AVHWCTX
    }
    if (!use_hwctx) { // no hw device (videotoolbox, wrong device name etc.), or old ffmpeg
        // TODO: check whether the frame is a hw frame
        if (hwfmt == AVPixelFormat(-1)) { // sw enc
            if (format_used == VideoFormat::Format_Invalid) { // requested format is not supported by the sw encoder
                if (codec->pix_fmts) { // pix_fmts[0] is always a sw format here
                    qDebug("use first supported pixel format '%d' for sw encoder", codec->pix_fmts[0]);
                    format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]);
                }
            }
        } else {
            if (format_used == VideoFormat::Format_Invalid) { // requested format is not supported by the hw encoder
                if (codec->pix_fmts && codec->pix_fmts[1] != AVPixelFormat(-1)) {
                    qDebug("use first supported sw pixel format '%d' for hw encoder", codec->pix_fmts[1]);
                    format_used = VideoFormat::pixelFormatFromFFmpeg(codec->pix_fmts[1]);
                }
            }
        }
        if (format_used == VideoFormat::Format_Invalid) {
            qWarning("fallback to yuv420p");
            format_used = VideoFormat::Format_YUV420P;
        }
        avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used);
    }
    if (frame_rate > 0)
        avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
    else
        avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2);
    qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den);
    avctx->bit_rate = bit_rate;
    //AVDictionary *dict = 0;
    if (avctx->codec_id == QTAV_CODEC_ID(H264)) {
        avctx->gop_size = 10;
        //avctx->max_b_frames = 3;//h264
        av_dict_set(&dict, "preset", "fast", 0); //x264
        av_dict_set(&dict, "tune", "zerolatency", 0); //x264
        //av_dict_set(&dict, "profile", "main", 0); // conflict with vaapi (int values)
    }
    if (avctx->codec_id == AV_CODEC_ID_HEVC) {
        av_dict_set(&dict, "preset", "ultrafast", 0);
        av_dict_set(&dict, "tune", "zero-latency", 0);
    }
    if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        av_dict_set(&dict, "strict", "-2", 0); // mpeg2 arbitrary fps
    }
    applyOptionsForContext();
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    // from mpv ao_lavc
    const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture)); //??
    buffer.resize(buffer_size);
    return true;
}
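Before encoding, a raw software frame has to be copied into a hardware surface from the upload frames context allocated above. The following is a hedged sketch of such an upload using FFmpeg's AVHWFramesContext API; it is not taken from QtAV, the helper name uploadToHw is hypothetical, and it assumes hwframes_ref has been fully configured (width/height/sw_format) and initialized with av_hwframe_ctx_init().

// Hypothetical upload helper (sketch only, not from QtAV).
static AVFrame* uploadToHw(AVBufferRef *hwframes_ref, const AVFrame *sw_frame)
{
    AVFrame *hwf = av_frame_alloc();
    if (!hwf)
        return NULL;
    // take a surface from the pool described by hwframes_ref, then copy sw -> hw
    if (av_hwframe_get_buffer(hwframes_ref, hwf, 0) < 0
        || av_hwframe_transfer_data(hwf, sw_frame, 0) < 0) {
        av_frame_free(&hwf);
        return NULL;
    }
    hwf->pts = sw_frame->pts; // carry the timestamp over to the hw frame
    return hwf; // feed this frame to the encoder instead of sw_frame
}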
struct dxva2_mode_t {
    const char *name;
    const GUID *guid;
    int codec;
    const int *profiles;
};
/* XXX Preferred modes must come first */
static const dxva2_mode_t dxva2_modes[] = {
    /* MPEG-1/2 */
    { "MPEG-1 decoder, restricted profile A",            &DXVA_ModeMPEG1_A,        0, NULL },
    { "MPEG-2 decoder, restricted profile A",            &DXVA_ModeMPEG2_A,        0, NULL },
    { "MPEG-2 decoder, restricted profile B",            &DXVA_ModeMPEG2_B,        0, NULL },
    { "MPEG-2 decoder, restricted profile C",            &DXVA_ModeMPEG2_C,        0, NULL },
    { "MPEG-2 decoder, restricted profile D",            &DXVA_ModeMPEG2_D,        0, NULL },
    { "MPEG-2 variable-length decoder",                  &DXVA2_ModeMPEG2_VLD,     QTAV_CODEC_ID(MPEG2VIDEO), PROF_MPEG2_SIMPLE },
    { "MPEG-2 & MPEG-1 variable-length decoder",         &DXVA2_ModeMPEG2and1_VLD, QTAV_CODEC_ID(MPEG2VIDEO), PROF_MPEG2_MAIN },
    { "MPEG-2 & MPEG-1 variable-length decoder",         &DXVA2_ModeMPEG2and1_VLD, QTAV_CODEC_ID(MPEG1VIDEO), NULL },
    { "MPEG-2 motion compensation",                      &DXVA2_ModeMPEG2_MoComp,  0, NULL },
    { "MPEG-2 inverse discrete cosine transform",        &DXVA2_ModeMPEG2_IDCT,    0, NULL },
    /* MPEG-1 http://download.microsoft.com/download/B/1/7/B172A3C8-56F2-4210-80F1-A97BEA9182ED/DXVA_MPEG1_VLD.pdf */
    { "MPEG-1 variable-length decoder, no D pictures",   &DXVA2_ModeMPEG1_VLD,     0, NULL },
    /* H.264 http://www.microsoft.com/downloads/details.aspx?displaylang=en&FamilyID=3d1c290b-310b-4ea2-bf76-714063a6d7a6 */
    { "H.264 variable-length decoder, film grain technology",                       &DXVA2_ModeH264_F,                   QTAV_CODEC_ID(H264), PROF_H264_HIGH },
    { "H.264 variable-length decoder, no film grain technology (Intel ClearVideo)", &DXVA_Intel_H264_NoFGT_ClearVideo,   QTAV_CODEC_ID(H264), PROF_H264_HIGH },
    { "H.264 variable-length decoder, no film grain technology",                    &DXVA2_ModeH264_E,                   QTAV_CODEC_ID(H264), PROF_H264_HIGH },
    { "H.264 variable-length decoder, no film grain technology, FMO/ASO",           &DXVA_ModeH264_VLD_WithFMOASO_NoFGT, QTAV_CODEC_ID(H264), PROF_H264_HIGH },
    { "H.264 variable-length decoder, no film grain technology, Flash",             &DXVA_ModeH264_VLD_NoFGT_Flash,      QTAV_CODEC_ID(H264), PROF_H264_HIGH },
bool VideoEncoderFFmpegPrivate::open()
{
    nb_encoded = 0LL;
    if (codec_name.isEmpty()) {
        // copy ctx from muxer by copyAVCodecContext
        AVCodec *codec = avcodec_find_encoder(avctx->codec_id);
        AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
        return true;
    }
    AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData());
    if (!codec) {
        const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData());
        if (cd) {
            codec = avcodec_find_encoder(cd->id);
        }
    }
    if (!codec) {
        qWarning() << "Can not find encoder for codec " << codec_name;
        return false;
    }
    if (avctx) {
        avcodec_free_context(&avctx);
        avctx = 0;
    }
    avctx = avcodec_alloc_context3(codec);
    avctx->width = width; // coded_width works, why?
    avctx->height = height;
    // reset format_used to user defined format. important to update default format if format is invalid
    format_used = format.pixelFormat();
    if (format.pixelFormat() == VideoFormat::Format_Invalid) {
        if (codec->pix_fmts) {
            qDebug("use first supported pixel format: %d", codec->pix_fmts[0]);
            format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]);
        } else {
            qWarning("pixel format and supported pixel format are not set. use yuv420p");
            format_used = VideoFormat::Format_YUV420P;
        }
    }
    //avctx->sample_aspect_ratio =
    avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used);
    if (frame_rate > 0)
        avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2);
    else
        avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2);
    qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den);
    avctx->bit_rate = bit_rate;
#if 1
    //AVDictionary *dict = 0;
    if (avctx->codec_id == QTAV_CODEC_ID(H264)) {
        avctx->gop_size = 10;
        //avctx->max_b_frames = 3;//h264
        av_dict_set(&dict, "preset", "fast", 0);
        av_dict_set(&dict, "tune", "zerolatency", 0);
        av_dict_set(&dict, "profile", "main", 0);
    }
#ifdef FF_PROFILE_HEVC_MAIN
    if (avctx->codec_id == AV_CODEC_ID_HEVC) {
        av_dict_set(&dict, "preset", "ultrafast", 0);
        av_dict_set(&dict, "tune", "zero-latency", 0);
    }
#endif //FF_PROFILE_HEVC_MAIN
#endif
    applyOptionsForContext();
    AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false);
    // from mpv ao_lavc
    const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture)); //??
    buffer.resize(buffer_size);
    return true;
}
{
    AVCodec *c = avcodec_find_decoder(id);
    if (!c)
        return "Unknown";
    return av_get_profile_name(c, profile);
}

typedef struct {
    AVCodecID codec;
    int profile;
    VAProfile va_profile;
} codec_profile_t;

#define VAProfileNone ((VAProfile)-1) //maybe not defined for old va
static const codec_profile_t va_profiles[] = {
    { QTAV_CODEC_ID(MPEG1VIDEO), FF_PROFILE_UNKNOWN,                   VAProfileMPEG2Main }, //vlc
    { QTAV_CODEC_ID(MPEG2VIDEO), FF_PROFILE_MPEG2_MAIN,                VAProfileMPEG2Main },
    { QTAV_CODEC_ID(MPEG2VIDEO), FF_PROFILE_MPEG2_SIMPLE,              VAProfileMPEG2Simple },
    { QTAV_CODEC_ID(H263),       FF_PROFILE_UNKNOWN,                   VAProfileMPEG4AdvancedSimple }, //xbmc
    { QTAV_CODEC_ID(MPEG4),      FF_PROFILE_MPEG4_ADVANCED_SIMPLE,     VAProfileMPEG4AdvancedSimple },
    { QTAV_CODEC_ID(MPEG4),      FF_PROFILE_MPEG4_MAIN,                VAProfileMPEG4Main },
    { QTAV_CODEC_ID(MPEG4),      FF_PROFILE_MPEG4_SIMPLE,              VAProfileMPEG4Simple },
    { QTAV_CODEC_ID(H264),       FF_PROFILE_H264_HIGH,                 VAProfileH264High },
    { QTAV_CODEC_ID(H264),       FF_PROFILE_H264_MAIN,                 VAProfileH264Main },
    { QTAV_CODEC_ID(H264),       FF_PROFILE_H264_BASELINE,             VAProfileH264Baseline },
    { QTAV_CODEC_ID(H264),       FF_PROFILE_H264_CONSTRAINED_BASELINE, VAProfileH264ConstrainedBaseline }, //mpv force main
    { QTAV_CODEC_ID(VC1),        FF_PROFILE_VC1_ADVANCED,              VAProfileVC1Advanced },
    { QTAV_CODEC_ID(VC1),        FF_PROFILE_VC1_MAIN,                  VAProfileVC1Main },
    { QTAV_CODEC_ID(VC1),        FF_PROFILE_VC1_SIMPLE,                VAProfileVC1Simple },
    { QTAV_CODEC_ID(WMV3),       FF_PROFILE_VC1_ADVANCED,              VAProfileVC1Advanced },
    { QTAV_CODEC_ID(WMV3),       FF_PROFILE_VC1_MAIN,                  VAProfileVC1Main },
{
    AVCodec *c = avcodec_find_decoder(id);
    if (!c)
        return "Unknown";
    return av_get_profile_name(c, profile);
}

typedef struct {
    AVCodecID codec;
    int profile;
    VAProfile va_profile; //TODO: use an array like dxva does
} codec_profile_t;

#define VAProfileNone ((VAProfile)-1) //maybe not defined for old va
static const codec_profile_t va_profiles[] = {
    { QTAV_CODEC_ID(MPEG1VIDEO), FF_PROFILE_UNKNOWN,                   VAProfileMPEG2Main }, //vlc
    { QTAV_CODEC_ID(MPEG2VIDEO), FF_PROFILE_MPEG2_MAIN,                VAProfileMPEG2Main },
    { QTAV_CODEC_ID(MPEG2VIDEO), FF_PROFILE_MPEG2_SIMPLE,              VAProfileMPEG2Simple },
    { QTAV_CODEC_ID(H263),       FF_PROFILE_UNKNOWN,                   VAProfileH263Baseline }, //xbmc use mpeg4
    { QTAV_CODEC_ID(MPEG4),      FF_PROFILE_MPEG4_ADVANCED_SIMPLE,     VAProfileMPEG4AdvancedSimple },
    { QTAV_CODEC_ID(MPEG4),      FF_PROFILE_MPEG4_MAIN,                VAProfileMPEG4Main },
    { QTAV_CODEC_ID(MPEG4),      FF_PROFILE_MPEG4_SIMPLE,              VAProfileMPEG4Simple },
    { QTAV_CODEC_ID(H264),       FF_PROFILE_H264_HIGH,                 VAProfileH264High },
    { QTAV_CODEC_ID(H264),       FF_PROFILE_H264_MAIN,                 VAProfileH264Main },
    { QTAV_CODEC_ID(H264),       FF_PROFILE_H264_BASELINE,             VAProfileH264Baseline },
    { QTAV_CODEC_ID(H264),       FF_PROFILE_H264_CONSTRAINED_BASELINE, VAProfileH264ConstrainedBaseline }, //mpv force main
    { QTAV_CODEC_ID(H264),       FF_PROFILE_H264_CONSTRAINED_BASELINE, VAProfileH264Main },
    { QTAV_CODEC_ID(VC1),        FF_PROFILE_VC1_ADVANCED,              VAProfileVC1Advanced },
    { QTAV_CODEC_ID(VC1),        FF_PROFILE_VC1_MAIN,                  VAProfileVC1Main },
    { QTAV_CODEC_ID(VC1),        FF_PROFILE_VC1_SIMPLE,                VAProfileVC1Simple },
    { QTAV_CODEC_ID(WMV3),       FF_PROFILE_VC1_ADVANCED,              VAProfileVC1Advanced },