// hwaccel_context bool VideoDecoderDXVAPrivate::setup(void **hwctx, AVPixelFormat *chroma, int w, int h) { if (w <= 0 || h <= 0) return false; if (!decoder || ((width != w || height != h) && (surface_width != FFALIGN(w, 16) || surface_height != FFALIGN(h, 16))//) )) { DxDestroyVideoConversion(); DxDestroyVideoDecoder(); *hwctx = NULL; *chroma = QTAV_PIX_FMT_C(NONE); /* FIXME transmit a video_format_t by VaSetup directly */ if (!DxCreateVideoDecoder(codec_ctx->codec_id, w, h)) return false; hw.decoder = decoder; hw.cfg = &cfg; hw.surface_count = surface_count; hw.surface = hw_surfaces; memset(hw_surfaces, 0, sizeof(hw_surfaces)); for (unsigned i = 0; i < surface_count; i++) hw.surface[i] = surfaces[i].d3d; DxCreateVideoConversion(); } *hwctx = &hw; const d3d_format_t *outfmt = D3dFindFormat(output); *chroma = outfmt ? outfmt->codec : QTAV_PIX_FMT_C(NONE); return true; }
// Pixel-format negotiation callback: pick the hardware surface format
// reported by vaPixelFormat() when offered and setup() succeeds, otherwise
// fall back to ffmpeg's default (software) selection.
AVPixelFormat VideoDecoderFFmpegHWPrivate::getFormat(struct AVCodecContext *p_context, const AVPixelFormat *pi_fmt)
{
    // Log every offered format and remember whether any is a hw surface.
    bool has_hwaccel = false;
    for (size_t n = 0; pi_fmt[n] != QTAV_PIX_FMT_C(NONE); n++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pi_fmt[n]);
        if (!desc)
            continue;
        const bool is_hw = (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) != 0;
        qDebug("available %sware decoder output format %d (%s)",
               is_hw ? "hard" : "soft", pi_fmt[n], desc->name);
        if (is_hw)
            has_hwaccel = true;
    }
    if (has_hwaccel) {
        for (size_t n = 0; pi_fmt[n] != QTAV_PIX_FMT_C(NONE); n++) {
            if (vaPixelFormat() != pi_fmt[n])
                continue;
            /* We try to call setup when possible to detect errors when
             * possible (later is too late) */
            if (p_context->width > 0 && p_context->height > 0
                    && !setup(p_context)) {
                qWarning("acceleration setup failure");
                break;
            }
            qDebug("Using %s for hardware decoding.", qPrintable(description));
            p_context->draw_horiz_band = NULL; //??
            return pi_fmt[n];
        }
        // A hw format was offered but could not be used; tear down va state.
        close();
    }
    qWarning("hardware acceleration is not available" );
    /* Fallback to default behaviour */
    return avcodec_default_get_format(p_context, pi_fmt);
}
// Allocates the output pixel buffer and fills the per-plane pointers and
// linesizes for the configured output format/size. Returns false when the
// output format/size is invalid or a libav image call fails.
bool ImageConverter::prepareData()
{
    DPTR_D(ImageConverter);
    if (d.fmt_out == QTAV_PIX_FMT_C(NONE) || d.w_out <=0 || d.h_out <= 0)
        return false;
    AV_ENSURE(av_image_check_size(d.w_out, d.h_out, 0, NULL), false);
    const int nb_planes = qMax(av_pix_fmt_count_planes(d.fmt_out), 0);
    d.bits.resize(nb_planes);
    d.pitchs.resize(nb_planes);
    // alignment is 16. sws in ffmpeg is 16, libav10 is 8
    const int kAlign = 16;
    // kAlign is fixed at 16 here, so the width passed in is always
    // FFALIGN(d.w_out, 8); the ternary only matters if kAlign changes.
    AV_ENSURE(av_image_fill_linesizes((int*)d.pitchs.constData(), d.fmt_out, kAlign > 7 ? FFALIGN(d.w_out, 8) : d.w_out), false);
    // Round every linesize up to the alignment required by sws.
    for (int i = 0; i < d.pitchs.size(); ++i)
        d.pitchs[i] = FFALIGN(d.pitchs[i], kAlign);
    // First pass with a NULL base pointer just computes the total byte size.
    int s = av_image_fill_pointers((uint8_t**)d.bits.constData(), d.fmt_out, d.h_out, NULL, d.pitchs.constData());
    if (s < 0)
        return false;
    // Over-allocate by kAlign-1 so the plane base can be aligned manually.
    d.data_out.resize(s + kAlign-1);
    const int offset = (kAlign - ((uintptr_t)d.data_out.constData() & (kAlign-1))) & (kAlign-1);
    AV_ENSURE(av_image_fill_pointers((uint8_t**)d.bits.constData(), d.fmt_out, d.h_out, (uint8_t*)d.data_out.constData()+offset, d.pitchs.constData()), false);
    // TODO: special formats
    //if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
    //    avpriv_set_systematic_pal2((uint32_t*)pointers[1], pix_fmt);
    d.update_data = false;
    // Sanity check: every plane pointer and pitch must be kAlign-aligned.
    for (int i = 0; i < d.pitchs.size(); ++i) {
        Q_ASSERT(d.pitchs[i]%kAlign == 0);
        Q_ASSERT(qintptr(d.bits[i])%kAlign == 0);
    }
    return true;
}
bool VideoDecoderDXVAPrivate::open() { if (va_pixfmt != QTAV_PIX_FMT_C(NONE)) codec_ctx->pix_fmt = va_pixfmt; if (!D3dCreateDevice()) { qWarning("Failed to create Direct3D device"); goto error; } qDebug("D3dCreateDevice succeed"); if (!D3dCreateDeviceManager()) { qWarning("D3dCreateDeviceManager failed"); goto error; } if (!DxCreateVideoService()) { qWarning("DxCreateVideoService failed"); goto error; } if (!DxFindVideoServiceConversion(&input, &render)) { qWarning("DxFindVideoServiceConversion failed"); goto error; } /* TODO print the hardware name/vendor for debugging purposes */ return true; error: close(); return false; }
// Derives per-plane attributes (plane count, bpp tables, pixel descriptor)
// from the ffmpeg pixel format. Call again whenever pixfmt changes.
void init() {
    // FIXME: hack for invalid ffmpeg formats
    // Format_VYUY has no ffmpeg counterpart; UYVY422 is used in its place —
    // presumably a consumer compensates for the component order. TODO confirm.
    if (pixfmt == VideoFormat::Format_VYUY) {
        pixfmt_ff = QTAV_PIX_FMT_C(UYVY422);
    }
    // TODO: what if other formats not supported by ffmpeg? give attributes in QtAV?
    if (pixfmt_ff == QTAV_PIX_FMT_C(NONE)) {
        qWarning("Invalid pixel format");
        return;
    }
    // av_pix_fmt_count_planes() may return a negative error code; clamp to 0.
    planes = qMax(av_pix_fmt_count_planes(pixfmt_ff), 0);
    bpps.resize(planes);
    bpps_pad.resize(planes);
    pixdesc = const_cast<AVPixFmtDescriptor*>(av_pix_fmt_desc_get(pixfmt_ff));
    if (!pixdesc)
        return;
    initBpp();
}
/*!
 * Pixel-format negotiation callback. Prefers the hardware surface format
 * reported by vaPixelFormat() when the codec offers one and setup()
 * succeeds; otherwise falls back to ffmpeg's default (software) choice.
 */
AVPixelFormat VideoDecoderFFmpegHWPrivate::getFormat(struct AVCodecContext *p_context, const AVPixelFormat *pi_fmt)
{
    bool can_hwaccel = false;
    for (size_t i = 0; pi_fmt[i] != QTAV_PIX_FMT_C(NONE); i++) {
        const AVPixFmtDescriptor *dsc = av_pix_fmt_desc_get(pi_fmt[i]);
        if (dsc == NULL)
            continue;
        bool hwaccel = (dsc->flags & AV_PIX_FMT_FLAG_HWACCEL) != 0;
        qDebug("available %sware decoder output format %d (%s)",
               hwaccel ? "hard" : "soft", pi_fmt[i], dsc->name);
        if (hwaccel)
            can_hwaccel = true;
    }
    if (!can_hwaccel)
        goto end;
    // Terminate on QTAV_PIX_FMT_C(NONE): the bare PIX_FMT_NONE alias used
    // here previously was removed in newer ffmpeg and was inconsistent with
    // the loop above.
    for (size_t i = 0; pi_fmt[i] != QTAV_PIX_FMT_C(NONE); i++) {
        if (vaPixelFormat() != pi_fmt[i])
            continue;
        /* We try to call setup when possible to detect errors when
         * possible (later is too late) */
        if (p_context->width > 0 && p_context->height > 0
                && !setup(p_context)) {
            qWarning("acceleration setup failure");
            break;
        }
        qDebug("Using %s for hardware decoding.", qPrintable(description));
        /* FIXME this will disable direct rendering
         * even if a new pixel format is renegotiated */
        p_context->draw_horiz_band = NULL;
        return pi_fmt[i];
    }
    close();
end:
    qWarning("acceleration not available" );
    /* Fallback to default behaviour */
    return avcodec_default_get_format(p_context, pi_fmt);
}
// Initial state for the libavfilter pipeline: no graph or filter contexts
// yet, invalid pixel format and zero size so the first frame forces graph
// (re)configuration.
LibAVFilterPrivate()
    : filter_graph(0)
    , in_filter_ctx(0)
    , out_filter_ctx(0)
    , pixfmt(QTAV_PIX_FMT_C(NONE))
    , width(0)
    , height(0)
    , avframe(0)
    , options_changed(false)
{
    // Must be called before any filter can be looked up by name.
    avfilter_register_all();
}
void init() { if (pixfmt_ff == QTAV_PIX_FMT_C(NONE)) { qWarning("Invalid pixel format"); return; } planes = qMax(av_pix_fmt_count_planes(pixfmt_ff), 0); bpps.resize(planes); bpps_pad.resize(planes); pixdesc = const_cast<AVPixFmtDescriptor*>(av_pix_fmt_desc_get(pixfmt_ff)); if (!pixdesc) return; initBpp(); }
// Converts a frame to the requested ffmpeg pixel format via a lazily-created
// sws converter, applying the stored brightness/contrast/saturation. Returns
// an invalid VideoFrame on bad input or conversion failure; hardware frames
// are delegated to VideoFrame::to().
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.constBits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    m_cvt->setInRange(frame.colorRange());
    // Paletted formats carry the palette as an extra pseudo-plane.
    const int pal = format.hasPalette();
    QVector<const uchar*> pitch(format.planeCount() + pal);
    QVector<int> stride(format.planeCount() + pal);
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.constBits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    // NOTE(review): "pallete" is misspelled, but it is a runtime metadata key
    // that must match the producer side byte-for-byte — do not "fix" it here
    // without changing the writer too.
    const QByteArray paldata(frame.metaData(QStringLiteral("pallete")).toByteArray());
    if (pal > 0) {
        pitch[1] = (const uchar*)paldata.constData();
        stride[1] = paldata.size();
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(frame.width(), frame.height(), fmt, m_cvt->outData());
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    f.setDisplayAspectRatio(frame.displayAspectRatio());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    return f;
}
// Copies colorspace/color-range information from the decoded AVFrame (with
// codec-context fallback) onto the VideoFrame, inferring full range for
// legacy yuvj* formats and defaulting unresolved yuv to limited range.
static void SetColorDetailsByFFmpeg(VideoFrame *f, AVFrame* frame, AVCodecContext* codec_ctx)
{
    // Prefer per-frame colorspace; fall back to the codec context.
    ColorSpace cs = colorSpaceFromFFmpeg(av_frame_get_colorspace(frame));
    if (cs == ColorSpace_Unknown)
        cs = colorSpaceFromFFmpeg(codec_ctx->colorspace);
    f->setColorSpace(cs);
    ColorRange cr = colorRangeFromFFmpeg(av_frame_get_color_range(frame));
    if (cr == ColorRange_Unknown) {
        // check yuvj format. TODO: deprecated, check only for old ffmpeg?
        const AVPixelFormat pixfmt = (AVPixelFormat)frame->format;
        switch (pixfmt) {
        //case QTAV_PIX_FMT_C(YUVJ411P): //not in ffmpeg<2 and libav
        case QTAV_PIX_FMT_C(YUVJ420P):
        case QTAV_PIX_FMT_C(YUVJ422P):
        case QTAV_PIX_FMT_C(YUVJ440P):
        case QTAV_PIX_FMT_C(YUVJ444P):
            cr = ColorRange_Full;
            break;
        default:
            break;
        }
    }
    if (cr == ColorRange_Unknown) {
        cr = colorRangeFromFFmpeg(codec_ctx->color_range);
        if (cr == ColorRange_Unknown) {
            if (f->format().isXYZ()){
                cr = ColorRange_Full;
                // NOTE(review): dead store — setColorSpace(cs) was already
                // called above, so this assignment never reaches the frame
                // (the original "not here" comment acknowledges this).
                cs = ColorSpace_XYZ; // not here
            } else if (!f->format().isRGB()) {
                //qDebug("prefer limited yuv range");
                cr = ColorRange_Limited;
            }
        }
    }
    f->setColorRange(cr);
}
void init() { // TODO: what if other formats not supported by ffmpeg? give attributes in QtAV? if (pixfmt_ff == QTAV_PIX_FMT_C(NONE)) { qWarning("Invalid pixel format"); return; } planes = qMax(av_pix_fmt_count_planes(pixfmt_ff), 0); bpps.reserve(planes); channels.reserve(planes); bpps.resize(planes); channels.resize(planes); pixdesc = const_cast<AVPixFmtDescriptor*>(av_pix_fmt_desc_get(pixfmt_ff)); if (!pixdesc) return; initBpp(); }
bool VideoEncoderFFmpegPrivate::open() { if (codec_name.isEmpty()) { // copy ctx from muxer AVCodec *codec = avcodec_find_decoder(avctx->codec_id); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); return true; } AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData()); if (!codec) { qWarning() << "Can not find encoder for codec " << codec_name; return false; } if (avctx) { avcodec_free_context(&avctx); avctx = 0; } avctx = avcodec_alloc_context3(codec); qDebug("tbc: %f", av_q2d(avctx->time_base)); avctx->width = width; // coded_width works, why? avctx->height = height; avctx->pix_fmt = QTAV_PIX_FMT_C(YUV420P); avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2); //avctx->max_b_frames = 3;//h264 qDebug("2 tbc: %f=%d/%d", av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den); avctx->bit_rate = bit_rate; // Set Option AVDictionary *param = 0; #if 0 //H.264 if(avctx->codec_id == QTAV_CODEC_ID(H264)) { av_dict_set(¶m, "preset", "slow", 0); av_dict_set(¶m, "tune", "zerolatency", 0); //av_dict_set(¶m, "profile", "main", 0); } //H.265 if(avctx->codec_id == AV_CODEC_ID_H265){ av_dict_set(¶m, "preset", "ultrafast", 0); av_dict_set(¶m, "tune", "zero-latency", 0); } #endif applyOptionsForContext(); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//?? buffer.resize(buffer_size); return true; }
// Validates that the stream is H.264 (the only codec VDA accelerates) and
// primes the codec context with the hardware pixel format when one is known.
bool VideoDecoderVDAPrivate::open()
{
    qDebug("opening VDA module");
    const bool is_h264 = codec_ctx->codec_id == AV_CODEC_ID_H264;
    if (!is_h264) {
        qWarning("input codec (%s) isn't H264, canceling VDA decoding", codec_ctx->codec_name);
        return false;
    }
    if (va_pixfmt != QTAV_PIX_FMT_C(NONE))
        codec_ctx->pix_fmt = va_pixfmt;
#if 0
    if (!codec_ctx->extradata || codec_ctx->extradata_size < 7) {
        qWarning("VDA requires extradata.");
        return false;
    }
#endif
    return true;
}
// Converts a frame to the requested ffmpeg pixel format via a lazily-created
// sws converter, applying the stored brightness/contrast/saturation. Returns
// an invalid VideoFrame on bad input or conversion failure; hardware frames
// are delegated to VideoFrame::to().
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.bits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    QVector<const uchar*> pitch(format.planeCount());
    QVector<int> stride(format.planeCount());
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.bits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(m_cvt->outData(), frame.width(), frame.height(), fmt);
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        // was ColorSpace_Unknow — corrected to match the ColorSpace_Unknown
        // enumerator used elsewhere in this file (SetColorDetailsByFFmpeg).
        f.setColorSpace(ColorSpace_Unknown);
    }
    return f;
}
// Allocates (or reuses, if already large enough) the frame's pixel buffer
// for the current format and size, then re-initializes the plane layout.
// Returns the byte size of the buffer, or 0 for an invalid format/size.
int VideoFrame::allocate()
{
    Q_D(VideoFrame);
    if (pixelFormatFFmpeg() == QTAV_PIX_FMT_C(NONE) || width() <=0 || height() <= 0) {
        qWarning("Not valid format(%s) or size(%dx%d)", qPrintable(format().name()), width(), height());
        return 0;
    }
#if 0
    const int align = 16;
    int bytes = av_image_get_buffer_size((AVPixelFormat)d->format.pixelFormatFFmpeg(), width(), height(), align);
    d->data.resize(bytes);
    av_image_fill_arrays(d->planes.data(), d->line_sizes.data()
                         , (const uint8_t*)d->data.constData()
                         , (AVPixelFormat)d->format.pixelFormatFFmpeg()
                         , width(), height(), align);
    return bytes;
#endif
    // NOTE(review): avpicture_get_size() is deprecated in newer ffmpeg; the
    // disabled block above is the av_image_* replacement (with alignment).
    int bytes = avpicture_get_size((AVPixelFormat)pixelFormatFFmpeg(), width(), height());
    // Only reallocate when the existing buffer is too small.
    if (d->data.size() < bytes) {
        d->data = QByteArray(bytes, 0);
    }
    init();
    return bytes;
}
// True only when both input and output have a valid pixel format and a
// positive width and height.
bool ImageConverter::check() const
{
    DPTR_D(const ImageConverter);
    if (d.fmt_in == QTAV_PIX_FMT_C(NONE) || d.fmt_out == QTAV_PIX_FMT_C(NONE))
        return false;
    return d.w_in > 0 && d.h_in > 0
        && d.w_out > 0 && d.h_out > 0;
}
bool VideoEncoderFFmpegPrivate::open() { nb_encoded = 0LL; if (codec_name.isEmpty()) { // copy ctx from muxer by copyAVCodecContext AVCodec *codec = avcodec_find_encoder(avctx->codec_id); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); return true; } AVCodec *codec = avcodec_find_encoder_by_name(codec_name.toUtf8().constData()); if (!codec) { const AVCodecDescriptor* cd = avcodec_descriptor_get_by_name(codec_name.toUtf8().constData()); if (cd) { codec = avcodec_find_encoder(cd->id); } } if (!codec) { qWarning() << "Can not find encoder for codec " << codec_name; return false; } if (avctx) { avcodec_free_context(&avctx); avctx = 0; } avctx = avcodec_alloc_context3(codec); avctx->width = width; // coded_width works, why? avctx->height = height; // reset format_used to user defined format. important to update default format if format is invalid format_used = VideoFormat::Format_Invalid; AVPixelFormat fffmt = (AVPixelFormat)format.pixelFormatFFmpeg(); if (codec->pix_fmts && format.isValid()) { for (int i = 0; codec->pix_fmts[i] != AVPixelFormat(-1); ++i) { if (fffmt == codec->pix_fmts[i]) { format_used = format.pixelFormat(); break; } } } //avctx->sample_aspect_ratio = AVPixelFormat hwfmt = AVPixelFormat(-1); if (av_pix_fmt_desc_get(codec->pix_fmts[0])->flags & AV_PIX_FMT_FLAG_HWACCEL) hwfmt = codec->pix_fmts[0]; bool use_hwctx = false; if (hwfmt != AVPixelFormat(-1)) { #ifdef HAVE_AVHWCTX const AVHWDeviceType dt = fromHWAName(codec_name.section(QChar('_'), -1).toUtf8().constData()); if (dt != AVHWDeviceType(-1)) { use_hwctx = true; avctx->pix_fmt = hwfmt; hw_device_ctx = NULL; AV_ENSURE(av_hwdevice_ctx_create(&hw_device_ctx, dt, hwdev.toLatin1().constData(), NULL, 0), false); avctx->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx); if (!avctx->hw_frames_ctx) { qWarning("Failed to create hw frame context for '%s'", codec_name.toLatin1().constData()); return false; } // get sw formats const void *hwcfg = NULL; AVHWFramesConstraints *constraints = 
av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwcfg); const AVPixelFormat* in_fmts = constraints->valid_sw_formats; AVPixelFormat sw_fmt = AVPixelFormat(-1); if (in_fmts) { sw_fmt = in_fmts[0]; while (*in_fmts != AVPixelFormat(-1)) { if (*in_fmts == fffmt) sw_fmt = *in_fmts; sw_fmts.append(*in_fmts); ++in_fmts; } } else { sw_fmt = QTAV_PIX_FMT_C(YUV420P); } av_hwframe_constraints_free(&constraints); format_used = VideoFormat::pixelFormatFromFFmpeg(sw_fmt); // encoder surface pool parameters AVHWFramesContext* hwfs = (AVHWFramesContext*)avctx->hw_frames_ctx->data; hwfs->format = hwfmt; // must the same as avctx->pix_fmt hwfs->sw_format = sw_fmt; // if it's not set, vaapi will choose the last valid_sw_formats, but that's wrong for vaGetImage/DeriveImage. nvenc always need sw_format // hw upload parameters. encoder's hwframes is just for parameter checking, will never be intialized, so we allocate an individual one. hwframes_ref = av_hwframe_ctx_alloc(hw_device_ctx); if (!hwframes_ref) { qWarning("Failed to create hw frame context for uploading '%s'", codec_name.toLatin1().constData()); } else { hwframes = (AVHWFramesContext*)hwframes_ref->data; hwframes->format = hwfmt; } } #endif //HAVE_AVHWCTX } if (!use_hwctx) { // no hw device (videotoolbox, wrong device name etc.), or old ffmpeg // TODO: check frame is hw frame if (hwfmt == AVPixelFormat(-1)) { // sw enc if (format_used == VideoFormat::Format_Invalid) {// requested format is not supported by sw enc if (codec->pix_fmts) { //pix_fmts[0] is always a sw format here qDebug("use first supported pixel format '%d' for sw encoder", codec->pix_fmts[0]); format_used = VideoFormat::pixelFormatFromFFmpeg((int)codec->pix_fmts[0]); } } } else { if (format_used == VideoFormat::Format_Invalid) { // requested format is not supported by hw enc qDebug("use first supported sw pixel format '%d' for hw encoder", codec->pix_fmts[1]); if (codec->pix_fmts && codec->pix_fmts[1] != AVPixelFormat(-1)) format_used = 
VideoFormat::pixelFormatFromFFmpeg(codec->pix_fmts[1]); } } if (format_used == VideoFormat::Format_Invalid) { qWarning("fallback to yuv420p"); format_used = VideoFormat::Format_YUV420P; } avctx->pix_fmt = (AVPixelFormat)VideoFormat::pixelFormatToFFmpeg(format_used); } if (frame_rate > 0) avctx->time_base = av_d2q(1.0/frame_rate, frame_rate*1001.0+2); else avctx->time_base = av_d2q(1.0/VideoEncoder::defaultFrameRate(), VideoEncoder::defaultFrameRate()*1001.0+2); qDebug("size: %dx%d tbc: %f=%d/%d", width, height, av_q2d(avctx->time_base), avctx->time_base.num, avctx->time_base.den); avctx->bit_rate = bit_rate; //AVDictionary *dict = 0; if(avctx->codec_id == QTAV_CODEC_ID(H264)) { avctx->gop_size = 10; //avctx->max_b_frames = 3;//h264 av_dict_set(&dict, "preset", "fast", 0); //x264 av_dict_set(&dict, "tune", "zerolatency", 0); //x264 //av_dict_set(&dict, "profile", "main", 0); // conflict with vaapi (int values) } if(avctx->codec_id == AV_CODEC_ID_HEVC){ av_dict_set(&dict, "preset", "ultrafast", 0); av_dict_set(&dict, "tune", "zero-latency", 0); } if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) { av_dict_set(&dict, "strict", "-2", 0); // mpeg2 arbitrary fps } applyOptionsForContext(); AV_ENSURE_OK(avcodec_open2(avctx, codec, &dict), false); // from mpv ao_lavc const int buffer_size = qMax<int>(qMax<int>(width*height*6+200, FF_MIN_BUFFER_SIZE), sizeof(AVPicture));//?? buffer.resize(buffer_size); return true; }