Example #1
void VideoMaterial::setCurrentFrame(const VideoFrame &frame)
{
    DPTR_D(VideoMaterial);
    d.update_texure = true;
    d.bpp = frame.format().bitsPerPixel(0);
    d.width = frame.width();
    d.height = frame.height();
    const VideoFormat fmt(frame.format());
    // http://forum.doom9.org/archive/index.php/t-160211.html
    ColorTransform::ColorSpace cs = ColorTransform::RGB;
    if (fmt.isRGB()) {
        if (fmt.isPlanar())
            cs = ColorTransform::GBR;
    } else {
        if (frame.width() >= 1280 || frame.height() > 576) // values from mpv
            cs = ColorTransform::BT709;
        else
            cs = ColorTransform::BT601;
    }
    d.colorTransform.setInputColorSpace(cs);
    d.frame = frame;
    if (fmt != d.video_format) {
        qDebug("pixel format changed: %s => %s", qPrintable(d.video_format.name()), qPrintable(fmt.name()));
        d.video_format = fmt;
    }
}
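
The SD/HD branch above follows mpv's resolution heuristic (see the linked doom9 thread). Factored out on its own it could look like the following sketch; guessYuvColorSpace is a hypothetical helper, not a QtAV API:

static ColorTransform::ColorSpace guessYuvColorSpace(int width, int height)
{
    // Treat anything at least 1280 wide or taller than 576 as HD (BT.709);
    // smaller frames are assumed to be SD material (BT.601).
    return (width >= 1280 || height > 576) ? ColorTransform::BT709
                                           : ColorTransform::BT601;
}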
Example #2
bool LibAVFilterPrivate::push(Frame *frame, qreal pts)
{
    VideoFrame *vf = static_cast<VideoFrame*>(frame);
    if (width != vf->width() || height != vf->height() || pixfmt != vf->pixelFormatFFmpeg() || options_changed) {
        width = vf->width();
        height = vf->height();
        pixfmt = (AVPixelFormat)vf->pixelFormatFFmpeg();
        options_changed = false;
        if (!setup()) {
            qWarning("setup filter graph error");
            enabled = false; // skip this filter and avoid crash
            return false;
        }
    }
    Q_ASSERT(avframe);
    avframe->pts = pts * 1000000.0; // time_base is 1/1000000
    avframe->width = vf->width();
    avframe->height = vf->height();
    avframe->format = pixfmt = (AVPixelFormat)vf->pixelFormatFFmpeg();
    for (int i = 0; i < vf->planeCount(); ++i) {
        avframe->data[i] = vf->bits(i);
        avframe->linesize[i] = vf->bytesPerLine(i);
    }
    int ret = av_buffersrc_add_frame_flags(in_filter_ctx, avframe, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret != 0) {
        qWarning("av_buffersrc_add_frame error: %s", av_err2str(ret));
        return false;
    }
    return true;
}
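
The pts conversion above hard-codes a 1/1000000 time base, which only works because the buffer source was configured with exactly that. A more general conversion from seconds to an arbitrary AVRational time base could look like this sketch; seconds_to_pts is a hypothetical helper built on libavutil:

extern "C" {
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>
}

// Rescale a timestamp given in seconds into units of time_base.
static int64_t seconds_to_pts(double seconds, AVRational time_base)
{
    // Express the input in microseconds (AV_TIME_BASE == 1000000) first,
    // then let av_rescale_q() do the rational arithmetic with rounding.
    return av_rescale_q(int64_t(seconds * AV_TIME_BASE),
                        av_make_q(1, AV_TIME_BASE), time_base);
}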
Example #3
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.constBits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return frame;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    m_cvt->setInRange(frame.colorRange());
    const int pal = format.hasPalette();
    QVector<const uchar*> pitch(format.planeCount() + pal);
    QVector<int> stride(format.planeCount() + pal);
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.constBits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    const QByteArray paldata(frame.metaData(QStringLiteral("pallete")).toByteArray());
    if (pal > 0) {
        pitch[1] = (const uchar*)paldata.constData();
        stride[1] = paldata.size();
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(frame.width(), frame.height(), fmt, m_cvt->outData());
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    f.setDisplayAspectRatio(frame.displayAspectRatio());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    return f;
}
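
A minimal usage sketch for the converter above, assuming a decoded frame is already in hand; display() is a hypothetical sink and RGB32 is just an example target:

VideoFrameConverter conv;
const VideoFrame rgb = conv.convert(frame, QTAV_PIX_FMT_C(RGB32)); // packed RGB for display
if (rgb.isValid())
    display(rgb); // display() stands in for whatever consumes the frame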
Example #4
bool QQuickItemRenderer::receiveFrame(const VideoFrame &frame)
{
    DPTR_D(QQuickItemRenderer);
    d.video_frame = frame;
    if (!isOpenGL()) {
        d.image = QImage((uchar*)frame.constBits(), frame.width(), frame.height(), frame.bytesPerLine(), frame.imageFormat());
        QRect r = realROI();
        if (r != QRect(0, 0, frame.width(), frame.height()))
            d.image = d.image.copy(r);
    }
    d.frame_changed = true;
//    update();  // why update slow? because of calling in a different thread?
    //QMetaObject::invokeMethod(this, "update"); // slower than directly postEvent
    QCoreApplication::postEvent(this, new QEvent(QEvent::User));
    return true;
}
Example #5
void
ShmHolder::renderFrame(VideoFrame& src) noexcept
{
    const auto width = src.width();
    const auto height = src.height();
    const auto format = VIDEO_PIXFMT_BGRA;
    const auto frameSize = videoFrameSize(format, width, height);

    if (!resizeArea(frameSize)) {
        RING_ERR("ShmHolder[%s]: could not resize area",
                 openedName_.c_str());
        return;
    }

    {
        VideoFrame dst;
        VideoScaler scaler;

        dst.setFromMemory(area_->data + area_->writeOffset, format, width, height);
        scaler.scale(src, dst);
    }

    {
        SemGuardLock lk {area_->mutex};

        ++area_->frameGen;
        std::swap(area_->readOffset, area_->writeOffset);
        ::sem_post(&area_->frameGenMutex);
    }
}
Example #6
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        f->format = frame.format().pixelFormatFFmpeg();
        f->width = frame.width();
        f->height = frame.height();
//        f->quality = d.avctx->global_quality;
        switch (timestampMode()) {
        case TimestampCopy:
            f->pts = int64_t(frame.timestamp()*frameRate()); // TODO: check monotonically increasing and fix if not. or another mode?
            break;
        case TimestampMonotonic:
            f->pts = d.nb_encoded+1;
            break;
        default:
            break;
        }
        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.constBits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        qWarning("error avcodec_encode_video2: %s" ,av_err2str(ret));
        return false; //false
    }
    d.nb_encoded++;
    if (!got_packet) {
        qWarning("no packet got");
        d.packet = Packet();
        // invalid frame means eof
        return frame.isValid();
    }
   // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
   // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
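
avcodec_encode_video2() and av_init_packet() used above are deprecated in current FFmpeg. A minimal sketch of the replacement send/receive API, assuming an already-configured AVCodecContext and ignoring the case where one frame yields several packets:

extern "C" {
#include <libavcodec/avcodec.h>
}

// Push one frame (NULL flushes the encoder), then try to pull one packet.
static bool encode_one(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt)
{
    if (avcodec_send_frame(ctx, frame) < 0)
        return false;
    const int ret = avcodec_receive_packet(ctx, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return false; // needs more input, or fully flushed
    return ret == 0;  // pkt is valid; caller must av_packet_unref() it
}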
Example #7
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.bits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return frame;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    QVector<const uchar*> pitch(format.planeCount());
    QVector<int> stride(format.planeCount());
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.bits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(m_cvt->outData(), frame.width(), frame.height(), fmt);
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    return f;
}
Example #8
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        f->format = frame.format().pixelFormatFFmpeg();
        f->width = frame.width();
        f->height = frame.height();
        // TODO: record last pts
        f->pts = int64_t(frame.timestamp()*frameRate());
        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.bits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        //qWarning("error avcodec_encode_video2: %s" ,av_err2str(ret));
        return false; //false
    }
    if (!got_packet) {
        qWarning("no packet got");
        return false; //false
    }
    qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
Example #9
bool Direct2DRenderer::receiveFrame(const VideoFrame& frame)
{
    DPTR_D(Direct2DRenderer);
    if (!d.prepareBitmap(frame.width(), frame.height()))
        return false;
    HRESULT hr = S_OK;
    //if d2d factory is D2D1_FACTORY_TYPE_SINGLE_THREADED, we need to lock
    //QMutexLocker locker(&d.img_mutex);
    //Q_UNUSED(locker);
    d.video_frame = frame;
    //TODO: if CopyFromMemory() is deep copy, mutex can be avoided
    /*if lock is required, do not use locker in if() scope, it will unlock outside the scope*/
    //TODO: d2d often crash, should we always lock? How about other renderer?
    hr = d.bitmap->CopyFromMemory(NULL // dst rect: NULL copies the whole bitmap (could be &D2D1::RectU(0, 0, frame.width(), frame.height()))
                                  , frame.bits(0) // msdn: const void*
                                  , frame.bytesPerLine(0));
    if (hr != S_OK) {
        qWarning("Failed to copy from memory to bitmap (%ld)", hr);
    }
    update();
    return true;
}
Example #10
void VideoCaptureNode::inputsUpdated( qint64 pTimeStamp )
{
	NodeControlBase::inputsUpdated( pTimeStamp );

//	if( !mNode->context()->active() )
//	{
//		return;
//	}

	if( mDevice && mDevice->timestamp() > mLastFrameTimeStamp )
	{
		fugio::Performance	P( mNode, __FUNCTION__, pTimeStamp );

		fugio::Image				Output = mValOutputImage->variant().value<fugio::Image>();

		for( int i = 0 ; i < 3 ; i++ )
		{
			Output.setBuffer( i, nullptr );
		}

		VideoFrame	VF = mDevice->frame();

		if( VF.width() != mVideoFrame.width() || VF.height() != mVideoFrame.height() || VF.pixelFormat() != mVideoFrame.pixelFormat() )
		{
			Output.setSize( VF.width(), VF.height() );

#if defined( VIDEOCAPTURE_SUPPORTED )
			switch( VF.pixelFormat() )
			{
				case CA_YUV422P:                                                             /* YUV422 Planar */
				case CA_YUVJ420P:                                                          /* YUV420 Planar Full Range (JPEG), J comes from the JPEG. (values 0-255 used) */
				case CA_YUVJ420BP:                                                          /* YUV420 Bi-Planar Full Range (JPEG), J comes from the JPEG. (values: luma = [16,235], chroma=[16,240]) */
				case CA_JPEG_OPENDML:                                                          /* JPEG with Open-DML extensions */
				case CA_H264:                                                                  /* H264 */
					return;

				case CA_YUV420BP:                                                            /* YUV420 Bi Planar */
					Output.setFormat( fugio::ImageFormat::NV12 );
					break;

				case CA_MJPEG:                                                                /* MJPEG 2 */
					Output.setFormat( fugio::ImageFormat::YUVJ422P );
					break;

				case CA_YUV420P:                                                           /* YUV420 Planar */
					Output.setFormat( fugio::ImageFormat::YUV420P );
					break;

				case CA_UYVY422:                                                              /* Cb Y0 Cr Y1 */
					Output.setFormat( fugio::ImageFormat::UYVY422 );
					break;

				case CA_YUYV422:                                                             /* Y0 Cb Y1 Cr */
					Output.setFormat( fugio::ImageFormat::YUYV422 );
					break;

				case CA_ARGB32:                                                              /* ARGB 8:8:8:8 32bpp, ARGBARGBARGB... */
				case CA_RGBA32:                                                              /* RGBA 8:8:8:8 32bpp. */
					Output.setFormat( fugio::ImageFormat::RGBA8 );
					break;

				case CA_BGRA32:                                                             /* BGRA 8:8:8:8 32bpp, BGRABGRABGRA... */
					Output.setFormat( fugio::ImageFormat::BGRA8 );
					break;

				case CA_RGB24:                                                              /* RGB 8:8:8 24bit */
	#if defined( Q_OS_WIN )
					Output.setFormat( fugio::ImageFormat::BGR8 );
	#else
					Output.setFormat( fugio::ImageFormat::RGB8 );
	#endif
					break;
			}
#endif

			for( int i = 0 ; i < 3 ; i++ )
			{
				Output.setLineSize( i, VF.stride( i ) );
			}
		}

		if( Output.isValid() )
		{
			mVideoFrame = VF;

			for( int i = 0 ; i < 3 ; i++ )
			{
				Output.setBuffer( i, mVideoFrame.plane( i ) );
			}

			pinUpdated( mPinOutputImage );
		}

		mLastFrameTimeStamp = mDevice->timestamp();
	}
}
Example #11
bool VideoOutput::receive(const VideoFrame& frame)
{
    DPTR_D(VideoOutput);
    setInSize(frame.width(), frame.height());
    return d.impl->receiveFrame(frame);
}
Example #12
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    QScopedPointer<AVFrame, ScopedAVFrameDeleter> f;
    // hwupload
    AVPixelFormat pixfmt = AVPixelFormat(frame.pixelFormatFFmpeg());
    if (frame.isValid()) {
        f.reset(av_frame_alloc());
        f->format = pixfmt;
        f->width = frame.width();
        f->height = frame.height();
//        f->quality = d.avctx->global_quality;
        switch (timestampMode()) {
        case TimestampCopy:
            f->pts = int64_t(frame.timestamp()*frameRate()); // TODO: check monotonically increasing and fix if not. or another mode?
            break;
        case TimestampMonotonic:
            f->pts = d.nb_encoded+1;
            break;
        default:
            break;
        }

        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.constBits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
#ifdef HAVE_AVHWCTX
        if (d.avctx->hw_frames_ctx) {
            // TODO: try to map to SourceSurface
            // check valid sw_formats
            if (!d.hwframes_ref) {
                qWarning("no hw frame context for uploading");
                return false;
            }
            VideoFrame converted; // keep the converted frame alive until the hw upload below
            if (pixfmt != d.hwframes->sw_format) {
                // reinit, or got an unsupported format. assume parameters will not change, so it's the 1st init
                // check constraints
                bool init_frames_ctx = d.hwframes->sw_format == AVPixelFormat(-1);
                if (d.sw_fmts.contains(pixfmt)) { // format changed
                    init_frames_ctx = true;
                } else { // convert to a supported sw format
                    pixfmt = d.sw_fmts[0];
                    f->format = pixfmt;
                    converted = frame.to(VideoFormat::pixelFormatFromFFmpeg(pixfmt));
                    for (int i = 0; i < converted.planeCount(); ++i) {
                        f->linesize[i] = converted.bytesPerLine(i);
                        f->data[i] = (uint8_t*)converted.constBits(i); // read planes from the converted frame
                    }
                }
                if (init_frames_ctx) {
                    d.hwframes->sw_format = pixfmt;
                    d.hwframes->width = frame.width();
                    d.hwframes->height = frame.height();
                    AV_ENSURE(av_hwframe_ctx_init(d.hwframes_ref), false);
                }
            }
            // upload
            QScopedPointer<AVFrame, ScopedAVFrameDeleter> hwf( av_frame_alloc());
            AV_ENSURE(av_hwframe_get_buffer(d.hwframes_ref, hwf.data(), 0), false);
            //hwf->format = d.hwframes->format; // not necessary
            //hwf->width = f->width;
            //hwf->height = f->height;
            AV_ENSURE(av_hwframe_transfer_data(hwf.data(), f.data(), 0), false);
            AV_ENSURE(av_frame_copy_props(hwf.data(), f.data()), false);
            av_frame_unref(f.data());
            av_frame_move_ref(f.data(), hwf.data());
        }
#endif //HAVE_AVHWCTX
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f.data(), &got_packet);
    if (ret < 0) {
        qWarning("error avcodec_encode_video2: %s" ,av_err2str(ret));
        return false; //false
    }
    d.nb_encoded++;
    if (!got_packet) {
        qWarning("no packet got");
        d.packet = Packet();
        // invalid frame means eof
        return frame.isValid();
    }
   // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
   // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}