Example #1
VideoFrame VideoDecoderContext::decodeVideo(OptionalErrorCode ec, const Packet &packet, size_t offset, size_t *decodedBytes, bool autoAllocateFrame)
{
    clear_if(ec);

    VideoFrame outFrame;
    if (!autoAllocateFrame)
    {
        outFrame = {pixelFormat(), width(), height(), 32};

        if (!outFrame.isValid())
        {
            throws_if(ec, Errors::FrameInvalid);
            return VideoFrame();
        }
    }

    int gotFrame = 0;
    auto st = decodeCommon(outFrame, packet, offset, gotFrame, avcodec_decode_video_legacy);

    if (get<1>(st)) {
        throws_if(ec, get<0>(st), *get<1>(st));
        return VideoFrame();
    }

    if (!gotFrame)
        return VideoFrame();

    outFrame.setPictureType(AV_PICTURE_TYPE_I);

    if (decodedBytes)
        *decodedBytes = get<0>(st);

    return outFrame;
}
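A minimal calling sketch for the decodeVideo() overload above; the helper name, the error-code handling, and the surrounding decode-loop context are assumptions, only the parameter list and the isValid() check mirror the example.

#include <system_error>

// Hypothetical per-packet step of a decode loop: feed one demuxed packet to
// the decoder and report whether a complete picture came out.
bool decodeOnePacket(VideoDecoderContext &vdec, const Packet &pkt, VideoFrame &out)
{
    std::error_code ec;
    size_t decodedBytes = 0;
    // offset = 0; autoAllocateFrame = true lets the codec allocate the buffers
    out = vdec.decodeVideo(ec, pkt, 0, &decodedBytes, true);
    if (ec)
        return false;          // decoding error
    return out.isValid();      // false means the decoder needs more packets
}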
Example #2
	VideoFrame VideoFrame::newVideoFrame(VideoFrame videoFrame){
		if(videoFrame.data->createdTexPixels){
			return newVideoFrame(videoFrame.getPixelsRef());
		}else{
			return newVideoFrame(videoFrame.getFboRef());
		}
	}
Example #3
void VideoBuffers::CheckDecodedFrames(void)
{
    QMutexLocker locker(m_lock);

    QList<VideoFrame*> recovered;
    QList<VideoFrame*>::iterator it = m_reference.begin();
    for ( ; it != m_reference.end(); ++it)
        if (!m_decoded.contains((*it)))
            recovered.append((*it));

    while (!recovered.isEmpty())
    {
        VideoFrame* frame = recovered.takeFirst();
        m_reference.removeOne(frame);
        if (frame->Discard())
        {
            delete frame;
            m_frameCount--;
        }
        else
        {
            m_unused.append(frame);
        }
    }
}
Example #4
	void tryGraph() {
		if (type != Graph || !graph.initialize(option, in->format().size(), in->format().imgfmt())
				|| !graph.push(in->mpi()))
			return;
		while (auto out = graph.pull())
			push(out);
	}
Example #5
bool VideoEncoderX264or5::doProcessFrame(Frame *org, Frame *dst)
{
    if (!(org && dst)) {
        utils::errorMsg("Error encoding video frame: org or dst are NULL");
        return false;
    }

    VideoFrame* rawFrame = dynamic_cast<VideoFrame*> (org);
    VideoFrame* codedFrame = dynamic_cast<VideoFrame*> (dst);

    if (!rawFrame || !codedFrame) {
        utils::errorMsg("Error encoding video frame: org and dst MUST be VideoFrame");
        return false;
    }

    if (!reconfigure(rawFrame, codedFrame)) {
        utils::errorMsg("Error encoding video frame: reconfigure failed");
        return false;
    }

    if (!fill_x264or5_picture(rawFrame)){
        utils::errorMsg("Could not fill x264_picture_t from frame");
        return false;
    }

    if (!encodeFrame(codedFrame)) {
        utils::errorMsg("Could not encode video frame");
        return false;
    }

    codedFrame->setSize(rawFrame->getWidth(), rawFrame->getHeight());

    return true;
}
Example #6
void DiscardDeint::filter(QQueue< FrameBuffer > &framesQueue)
{
    int insertAt = addFramesToDeinterlace(framesQueue);
    while (!internalQueue.isEmpty())
    {
        FrameBuffer dequeued = internalQueue.dequeue();
        VideoFrame *videoFrame = VideoFrame::fromData(dequeued.data);
        const bool TFF = isTopFieldFirst(videoFrame);
        videoFrame->setNoInterlaced();
        for (int p = 0; p < 3; ++p)
        {
            const int linesize = videoFrame->linesize[p];
            quint8 *src = videoFrame->data[p];
            quint8 *dst = videoFrame->data[p];
            const int lines = (p ? h >> 2 : h >> 1) - 1;
            if (!TFF)
            {
                memcpy(dst, src + linesize, linesize);
                src += linesize;
                dst += linesize;
            }
            dst += linesize;
            src += linesize;
            for (int i = 0; i < lines; ++i)
            {
                VideoFilters::averageTwoLines(dst, src - linesize, src + linesize, linesize);
                src += linesize << 1;
                dst += linesize << 1;
            }
            if (TFF)
                memcpy(dst, src - linesize, linesize);
        }
        framesQueue.insert(insertAt++, dequeued);
    }
}
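The deinterlacer above leans on VideoFilters::averageTwoLines(); a plausible scalar equivalent is sketched below just to show the intended blend of the two neighbouring lines (QMPlay2's real helper is SIMD-optimized and may differ in rounding).

#include <QtGlobal>

// Plausible scalar stand-in for VideoFilters::averageTwoLines(): writes the
// rounded average of two source lines into dst, one byte per pixel component.
static void averageTwoLinesScalar(quint8 *dst, const quint8 *src1,
                                  const quint8 *src2, int linesize)
{
    for (int i = 0; i < linesize; ++i)
        dst[i] = quint8((src1[i] + src2[i] + 1) >> 1);
}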
Example #7
void VideoRenderer::drawNextFrame(){
    VideoFrame * frame = source->getNextVideoFrame();
    if(frame!=NULL){
        frame->getTextureRef().draw(0,0);
        frame->release();
    }
}
Example #8
void Window::render(const VideoFrame& frame)
{
    LogDebug("Rendering frame " << frame.getId());
    glClear(GL_COLOR_BUFFER_BIT);

    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);

    glEnableVertexAttribArray(1);
    glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);

    // TODO: consider linesize padding here
    // TODO: use glTexSubImage2D for more performance
    glTexImage2D(GL_TEXTURE_2D,
                 0,
                 GL_RED,
                 frame.getWidth(),
                 frame.getHeight(),
                 0,
                 GL_RED,
                 GL_UNSIGNED_BYTE,
                 frame.getLumaData());

    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, (void*) 0);

    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(0);
    glfwSwapBuffers(glfwWindow);
}
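render() above assumes a luma texture was created and bound beforehand; a one-time setup along these lines would make the glTexImage2D() upload work for tightly packed single-channel rows. The function name and the filter/wrap choices are assumptions, not taken from the example.

// Hypothetical one-time setup for the luma texture that render() uploads to.
GLuint createLumaTexture()
{
    GLuint tex = 0;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // GL_RED + GL_UNSIGNED_BYTE rows are read with 4-byte alignment by
    // default; frame widths that are not a multiple of 4 need tight packing:
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    return tex;
}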
Example #9
bool VaApiMixer::upload(const VideoFrame &frame, bool deint) {
	if (!m_glSurface)
		return false;
	static const int specs[MP_CSP_COUNT] = {
		0,					//MP_CSP_AUTO,
		VA_SRC_BT601,		//MP_CSP_BT_601,
		VA_SRC_BT709,		//MP_CSP_BT_709,
		VA_SRC_SMPTE_240,	//MP_CSP_SMPTE_240M,
		0,					//MP_CSP_RGB,
		0,					//MP_CSP_XYZ,
		0,					//MP_CSP_YCGCO,
	};
	static const int field[] = {
		// Picture = 0,   Top = 1,      Bottom = 2
		VA_FRAME_PICTURE, VA_TOP_FIELD, VA_BOTTOM_FIELD, VA_FRAME_PICTURE
	};
	const auto id = (VASurfaceID)(quintptr)frame.data(3);
	int flags = specs[frame.format().colorspace()];
	if (deint)
		flags |= field[frame.field() & VideoFrame::Interlaced];
	if (!check(vaCopySurfaceGLX(VaApi::glx(), m_glSurface, id,  flags), "Cannot copy OpenGL surface."))
		return false;
	if (!check(vaSyncSurface(VaApi::glx(), id), "Cannot sync video surface."))
		return false;
	return true;
}
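The m_glSurface consumed above has to be created from an OpenGL texture before upload() can copy into it; a sketch using libva's GLX interop is below. The wrapper function and texture handling are assumptions; only vaCopySurfaceGLX()/vaSyncSurface() come from the example.

#include <va/va_glx.h>
#include <GL/gl.h>

// Hypothetical creation of the GLX surface used by upload() above: binds a
// 2D GL texture to a VA-API surface handle so that decoded frames can later
// be copied into it with vaCopySurfaceGLX().
bool createGlxSurface(VADisplay display, GLuint texture, void **glSurface)
{
    return vaCreateSurfaceGLX(display, GL_TEXTURE_2D, texture, glSurface)
            == VA_STATUS_SUCCESS;
}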
Example #10
bool QPainterRenderer::preparePixmap(const VideoFrame &frame)
{
    DPTR_D(QPainterRenderer);
    // already locked in a larger scope of receive()
    QImage::Format imgfmt = frame.imageFormat();
    if (frame.constBits(0)) {
        d.video_frame = frame;
    } else {
        if (imgfmt == QImage::Format_Invalid) {
            d.video_frame = frame.to(VideoFormat::Format_RGB32);
            imgfmt = d.video_frame.imageFormat();
        } else {
            d.video_frame = frame.to(frame.pixelFormat());
        }
    }
    const bool swapRGB = (int)imgfmt < 0;
    if (swapRGB) {
        imgfmt = (QImage::Format)(-imgfmt);
    }
    // DO NOT use frameData().data() because it's temp ptr while d.image does not deep copy the data
    QImage image = QImage((uchar*)d.video_frame.constBits(), d.video_frame.width(), d.video_frame.height(), d.video_frame.bytesPerLine(), imgfmt);
    if (swapRGB)
        image = image.rgbSwapped();
    d.pixmap = QPixmap::fromImage(image);
    //Format_RGB32 is fast. see document
    return true;
}
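preparePixmap() only caches the pixmap; a later paint step along these lines is assumed to draw it. The helper and rectangle are illustrative, QtAV's actual drawing code is more involved.

#include <QPainter>
#include <QPixmap>
#include <QRect>

// Hypothetical paint step following preparePixmap(): draws the cached pixmap
// scaled into the output rectangle.
void paintPreparedFrame(QPainter &painter, const QRect &outRect, const QPixmap &pixmap)
{
    if (pixmap.isNull())
        return;
    painter.drawPixmap(outRect, pixmap); // scales if sizes differ
}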
Example #11
	void avLooperRenderer::draw(int x,int y,int w,int h)
	{
		// audio -> video Sync !!
		//////////////////////////
		// 1
		//VideoFrame * frame = vHeader.getVideoFrame(int(float(aHeader2.getIndex())/float(aBuffer->sizeInSamples()))*vBuffer->getMaxSize());
		//printf("index %d of size %d = %d\n",aHeader2.getIndex(),aBuffer->sizeInSamples(),int(float(aHeader2.getIndex())/float(aBuffer->sizeInSamples()))*vBuffer->getMaxSize());
		
		// 2
		float delayToVideo = (float(aHeader2.getIndex()) / float(audioSampleRate)) * 1000.0; 
		vHeader.setDelayMs(float(maximumSizeInMs)-delayToVideo-float(videoOffsetInMs));
		//printf("avR ::DELAY is = %f || maxSize %d delayToVideo in ms = %f / index %d\n",float(maximumSizeInMs)-delayToVideo-float(videoOffsetInMs),maximumSizeInMs,delayToVideo,aHeader2.getIndex());
		//printf("AVLR:: videoDelayMs :: %f \n",float(maximumSizeInMs)-delayToVideo-float(videoOffsetInMs));
		
		VideoFrame frame = vHeader.getNextVideoFrame();
		if(frame!=NULL){
			// draw the frame texture to screen
			ofSetColor(vHeader.getOpacity(),vHeader.getOpacity(),vHeader.getOpacity());
			frame.getTextureRef().draw(x,y,w,h);
		}
		
		// draw av header interfaces
		aBuffer->draw();
		vBuffer->draw();

		vHeader.draw();
		aHeader2.draw();
		
		ofSetColor(255,255,255);
		
	}
Example #12
void VideoReader::init()
{
	// analyse InputFile
	avtranscoder::NoDisplayProgress p;
	_inputFile->analyse( p );
	_streamProperties = &_inputFile->getProperties().getStreamPropertiesWithIndex(_streamIndex);
	_videoStreamProperties = static_cast<const VideoProperties*>(_streamProperties);
	_inputFile->activateStream( _streamIndex );

	// setup decoder
	_decoder = new VideoDecoder( _inputFile->getStream( _streamIndex ) );
	_decoder->setupDecoder();

	// create src frame
	_srcFrame = new VideoFrame( _inputFile->getStream( _streamIndex ).getVideoCodec().getVideoFrameDesc() );
	VideoFrame* srcFrame = static_cast<VideoFrame*>(_srcFrame);
	// create dst frame
	if( _width == 0 )
		_width = srcFrame->desc().getWidth();
	if( _height == 0 )
		_height = srcFrame->desc().getHeight();
	VideoFrameDesc videoFrameDescToDisplay( _width, _height, getPixelFormat() );
	_dstFrame = new VideoFrame( videoFrameDescToDisplay );

	// create transform
	_transform = new VideoTransform();
}
Example #13
int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);

    FrameReader r;
    r.setMedia(a.arguments().last());
    QQueue<qint64> t;
    int count = 0;
    qint64 t0 = QDateTime::currentMSecsSinceEpoch();
    while (r.readMore()) {
        while (r.hasEnoughVideoFrames()) {
            const VideoFrame f = r.getVideoFrame(); //TODO: if eof
            if (!f)
                continue;
            count++;
            //r.readMore();
            const qint64 now = QDateTime::currentMSecsSinceEpoch();
            const qint64 dt = now - t0;
            t.enqueue(now);
            printf("decode @%.3f count: %d, elapsed: %lld, fps: %.1f/%.1f\r", f.timestamp(), count, dt, count*1000.0/dt, t.size()*1000.0/(now - t.first()));fflush(0);
            if (t.size() > 10)
                t.dequeue();
        }
    }
    while (r.hasVideoFrame()) {
        const VideoFrame f = r.getVideoFrame();
        qDebug("pts: %.3f", f.timestamp());
    }
    qDebug("read done");
    return 0;
}
Example #14
void
ShmHolder::renderFrame(VideoFrame& src) noexcept
{
    const auto width = src.width();
    const auto height = src.height();
    const auto format = VIDEO_PIXFMT_BGRA;
    const auto frameSize = videoFrameSize(format, width, height);

    if (!resizeArea(frameSize)) {
        RING_ERR("ShmHolder[%s]: could not resize area",
                 openedName_.c_str());
        return;
    }

    {
        VideoFrame dst;
        VideoScaler scaler;

        dst.setFromMemory(area_->data + area_->writeOffset, format, width, height);
        scaler.scale(src, dst);
    }

    {
        SemGuardLock lk {area_->mutex};

        ++area_->frameGen;
        std::swap(area_->readOffset, area_->writeOffset);
        ::sem_post(&area_->frameGenMutex);
    }
}
Example #15
void VideoBuffer::pushNewVideoFrame(VideoFrame & frame){
    
    int64_t time = frame.getTimestamp().epochMicroseconds();
    if(microsOneSec==-1) microsOneSec=time;
    framesOneSec++;
    int64_t diff = time-microsOneSec;
    if(diff>=1000000){
        realFps = double(framesOneSec*1000000.)/double(diff);
        framesOneSec = 0;
        microsOneSec = time-(diff-1000000);
    }
    totalFrames++;
    if(size()==0)initTime=frame.getTimestamp();
    //timeMutex.lock();

    
    if (size() >= maxSize) {
        // THIS LINE IS GIVING ME CRASHES SOMETIMES ..... SERIOUS WTF : if i dont see this happen again its fixed
        frames[ofClamp(framePos, 0, size()-1)] = frame; // Here we use the framePos variable to specify where new frames
                                  // should be stored in the video buffer instead of using the vector push_back call.
    }
    else if (size() < maxSize) {
        frames.push_back(frame);
    }
    
    while(size() > maxSize){
        frames.erase(frames.begin()+framePos);
    }
}
Example #16
	void tryPostProc() {
		if (type != PP || !pp.initialize(option, in->format().size(), in->format().imgfmt()))
			return;
		const bool topFirst = in->mpi()->fields & MP_IMGFIELD_TOP_FIRST;
		push(topFirst ? topField() : bottomField());
		if (deint.doubler)
			push(!topFirst ? topField() : bottomField());
	}
Example #17
void FileGrabber::update(){
	ofVideoPlayer::update();
	if(isFrameNew()){
		VideoFrame * frame = getNextVideoFrame();
		newFrameEvent.notify(this,*frame);
		frame->release();
	}
}
Example #18
	void push(mp_image *mpi) {
		mpi->colorspace = in->format().colorspace();
		mpi->levels = in->format().range();
		mpi->display_w = in->format().displaySize().width();
		mpi->display_h = in->format().displaySize().height();
		mpi->pts = p->nextPTS();
		queue->push_back(VideoFrame(true, mpi, in->field()));
		++pushed;
	}
Example #19
VideoFrame VideoDecoderVDA::frame()
{
    DPTR_D(VideoDecoderVDA);
    CVPixelBufferRef cv_buffer = (CVPixelBufferRef)d.frame->data[3];
    if (!cv_buffer) {
        qDebug("Frame buffer is empty.");
        return VideoFrame();
    }
    if (CVPixelBufferGetDataSize(cv_buffer) <= 0) {
        qDebug("Empty frame buffer");
        return VideoFrame();
    }
    VideoFormat::PixelFormat pixfmt = format_from_cv(CVPixelBufferGetPixelFormatType(cv_buffer));
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vda pixel format: %#x", CVPixelBufferGetPixelFormatType(cv_buffer));
        return VideoFrame();
    }
    // we can map the cv buffer addresses to video frame in SurfaceInteropCVBuffer. (may need VideoSurfaceInterop::mapToTexture()
    class SurfaceInteropCVBuffer Q_DECL_FINAL: public VideoSurfaceInterop {
        bool glinterop;
        CVPixelBufferRef cvbuf; // keep ref until video frame is destroyed
    public:
        SurfaceInteropCVBuffer(CVPixelBufferRef cv, bool gl) : glinterop(gl), cvbuf(cv) {
            //CVPixelBufferRetain(cvbuf);
        }
        ~SurfaceInteropCVBuffer() {
            CVPixelBufferRelease(cvbuf);
        }
        void* mapToHost(const VideoFormat &format, void *handle, int plane) {
            Q_UNUSED(plane);
            CVPixelBufferLockBaseAddress(cvbuf, 0);
            const VideoFormat fmt(format_from_cv(CVPixelBufferGetPixelFormatType(cvbuf)));
            if (!fmt.isValid()) {
                CVPixelBufferUnlockBaseAddress(cvbuf, 0);
                return NULL;
            }
            const int w = CVPixelBufferGetWidth(cvbuf);
            const int h = CVPixelBufferGetHeight(cvbuf);
            uint8_t *src[3];
            int pitch[3];
            for (int i = 0; i <fmt.planeCount(); ++i) {
                // get address results in internal copy
                src[i] = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cvbuf, i);
                pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(cvbuf, i);
            }
            CVPixelBufferUnlockBaseAddress(cvbuf, 0);
            //CVPixelBufferRelease(cv_buffer); // release when video frame is destroyed
            VideoFrame frame(VideoFrame::fromGPU(fmt, w, h, h, src, pitch));
            if (fmt != format)
                frame = frame.to(format);
            VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
            frame.setTimestamp(f->timestamp());
            frame.setDisplayAspectRatio(f->displayAspectRatio());
            *f = frame;
            return f;
        }
Example #20
void VideoFrame::doDeepCopy(const VideoFrame &frame) {
	d.detach();
	Q_ASSERT(d->format == frame.format());
	auto p = d->buffer.data();
	for (int i=0; i<d->format.planes(); ++i) {
		const int len = d->format.bytesPerPlain(i);
		memcpy(p, frame.data(i),  len);
		p += len;
	}
}
Example #21
  void printHash(const VideoFrame& frame,
                 const std::string& imgname, const std::string& imgtype)
  {
    const uint32 h =
      jenkinshash(reinterpret_cast<const byte*>(frame.getBuffer()),
                  frame.getBufSize(),
                  0);

    this->doPrintHash(h, imgname, imgtype);
  }
Example #22
// ######################################################################
bool operator==(const GenericFrame& f1, const GenericFrame& f2)
{
  if (f1.nativeType() == f2.nativeType())
    {
      switch (f1.nativeType())
        {
        case GenericFrame::NONE: return true;
        case GenericFrame::RGB_U8: return f1.asRgbU8() == f2.asRgbU8();
        case GenericFrame::RGBD: return ((f1.asRgbU8() == f2.asRgbU8()) && (f1.asGrayU16() == f2.asGrayU16()));
        case GenericFrame::RGB_F32: return f1.asRgbF32() == f2.asRgbF32();
        case GenericFrame::GRAY_U8: return f1.asGrayU8() == f2.asGrayU8();
        case GenericFrame::GRAY_F32: return f1.asGrayF32() == f2.asGrayF32();
        case GenericFrame::VIDEO:
          {
            const VideoFrame v1 = f1.asVideo();
            const VideoFrame v2 = f2.asVideo();

            if (v1.getMode() == v2.getMode())
              return std::equal(v1.getBuffer(),
                                v1.getBuffer() + v1.getBufSize(),
                                v2.getBuffer());
            else
              return v1.toRgb() == v2.toRgb();
          }
        case GenericFrame::RGB_U16:        return f1.asRgbU16() == f2.asRgbU16();
        case GenericFrame::GRAY_U16:       return f1.asGrayU16() == f2.asGrayU16();
        }
    }

  return f1.asRgbF32() == f2.asRgbF32();
}
Example #23
	void avLooperRenderer::draw()
	{
		VideoFrame frame = vHeader.getNextVideoFrame();
		if(frame!=NULL){
			// draw the frame texture to screen
			frame.getTextureRef().draw(0,0);
		}
		// draw av header interfaces
		vHeader.draw();
		aHeader2.draw();
	}
Example #24
VideoFrame VideoFrame::to(const VideoFormat &fmt, const QSize& dstSize, const QRectF& roi) const
{
    if (!isValid() || !constBits(0)) {// hw surface. map to host. only supports rgb packed formats now
        Q_D(const VideoFrame);
        const QVariant v = d->metadata.value(QStringLiteral("surface_interop"));
        if (!v.isValid())
            return VideoFrame();
        VideoSurfaceInteropPtr si = v.value<VideoSurfaceInteropPtr>();
        if (!si)
            return VideoFrame();
        VideoFrame f;
        f.setDisplayAspectRatio(displayAspectRatio());
        f.setTimestamp(timestamp());
        if (si->map(HostMemorySurface, fmt, &f)) {
            if ((!dstSize.isValid() ||dstSize == QSize(width(), height())) && (!roi.isValid() || roi == QRectF(0, 0, width(), height()))) //roi is not supported now
                return f;
            return f.to(fmt, dstSize, roi);
        }
        return VideoFrame();
    }
    const int w = dstSize.width() > 0 ? dstSize.width() : width();
    const int h = dstSize.height() > 0 ? dstSize.height() : height();
    if (fmt.pixelFormatFFmpeg() == pixelFormatFFmpeg()
            && w == width() && h == height()
            // TODO: roi check.
            )
        return *this;
    Q_D(const VideoFrame);
    ImageConverterSWS conv;
    conv.setInFormat(pixelFormatFFmpeg());
    conv.setOutFormat(fmt.pixelFormatFFmpeg());
    conv.setInSize(width(), height());
    conv.setOutSize(w, h);
    conv.setInRange(colorRange());
    if (!conv.convert(d->planes.constData(), d->line_sizes.constData())) {
        qWarning() << "VideoFrame::to error: " << format() << "=>" << fmt;
        return VideoFrame();
    }
    VideoFrame f(w, h, fmt, conv.outData());
    f.setBits(conv.outPlanes());
    f.setBytesPerLine(conv.outLineSizes());
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    f.setTimestamp(timestamp());
    f.setDisplayAspectRatio(displayAspectRatio());
    f.d_ptr->metadata = d->metadata; // need metadata?
    return f;
}
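A small caller sketch for the to() conversion above: normalize an arbitrary frame to RGB32 at its native size and wrap the result in a QImage. The helper function is an assumption; the constBits()/bytesPerLine() usage mirrors the other QtAV examples in this list.

#include <QImage>

// Hypothetical helper: converts any VideoFrame to RGB32 and wraps the result
// in a QImage; copy() detaches the image from the temporary frame's buffer so
// the image owns its pixels.
QImage frameToImage(const VideoFrame &frame)
{
    const VideoFrame rgb = frame.to(VideoFormat::Format_RGB32, QSize(), QRectF());
    if (!rgb.isValid())
        return QImage();
    return QImage((const uchar*)rgb.constBits(0), rgb.width(), rgb.height(),
                  rgb.bytesPerLine(0), QImage::Format_RGB32).copy();
}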
Example #25
void VideoBuffer::setup(VideoSource & source, int size, bool allocateOnSetup){
	this->source=&source;
	totalFrames=0;
	maxSize = size;
	if(allocateOnSetup){
		for(int i=0;i<size;i++){
			VideoFrame videoFrame = VideoFrame::newVideoFrame(source.getNextVideoFrame().getPixelsRef());
			videoFrame.getTextureRef();
			newVideoFrame(videoFrame);
		}
	}
	resume();
	microsOneSec=-1;
}
Example #26
void
SinkClient::update(Observable<std::shared_ptr<VideoFrame>>* /*obs*/,
                   const std::shared_ptr<VideoFrame>& frame_p)
{
    auto& f = *frame_p;

#ifdef DEBUG_FPS
    auto currentTime = std::chrono::system_clock::now();
    const std::chrono::duration<double> seconds = currentTime - lastFrameDebug_;
    ++frameCount_;
    if (seconds.count() > 1) {
        std::ostringstream fps;
        fps << frameCount_ / seconds.count();
        // Send the framerate in smartInfo
        Smartools::getInstance().setFrameRate(id_, fps.str());
        frameCount_ = 0;
        lastFrameDebug_ = currentTime;
    }
#endif

#if HAVE_SHM
    // Send the resolution in smartInfo
    Smartools::getInstance().setResolution(id_, f.width(), f.height());
    shm_->renderFrame(f);
#endif

    if (target_.pull) {
        VideoFrame dst;
        VideoScaler scaler;
        const int width = f.width();
        const int height = f.height();
#if (defined(__ANDROID__) || defined(__APPLE__))
        const int format = VIDEO_PIXFMT_RGBA;
#else
        const int format = VIDEO_PIXFMT_BGRA;
#endif
        const auto bytes = videoFrameSize(format, width, height);

        if (bytes > 0) {
            if (auto buffer_ptr = target_.pull(bytes)) {
                buffer_ptr->format = libav_utils::libav_pixel_format(format);
                buffer_ptr->width = width;
                buffer_ptr->height = height;
                dst.setFromMemory(buffer_ptr->ptr, format, width, height);
                scaler_->scale(f, dst);
                target_.push(std::move(buffer_ptr));
            }
        }
    }
}
Example #27
int VideoDecoder::GetAVBuffer(AVCodecContext *Context, AVFrame *Frame)
{
    VideoFrame *frame  = m_videoParent->GetBuffers()->GetFrameForDecoding();

    if (!frame)
        return -1;

    if (Context->width != frame->m_rawWidth || Context->height != frame->m_rawHeight || Context->pix_fmt != frame->m_pixelFormat)
        LOG(VB_GENERAL, LOG_ERR, "Frame format changed");

    // start frame initialisation
    Frame->opaque              = frame;
    Frame->type                = FF_BUFFER_TYPE_USER;
    Frame->pkt_pts             = Context->pkt ? Context->pkt->pts : AV_NOPTS_VALUE;
    Frame->pkt_dts             = Context->pkt ? Context->pkt->dts : AV_NOPTS_VALUE;
    Frame->width               = frame->m_rawWidth;
    Frame->height              = frame->m_rawHeight;
    Frame->format              = frame->m_pixelFormat;
    Frame->sample_aspect_ratio = Context->sample_aspect_ratio;

    // initialise hardware context
    // FIXME this needs a failure mode so that we don't always fallback to a software frame
    bool initialised = false;
    AccelerationFactory* factory = AccelerationFactory::GetAccelerationFactory();
    for ( ; factory; factory = factory->NextFactory())
    {
        if (factory->InitialiseBuffer(Context, Frame, frame))
        {
            initialised = true;
            break;
        }
    }

    // or fallback to software decoding
    if (!initialised)
    {
        frame->InitialiseBuffer();
        for (int i = 0; i < 4; i++)
        {
            Frame->data[i]     = frame->m_buffer + frame->m_offsets[i];
            Frame->base[i]     = Frame->data[i];
            Frame->linesize[i] = frame->m_pitches[i];
        }
    }

    // finish frame initialisation
    Frame->extended_data       = Frame->data;

    return 0;
}
Example #28
void* SurfaceInteropDXVA::mapToHost(const VideoFormat &format, void *handle, int plane)
{
    Q_UNUSED(plane);
    class ScopedD3DLock {
        IDirect3DSurface9 *mpD3D;
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect) : mpD3D(d3d) {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    };

    D3DLOCKED_RECT lock;
    // keep the lock alive until the copy below completes; a bare
    // ScopedD3DLock(m_surface, &lock) temporary would unlock immediately
    ScopedD3DLock d3dLock(m_surface, &lock);
    if (lock.Pitch == 0)
        return NULL;

    // pitch >= desc.Width
    D3DSURFACE_DESC desc;
    m_surface->GetDesc(&desc);
    const VideoFormat fmt = VideoFormat(pixelFormatFromFourcc(desc.Format));
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", desc.Format);
        return NULL;
    }
    //YV12 need swap, not imc3?
    // imc3 U V pitch == Y pitch, but half of the U/V plane is space. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    quint8 *src[] = { (quint8*)lock.pBits, 0, 0}; //compute chroma later
    Q_ASSERT(src[0] && pitch[0] > 0);
    const bool swap_uv = desc.Format ==  MAKEFOURCC('I','M','C','3');
    // try to use SSE. fallback to normal copy if SSE is not supported
    VideoFrame frame(VideoFrame::fromGPU(fmt, frame_width, frame_height, desc.Height, src, pitch, true, swap_uv));
    // TODO: check rgb32 because d3d can use hw to convert
    if (format != fmt)
        frame = frame.to(format);
    VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
    frame.setTimestamp(f->timestamp());
    *f = frame;
    return f;
}
Example #29
void HistogramFilter::generateHistogram()
{
	if(!m_frame)
		return;
		
	QImage image = frameImage();
	QImage histo = makeHistogram(image);
	
	VideoFrame *frame = new VideoFrame(histo,m_frame->holdTime());
	
	if(m_includeOriginalImage)
		frame->setCaptureTime(m_frame->captureTime());
	
	enqueue(frame);
}
Example #30
bool
HardwareAccel::extractData(VideoFrame& input)
{
    try {
        auto inFrame = input.pointer();

        if (inFrame->format != format_) {
            std::stringstream buf;
            buf << "Frame format mismatch: expected " << av_get_pix_fmt_name(format_);
            buf << ", got " << av_get_pix_fmt_name((AVPixelFormat)inFrame->format);
            throw std::runtime_error(buf.str());
        }

        // FFmpeg requires a second frame in which to transfer the data
        // from the GPU buffer to the main memory
        auto output = std::unique_ptr<VideoFrame>(new VideoFrame());
        auto outFrame = output->pointer();
        outFrame->format = AV_PIX_FMT_YUV420P;

        extractData(input, *output);

        // move outFrame into inFrame so the caller receives extracted image data
        // but we have to delete inFrame first
        av_frame_unref(inFrame);
        av_frame_move_ref(inFrame, outFrame);
    } catch (const std::runtime_error& e) {
        fail(false);
        RING_ERR("%s", e.what());
        return false;
    }

    succeed();
    return true;
}
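The two-argument extractData() call above hides the GPU-to-host copy; in FFmpeg terms that step typically boils down to av_hwframe_transfer_data(), roughly as sketched here. The wrapper name is an assumption, not Ring's actual code.

extern "C" {
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
}

// Plausible core of the GPU-to-host transfer: copies the hardware-backed
// planes of hwFrame into freshly allocated system-memory planes of swFrame.
// swFrame->format may be set beforehand to request a specific software format.
static int transferToHost(AVFrame *hwFrame, AVFrame *swFrame)
{
    return av_hwframe_transfer_data(swFrame, hwFrame, 0);
}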