Example #1
0
void VideoFilter::setVideoSource(VideoSource* source)
{
	if(source == m_source)
		return;
	
	if(m_source)
	{
		disconnectVideoSource();
	}
	
	m_source = source;
	
	if(m_source)
	{
		connect(m_source, SIGNAL(frameReady()), this, SLOT(frameAvailable()));
		connect(m_source, SIGNAL(destroyed()),  this, SLOT(disconnectVideoSource()));
		m_source->registerConsumer(this);
		
		// pull in the first frame
		frameAvailable();
	}
	else
	{
		//qDebug() << "VideoFilter::setVideoSource(): "<<(QObject*)this<<" Source is NULL";
	}
}
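A minimal companion sketch of the disconnectVideoSource() slot implied by the connects above; the release() call is an assumed counterpart to registerConsumer() and the real class may differ.
void VideoFilter::disconnectVideoSource()
{
	// Hypothetical sketch, not the project's actual slot: undo the wiring
	// made in setVideoSource()
	if(!m_source)
		return;

	disconnect(m_source, 0, this, 0); // drops both frameReady() and destroyed()
	m_source->release(this);          // assumption: inverse of registerConsumer()
	m_source = 0;
}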
Example #2
0
bool QCustomVideoSurface::present(const QVideoFrame &frame){

    if(frame.isValid()) {
        QVideoFrame cloneFrame(frame); // shallow copy (QVideoFrame is explicitly shared) so we can map and access the pixel data
        cloneFrame.map(QAbstractVideoBuffer::ReadOnly);
#ifdef Q_OS_ANDROID
        cv::Mat mat(cloneFrame.height(), cloneFrame.width(), CV_8UC4, (void *)cloneFrame.bits());
        emit frameAvailable(mat, QImage::Format_RGBX8888);
#else
        QImage::Format format = QVideoFrame::imageFormatFromPixelFormat(cloneFrame.pixelFormat());
        int cvtype = CV_8UC1;
        switch(format) {
            case QImage::Format_RGB32:
                cvtype = CV_8UC4;
                break;
            case QImage::Format_RGB888:
                cvtype = CV_8UC3;
                break;
            case QImage::Format_Invalid:
                qWarning("QCustomVideoSurface Warning: image format is QImage::Format_Invalid");
                return false;
            default:
                // TODO: add new formats as they are encountered
                qWarning("QCustomVideoSurface Warning: image format is not implemented (QImage::Format %d)", format);
                return false;
        }
        cv::Mat mat(cloneFrame.height(), cloneFrame.width(), cvtype, (void *)cloneFrame.bits());
        cv::flip(mat,mat,0);
        emit frameAvailable(mat, format);
#endif
        cloneFrame.unmap();
        return true;
    }
    return false;
}
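A hypothetical consumer for the surface above (FrameProcessor and processFrame are assumed names). Note that the emitted cv::Mat wraps the mapped frame memory, which present() unmaps right after the emit, so the connection must be direct or the receiver must work on mat.clone().
Q_DECLARE_METATYPE(cv::Mat) // file scope; needed if the Mat ever crosses threads

void wireUp(QCustomVideoSurface* surface, FrameProcessor* processor)
{
    qRegisterMetaType<cv::Mat>("cv::Mat");
    QObject::connect(surface, &QCustomVideoSurface::frameAvailable,
                     processor, &FrameProcessor::processFrame,
                     Qt::DirectConnection); // the Mat is only valid during the emit
}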
Example #3
0
bool SensorfwRotationSensor::doConnect()
{
    Q_ASSERT(m_sensorInterface);
    if (m_bufferSize==1)
       return QObject::connect(m_sensorInterface, SIGNAL(dataAvailable(XYZ)), this, SLOT(slotDataAvailable(XYZ)));
    return QObject::connect(m_sensorInterface, SIGNAL(frameAvailable(QVector<XYZ>)),this, SLOT(slotFrameAvailable(QVector<XYZ>)));
}
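For context, the buffered branch typically just drains the vector through the single-sample path; a sketch under that assumption:
void SensorfwRotationSensor::slotFrameAvailable(const QVector<XYZ>& frame)
{
    // Sketch: replay each buffered sample through the handler used for
    // the dataAvailable(XYZ) case
    for (int i = 0; i < frame.size(); ++i)
        slotDataAvailable(frame.at(i));
}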
Example #4
0
bool QAndroidTextureVideoOutput::initSurfaceTexture()
{
    if (m_surfaceTexture)
        return true;

    if (!m_surface)
        return false;

    if (!m_surfaceTextureCanAttachToContext) {
        // if we have an OpenGL context in the current thread, create a texture. Otherwise, wait
        // for the GL render thread to call us back to do it.
        if (QOpenGLContext::currentContext()) {
            glGenTextures(1, &m_externalTex);
            m_glDeleter.reset(new OpenGLResourcesDeleter);
        } else if (!m_externalTex) {
            return false;
        }
    }

    QMutexLocker locker(&m_mutex);

    m_surfaceTexture = new AndroidSurfaceTexture(m_externalTex);

    if (m_surfaceTexture->surfaceTexture() != 0) {
        connect(m_surfaceTexture, SIGNAL(frameAvailable()), this, SLOT(onFrameAvailable()));
    } else {
        delete m_surfaceTexture;
        m_surfaceTexture = 0;
        if (!m_glDeleter.isNull())
            m_glDeleter->deleteTexture(m_externalTex);
        m_externalTex = 0;
    }

    return m_surfaceTexture != 0;
}
Example #5
0
bool QAndroidVideoRendererControl::initSurfaceTexture()
{
    if (m_surfaceTexture)
        return true;

    if (!m_surface)
        return false;

    // if we have an OpenGL context in the current thread, create a texture. Otherwise, wait
    // for the GL render thread to call us back to do it.
    if (QOpenGLContext::currentContext()) {
        glGenTextures(1, &m_externalTex);
        m_glDeleter = new OpenGLResourcesDeleter;
        m_glDeleter->setTexture(m_externalTex);
    } else if (!m_externalTex) {
        return false;
    }

    m_surfaceTexture = new AndroidSurfaceTexture(m_externalTex);

    if (m_surfaceTexture->surfaceTexture() != 0) {
        connect(m_surfaceTexture, SIGNAL(frameAvailable()), this, SLOT(onFrameAvailable()));
    } else {
        delete m_surfaceTexture;
        m_surfaceTexture = 0;
        m_glDeleter->deleteLater();
        m_externalTex = 0;
        m_glDeleter = 0;
    }

    return m_surfaceTexture != 0;
}
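What the connected onFrameAvailable() slot might do, sketched with assumptions: frameAvailable() only reports that the Android SurfaceTexture queued a new image, and the texture must be latched on the thread owning the GL context, so the slot merely schedules that work (renderFrameToSurface is a hypothetical method).
void QAndroidVideoRendererControl::onFrameAvailable()
{
    if (!m_surface || !m_surfaceTexture)
        return;

    // assumption: renderFrameToSurface() runs on the GL thread, calls
    // m_surfaceTexture->updateTexImage() and presents a QVideoFrame
    QMetaObject::invokeMethod(this, "renderFrameToSurface", Qt::QueuedConnection);
}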
Example #6
0
File: audio.cpp Project: Pik-9/qTox
/**
 * @brief Called on the captureTimer events to capture audio
 */
void Audio::doCapture()
{
    QMutexLocker lock(&audioLock);

    if (!alInDev || !inSubscriptions)
        return;

    ALint curSamples = 0;
    alcGetIntegerv(alInDev, ALC_CAPTURE_SAMPLES, sizeof(curSamples), &curSamples);
    if (curSamples < AUDIO_FRAME_SAMPLE_COUNT)
        return;

    int16_t buf[AUDIO_FRAME_SAMPLE_COUNT * AUDIO_CHANNELS];
    alcCaptureSamples(alInDev, buf, AUDIO_FRAME_SAMPLE_COUNT);

    for (quint32 i = 0; i < AUDIO_FRAME_SAMPLE_COUNT * AUDIO_CHANNELS; ++i)
    {
        // gain amplification with clipping to 16-bit boundaries
        int ampPCM = qBound<int>(std::numeric_limits<int16_t>::min(),
                                 qRound(buf[i] * d->inputGainFactor()),
                                 std::numeric_limits<int16_t>::max());

        buf[i] = static_cast<int16_t>(ampPCM);
    }

    emit frameAvailable(buf, AUDIO_FRAME_SAMPLE_COUNT, AUDIO_CHANNELS, AUDIO_SAMPLE_RATE);
}
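A hypothetical receiver for the signal above (AudioSink is an assumed class). Since buf is a stack array, the emitted pointer is only valid during a direct emit, so any receiver has to copy the samples before returning.
void AudioSink::onFrameAvailable(const int16_t* pcm, quint32 samples,
                                 quint8 channels, quint32 sampleRate)
{
    Q_UNUSED(sampleRate)
    // take ownership of the data before the emitter's stack buffer goes away
    std::vector<int16_t> owned(pcm, pcm + samples * channels);
    // ... hand `owned` to the encoder / playback queue
}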
Example #7
0
bool SensorfwMagnetometer::doConnect()
{
    Q_ASSERT(m_sensorInterface);
    if (m_bufferSize==1)
        return QObject::connect(m_sensorInterface, SIGNAL(dataAvailable(MagneticField)),
                                this, SLOT(slotDataAvailable(MagneticField)));
    return QObject::connect(m_sensorInterface, SIGNAL(frameAvailable(QVector<MagneticField>)),
                            this, SLOT(slotFrameAvailable(QVector<MagneticField>)));
}
Example #8
0
MainWindow::MainWindow(Ffmpeg* grabber)
  : grabber_(grabber), button_mapper_(this) {
  ui_.setupUi(this);
  connect(grabber_, SIGNAL(frameAvailable()), SLOT(frameAvailable()));
  connect(qApp, SIGNAL(lastWindowClosed()), grabber_, SLOT(stop()));

  connect(ui_.green_button_, SIGNAL(clicked()), &button_mapper_, SLOT(map()));
  button_mapper_.setMapping(ui_.green_button_, MyLabel::Green);
  connect(ui_.red_button_, SIGNAL(clicked()), &button_mapper_, SLOT(map()));
  button_mapper_.setMapping(ui_.red_button_, MyLabel::Red);
  connect(ui_.yellow_button_, SIGNAL(clicked()), &button_mapper_, SLOT(map()));
  button_mapper_.setMapping(ui_.yellow_button_, MyLabel::Yellow);
  connect(ui_.blue_button_, SIGNAL(clicked()), &button_mapper_, SLOT(map()));
  button_mapper_.setMapping(ui_.blue_button_, MyLabel::Blue);
  connect(ui_.orange_button_, SIGNAL(clicked()), &button_mapper_, SLOT(map()));
  button_mapper_.setMapping(ui_.orange_button_, MyLabel::Orange);

  connect(&button_mapper_, SIGNAL(mapped(int)), ui_.video_label_, SLOT(setBox(int)));
}
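For reference, the same wiring without QSignalMapper (deprecated since Qt 5.10), assuming the buttons are QPushButtons; these lines would replace the mapper block inside the constructor:
  // each lambda carries its color constant straight to setBox()
  connect(ui_.green_button_, &QPushButton::clicked,
          ui_.video_label_, [this] { ui_.video_label_->setBox(MyLabel::Green); });
  connect(ui_.red_button_, &QPushButton::clicked,
          ui_.video_label_, [this] { ui_.video_label_->setBox(MyLabel::Red); });
  // ... likewise for the yellow, blue and orange buttons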
Example #9
0
void PhVideoDecoder::frameToRgb(AVFrame *avFrame, PhVideoBuffer *buffer)
{
	int frameHeight = avFrame->height;
	if(_deinterlace)
		frameHeight = avFrame->height / 2;

	// The following formats are deprecated (see https://libav.org/doxygen/master/pixfmt_8h.html#a9a8e335cf3be472042bc9f0cf80cd4c5),
	// so we replace them with the new ones recommended by libav
	// in order to get rid of the warnings
	AVPixelFormat pixFormat;
	switch (_videoStream->codec->pix_fmt) {
	case AV_PIX_FMT_YUVJ420P:
		pixFormat = AV_PIX_FMT_YUV420P;
		break;
	case AV_PIX_FMT_YUVJ422P:
		pixFormat = AV_PIX_FMT_YUV422P;
		break;
	case AV_PIX_FMT_YUVJ444P:
		pixFormat = AV_PIX_FMT_YUV444P;
		break;
	case AV_PIX_FMT_YUVJ440P:
		pixFormat = AV_PIX_FMT_YUV440P;
		break;
	default:
		pixFormat = _videoStream->codec->pix_fmt;
		break;
	}

	/* Note: we output the frames in AV_PIX_FMT_BGRA rather than AV_PIX_FMT_RGB24,
	 * because this format is native to most video cards and will avoid a conversion
	 * in the video driver */
	/* sws_getCachedContext will check if the context is valid for the given parameters. If the context is not valid,
	 * it will be freed and a new one will be allocated. */
	// Source dimensions come from the decoded frame; the destination keeps the
	// frame width but may use half the height when deinterlacing.
	_swsContext = sws_getCachedContext(_swsContext, avFrame->width, avFrame->height, pixFormat,
	                                   avFrame->width, frameHeight, AV_PIX_FMT_BGRA,
	                                   SWS_POINT, NULL, NULL, NULL);

	int linesize = avFrame->width * 4;
	uint8_t *rgb = buffer->rgb();
	if (0 <= sws_scale(_swsContext, (const uint8_t * const *) avFrame->data,
	                   avFrame->linesize, 0, avFrame->height, &rgb,
	                   &linesize)) {

		PhFrame frame = AVTimestamp_to_PhFrame(av_frame_get_best_effort_timestamp(avFrame));

		buffer->setFrame(frame);
		buffer->setWidth(avFrame->width);
		buffer->setHeight(frameHeight);

		// tell the video engine that we have finished decoding!
		emit frameAvailable(buffer);
	}
}
Example #10
0
void VideoWidget::initialize(QStatusBar *bar, SFMViewer *sfmViewer, SceneModel * sceneModel){

    // Connect surface to our slot
    connect(surface, SIGNAL(frameAvailable()), this, SLOT(frameReady()));

    processor = new ProcessingThread(this);
    connect(processor, SIGNAL(frameProcessed()), this, SLOT(onFrameProcessed()));
    connect(processor, SIGNAL(queueFull()), this, SLOT(onThreadCongested()));

    processor->initialize(bar, sceneModel);
    processor->setUpdateListener(sfmViewer);

    processor->start();
}
Example #11
0
/**
 * @brief Makes a copy of the vpx_image_t and emits it as a new VideoFrame.
 * @param vpxframe Frame to copy.
 */
void CoreVideoSource::pushFrame(const vpx_image_t* vpxframe)
{
    if (stopped)
        return;

    QMutexLocker locker(&biglock);

    std::shared_ptr<VideoFrame> vframe;
    int width = vpxframe->d_w;
    int height = vpxframe->d_h;

    if (subscribers <= 0)
        return;

    AVFrame* avframe = av_frame_alloc();
    if (!avframe)
        return;

    avframe->width = width;
    avframe->height = height;
    avframe->format = AV_PIX_FMT_YUV420P;

    int bufSize = av_image_alloc(avframe->data, avframe->linesize,
                                 width, height,
                                 static_cast<AVPixelFormat>(AV_PIX_FMT_YUV420P), VideoFrame::dataAlignment);

    if(bufSize < 0){
        av_frame_free(&avframe);
        return;
    }

    for (int i = 0; i < 3; ++i)
    {
        int dstStride = avframe->linesize[i];
        int srcStride = vpxframe->stride[i];
        int minStride = std::min(dstStride, srcStride);
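        // luma plane (i == 0) spans the full height; the U and V planes
        // of YUV420P are half height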
        int size = (i == 0) ? height : height / 2;

        for (int j = 0; j < size; ++j)
        {
            uint8_t* dst = avframe->data[i] + dstStride * j;
            uint8_t* src = vpxframe->planes[i] + srcStride * j;
            memcpy(dst, src, minStride);
        }
    }

    vframe = std::make_shared<VideoFrame>(id, avframe, true);
    emit frameAvailable(vframe);
}
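How a subscriber might attach to the source above, sketched under assumptions (sink and processFrame are assumed names): the shared_ptr keeps the copied frame alive across a queued signal hop, so the type needs a one-time metatype registration.
void subscribe(CoreVideoSource* source, QObject* sink)
{
    qRegisterMetaType<std::shared_ptr<VideoFrame>>("std::shared_ptr<VideoFrame>");
    QObject::connect(source, &CoreVideoSource::frameAvailable,
                     sink, [](std::shared_ptr<VideoFrame> frame) {
                         // the copied buffer is freed when the last reference drops
                         processFrame(frame);
                     });
}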
Example #12
0
bool CameraFrameGrabber::present(const QVideoFrame& frame) {
    if(ready) {
        beNotReady();
        if (frame.isValid()) {
            QVideoFrame cloneFrame(frame);
            cloneFrame.map(QAbstractVideoBuffer::ReadOnly);
            QImage image(cloneFrame.bits(), cloneFrame.width(), cloneFrame.height(),
                         cloneFrame.bytesPerLine(),
                         QVideoFrame::imageFormatFromPixelFormat(cloneFrame.pixelFormat()));
            // deep-copy before unmapping: the QImage above only wraps the
            // mapped buffer, which becomes invalid after unmap()
            emit frameAvailable(image.copy());
            cloneFrame.unmap();
            return true;
        }
        return false;
    }
    return true;
}
Example #13
0
bool VideoSurface::present(const QVideoFrame &frame)
{
    if (frame.isValid())
    {
        QVideoFrame videoFrame(frame);
        if( videoFrame.map(QAbstractVideoBuffer::ReadOnly) )
        {
            // assumes the source frame is already RGB888; clamp the copy so a
            // mismatched format or stride cannot overrun the destination
            lastFrame = QImage(videoFrame.width(), videoFrame.height(), QImage::Format_RGB888);
            memcpy(lastFrame.bits(), videoFrame.bits(),
                   qMin(videoFrame.mappedBytes(), lastFrame.byteCount()));

            videoFrame.unmap();

            // Use thread for processing
            emit frameAvailable();
            return true;
        }
    }
    return false;
}
Example #14
0
void AbstractImageGrabber::grab()
{
    QImage frame; // stores the grabbed image
    QEventLoop latencyLoop;
    QElapsedTimer timer;

    if (!m_timer) {
        timer.start();
    }

    m_prevPts = -1;
    int pts = -1;

    Q_FOREVER {
        frame = captureFrame();
        
        setGrabbedFrameCount(grabbedFrameCount() + 1);
        
        pts = m_timer ? m_timer->elapsed() : timer.elapsed();
        if (m_prevPts != pts) {
            m_prevPts = pts;
            Q_EMIT frameAvailable(frame, pts);
        }

        // check whether we must stop grabbing
        if (isStopRequest() || isPauseRequest())
            break;

        // wait for the latency (in milliseconds) set by the user
        QTimer::singleShot(latency(), &latencyLoop, SLOT(quit()));
        latencyLoop.exec();
    }

    setState(isStopRequest() ? AbstractGrabber::StoppedState : AbstractGrabber::SuspendedState);

    if (isStopRequest())
        m_prevPts = -1;

    //reset stop and pause flags
    setStopRequest(false);
    setPauseRequest(false);
}
Example #15
0
/**
 * @brief Blocking. Decodes video stream and emits new frames.
 * @note Designed to run in its own thread.
 */
void CameraSource::stream()
{
    auto streamLoop = [=]() {
        AVFrame* frame = av_frame_alloc();
        if (!frame)
            return;

        AVPacket packet;
        if (av_read_frame(device->context, &packet) < 0) {
            av_frame_free(&frame);
            return;
        }

        // Only keep packets from the right stream
        if (packet.stream_index == videoStreamIndex) {
            // Decode video frame
            int frameFinished;
            avcodec_decode_video2(cctx, frame, &frameFinished, &packet);
            if (frameFinished) {
                // the VideoFrame takes ownership of the decoded AVFrame
                VideoFrame* vframe = new VideoFrame(id, frame);
                emit frameAvailable(vframe->trackFrame());
                frame = nullptr;
            }
        }

        // free the frame if it was not handed off to a VideoFrame
        if (frame)
            av_frame_free(&frame);

        // Free the packet that was allocated by av_read_frame
        av_packet_unref(&packet);
    };

    forever
    {
        QReadLocker locker{&streamMutex};

        // Exit if device is no longer valid
        if (!device) {
            break;
        }

        streamLoop();
    }
}
Example #16
0
void VideoCapture::start()
{
    emit frameAvailable(frame); //TODO: no copy
    if (!frame.isValid() || !frame.constBits(0)) { // if frame is always cloned, then size is at least width*height
        qDebug("Captured frame from hardware decoder surface.");
    }
    CaptureTask *task = new CaptureTask(this);
    // copy the properties so the task is not affected even if the VideoCapture properties change
    task->save = autoSave();
    task->original_fmt = original_fmt;
    task->quality = qual;
    task->dir = dir;
    task->name = name;
    task->format = fmt;
    task->qfmt = qfmt;
    task->frame = frame; // copy here so it's safe in the capture thread, because start() is called immediately after setVideoFrame
    if (isAsync()) {
        videoCaptureThreadPool()->start(task);
    } else {
        task->run();
        delete task;
    }
}