Example #1
void AVThread::resetState()
{
    DPTR_D(AVThread);
    pause(false);
    if (d.writer)
        d.writer->pause(false); //stop waiting; important when replaying
    d.stop = false;
    d.demux_end = false;
    d.packets.setBlocking(true);
    d.packets.clear();
    //not necessary; the context is managed by filters
    d.filter_context = 0;
}
Example #2
bool AVDecoder::open()
{
    DPTR_D(AVDecoder);
    if (!d.codec_ctx) {
        qWarning("FFmpeg codec context not ready");
        return false;
    }
    AVCodec *codec = 0;
    if (!d.codec_name.isEmpty()) {
        codec = avcodec_find_decoder_by_name(d.codec_name.toUtf8().constData());
    } else {
        codec = avcodec_find_decoder(d.codec_ctx->codec_id);
    }
    if (!codec) {
        QString es(tr("No codec could be found for '%1'"));
        if (d.codec_name.isEmpty()) {
            es = es.arg(avcodec_get_name(d.codec_ctx->codec_id));
        } else {
            es = es.arg(d.codec_name);
        }
        qWarning() << es;
        AVError::ErrorCode ec(AVError::CodecError);
        switch (d.codec_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ec = AVError::VideoCodecNotFound;
            break;
        case AVMEDIA_TYPE_AUDIO:
            ec = AVError::AudioCodecNotFound;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ec = AVError::SubtitleCodecNotFound;
            break;
        default:
            break;
        }
        emit error(AVError(ec, es));
        return false;
    }
    // hwa extra init can be here
    if (!d.open()) {
        d.close();
        return false;
    }
    d.applyOptionsForDict();
    int ret = avcodec_open2(d.codec_ctx, codec, d.options.isEmpty() ? NULL : &d.dict);
    if (ret < 0) {
        qWarning("open video codec failed: %s", av_err2str(ret));
        return false;
    }
    d.is_open = true;
    return true;
}
Example #3
void OSDFilterGL::process()
{
    if (mShowType == ShowNone)
        return;
    DPTR_D(Filter);
    GLFilterContext *ctx = static_cast<GLFilterContext*>(d.context);
    //TODO: render off screen
#if QTAV_HAVE(GL)
    QGLWidget *glw = static_cast<QGLWidget*>(ctx->paint_device);
    if (!glw)
        return;
    glw->renderText(ctx->rect.x(), ctx->rect.y(), text(d.statistics), font());
#endif //QTAV_HAVE(GL)
}
Example #4
void GLWidgetRenderer::resizeGL(int w, int h)
{
    Q_UNUSED(w);
    Q_UNUSED(h);
    DPTR_D(GLWidgetRenderer);
    qDebug("%s @%d %dx%d", __FUNCTION__, __LINE__, d.out_rect.width(), d.out_rect.height());
    //TODO: if whole widget as viewport, we can set rect by glVertex, thus paint logic is the same as others
    glViewport(d.out_rect.x(), d.out_rect.y(), d.out_rect.width(), d.out_rect.height());
    // reset projection and modelview matrices to identity (fixed-function pipeline)
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}
Example #5
bool AudioDecoder::prepare()
{
    DPTR_D(AudioDecoder);
    if (!d.codec_ctx)
        return false;
    if (!d.resampler)
        return true;
    d.resampler->setInChannelLayout(d.codec_ctx->channel_layout);
    d.resampler->setInChannels(d.codec_ctx->channels);
    d.resampler->setInSampleFormat(d.codec_ctx->sample_fmt);
    d.resampler->setInSampleRate(d.codec_ctx->sample_rate);
    d.resampler->prepare();
    return true;
}
Example #6
void OpenGLRendererBase::onInitializeGL()
{
    DPTR_D(OpenGLRendererBase);
    //makeCurrent();
    QOpenGLContext *ctx = const_cast<QOpenGLContext*>(QOpenGLContext::currentContext()); //qt4 returns const
    d.glv.setOpenGLContext(ctx);
    //const QByteArray extensions(reinterpret_cast<const char *>(glGetString(GL_EXTENSIONS)));
    bool hasGLSL = QOpenGLShaderProgram::hasOpenGLShaderPrograms();
    qDebug("OpenGL version: %d.%d  hasGLSL: %d", ctx->format().majorVersion(), ctx->format().minorVersion(), hasGLSL);
    initializeOpenGLFunctions();
    glEnable(GL_TEXTURE_2D);
    glDisable(GL_DEPTH_TEST);
    glClearColor(0.0, 0.0, 0.0, 0.0);
}
Example #7
void GLWidgetRenderer::resizeGL(int w, int h)
{
    DPTR_D(GLWidgetRenderer);
    qDebug("%s @%d %dx%d", __FUNCTION__, __LINE__, d.out_rect.width(), d.out_rect.height());
    glViewport(0, 0, w, h);
    d.setupAspectRatio();
#ifndef QT_OPENGL_ES_2
    // reset projection and modelview matrices to identity (fixed-function pipeline only)
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
#endif //QT_OPENGL_ES_2
}
Example #8
bool AudioOutputOpenSL::open()
{
    DPTR_D(AudioOutputOpenSL);
    d.available = false;
    resetStatus();
    SLDataLocator_BufferQueue bufferQueueLocator = { SL_DATALOCATOR_BUFFERQUEUE, (SLuint32)d.nb_buffers };
    SLDataFormat_PCM pcmFormat = audioFormatToSL(audioFormat());
    SLDataSource audioSrc = { &bufferQueueLocator, &pcmFormat };
    // OutputMix
    SL_RUN_CHECK_FALSE((*d.engine)->CreateOutputMix(d.engine, &d.m_outputMixObject, 0, NULL, NULL));
    SL_RUN_CHECK_FALSE((*d.m_outputMixObject)->Realize(d.m_outputMixObject, SL_BOOLEAN_FALSE));
    SLDataLocator_OutputMix outputMixLocator = { SL_DATALOCATOR_OUTPUTMIX, d.m_outputMixObject };
    SLDataSink audioSink = { &outputMixLocator, NULL };

    const SLInterfaceID ids[] = { SL_IID_BUFFERQUEUE};//, SL_IID_VOLUME };
    const SLboolean req[] = { SL_BOOLEAN_TRUE};//, SL_BOOLEAN_TRUE };
    // AudioPlayer
    SL_RUN_CHECK_FALSE((*d.engine)->CreateAudioPlayer(d.engine, &d.m_playerObject, &audioSrc, &audioSink, sizeof(ids)/sizeof(ids[0]), ids, req));
    SL_RUN_CHECK_FALSE((*d.m_playerObject)->Realize(d.m_playerObject, SL_BOOLEAN_FALSE));
    // Buffer interface
    SL_RUN_CHECK_FALSE((*d.m_playerObject)->GetInterface(d.m_playerObject, SL_IID_BUFFERQUEUE, &d.m_bufferQueueItf));
    SL_RUN_CHECK_FALSE((*d.m_bufferQueueItf)->RegisterCallback(d.m_bufferQueueItf, AudioOutputOpenSLPrivate::bufferQueueCallback, &d));
    // Play interface
    SL_RUN_CHECK_FALSE((*d.m_playerObject)->GetInterface(d.m_playerObject, SL_IID_PLAY, &d.m_playItf));
    // call when SL_PLAYSTATE_STOPPED
    SL_RUN_CHECK_FALSE((*d.m_playItf)->RegisterCallback(d.m_playItf, AudioOutputOpenSLPrivate::playCallback, this));

#if 0
    SLuint32 mask = SL_PLAYEVENT_HEADATEND;
    // TODO: what does this do?
    SL_RUN_CHECK_FALSE((*d.m_playItf)->SetPositionUpdatePeriod(d.m_playItf, 100));
    SL_RUN_CHECK_FALSE((*d.m_playItf)->SetCallbackEventsMask(d.m_playItf, mask));
#endif
    // Volume interface
    //SL_RUN_CHECK_FALSE((*d.m_playerObject)->GetInterface(d.m_playerObject, SL_IID_VOLUME, &d.m_volumeItf));

    const int kBufferSize = 1024*4;
    static char init_data[kBufferSize];
    memset(init_data, 0, sizeof(init_data));
    for (quint32 i = 0; i < d.nb_buffers; ++i) {
        SL_RUN_CHECK_FALSE((*d.m_bufferQueueItf)->Enqueue(d.m_bufferQueueItf, init_data, sizeof(init_data)));
        d.nextEnqueueInfo().data_size = sizeof(init_data);
        d.nextEnqueueInfo().timestamp = 0;
        d.bufferAdded();
        d.buffers_queued++;
    }
    SL_RUN_CHECK_FALSE((*d.m_playItf)->SetPlayState(d.m_playItf, SL_PLAYSTATE_PLAYING));
    d.available = true;
    return true;
}
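The open() above leans on the SL_RUN_CHECK_FALSE helper, which is not shown in this excerpt. A minimal sketch of what such a macro presumably looks like (the logging is a guess): it runs an OpenSL ES call and returns false from the enclosing function when the call fails.
#define SL_RUN_CHECK_FALSE(FUN) \
    do { \
        SLresult sl_ret = (FUN); \
        if (sl_ret != SL_RESULT_SUCCESS) { \
            qWarning("OpenSL error @%d " #FUN ": %lu", __LINE__, (unsigned long)sl_ret); \
            return false; \
        } \
    } while (0)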
Example #9
void QuickFBORenderer::drawFrame()
{
    DPTR_D(QuickFBORenderer);
    if (d.glctx != QOpenGLContext::currentContext()) {
        d.glctx = QOpenGLContext::currentContext();
        d.glv.setOpenGLContext(d.glctx);
    }
    if (!d.video_frame.isValid()) {
        d.glv.fill(QColor(0, 0, 0, 0));
        return;
    }
    //d.glv.setCurrentFrame(d.video_frame);
    d.glv.render(d.out_rect, normalizedROI(), d.matrix);
}
Example #10
bool AVDecoder::open()
{
    DPTR_D(AVDecoder);
    // codec_ctx can't be null for non-FFmpeg-based decoders because we may use its properties in those decoders
    if (!d.codec_ctx) {
        qWarning("FFmpeg codec context not ready");
        return false;
    }
    AVCodec *codec = 0;
    if (!d.codec_name.isEmpty()) {
        codec = avcodec_find_decoder_by_name(d.codec_name.toUtf8().constData());
    } else {
        codec = avcodec_find_decoder(d.codec_ctx->codec_id);
    }
    if (!codec) { // TODO: can be null for non-FFmpeg-based decoders
        QString es(tr("No codec could be found for '%1'"));
        if (d.codec_name.isEmpty()) {
            es = es.arg(avcodec_get_name(d.codec_ctx->codec_id));
        } else {
            es = es.arg(d.codec_name);
        }
        qWarning() << es;
        AVError::ErrorCode ec(AVError::CodecError);
        switch (d.codec_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ec = AVError::VideoCodecNotFound;
            break;
        case AVMEDIA_TYPE_AUDIO:
            ec = AVError::AudioCodecNotFound;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ec = AVError::SubtitleCodecNotFound;
            break;
        default:
            break;
        }
        emit error(AVError(ec, es));
        return false;
    }
    // hwa extra init can be here
    if (!d.open()) {
        d.close();
        return false;
    }
    // TODO: skip for non-FFmpeg-based decoders
    d.applyOptionsForDict();
    av_opt_set_int(d.codec_ctx, "refcounted_frames", d.enableFrameRef(), 0); // why does the dict sometimes have no effect?
    AV_ENSURE_OK(avcodec_open2(d.codec_ctx, codec, d.options.isEmpty() ? NULL : &d.dict), false);
    d.is_open = true;
    return true;
}
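AV_ENSURE_OK above is another helper that is not shown in this excerpt. A rough sketch of the presumed shape (the error reporting details are an assumption): it evaluates an FFmpeg call and, on error, returns the given value from the enclosing function.
#define AV_ENSURE_OK(FUN, ...) \
    do { \
        int av_ret = (FUN); \
        if (av_ret < 0) { \
            qWarning("Error @%d " #FUN ": %s", __LINE__, av_err2str(av_ret)); \
            return __VA_ARGS__; \
        } \
    } while (0)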
Example #11
VideoFrame VideoDecoderDXVA::frame()
{
    DPTR_D(VideoDecoderDXVA);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    if (d.width <= 0 || d.height <= 0 || !d.codec_ctx)
        return VideoFrame();

    class ScopedD3DLock {
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect)
            : mpD3D(d3d)
        {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    private:
        IDirect3DSurface9 *mpD3D;
    };

    IDirect3DSurface9 *d3d = (IDirect3DSurface9*)(uintptr_t)d.frame->data[3];
    //pitch >= desc.Width
    //D3DSURFACE_DESC desc;
    //d3d->GetDesc(&desc);
    D3DLOCKED_RECT lock;
    ScopedD3DLock d3d_lock(d3d, &lock); // must be a named object so the lock lives until return
    if (lock.Pitch == 0) {
        return VideoFrame();
    }

    const VideoFormat fmt = VideoFormat((int)D3dFindFormat(d.render)->avpixfmt);
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", d.render);
        return VideoFrame();
    }
    //YV12 needs U/V swapped, not IMC3?
    // IMC3 U/V pitch == Y pitch, but half of each U/V line is padding. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    uint8_t *src[] = { (uint8_t*)lock.pBits, 0, 0}; //compute chroma later
    const bool swap_uv = d.render == MAKEFOURCC('I','M','C','3');
    return copyToFrame(fmt, d.surface_height, src, pitch, swap_uv);
}
Example #12
void XVRenderer::drawFrame()
{
    DPTR_D(XVRenderer);
    QRect roi = realROI();
    if (!d.use_shm)
        XvPutImage(d.display, d.xv_port, winId(), d.gc, d.xv_image
                   , roi.x(), roi.y(), roi.width(), roi.height()
                   , d.out_rect.x(), d.out_rect.y(), d.out_rect.width(), d.out_rect.height());
    else
        XvShmPutImage(d.display, d.xv_port, winId(), d.gc, d.xv_image
                      , roi.x(), roi.y(), roi.width(), roi.height()
                      , d.out_rect.x(), d.out_rect.y(), d.out_rect.width(), d.out_rect.height()
                      , false /*true: send event*/);
}
Example #13
void AVDecoder::setOptions(const QVariantHash &dict)
{
    DPTR_D(AVDecoder);
    d.options = dict;
    if (d.dict) {
        av_dict_free(&d.dict);
        d.dict = 0; //already set to 0 by av_dict_free
    }
    if (dict.isEmpty())
        return;
    QVariantHash avcodec_dict(dict);
    if (dict.contains("avcodec"))
        avcodec_dict = dict.value("avcodec").toHash();
    // workaround for VideoDecoderFFmpeg: it currently does not call av_opt_set_xxx, so set the options here in the dict
    if (dict.contains("FFmpeg"))
        avcodec_dict.unite(dict.value("FFmpeg").toHash());
    QHashIterator<QString, QVariant> i(avcodec_dict);
    while (i.hasNext()) {
        i.next();
        switch (i.value().type()) {
        case QVariant::Hash: // for example "vaapi": {...}
            continue;
        case QVariant::Bool:
            // QVariant::toByteArray() gives "true"/"false", which avcodec does not recognize, so write 0/1
            av_dict_set(&d.dict, i.key().toLower().toUtf8().constData(), QByteArray::number(i.value().toBool()), 0);
            break;
        default:
            // avcodec key and value are in lower case
            av_dict_set(&d.dict, i.key().toLower().toUtf8().constData(), i.value().toByteArray().toLower().constData(), 0);
            break;
        }
        qDebug("avcodec option: %s=>%s", i.key().toUtf8().constData(), i.value().toByteArray().constData());
    }
    if (name() == "avcodec")
        return;
    QVariantHash property_dict(dict.value(name()).toHash());
    if (property_dict.isEmpty())
        property_dict = dict.value(name().toLower()).toHash();
    if (property_dict.isEmpty())
        return;
    i = QHashIterator<QString, QVariant>(property_dict);
    while (i.hasNext()) {
        i.next();
        if (i.value().type() == QVariant::Hash) // for example "vaapi": {...}
            continue;
        setProperty(i.key().toUtf8().constData(), i.value());
        qDebug("decoder property: %s=>%s", i.key().toUtf8().constData(), i.value().toByteArray().constData());
    }
}
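For context, a hypothetical caller might pass a hash shaped like the following sketch; "threads" and "skip_loop_filter" are ordinary avcodec options, and decoder stands for any AVDecoder instance.
QVariantHash avcodec_opt;
avcodec_opt["threads"] = "auto";          // forwarded to av_dict_set()
avcodec_opt["skip_loop_filter"] = "all";  // avcodec keys/values are lower case
QVariantHash opt;
opt["avcodec"] = avcodec_opt;             // nested hash routed to the codec dict
decoder->setOptions(opt);                 // other keys would become Qt properties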
Example #14
bool AVDecoder::close()
{
    if (!isOpen()) {
        return true;
    }
    DPTR_D(AVDecoder);
    d.is_open = false;
    // hwa extra finalize can be here
    d.close();
    // TODO: reset config?
    if (d.codec_ctx) {
        AV_ENSURE_OK(avcodec_close(d.codec_ctx), false);
    }
    return true;
}
Example #15
void OpenGLRendererBase::onPaintGL()
{
    DPTR_D(OpenGLRendererBase);
    /* we can mix gl and qpainter.
     * QPainter painter(this);
     * painter.beginNativePainting();
     * gl functions...
     * painter.endNativePainting();
     * swapBuffers();
     */
    handlePaintEvent();
    //context()->swapBuffers(this);
    if (d.painter && d.painter->isActive())
        d.painter->end();
}
Example #16
bool AVThread::installFilter(Filter *filter, bool lock)
{
    DPTR_D(AVThread);
    if (lock) {
        QMutexLocker locker(&d.mutex);
        if (d.filters.contains(filter))
            return false;
        d.filters.push_back(filter);
    } else {
        if (d.filters.contains(filter))
            return false;
        d.filters.push_back(filter);
    }
    return true;
}
Example #17
// TODO: shall we close decoder here?
void AVThread::stop()
{
    DPTR_D(AVThread);
    d.stop = true; //stop as soon as possible
    QMutexLocker locker(&d.mutex);
    Q_UNUSED(locker);
    d.packets.setBlocking(false); //stop blocking take()
    d.packets.clear();
    pause(false);
    if (d.writer)
        d.writer->pause(false); //stop waiting
    QMutexLocker lock(&d.ready_mutex);
    d.ready = false;
    //terminate();
}
Example #18
bool CCPortAudio::open()
{
    DPTR_D(CCPortAudio);
    d.outputParameters->channelCount = d.channels;
    PaError err = Pa_OpenStream(&d.stream, NULL, d.outputParameters, d.sample_rate, 0, paNoFlag, NULL, NULL);
    if (err == paNoError) {
        d.outputLatency = Pa_GetStreamInfo(d.stream)->outputLatency;
        d.available = true;
    } else {
        qWarning("Open portaudio stream error: %s", Pa_GetErrorText(err));
        d.available = false;
    }
    return err == paNoError;
}
Example #19
void VideoMaterial::getTextureCoordinates(const QRect& roi, float* t)
{
    DPTR_D(VideoMaterial);
    /*!
      tex coords: ROI/frameRect()*effective_tex_width_ratio
    */
    t[0] = (GLfloat)roi.x()*(GLfloat)d.effective_tex_width_ratio/(GLfloat)d.frame.width();
    t[1] = (GLfloat)roi.y()/(GLfloat)d.frame.height();
    t[2] = (GLfloat)(roi.x() + roi.width())*(GLfloat)d.effective_tex_width_ratio/(GLfloat)d.frame.width();
    t[3] = (GLfloat)roi.y()/(GLfloat)d.frame.height();
    t[4] = (GLfloat)(roi.x() + roi.width())*(GLfloat)d.effective_tex_width_ratio/(GLfloat)d.frame.width();
    t[5] = (GLfloat)(roi.y()+roi.height())/(GLfloat)d.frame.height();
    t[6] = (GLfloat)roi.x()*(GLfloat)d.effective_tex_width_ratio/(GLfloat)d.frame.width();
    t[7] = (GLfloat)(roi.y()+roi.height())/(GLfloat)d.frame.height();
}
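A quick numeric check of the mapping above, with made-up values: a 1920x1080 frame, effective_tex_width_ratio of 1.0 and an ROI covering the center quarter of the frame.
// tex_x = roi_x * effective_tex_width_ratio / frame_width, tex_y = roi_y / frame_height
const float frame_w = 1920.0f, frame_h = 1080.0f, ratio = 1.0f;
const QRect roi(480, 270, 960, 540);
const float x0 = (float)roi.x()*ratio/frame_w;                  // 0.25
const float y0 = (float)roi.y()/frame_h;                        // 0.25
const float x1 = (float)(roi.x() + roi.width())*ratio/frame_w;  // 0.75
const float y1 = (float)(roi.y() + roi.height())/frame_h;       // 0.75
// t[] then holds the quad corners (x0,y0), (x1,y0), (x1,y1), (x0,y1); a ratio
// below 1.0 (texture rows wider than the image) shrinks only the x coordinates.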
Example #20
void QQuickItemRenderer::setFillMode(FillMode mode)
{
    DPTR_D(QQuickItemRenderer);
    if (d.fill_mode == mode)
        return;
    d.fill_mode = mode;
    if (d.fill_mode == Stretch) {
        setOutAspectRatioMode(RendererAspectRatio);
    } else { //compute out_rect to fit the video aspect ratio, then compute again if cropping
        setOutAspectRatioMode(VideoAspectRatio);
    }
    //m_geometryDirty = true;
    //update();
    emit fillModeChanged(mode);
}
Example #21
bool XVRenderer::receiveFrame(const VideoFrame& frame)
{
    DPTR_D(XVRenderer);
    if (!d.prepareImage(d.src_width, d.src_height))
        return false;
    //TODO: if data is deep copied, the mutex can be avoided
    QMutexLocker locker(&d.img_mutex);
    Q_UNUSED(locker);
    d.video_frame = frame;
    d.video_frame.convertTo(VideoFormat::Format_YUV420P);
    d.xv_image->data = (char*)d.video_frame.bits();

    update();
    return true;
}
Example #22
void GLWidgetRenderer::paintGL()
{
    DPTR_D(GLWidgetRenderer);
    /* we can mix gl and qpainter.
     * QPainter painter(this);
     * painter.beginNativePainting();
     * gl functions...
     * painter.endNativePainting();
     * swapBuffers();
     */
    handlePaintEvent();
    swapBuffers();
    if (d.painter && d.painter->isActive())
        d.painter->end();
}
Example #23
bool VideoDecoderCUDA::decode(const QByteArray &encoded)
{
    if (!isAvailable())
        return false;
    DPTR_D(VideoDecoderCUDA);
    if (!d.parser) {
        qWarning("CUVID parser not ready");
        return false;
    }
    uint8_t *outBuf = 0;
    int outBufSize = 0;
    // h264_mp4toannexb_filter does not use the last parameter 'keyFrame', so just pass 0
    // return: 0: not changed, no outBuf allocated; >0: ok; <0: failure
    int filtered = av_bitstream_filter_filter(d.bitstream_filter_ctx, d.codec_ctx, NULL, &outBuf, &outBufSize
                                              , (const uint8_t*)encoded.constData(), encoded.size()
                                              , 0);//d.is_keyframe);
    //qDebug("%s @%d filtered=%d outBuf=%p, outBufSize=%d", __FUNCTION__, __LINE__, filtered, outBuf, outBufSize);
    if (filtered < 0) {
        qDebug("failed to filter: %s", av_err2str(filtered));
    }
    unsigned char *payload = outBuf;
    unsigned long payload_size = outBufSize;
#if 0 // see ffmpeg.c. FF_INPUT_BUFFER_PADDING_SIZE for alignment issue
    QByteArray data_with_pad;
    if (filtered > 0) {
        data_with_pad.resize(outBufSize + FF_INPUT_BUFFER_PADDING_SIZE);
        data_with_pad.fill(0);
        memcpy(data_with_pad.data(), outBuf, outBufSize);
        payload = (unsigned char*)data_with_pad.constData();
        payload_size = data_with_pad.size();
    }
#endif
    CUVIDSOURCEDATAPACKET cuvid_pkt;
    memset(&cuvid_pkt, 0, sizeof(CUVIDSOURCEDATAPACKET));
    cuvid_pkt.payload = payload;// (unsigned char *)encoded.constData();
    cuvid_pkt.payload_size = payload_size; //encoded.size();
    cuvid_pkt.flags = CUVID_PKT_TIMESTAMP;
    cuvid_pkt.timestamp = 0;// ?
    //TODO: fill NALU header for h264? https://devtalk.nvidia.com/default/topic/515571/what-the-data-format-34-cuvidparsevideodata-34-can-accept-/
    d.doParseVideoData(&cuvid_pkt);
    if (filtered > 0) {
        av_freep(&outBuf);
    }
    // callbacks run in the same thread as this one, so no queue is required?
    //qDebug("frame queue size on decode: %d", d.frame_queue.size());
    return !d.frame_queue.isEmpty();
    // video thread: if dec.hasFrame() keep pkt for the next loop and not decode, direct display the frame
}
Example #24
bool CCPortAudio::close()
{
    DPTR_D(CCPortAudio);
    PaError err = paNoError;
    if (!d.stream) {
        return true;
    }
    err = Pa_StopStream(d.stream);
    if (err != paNoError)
        qWarning("Stop portaudio stream error: %s", Pa_GetErrorText(err));
    err = Pa_CloseStream(d.stream);
    d.stream = NULL;
    if (err != paNoError)
        qWarning("Close portaudio stream error: %s", Pa_GetErrorText(err));
    return err == paNoError;
}
Example #25
void GraphicsItemRenderer::paint(QPainter *painter, const QStyleOptionGraphicsItem *option, QWidget *widget)
{
    Q_UNUSED(option);
    Q_UNUSED(widget);
    DPTR_D(GraphicsItemRenderer);
    d.painter = painter;
    QPainterFilterContext *ctx = static_cast<QPainterFilterContext*>(d.filter_context);
    if (ctx) {
        ctx->painter = d.painter;
    } else {
        qWarning("FilterContext not available!");
    }
    handlePaintEvent();
    d.painter = 0; //painter may not be available outside this function
    if (ctx)
        ctx->painter = 0;
}
Example #26
bool AudioOutputPortAudio::write(const QByteArray& data)
{
    DPTR_D(AudioOutputPortAudio);
    QMutexLocker lock(&d.mutex);
    Q_UNUSED(lock);
    if (!d.available)
        return false;
    if (Pa_IsStreamStopped(d.stream))
        Pa_StartStream(d.stream);
    PaError err = Pa_WriteStream(d.stream, data.constData(), data.size()/audioFormat().channels()/audioFormat().bytesPerSample());
    if (err == paUnanticipatedHostError) {
        qWarning("Write portaudio stream error: %s", Pa_GetErrorText(err));
        return false;
    }
    return true;
}
Example #27
bool QQuickItemRenderer::receiveFrame(const VideoFrame &frame)
{
    DPTR_D(QQuickItemRenderer);
    d.video_frame = frame;
    if (!isOpenGL()) {
        d.image = QImage((uchar*)frame.constBits(), frame.width(), frame.height(), frame.bytesPerLine(), frame.imageFormat());
        QRect r = realROI();
        if (r != QRect(0, 0, frame.width(), frame.height()))
            d.image = d.image.copy(r);
    }
    d.frame_changed = true;
//    update(); // why is update() slow here? because it is called from a different thread?
    //QMetaObject::invokeMethod(this, "update"); // slower than posting the event directly
    QCoreApplication::postEvent(this, new QEvent(QEvent::User));
    return true;
}
Example #28
void Direct2DRenderer::drawFrame()
{
    DPTR_D(Direct2DRenderer);
    D2D1_RECT_F out_rect = {
        (FLOAT)d.out_rect.left(),
        (FLOAT)d.out_rect.top(),
        (FLOAT)d.out_rect.right(),
        (FLOAT)d.out_rect.bottom()
    };
    //d.render_target->SetTransform
    d.render_target->DrawBitmap(d.bitmap
                                , &out_rect
                                , 1 //opacity
                                , D2D1_BITMAP_INTERPOLATION_MODE_LINEAR
                                , NULL);//&D2D1::RectF(0, 0, d.src_width, d.src_height));
}
Example #29
void AVDecoder::setOptions(const QVariantHash &dict)
{
    DPTR_D(AVDecoder);
    d.options = dict;
    // even if dict is empty we cannot return here: default options must still be applied to the AVCodecContext
    // apply to AVCodecContext
    d.applyOptionsForContext();
    /* set AVDecoder meta properties.
     * we do not check whether the property exists thus we can set dynamic properties.
     */
    if (dict.isEmpty())
        return;
    if (name() == "avcodec")
        return;
    QVariant opt;
    if (dict.contains(name()))
        opt = dict.value(name());
    else if (dict.contains(name().toLower()))
        opt = dict.value(name().toLower());
    else
        return; // TODO: set property if no name() key found?
    if (opt.type() == QVariant::Hash) {
        QVariantHash property_dict(opt.toHash());
        if (property_dict.isEmpty())
            return;
        QHashIterator<QString, QVariant> i(property_dict);
        while (i.hasNext()) {
            i.next();
            if (i.value().type() == QVariant::Hash) // for example "vaapi": {...}
                continue;
            setProperty(i.key().toUtf8().constData(), i.value());
            qDebug("decoder meta property: %s=>%s", i.key().toUtf8().constData(), i.value().toByteArray().constData());
        }
    } else if (opt.type() == QVariant::Map) {
        QVariantMap property_dict(opt.toMap());
        if (property_dict.isEmpty())
            return;
        QMapIterator<QString, QVariant> i(property_dict);
        while (i.hasNext()) {
            i.next();
            if (i.value().type() == QVariant::Map) // for example "vaapi": {...}
                continue;
            setProperty(i.key().toUtf8().constData(), i.value());
            qDebug("decoder meta property: %s=>%s", i.key().toUtf8().constData(), i.value().toByteArray().constData());
        }
    }
}
Example #30
//TODO: call open() again after the audio format changes?
bool AudioOutputPortAudio::open()
{
    DPTR_D(AudioOutputPortAudio);
    QMutexLocker lock(&d.mutex);
    Q_UNUSED(lock);
    d.outputParameters->sampleFormat = toPaSampleFormat(audioFormat().sampleFormat());
    d.outputParameters->channelCount = audioFormat().channels();
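    // framesPerBuffer = 0 below lets PortAudio choose a size (paFramesPerBufferUnspecified)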
    PaError err = Pa_OpenStream(&d.stream, NULL, d.outputParameters, audioFormat().sampleRate(), 0, paNoFlag, NULL, NULL);
    if (err == paNoError) {
        d.outputLatency = Pa_GetStreamInfo(d.stream)->outputLatency;
        d.available = true;
    } else {
        qWarning("Open portaudio stream error: %s", Pa_GetErrorText(err));
        d.available = false;
    }
    return err == paNoError;
}