void KviHttpRequest::slotSocketReadDataReady() { KVI_ASSERT(m_p->pSocket); int iBytes = m_p->pSocket->bytesAvailable(); if(iBytes <= 0) { // assume connection closed ? slotSocketDisconnected(); return; } // FIXME: Avoid double-buffering here! KviDataBuffer oBuffer(iBytes); int iRead = m_p->pSocket->read((char *)(oBuffer.data()),iBytes); if(iRead < iBytes) { // hum.... what here ? if(iRead < 1) { slotSocketDisconnected(); return; } // FIXME // well... otherwise just wait. // FIXME ? oBuffer.resize(iRead); } processData(&oBuffer); }
bool checkThumbnails(const QList<QImage> &thumbnails, const QList<QImage> &others, bool verbose) { bool success = true; if (thumbnails.size() != others.size()) { qDebug() << "Check failed: number of pages different" << thumbnails.size() << "!=" << others.size(); return false; } int i = 1; QList<QImage>::const_iterator it(thumbnails.constBegin()); QList<QImage>::const_iterator oIt(others.constBegin()); for (; it != thumbnails.constEnd(); ++it, ++oIt, ++i) { QByteArray ba; QBuffer buffer(&ba); buffer.open(QIODevice::WriteOnly); it->save(&buffer, "PNG"); QByteArray baCheck; QBuffer oBuffer(&baCheck); oBuffer.open(QIODevice::WriteOnly); oIt->save(&oBuffer, "PNG"); if (ba != baCheck) { qDebug() << "Check failed:" << "Page" << i << "differ"; success = false; } else if (verbose) { qDebug() << "Check successful:" << "Page" << i << "identical"; } } return success; }
// DirectShow sample-grabber callback (buffer mode): wraps the raw frame
// bytes in a QByteArray and emits them as a frameReady() signal.
//
// time:       sample start time in seconds (as delivered by DirectShow).
// buffer:     pointer to the frame data, valid only during this call.
// bufferSize: number of bytes in 'buffer'.
HRESULT FrameGrabber::BufferCB(double time, BYTE *buffer, long bufferSize)
{
    // Guard against a null or empty sample before wrapping it: the
    // QByteArray deep-copy constructor must not be fed a null pointer
    // with a non-zero length. Mirrors the validation done in SampleCB().
    if (!buffer || bufferSize < 1)
        return S_FALSE;

    // Deep-copy: the callback's buffer is only valid for this call.
    QByteArray oBuffer(reinterpret_cast<char *>(buffer), int(bufferSize));
    emit this->frameReady(time, oBuffer);

    return S_OK;
}
// Worker-thread loop: repeatedly reads audio frames from the capture
// device and emits them as AkAudioPackets on self->oStream() until
// m_readFramesLoop is cleared or the device stops delivering data.
void AudioDeviceElement::readFramesLoop(AudioDeviceElement *self)
{
#ifdef Q_OS_WIN32
    // Initialize the COM library in multithread mode.
    CoInitializeEx(NULL, COINIT_MULTITHREADED);
#endif

    QString device = self->m_device;
    AkAudioCaps caps(self->m_caps);
    qint64 streamId = Ak::id();
    AkFrac timeBase(1, caps.rate());

    if (self->m_audioDevice->init(device, caps)) {
        while (self->m_readFramesLoop) {
            if (self->m_pause) {
                QThread::msleep(PAUSE_TIMEOUT);

                continue;
            }

            int bufferSize = self->m_bufferSize;
            QByteArray buffer = self->m_audioDevice->read(bufferSize);

            // BUGFIX: an empty read used to 'return' here, which skipped
            // m_audioDevice->uninit() (and CoUninitialize() on Windows).
            // Break out of the loop instead so cleanup always runs.
            if (buffer.isEmpty())
                break;

            // Deep-copy to detach from the device's internal buffer.
            QByteArray oBuffer(buffer.size(), Qt::Uninitialized);
            memcpy(oBuffer.data(), buffer.constData(), size_t(buffer.size()));

            caps.samples() = bufferSize;
            AkAudioPacket packet(caps, oBuffer);

            qint64 pts = qint64(QTime::currentTime().msecsSinceStartOfDay()
                                / timeBase.value() / 1e3);

            packet.setPts(pts);
            packet.setTimeBase(timeBase);
            packet.setIndex(0);
            packet.setId(streamId);

            emit self->oStream(packet.toPacket());
        }

        self->m_audioDevice->uninit();
    }

#ifdef Q_OS_WIN32
    // Close COM library.
    CoUninitialize();
#endif
}
// DirectShow sample-grabber callback (sample mode): extracts the frame
// bytes from the media sample and emits them as a frameReady() signal.
HRESULT FrameGrabber::SampleCB(double time, IMediaSample *sample)
{
    BYTE *buffer = NULL;
    HRESULT hr = sample->GetPointer(&buffer);

    if (FAILED(hr) || !buffer)
        return S_FALSE;

    // BUGFIX: GetSize() reports the *allocated* size of the media sample
    // buffer; the payload can be smaller. GetActualDataLength() is the
    // number of bytes that actually contain frame data.
    LONG bufferSize = sample->GetActualDataLength();

    if (bufferSize < 1)
        return S_FALSE;

    // Deep-copy: the sample's buffer is owned by the allocator and only
    // guaranteed valid during this callback.
    QByteArray oBuffer(reinterpret_cast<char *>(buffer), int(bufferSize));
    emit this->frameReady(time, oBuffer);

    return S_OK;
}
void AudioInputElement::readFrame() { this->m_mutex.lock(); QByteArray buffer = this->m_audioDevice.read(this->m_bufferSize); this->m_mutex.unlock(); if (buffer.isEmpty()) return; QbBufferPtr oBuffer(new char[buffer.size()]); memcpy(oBuffer.data(), buffer.constData(), buffer.size()); QbCaps caps = this->m_caps; caps.setProperty("samples", this->m_bufferSize); QbPacket packet(caps, oBuffer, buffer.size()); qint64 pts = QTime::currentTime().msecsSinceStartOfDay() / this->m_timeBase.value(); packet.setPts(pts); packet.setTimeBase(this->m_timeBase); packet.setIndex(0); packet.setId(this->m_streamId); if (!this->m_threadedRead) { emit this->oStream(packet); return; } if (!this->m_threadStatus.isRunning()) { this->m_curPacket = packet; this->m_threadStatus = QtConcurrent::run(&this->m_threadPool, this->sendPacket, this, this->m_curPacket); } }
// Resample an audio packet to the requested output caps using
// libswresample. Returns an empty QbPacket on any failure.
QbPacket ConvertAudio::convert(const QbAudioPacket &packet, const QbCaps &oCaps)
{
    AkAudioCaps oAudioCaps(oCaps);

    // Input format, taken from the packet caps.
    int64_t iSampleLayout = channelLayouts->value(packet.caps().layout(), 0);
    AVSampleFormat iSampleFormat = sampleFormats->value(packet.caps().format(),
                                                        AV_SAMPLE_FMT_NONE);
    int iSampleRate = packet.caps().rate();
    int iNChannels = packet.caps().channels();
    int iNSamples = packet.caps().samples();

    // Output format, with stereo/float fallbacks for unknown values.
    int64_t oSampleLayout = channelLayouts->value(oAudioCaps.layout(),
                                                  AV_CH_LAYOUT_STEREO);
    AVSampleFormat oSampleFormat = sampleFormats->value(oAudioCaps.format(),
                                                        AV_SAMPLE_FMT_FLT);
    int oSampleRate = oAudioCaps.rate();
    int oNChannels = oAudioCaps.channels();

    this->m_resampleContext = swr_alloc_set_opts(this->m_resampleContext,
                                                 oSampleLayout,
                                                 oSampleFormat,
                                                 oSampleRate,
                                                 iSampleLayout,
                                                 iSampleFormat,
                                                 iSampleRate,
                                                 0,
                                                 NULL);

    if (!this->m_resampleContext)
        return QbPacket();

    if (!swr_is_initialized(this->m_resampleContext))
        if (swr_init(this->m_resampleContext) < 0)
            return QbPacket();

    // Create input audio frame.
    // BUGFIX: this frame used to be 'static', sharing mutable state across
    // calls (and any threads using this converter). A stack-local frame is
    // zero-initialized on every call instead.
    AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    if (av_samples_fill_arrays(iFrame.data,
                               iFrame.linesize,
                               (const uint8_t *) packet.buffer().data(),
                               iNChannels,
                               iNSamples,
                               iSampleFormat,
                               1) < 0)
        return QbPacket();

    iFrame.channels = iNChannels;
    iFrame.channel_layout = iSampleLayout;
    iFrame.format = iSampleFormat;
    iFrame.sample_rate = iSampleRate;
    iFrame.nb_samples = iNSamples;
    iFrame.pts = iFrame.pkt_pts = packet.pts();

    // Create output audio packet: output sample count covers the
    // resampler's internal delay plus a small safety margin.
    int oNSamples = swr_get_delay(this->m_resampleContext, oSampleRate)
                    + iFrame.nb_samples * (int64_t) oSampleRate / iSampleRate
                    + 3;

    int oLineSize;
    int oBufferSize = av_samples_get_buffer_size(&oLineSize,
                                                 oNChannels,
                                                 oNSamples,
                                                 oSampleFormat,
                                                 1);

    QByteArray oBuffer(oBufferSize, Qt::Uninitialized);

    int oNPlanes = av_sample_fmt_is_planar(oSampleFormat)? oNChannels: 1;
    QVector<uint8_t *> oData(oNPlanes);

    if (av_samples_fill_arrays(&oData.data()[0],
                               &oLineSize,
                               (const uint8_t *) oBuffer.data(),
                               oNChannels,
                               oNSamples,
                               oSampleFormat,
                               1) < 0)
        return QbPacket();

    int64_t oPts = swr_next_pts(this->m_resampleContext, iFrame.pts);

    // Convert to destination format.
    int outputSamples = swr_convert(this->m_resampleContext,
                                    oData.data(),
                                    oNSamples,
                                    (const uint8_t **) iFrame.data,
                                    iFrame.nb_samples);

    if (outputSamples < 1)
        return QbPacket();

    // Shrink to the bytes actually produced and hand a deep copy to the
    // packet.
    // NOTE(review): QbBufferPtr wraps a new[]'d array; confirm its deleter
    // uses delete[].
    oBufferSize = oBufferSize * outputSamples / oNSamples;
    QbBufferPtr buffer(new char[oBufferSize]);
    memcpy(buffer.data(), oBuffer.data(), oBufferSize);

    QbAudioPacket oAudioPacket;
    oAudioPacket.caps() = oAudioCaps;
    oAudioPacket.caps().samples() = outputSamples;
    oAudioPacket.buffer() = buffer;
    oAudioPacket.bufferSize() = oBufferSize;
    oAudioPacket.pts() = oPts;
    oAudioPacket.timeBase() = QbFrac(1, oAudioCaps.rate());
    oAudioPacket.index() = packet.index();
    oAudioPacket.id() = packet.id();

    return oAudioPacket.toPacket();
}
// Grab one video frame from the DirectShow graph, applying any pending
// image/camera control changes to the source filter first. Returns an
// empty AkPacket when no frame is available.
AkPacket CaptureDShow::readFrame()
{
    IBaseFilter *source = NULL;
    this->m_graph->FindFilterByName(SOURCE_FILTER_NAME, &source);

    if (source) {
        // Push any pending image-control changes down to the source filter.
        this->m_controlsMutex.lock();
        QVariantMap imageControls = this->controlStatus(this->m_globalImageControls);
        this->m_controlsMutex.unlock();

        if (this->m_localImageControls != imageControls) {
            QVariantMap controls = this->mapDiff(this->m_localImageControls,
                                                 imageControls);
            this->setImageControls(source, controls);
            this->m_localImageControls = imageControls;
        }

        // Same for the camera controls.
        this->m_controlsMutex.lock();
        QVariantMap cameraControls = this->controlStatus(this->m_globalCameraControls);
        this->m_controlsMutex.unlock();

        if (this->m_localCameraControls != cameraControls) {
            QVariantMap controls = this->mapDiff(this->m_localCameraControls,
                                                 cameraControls);
            this->setCameraControls(source, controls);
            this->m_localCameraControls = cameraControls;
        }

        source->Release();
    }

    AM_MEDIA_TYPE mediaType;
    ZeroMemory(&mediaType, sizeof(AM_MEDIA_TYPE));
    this->m_grabber->GetConnectedMediaType(&mediaType);
    AkCaps caps = this->capsFromMediaType(&mediaType);

    AkPacket packet;

    timeval timestamp;
    // BUGFIX: '&timestamp' had been corrupted to '×tamp' (a mangled
    // HTML '&times;' entity), which is not valid code.
    gettimeofday(&timestamp, NULL);

    qint64 pts = qint64((timestamp.tv_sec + 1e-6 * timestamp.tv_usec)
                        * this->m_timeBase.invert().value());

    if (this->m_ioMethod != IoMethodDirectRead) {
        // Grab-callback path: wait (up to 1 s) for the callback thread to
        // deposit a frame in m_curBuffer, then deep-copy it out.
        this->m_mutex.lock();

        if (this->m_curBuffer.isEmpty())
            this->m_waitCondition.wait(&this->m_mutex, 1000);

        if (!this->m_curBuffer.isEmpty()) {
            int bufferSize = this->m_curBuffer.size();
            QByteArray oBuffer(bufferSize, Qt::Uninitialized);
            memcpy(oBuffer.data(),
                   this->m_curBuffer.constData(),
                   size_t(bufferSize));

            packet = AkPacket(caps, oBuffer);
            packet.setPts(pts);
            packet.setTimeBase(this->m_timeBase);
            packet.setIndex(0);
            packet.setId(this->m_id);

            this->m_curBuffer.clear();
        }

        this->m_mutex.unlock();
    } else {
        // Direct-read path: query the grabber's buffer size, then copy the
        // current buffer into a QByteArray of that size.
        long bufferSize;

        HRESULT hr = this->m_grabber->GetCurrentBuffer(&bufferSize, NULL);

        if (FAILED(hr))
            return AkPacket();

        QByteArray oBuffer(bufferSize, Qt::Uninitialized);
        hr = this->m_grabber->GetCurrentBuffer(&bufferSize,
                                               reinterpret_cast<long *>(oBuffer.data()));

        if (FAILED(hr))
            return AkPacket();

        packet = AkPacket(caps, oBuffer);
        packet.setPts(pts);
        packet.setTimeBase(this->m_timeBase);
        packet.setIndex(0);
        packet.setId(this->m_id);
    }

    return packet;
}
// Convert a raw video packet to BGRA at the same geometry using swscale.
// Returns an empty AkPacket on any failure.
AkPacket ConvertVideo::convert(const AkPacket &packet)
{
    AkVideoPacket videoPacket(packet);

    // Map the packet's pixel format onto FFmpeg's enumeration.
    QString format = AkVideoCaps::pixelFormatToString(videoPacket.caps().format());
    AVPixelFormat iFormat = av_get_pix_fmt(format.toStdString().c_str());

    // (Re)initialize the rescaling context: same width/height, BGRA out.
    this->m_scaleContext = sws_getCachedContext(this->m_scaleContext,
                                                videoPacket.caps().width(),
                                                videoPacket.caps().height(),
                                                iFormat,
                                                videoPacket.caps().width(),
                                                videoPacket.caps().height(),
                                                AV_PIX_FMT_BGRA,
                                                SWS_FAST_BILINEAR,
                                                NULL,
                                                NULL,
                                                NULL);

    if (!this->m_scaleContext)
        return AkPacket();

    // Describe the input frame over the packet's existing buffer.
    AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    if (av_image_fill_arrays((uint8_t **) iFrame.data,
                             iFrame.linesize,
                             (const uint8_t *) videoPacket.buffer().constData(),
                             iFormat,
                             videoPacket.caps().width(),
                             videoPacket.caps().height(),
                             1) < 0)
        return AkPacket();

    // Allocate the BGRA output buffer and describe the output frame over it.
    int frameSize = av_image_get_buffer_size(AV_PIX_FMT_BGRA,
                                             videoPacket.caps().width(),
                                             videoPacket.caps().height(),
                                             1);

    QByteArray oBuffer(frameSize, Qt::Uninitialized);

    AVFrame oFrame;
    memset(&oFrame, 0, sizeof(AVFrame));

    // BUGFIX: use data() rather than constData() for the buffer that
    // sws_scale() writes into; writing through pointers derived from
    // constData() casts away constness behind QByteArray's back.
    if (av_image_fill_arrays((uint8_t **) oFrame.data,
                             oFrame.linesize,
                             (const uint8_t *) oBuffer.data(),
                             AV_PIX_FMT_BGRA,
                             videoPacket.caps().width(),
                             videoPacket.caps().height(),
                             1) < 0)
        return AkPacket();

    // Convert picture format.
    sws_scale(this->m_scaleContext,
              iFrame.data,
              iFrame.linesize,
              0,
              videoPacket.caps().height(),
              oFrame.data,
              oFrame.linesize);

    // Repackage, keeping the input packet's timing/index metadata.
    AkVideoPacket oPacket(packet);
    oPacket.caps().format() = AkVideoCaps::Format_bgra;
    oPacket.buffer() = oBuffer;

    return oPacket.toPacket();
}
// Converts an incoming raw video packet to this element's target caps
// (width/height/pixel format), forwarding the packet unchanged when its
// caps already match. Only runs while the element is playing.
void VCapsConvertElement::iStream(const QbPacket &packet)
{
    if (!packet.caps().isValid() ||
        packet.caps().mimeType() != "video/x-raw" ||
        this->state() != ElementStatePlaying)
        return;

    // Fast path: nothing to convert.
    if (packet.caps() == this->m_caps)
    {
        emit this->oStream(packet);

        return;
    }

    // Input geometry/format, read from the packet caps.
    int iWidth = packet.caps().property("width").toInt();
    int iHeight = packet.caps().property("height").toInt();
    QString format = packet.caps().property("format").toString();
    PixelFormat iFormat = av_get_pix_fmt(format.toStdString().c_str());

    // Output geometry/format: take each property from the target caps
    // when it is set there, otherwise pass the input value through.
    QList<QByteArray> props = this->m_caps.dynamicPropertyNames();

    int oWidth = props.contains("width")?
                     this->m_caps.property("width").toInt():
                     iWidth;

    int oHeight = props.contains("height")?
                      this->m_caps.property("height").toInt():
                      iHeight;

    PixelFormat oFormat;

    if (props.contains("format"))
    {
        QString oFormatString = this->m_caps.property("format").toString();
        oFormat = av_get_pix_fmt(oFormatString.toStdString().c_str());
    }
    else
        oFormat = iFormat;

    SwsContext *scaleContext = sws_getCachedContext(NULL,
                                                    iWidth,
                                                    iHeight,
                                                    iFormat,
                                                    oWidth,
                                                    oHeight,
                                                    oFormat,
                                                    SWS_FAST_BILINEAR,
                                                    NULL,
                                                    NULL,
                                                    NULL);

    if (!scaleContext)
        return;

    int oBufferSize = avpicture_get_size(oFormat, oWidth, oHeight);

    // NOTE(review): this array is new[]'d but QSharedPointer's default
    // deleter uses scalar delete — confirm an array deleter is intended.
    QSharedPointer<uchar> oBuffer(new uchar[oBufferSize]);

    AVPicture iPicture;
    avpicture_fill(&iPicture,
                   (uint8_t *) packet.buffer().data(),
                   iFormat,
                   iWidth,
                   iHeight);

    AVPicture oPicture;
    avpicture_fill(&oPicture,
                   (uint8_t *) oBuffer.data(),
                   oFormat,
                   oWidth,
                   oHeight);

    // Rescale/convert the input picture into the output buffer.
    sws_scale(scaleContext,
              (uint8_t **) iPicture.data,
              iPicture.linesize,
              0,
              iHeight,
              oPicture.data,
              oPicture.linesize);

    sws_freeContext(scaleContext);

    // Wrap the converted frame, carrying over the timing metadata.
    QbPacket oPacket(packet.caps().update(this->m_caps),
                     oBuffer,
                     oBufferSize);

    oPacket.setPts(packet.pts());
    oPacket.setDuration(packet.duration());
    oPacket.setTimeBase(packet.timeBase());
    oPacket.setIndex(packet.index());

    emit this->oStream(oPacket);
}
// Converts an incoming raw audio packet to this element's target caps
// (sample format / channels / layout / rate) with libswresample and
// emits the converted packet. Only runs while the element is playing.
void ACapsConvertElement::iStream(const QbPacket &packet)
{
    if (!packet.caps().isValid() ||
        packet.caps().mimeType() != "audio/x-raw" ||
        this->state() != ElementStatePlaying)
        return;

    // Input Format
    AVSampleFormat iSampleFormat = av_get_sample_fmt(packet.caps().property("format").toString().toStdString().c_str());
    int iNChannels = packet.caps().property("channels").toInt();
    int64_t iChannelLayout = av_get_channel_layout(packet.caps().property("layout").toString().toStdString().c_str());
    int iNPlanes = av_sample_fmt_is_planar(iSampleFormat)? iNChannels: 1;
    int iSampleRate = packet.caps().property("rate").toInt();
    int iNSamples = packet.caps().property("samples").toInt();

    // Fall back to a conventional frame size when the caps carry no
    // sample count.
    if (iNSamples < 1)
        iNSamples = 1024;

    bool sameMimeType = packet.caps().mimeType() == this->m_caps.mimeType();

    // Output Format: each property comes from the target caps when it is
    // set there (and the mime types match), otherwise it is passed
    // through from the input.
    AVSampleFormat oSampleFormat = (sameMimeType && this->m_caps.dynamicPropertyNames().contains("format"))?
                                       av_get_sample_fmt(this->m_caps.property("format").toString().toStdString().c_str()):
                                       iSampleFormat;

    int oNChannels = (sameMimeType && this->m_caps.dynamicPropertyNames().contains("channels"))?
                         this->m_caps.property("channels").toInt():
                         iNChannels;

    int64_t oChannelLayout = (sameMimeType && this->m_caps.dynamicPropertyNames().contains("layout"))?
                                 av_get_channel_layout(this->m_caps.property("layout").toString().toStdString().c_str()):
                                 iChannelLayout;

    int oSampleRate = (sameMimeType && this->m_caps.dynamicPropertyNames().contains("rate"))?
                          this->m_caps.property("rate").toInt():
                          iSampleRate;

    // Map the packet's buffer into per-plane input pointers.
    QVector<uint8_t *> iData(iNPlanes);
    int iLineSize;

    if (av_samples_fill_arrays(&iData.data()[0],
                               &iLineSize,
                               (const uint8_t *) packet.buffer().data(),
                               iNChannels,
                               iNSamples,
                               iSampleFormat,
                               1) < 0)
        return;

    // Rebuild the resampler only when the input caps (ignoring the
    // per-packet sample count) actually changed.
    QbCaps caps1(packet.caps());
    QbCaps caps2(this->m_curInputCaps);

    caps1.setProperty("samples", QVariant());
    caps2.setProperty("samples", QVariant());

    if (caps1 != caps2)
    {
        // create resampler context
        this->m_resampleContext = SwrContextPtr(swr_alloc(), this->deleteSwrContext);

        if (!this->m_resampleContext)
            return;

        // set options
        av_opt_set_int(this->m_resampleContext.data(), "in_channel_layout", iChannelLayout, 0);
        av_opt_set_int(this->m_resampleContext.data(), "in_sample_rate", iSampleRate, 0);
        av_opt_set_sample_fmt(this->m_resampleContext.data(), "in_sample_fmt", iSampleFormat, 0);

        av_opt_set_int(this->m_resampleContext.data(), "out_channel_layout", oChannelLayout, 0);
        av_opt_set_int(this->m_resampleContext.data(), "out_sample_rate", oSampleRate, 0);
        av_opt_set_sample_fmt(this->m_resampleContext.data(), "out_sample_fmt", oSampleFormat, 0);

        // initialize the resampling context
        if (swr_init(this->m_resampleContext.data()) < 0)
            return;

        this->m_curInputCaps = packet.caps();
    }

    // compute destination number of samples
    int oNSamples = av_rescale_rnd(swr_get_delay(this->m_resampleContext.data(),
                                                 iSampleRate) +
                                   iNSamples,
                                   oSampleRate,
                                   iSampleRate,
                                   AV_ROUND_UP);

    // buffer is going to be directly written to a rawaudio file, no alignment
    int oNPlanes = av_sample_fmt_is_planar(oSampleFormat)? oNChannels: 1;
    QVector<uint8_t *> oData(oNPlanes);

    int oLineSize;

    int oBufferSize = av_samples_get_buffer_size(&oLineSize,
                                                 oNChannels,
                                                 oNSamples,
                                                 oSampleFormat,
                                                 1);

    // NOTE(review): this array is new[]'d but QSharedPointer's default
    // deleter uses scalar delete — confirm an array deleter is intended.
    // Also, operator new throws rather than returning NULL, so the check
    // just below can never fire.
    QSharedPointer<uchar> oBuffer(new uchar[oBufferSize]);

    if (!oBuffer)
        return;

    // Map the output buffer into per-plane output pointers.
    if (av_samples_fill_arrays(&oData.data()[0],
                               &oLineSize,
                               (const uint8_t *) oBuffer.data(),
                               oNChannels,
                               oNSamples,
                               oSampleFormat,
                               1) < 0)
        return;

    // convert to destination format
    if (swr_convert(this->m_resampleContext.data(),
                    oData.data(),
                    oNSamples,
                    (const uint8_t **) iData.data(),
                    iNSamples) < 0)
        return;

    // Describe the converted stream as a caps string for the packet.
    const char *format = av_get_sample_fmt_name(oSampleFormat);

    char layout[256];
    av_get_channel_layout_string(layout, sizeof(layout), oNChannels, oChannelLayout);

    QString caps = QString("audio/x-raw,"
                           "format=%1,"
                           "channels=%2,"
                           "rate=%3,"
                           "layout=%4,"
                           "samples=%5").arg(format)
                                        .arg(oNChannels)
                                        .arg(oSampleRate)
                                        .arg(layout)
                                        .arg(oNSamples);

    QbPacket oPacket(caps, oBuffer, oBufferSize);
    oPacket.setPts(packet.pts());
    oPacket.setDuration(packet.duration());
    oPacket.setTimeBase(packet.timeBase());
    oPacket.setIndex(packet.index());

    emit this->oStream(oPacket);
}
// Converts a decoded FFmpeg frame into a QbVideoPacket, rescaling to
// BGRA when the frame's pixel format is not in the supported output set.
QbPacket VideoStream::convert(AVFrame *iFrame)
{
    AVPicture *oPicture;
    AVPixelFormat oFormat;
    bool delFrame = false;

    if (outputFormats->contains(AVPixelFormat(iFrame->format)))
    {
        // Frame already in a supported format: alias it, no conversion.
        oPicture = (AVPicture *) iFrame;
        oFormat = AVPixelFormat(iFrame->format);
    }
    else
    {
        // Unsupported format: allocate a temporary picture and convert
        // to BGRA with swscale.
        oPicture = new AVPicture;
        oFormat = AV_PIX_FMT_BGRA;

        avpicture_alloc(oPicture, oFormat, iFrame->width, iFrame->height);

        this->m_scaleContext = sws_getCachedContext(this->m_scaleContext,
                                                    iFrame->width,
                                                    iFrame->height,
                                                    AVPixelFormat(iFrame->format),
                                                    iFrame->width,
                                                    iFrame->height,
                                                    oFormat,
                                                    SWS_FAST_BILINEAR,
                                                    NULL,
                                                    NULL,
                                                    NULL);

        sws_scale(this->m_scaleContext,
                  (uint8_t **) iFrame->data,
                  iFrame->linesize,
                  0,
                  iFrame->height,
                  oPicture->data,
                  oPicture->linesize);

        // Remember to release the temporary picture before returning.
        delFrame = true;
    }

    QbVideoPacket packet;
    packet.caps().isValid() = true;
    packet.caps().format() = outputFormats->value(oFormat);
    packet.caps().width() = iFrame->width;
    packet.caps().height() = iFrame->height;
    packet.caps().fps() = this->fps();

    // Serialize the (possibly converted) picture into a flat buffer.
    // NOTE(review): QbBufferPtr wraps a new[]'d array here; confirm its
    // deleter uses delete[].
    int frameSize = avpicture_get_size(oFormat, iFrame->width, iFrame->height);
    QbBufferPtr oBuffer(new char[frameSize]);

    avpicture_layout(oPicture,
                     oFormat,
                     iFrame->width,
                     iFrame->height,
                     (uint8_t *) oBuffer.data(),
                     frameSize);

    packet.buffer() = oBuffer;
    packet.bufferSize() = frameSize;
    packet.pts() = av_frame_get_best_effort_timestamp(iFrame);
    packet.timeBase() = this->timeBase();
    packet.index() = this->index();
    packet.id() = this->id();

    if (delFrame)
    {
        avpicture_free(oPicture);
        delete oPicture;
    }

    return packet.toPacket();
}
// Resample an audio packet to the requested output caps using
// libswresample's frame API. Returns an empty AkPacket on any failure.
AkPacket ConvertAudioFFmpeg::convert(const AkAudioPacket &packet, const AkCaps &oCaps)
{
    AkAudioCaps oAudioCaps(oCaps);

    // Input format description.
    int64_t iSampleLayout = channelLayouts->value(packet.caps().layout(), 0);
    AVSampleFormat iSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(packet.caps().format())
                              .toStdString().c_str());
    int iSampleRate = packet.caps().rate();
    int iNChannels = packet.caps().channels();
    int iNSamples = packet.caps().samples();

    // Output format description (defaults: stereo layout).
    int64_t oSampleLayout = channelLayouts->value(oAudioCaps.layout(),
                                                  AV_CH_LAYOUT_STEREO);
    AVSampleFormat oSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(oAudioCaps.format())
                              .toStdString().c_str());
    int oSampleRate = oAudioCaps.rate();
    int oNChannels = oAudioCaps.channels();

    this->m_resampleContext =
            swr_alloc_set_opts(this->m_resampleContext,
                               oSampleLayout,
                               oSampleFormat,
                               oSampleRate,
                               iSampleLayout,
                               iSampleFormat,
                               iSampleRate,
                               0,
                               NULL);

    if (!this->m_resampleContext)
        return AkPacket();

    // Create input audio frame.
    // BUGFIX: this frame used to be 'static', sharing mutable state across
    // calls (and any threads using this converter). A stack-local frame is
    // zero-initialized on every call instead.
    AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    iFrame.format = iSampleFormat;
    iFrame.channels = iNChannels;
    iFrame.channel_layout = uint64_t(iSampleLayout);
    iFrame.sample_rate = iSampleRate;
    iFrame.nb_samples = iNSamples;
    iFrame.pts = iFrame.pkt_pts = packet.pts();

    if (avcodec_fill_audio_frame(&iFrame,
                                 iFrame.channels,
                                 iSampleFormat,
                                 reinterpret_cast<const uint8_t *>(packet.buffer().constData()),
                                 packet.buffer().size(),
                                 1) < 0) {
        return AkPacket();
    }

    // Fill output audio frame. The sample count covers the resampler's
    // internal delay plus a small safety margin.
    AVFrame oFrame;
    memset(&oFrame, 0, sizeof(AVFrame));

    oFrame.format = oSampleFormat;
    oFrame.channels = oNChannels;
    oFrame.channel_layout = uint64_t(oSampleLayout);
    oFrame.sample_rate = oSampleRate;
    oFrame.nb_samples = int(swr_get_delay(this->m_resampleContext, oSampleRate))
                        + iFrame.nb_samples * oSampleRate / iSampleRate
                        + 3;
    oFrame.pts = oFrame.pkt_pts = iFrame.pts * oSampleRate / iSampleRate;

    // Calculate the size of the audio buffer.
    int frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                               oFrame.channels,
                                               oFrame.nb_samples,
                                               oSampleFormat,
                                               1);

    QByteArray oBuffer(frameSize, Qt::Uninitialized);

    if (avcodec_fill_audio_frame(&oFrame,
                                 oFrame.channels,
                                 oSampleFormat,
                                 reinterpret_cast<const uint8_t *>(oBuffer.constData()),
                                 oBuffer.size(),
                                 1) < 0) {
        return AkPacket();
    }

    // Convert to destination format.
    if (swr_convert_frame(this->m_resampleContext, &oFrame, &iFrame) < 0)
        return AkPacket();

    // Shrink the buffer to the samples actually produced.
    frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                           oFrame.channels,
                                           oFrame.nb_samples,
                                           oSampleFormat,
                                           1);
    oBuffer.resize(frameSize);

    AkAudioPacket oAudioPacket;
    oAudioPacket.caps() = oAudioCaps;
    oAudioPacket.caps().samples() = oFrame.nb_samples;
    oAudioPacket.buffer() = oBuffer;
    oAudioPacket.pts() = oFrame.pts;
    oAudioPacket.timeBase() = AkFrac(1, oAudioCaps.rate());
    oAudioPacket.index() = packet.index();
    oAudioPacket.id() = packet.id();

    return oAudioPacket.toPacket();
}
// Decodes one compressed video packet and returns the resulting raw
// frame as a single-element QbPacket list; an empty list when the
// decoder produced no frame (or this stream is invalid).
QList<QbPacket> VideoStream::readPackets(AVPacket *packet)
{
    QList<QbPacket> packets;

    if (!this->isValid())
        return packets;

    AVFrame iFrame;
    avcodec_get_frame_defaults(&iFrame);

    int gotFrame;

    avcodec_decode_video2(this->codecContext(), &iFrame, &gotFrame, packet);

    // Decoder needs more input before it can emit a frame.
    if (!gotFrame)
        return packets;

    int frameSize = avpicture_get_size(this->codecContext()->pix_fmt,
                                       this->codecContext()->width,
                                       this->codecContext()->height);

    // NOTE(review): new[]'d array in a QSharedPointer whose default
    // deleter is scalar delete — confirm an array deleter is intended.
    // Also, operator new throws rather than returning NULL, so the check
    // just below can never fire.
    QSharedPointer<uchar> oBuffer(new uchar[frameSize]);

    if (!oBuffer)
        return packets;

    // NOTE(review): 'sync' being static shares its value across all
    // VideoStream instances — verify this is intended when several
    // streams are decoded concurrently.
    static bool sync;

    if (this->m_fst)
    {
        // First decoded frame: derive the sync flag from the presence of
        // a best-effort timestamp and reset the local clock.
        sync = av_frame_get_best_effort_timestamp(&iFrame)? false: true;
        this->m_pts = 0;
        this->m_duration = this->fps().invert().value() * this->timeBase().invert().value();
        this->m_fst = false;
    }
    else
        this->m_pts += this->m_duration;

    // Serialize the decoded picture into the flat buffer.
    avpicture_layout((AVPicture *) &iFrame,
                     this->codecContext()->pix_fmt,
                     this->codecContext()->width,
                     this->codecContext()->height,
                     (uint8_t *) oBuffer.data(),
                     frameSize);

    QbCaps caps = this->caps();
    caps.setProperty("sync", sync);

    QbPacket oPacket(caps, oBuffer, frameSize);
    oPacket.setPts(this->m_pts);
    oPacket.setDuration(this->m_duration);
    oPacket.setTimeBase(this->timeBase());
    oPacket.setIndex(this->index());

    packets << oPacket;

    return packets;
}