AkPacket MultiplexElement::iStream(const AkPacket &packet)
{
    // Discard packets that don't belong to the configured input stream.
    bool wrongInput = this->m_inputIndex >= 0
                      && packet.index() != this->m_inputIndex;

    if (wrongInput)
        return AkPacket();

    // Discard packets whose caps don't match the configured filter caps.
    if (!(this->m_caps.isEmpty()
          || packet.caps().isCompatible(this->m_caps)))
        return AkPacket();

    AkPacket oPacket(packet);

    // Retag the packet with the configured output stream index.
    if (this->m_outputIndex >= 0)
        oPacket.setIndex(this->m_outputIndex);

    akSend(oPacket)
}
AkPacket AudioDeviceElement::iStream(const AkAudioPacket &packet)
{
    // Snapshot the playback state and the target device under the lock.
    this->m_mutex.lock();

    if (this->state() != ElementStatePlaying) {
        this->m_mutex.unlock();

        return AkPacket();
    }

    auto currentDevice = this->m_device;
    this->m_mutex.unlock();

    if (currentDevice == DUMMY_OUTPUT_DEVICE) {
        // No real sink: simulate playback by sleeping for the duration
        // of this audio chunk.
        auto chunkDuration =
                ulong(1e6 * packet.caps().samples() / packet.caps().rate());
        QThread::usleep(chunkDuration);

        return AkPacket();
    }

    // Convert the incoming packet to the device's format.
    AkPacket converted;
    this->m_mutex.lock();

    if (this->m_convert)
        converted = this->m_convert->iStream(packet.toPacket());

    this->m_mutex.unlock();

    if (converted) {
        // Writes to the backend audio library are serialized separately.
        this->m_mutexLib.lock();
        this->m_audioDevice->write(converted);
        this->m_mutexLib.unlock();
    }

    return AkPacket();
}
AkPacket CaptureDShow::readFrame()
{
    IBaseFilter *source = NULL;
    this->m_graph->FindFilterByName(SOURCE_FILTER_NAME, &source);

    if (source) {
        // Push any pending image-control changes down to the capture filter.
        this->m_controlsMutex.lock();
        QVariantMap imageControls = this->controlStatus(this->m_globalImageControls);
        this->m_controlsMutex.unlock();

        if (this->m_localImageControls != imageControls) {
            QVariantMap controls = this->mapDiff(this->m_localImageControls,
                                                 imageControls);
            this->setImageControls(source, controls);
            this->m_localImageControls = imageControls;
        }

        // Same for the camera (mechanical) controls.
        this->m_controlsMutex.lock();
        QVariantMap cameraControls = this->controlStatus(this->m_globalCameraControls);
        this->m_controlsMutex.unlock();

        if (this->m_localCameraControls != cameraControls) {
            QVariantMap controls = this->mapDiff(this->m_localCameraControls,
                                                 cameraControls);
            this->setCameraControls(source, controls);
            this->m_localCameraControls = cameraControls;
        }

        source->Release();
    }

    // Read the caps of the frames the grabber is currently delivering.
    AM_MEDIA_TYPE mediaType;
    ZeroMemory(&mediaType, sizeof(AM_MEDIA_TYPE));
    this->m_grabber->GetConnectedMediaType(&mediaType);
    AkCaps caps = this->capsFromMediaType(&mediaType);
    // NOTE(review): GetConnectedMediaType allocates mediaType.pbFormat, and it
    // doesn't appear to be released here (FreeMediaType) — confirm upstream.

    AkPacket packet;

    // Timestamp the frame with the wall clock, expressed in m_timeBase units.
    timeval timestamp;
    // FIX: the argument was garbled as "×tamp" (mojibake for "&timestamp"),
    // which doesn't compile.
    gettimeofday(&timestamp, NULL);
    qint64 pts = qint64((timestamp.tv_sec
                         + 1e-6 * timestamp.tv_usec)
                        * this->m_timeBase.invert().value());

    if (this->m_ioMethod != IoMethodDirectRead) {
        // Grab-callback path: wait (up to 1 s) for the callback thread to
        // publish a buffer, then copy it out.
        this->m_mutex.lock();

        if (this->m_curBuffer.isEmpty())
            this->m_waitCondition.wait(&this->m_mutex, 1000);

        if (!this->m_curBuffer.isEmpty()) {
            int bufferSize = this->m_curBuffer.size();
            QByteArray oBuffer(bufferSize, Qt::Uninitialized);
            memcpy(oBuffer.data(),
                   this->m_curBuffer.constData(),
                   size_t(bufferSize));

            packet = AkPacket(caps, oBuffer);
            packet.setPts(pts);
            packet.setTimeBase(this->m_timeBase);
            packet.setIndex(0);
            packet.setId(this->m_id);

            this->m_curBuffer.clear();
        }

        this->m_mutex.unlock();
    } else {
        // Direct-read path: first query the required size, then copy the
        // grabber's current buffer.
        long bufferSize;

        HRESULT hr = this->m_grabber->GetCurrentBuffer(&bufferSize, NULL);

        if (FAILED(hr))
            return AkPacket();

        QByteArray oBuffer(bufferSize, Qt::Uninitialized);
        hr = this->m_grabber->GetCurrentBuffer(&bufferSize,
                                               reinterpret_cast<long *>(oBuffer.data()));

        if (FAILED(hr))
            return AkPacket();

        packet = AkPacket(caps, oBuffer);
        packet.setPts(pts);
        packet.setTimeBase(this->m_timeBase);
        packet.setIndex(0);
        packet.setId(this->m_id);
    }

    return packet;
}
AkPacket ConvertVideo::convert(const AkPacket &packet)
{
    AkVideoPacket videoPacket(packet);

    // Resolve the FFmpeg pixel format matching the packet's pixel format.
    auto formatName =
            AkVideoCaps::pixelFormatToString(videoPacket.caps().format());
    AVPixelFormat iFormat = av_get_pix_fmt(formatName.toStdString().c_str());

    int width = videoPacket.caps().width();
    int height = videoPacket.caps().height();

    // (Re)use a cached scaling context for iFormat -> BGRA at the same size.
    this->m_scaleContext = sws_getCachedContext(this->m_scaleContext,
                                                width,
                                                height,
                                                iFormat,
                                                width,
                                                height,
                                                AV_PIX_FMT_BGRA,
                                                SWS_FAST_BILINEAR,
                                                NULL,
                                                NULL,
                                                NULL);

    if (!this->m_scaleContext)
        return AkPacket();

    // Wrap the input buffer in an AVFrame (no copy).
    AVFrame srcFrame;
    memset(&srcFrame, 0, sizeof(AVFrame));

    if (av_image_fill_arrays(srcFrame.data,
                             srcFrame.linesize,
                             reinterpret_cast<const uint8_t *>(videoPacket.buffer().constData()),
                             iFormat,
                             width,
                             height,
                             1) < 0)
        return AkPacket();

    // Allocate the output buffer and wrap it in an AVFrame as well.
    int frameSize = av_image_get_buffer_size(AV_PIX_FMT_BGRA,
                                             width,
                                             height,
                                             1);
    QByteArray oBuffer(frameSize, Qt::Uninitialized);

    AVFrame dstFrame;
    memset(&dstFrame, 0, sizeof(AVFrame));

    if (av_image_fill_arrays(dstFrame.data,
                             dstFrame.linesize,
                             reinterpret_cast<const uint8_t *>(oBuffer.constData()),
                             AV_PIX_FMT_BGRA,
                             width,
                             height,
                             1) < 0)
        return AkPacket();

    // Perform the actual pixel format conversion.
    sws_scale(this->m_scaleContext,
              srcFrame.data,
              srcFrame.linesize,
              0,
              height,
              dstFrame.data,
              dstFrame.linesize);

    // Repackage the converted data as a BGRA video packet.
    AkVideoPacket oPacket(packet);
    oPacket.caps().format() = AkVideoCaps::Format_bgra;
    oPacket.buffer() = oBuffer;

    return oPacket.toPacket();
}
AkPacket ConvertAudioFFmpeg::convert(const AkAudioPacket &packet, const AkCaps &oCaps)
{
    AkAudioCaps oAudioCaps(oCaps);

    // Map the input caps to FFmpeg's layout/format/rate parameters.
    int64_t iSampleLayout = channelLayouts->value(packet.caps().layout(), 0);
    AVSampleFormat iSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(packet.caps().format())
                              .toStdString().c_str());
    int iSampleRate = packet.caps().rate();
    int iNChannels = packet.caps().channels();
    int iNSamples = packet.caps().samples();

    // Map the requested output caps the same way.
    int64_t oSampleLayout = channelLayouts->value(oAudioCaps.layout(),
                                                 AV_CH_LAYOUT_STEREO);
    AVSampleFormat oSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(oAudioCaps.format())
                              .toStdString().c_str());
    int oSampleRate = oAudioCaps.rate();
    int oNChannels = oAudioCaps.channels();

    // (Re)configure the cached resampling context for this in/out pair.
    this->m_resampleContext =
            swr_alloc_set_opts(this->m_resampleContext,
                               oSampleLayout,
                               oSampleFormat,
                               oSampleRate,
                               iSampleLayout,
                               iSampleFormat,
                               iSampleRate,
                               0,
                               NULL);

    if (!this->m_resampleContext)
        return AkPacket();

    // Create the input audio frame, pointing at the packet's buffer.
    // FIX: iFrame was declared 'static', which makes this method
    // non-reentrant and unsafe if two conversions ever run concurrently.
    // A plain local is correct: the frame is fully re-initialized on
    // every call anyway, so the 'static' bought nothing.
    AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    iFrame.format = iSampleFormat;
    iFrame.channels = iNChannels;
    iFrame.channel_layout = uint64_t(iSampleLayout);
    iFrame.sample_rate = iSampleRate;
    iFrame.nb_samples = iNSamples;
    iFrame.pts = iFrame.pkt_pts = packet.pts();

    if (avcodec_fill_audio_frame(&iFrame,
                                 iFrame.channels,
                                 iSampleFormat,
                                 reinterpret_cast<const uint8_t *>(packet.buffer().constData()),
                                 packet.buffer().size(),
                                 1) < 0) {
        return AkPacket();
    }

    // Fill the output audio frame. nb_samples is an upper bound:
    // resampler delay + rate-converted input samples + small headroom.
    AVFrame oFrame;
    memset(&oFrame, 0, sizeof(AVFrame));

    oFrame.format = oSampleFormat;
    oFrame.channels = oNChannels;
    oFrame.channel_layout = uint64_t(oSampleLayout);
    oFrame.sample_rate = oSampleRate;
    oFrame.nb_samples = int(swr_get_delay(this->m_resampleContext, oSampleRate))
                        + iFrame.nb_samples
                        * oSampleRate
                        / iSampleRate
                        + 3;
    oFrame.pts = oFrame.pkt_pts = iFrame.pts * oSampleRate / iSampleRate;

    // Calculate the size of the output audio buffer.
    int frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                               oFrame.channels,
                                               oFrame.nb_samples,
                                               oSampleFormat,
                                               1);

    // FIX: av_samples_get_buffer_size() returns a negative error code on
    // failure; don't use that as a QByteArray size.
    if (frameSize < 0)
        return AkPacket();

    QByteArray oBuffer(frameSize, Qt::Uninitialized);

    if (avcodec_fill_audio_frame(&oFrame,
                                 oFrame.channels,
                                 oSampleFormat,
                                 reinterpret_cast<const uint8_t *>(oBuffer.constData()),
                                 oBuffer.size(),
                                 1) < 0) {
        return AkPacket();
    }

    // Convert to the destination format.
    if (swr_convert_frame(this->m_resampleContext, &oFrame, &iFrame) < 0)
        return AkPacket();

    // Shrink the buffer to the number of samples actually produced.
    frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                           oFrame.channels,
                                           oFrame.nb_samples,
                                           oSampleFormat,
                                           1);
    oBuffer.resize(frameSize);

    AkAudioPacket oAudioPacket;
    oAudioPacket.caps() = oAudioCaps;
    oAudioPacket.caps().samples() = oFrame.nb_samples;
    oAudioPacket.buffer() = oBuffer;
    oAudioPacket.pts() = oFrame.pts;
    oAudioPacket.timeBase() = AkFrac(1, oAudioCaps.rate());
    oAudioPacket.index() = packet.index();
    oAudioPacket.id() = packet.id();

    return oAudioPacket.toPacket();
}