AkPacket AudioDeviceElement::iStream(const AkAudioPacket &packet)
{
    this->m_mutex.lock();

    if (this->state() != ElementStatePlaying) {
        this->m_mutex.unlock();

        return AkPacket();
    }

    auto device = this->m_device;
    this->m_mutex.unlock();

    if (device == DUMMY_OUTPUT_DEVICE) {
        // The dummy device discards the audio, so just sleep for the
        // duration of the packet to keep the stream paced in real time.
        QThread::usleep(ulong(1e6
                              * packet.caps().samples()
                              / packet.caps().rate()));
    } else {
        AkPacket iPacket;

        // Convert the packet to the format expected by the audio device.
        this->m_mutex.lock();

        if (this->m_convert)
            iPacket = this->m_convert->iStream(packet.toPacket());

        this->m_mutex.unlock();

        // Write the converted samples to the output device.
        if (iPacket) {
            this->m_mutexLib.lock();
            this->m_audioDevice->write(iPacket);
            this->m_mutexLib.unlock();
        }
    }

    return AkPacket();
}
AkPacket ConvertAudioFFmpeg::convert(const AkAudioPacket &packet,
                                     const AkCaps &oCaps)
{
    AkAudioCaps oAudioCaps(oCaps);

    // Input format parameters, mapped to their FFmpeg equivalents.
    int64_t iSampleLayout = channelLayouts->value(packet.caps().layout(), 0);
    AVSampleFormat iSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(packet.caps().format())
                              .toStdString().c_str());
    int iSampleRate = packet.caps().rate();
    int iNChannels = packet.caps().channels();
    int iNSamples = packet.caps().samples();

    // Output format parameters.
    int64_t oSampleLayout = channelLayouts->value(oAudioCaps.layout(),
                                                  AV_CH_LAYOUT_STEREO);
    AVSampleFormat oSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(oAudioCaps.format())
                              .toStdString().c_str());
    int oSampleRate = oAudioCaps.rate();
    int oNChannels = oAudioCaps.channels();

    this->m_resampleContext =
            swr_alloc_set_opts(this->m_resampleContext,
                               oSampleLayout,
                               oSampleFormat,
                               oSampleRate,
                               iSampleLayout,
                               iSampleFormat,
                               iSampleRate,
                               0,
                               NULL);

    if (!this->m_resampleContext)
        return AkPacket();

    // Create input audio frame.
    static AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    iFrame.format = iSampleFormat;
    iFrame.channels = iNChannels;
    iFrame.channel_layout = uint64_t(iSampleLayout);
    iFrame.sample_rate = iSampleRate;
    iFrame.nb_samples = iNSamples;
    iFrame.pts = iFrame.pkt_pts = packet.pts();

    if (avcodec_fill_audio_frame(&iFrame,
                                 iFrame.channels,
                                 iSampleFormat,
                                 reinterpret_cast<const uint8_t *>(packet.buffer().constData()),
                                 packet.buffer().size(),
                                 1) < 0) {
        return AkPacket();
    }

    // Fill output audio frame.
    AVFrame oFrame;
    memset(&oFrame, 0, sizeof(AVFrame));

    oFrame.format = oSampleFormat;
    oFrame.channels = oNChannels;
    oFrame.channel_layout = uint64_t(oSampleLayout);
    oFrame.sample_rate = oSampleRate;

    // Upper bound for the output sample count: pending resampler delay plus
    // the rescaled input samples, with a small safety margin.
    oFrame.nb_samples = int(swr_get_delay(this->m_resampleContext, oSampleRate))
                        + iFrame.nb_samples * oSampleRate / iSampleRate
                        + 3;
    oFrame.pts = oFrame.pkt_pts = iFrame.pts * oSampleRate / iSampleRate;

    // Calculate the size of the audio buffer.
    int frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                               oFrame.channels,
                                               oFrame.nb_samples,
                                               oSampleFormat,
                                               1);

    QByteArray oBuffer(frameSize, Qt::Uninitialized);

    if (avcodec_fill_audio_frame(&oFrame,
                                 oFrame.channels,
                                 oSampleFormat,
                                 reinterpret_cast<const uint8_t *>(oBuffer.constData()),
                                 oBuffer.size(),
                                 1) < 0) {
        return AkPacket();
    }

    // Convert to the destination format.
    if (swr_convert_frame(this->m_resampleContext, &oFrame, &iFrame) < 0)
        return AkPacket();

    // Shrink the buffer to the number of samples actually produced.
    frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                           oFrame.channels,
                                           oFrame.nb_samples,
                                           oSampleFormat,
                                           1);
    oBuffer.resize(frameSize);

    AkAudioPacket oAudioPacket;
    oAudioPacket.caps() = oAudioCaps;
    oAudioPacket.caps().samples() = oFrame.nb_samples;
    oAudioPacket.buffer() = oBuffer;
    oAudioPacket.pts() = oFrame.pts;
    oAudioPacket.timeBase() = AkFrac(1, oAudioCaps.rate());
    oAudioPacket.index() = packet.index();
    oAudioPacket.id() = packet.id();

    return oAudioPacket.toPacket();
}
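The least obvious part of the conversion above is the output-size estimate fed into `oFrame.nb_samples`. The following is a minimal standalone sketch of that same arithmetic against libswresample, using made-up rates (44100 Hz S16 stereo to 48000 Hz FLT stereo) and a made-up input size of 1024 samples purely for illustration; it is not part of the ConvertAudioFFmpeg code.

extern "C" {
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
}

#include <cstdio>

int main()
{
    // Illustrative resampler: 44100 Hz S16 stereo -> 48000 Hz FLT stereo.
    SwrContext *ctx = swr_alloc_set_opts(nullptr,
                                         AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLT, 48000,
                                         AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,
                                         0, nullptr);

    if (!ctx || swr_init(ctx) < 0)
        return 1;

    // Same estimate as in convert(): pending delay (in output samples)
    // + rescaled input samples + a small safety margin.
    int iNSamples = 1024;
    int oNSamples = int(swr_get_delay(ctx, 48000))
                    + iNSamples * 48000 / 44100
                    + 3;

    // Before any conversion the delay is 0, so this prints
    // 1024 * 48000 / 44100 + 3 = 1117.
    printf("estimated output samples: %d\n", oNSamples);

    swr_free(&ctx);

    return 0;
}

Overestimating here is deliberate: the buffer sized from this bound is large enough for whatever swr_convert_frame() produces, and the code then trims it with the second av_samples_get_buffer_size() call once the real sample count is known.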