QbPacket ConvertAudio::convert(const QbAudioPacket &packet,
                               const QbCaps &oCaps)
{
    QbAudioCaps oAudioCaps(oCaps);

    int64_t iSampleLayout = channelLayouts->value(packet.caps().layout(), 0);

    AVSampleFormat iSampleFormat = sampleFormats->value(packet.caps().format(),
                                                        AV_SAMPLE_FMT_NONE);

    int iSampleRate = packet.caps().rate();
    int iNChannels = packet.caps().channels();
    int iNSamples = packet.caps().samples();

    int64_t oSampleLayout = channelLayouts->value(oAudioCaps.layout(),
                                                  AV_CH_LAYOUT_STEREO);

    AVSampleFormat oSampleFormat = sampleFormats->value(oAudioCaps.format(),
                                                        AV_SAMPLE_FMT_FLT);

    int oSampleRate = oAudioCaps.rate();
    int oNChannels = oAudioCaps.channels();

    this->m_resampleContext = swr_alloc_set_opts(this->m_resampleContext,
                                                 oSampleLayout,
                                                 oSampleFormat,
                                                 oSampleRate,
                                                 iSampleLayout,
                                                 iSampleFormat,
                                                 iSampleRate,
                                                 0,
                                                 NULL);

    if (!this->m_resampleContext)
        return QbPacket();

    if (!swr_is_initialized(this->m_resampleContext))
        if (swr_init(this->m_resampleContext) < 0)
            return QbPacket();

    // Create input audio frame.
    static AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    if (av_samples_fill_arrays(iFrame.data,
                               iFrame.linesize,
                               (const uint8_t *) packet.buffer().data(),
                               iNChannels,
                               iNSamples,
                               iSampleFormat,
                               1) < 0)
        return QbPacket();

    iFrame.channels = iNChannels;
    iFrame.channel_layout = iSampleLayout;
    iFrame.format = iSampleFormat;
    iFrame.sample_rate = iSampleRate;
    iFrame.nb_samples = iNSamples;
    iFrame.pts = iFrame.pkt_pts = packet.pts();

    // Create output audio packet.
    // Upper bound on the output sample count: samples still buffered in the
    // resampler plus the rescaled input, with a small safety margin.
    int oNSamples = swr_get_delay(this->m_resampleContext, oSampleRate)
                    + iFrame.nb_samples * (int64_t) oSampleRate / iSampleRate
                    + 3;

    int oLineSize;
    int oBufferSize = av_samples_get_buffer_size(&oLineSize,
                                                 oNChannels,
                                                 oNSamples,
                                                 oSampleFormat,
                                                 1);

    QByteArray oBuffer(oBufferSize, Qt::Uninitialized);

    // Planar output formats need one data pointer per channel; packed
    // formats use a single interleaved plane.
    int oNPlanes = av_sample_fmt_is_planar(oSampleFormat)? oNChannels: 1;
    QVector<uint8_t *> oData(oNPlanes);

    if (av_samples_fill_arrays(&oData.data()[0],
                               &oLineSize,
                               (const uint8_t *) oBuffer.data(),
                               oNChannels,
                               oNSamples,
                               oSampleFormat,
                               1) < 0)
        return QbPacket();

    int64_t oPts = swr_next_pts(this->m_resampleContext, iFrame.pts);

    // Convert to destination format.
    int outputSamples = swr_convert(this->m_resampleContext,
                                    oData.data(),
                                    oNSamples,
                                    (const uint8_t **) iFrame.data,
                                    iFrame.nb_samples);

    if (outputSamples < 1)
        return QbPacket();

    // Shrink the buffer to the number of samples actually written.
    oBufferSize = oBufferSize * outputSamples / oNSamples;

    QbBufferPtr buffer(new char[oBufferSize]);
    memcpy(buffer.data(), oBuffer.data(), oBufferSize);

    QbAudioPacket oAudioPacket;
    oAudioPacket.caps() = oAudioCaps;
    oAudioPacket.caps().samples() = outputSamples;
    oAudioPacket.buffer() = buffer;
    oAudioPacket.bufferSize() = oBufferSize;
    oAudioPacket.pts() = oPts;
    oAudioPacket.timeBase() = QbFrac(1, oAudioCaps.rate());
    oAudioPacket.index() = packet.index();
    oAudioPacket.id() = packet.id();

    return oAudioPacket.toPacket();
}
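// Not part of the original function: the oNSamples upper bound above is the
// usual libswresample estimate (resampler delay plus rescaled input), written
// out by hand with a +3 safety margin. A minimal sketch of the same estimate
// using av_rescale_rnd(), as in FFmpeg's resampling examples; the helper name
// is illustrative, and the delay is queried here in input-rate units, whereas
// the code above queries it in output-rate units.

extern "C" {
#include <libavutil/mathematics.h>
#include <libswresample/swresample.h>
}

static int64_t outputSamplesUpperBound(SwrContext *context,
                                       int iNSamples,
                                       int iSampleRate,
                                       int oSampleRate)
{
    // swr_get_delay() reports the samples still buffered inside the
    // resampler, in the time base given by its second argument (here the
    // input sample rate); add them to the new input and rescale to the
    // output rate, rounding up.
    return av_rescale_rnd(swr_get_delay(context, iSampleRate) + iNSamples,
                          oSampleRate,
                          iSampleRate,
                          AV_ROUND_UP);
}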
AkPacket ConvertAudioFFmpeg::convert(const AkAudioPacket &packet,
                                     const AkCaps &oCaps)
{
    AkAudioCaps oAudioCaps(oCaps);

    int64_t iSampleLayout = channelLayouts->value(packet.caps().layout(), 0);

    AVSampleFormat iSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(packet.caps().format())
                              .toStdString().c_str());

    int iSampleRate = packet.caps().rate();
    int iNChannels = packet.caps().channels();
    int iNSamples = packet.caps().samples();

    int64_t oSampleLayout = channelLayouts->value(oAudioCaps.layout(),
                                                  AV_CH_LAYOUT_STEREO);

    AVSampleFormat oSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(oAudioCaps.format())
                              .toStdString().c_str());

    int oSampleRate = oAudioCaps.rate();
    int oNChannels = oAudioCaps.channels();

    this->m_resampleContext = swr_alloc_set_opts(this->m_resampleContext,
                                                 oSampleLayout,
                                                 oSampleFormat,
                                                 oSampleRate,
                                                 iSampleLayout,
                                                 iSampleFormat,
                                                 iSampleRate,
                                                 0,
                                                 NULL);

    if (!this->m_resampleContext)
        return AkPacket();

    // Create input audio frame.
    static AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    iFrame.format = iSampleFormat;
    iFrame.channels = iNChannels;
    iFrame.channel_layout = uint64_t(iSampleLayout);
    iFrame.sample_rate = iSampleRate;
    iFrame.nb_samples = iNSamples;
    iFrame.pts = iFrame.pkt_pts = packet.pts();

    if (avcodec_fill_audio_frame(&iFrame,
                                 iFrame.channels,
                                 iSampleFormat,
                                 reinterpret_cast<const uint8_t *>(packet.buffer().constData()),
                                 packet.buffer().size(),
                                 1) < 0) {
        return AkPacket();
    }

    // Fill output audio frame.
    AVFrame oFrame;
    memset(&oFrame, 0, sizeof(AVFrame));

    oFrame.format = oSampleFormat;
    oFrame.channels = oNChannels;
    oFrame.channel_layout = uint64_t(oSampleLayout);
    oFrame.sample_rate = oSampleRate;
    oFrame.nb_samples = int(swr_get_delay(this->m_resampleContext, oSampleRate))
                        + iFrame.nb_samples * oSampleRate / iSampleRate
                        + 3;
    oFrame.pts = oFrame.pkt_pts = iFrame.pts * oSampleRate / iSampleRate;

    // Calculate the size of the audio buffer.
    int frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                               oFrame.channels,
                                               oFrame.nb_samples,
                                               oSampleFormat,
                                               1);

    QByteArray oBuffer(frameSize, Qt::Uninitialized);

    if (avcodec_fill_audio_frame(&oFrame,
                                 oFrame.channels,
                                 oSampleFormat,
                                 reinterpret_cast<const uint8_t *>(oBuffer.constData()),
                                 oBuffer.size(),
                                 1) < 0) {
        return AkPacket();
    }

    // Convert to destination format.
    if (swr_convert_frame(this->m_resampleContext,
                          &oFrame,
                          &iFrame) < 0)
        return AkPacket();

    frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                           oFrame.channels,
                                           oFrame.nb_samples,
                                           oSampleFormat,
                                           1);
    oBuffer.resize(frameSize);

    AkAudioPacket oAudioPacket;
    oAudioPacket.caps() = oAudioCaps;
    oAudioPacket.caps().samples() = oFrame.nb_samples;
    oAudioPacket.buffer() = oBuffer;
    oAudioPacket.pts() = oFrame.pts;
    oAudioPacket.timeBase() = AkFrac(1, oAudioCaps.rate());
    oAudioPacket.index() = packet.index();
    oAudioPacket.id() = packet.id();

    return oAudioPacket.toPacket();
}
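// Not part of the original function: swr_convert_frame() can also allocate
// the destination buffers itself when the output AVFrame only carries the
// target format, layout and rate, which avoids the manual buffer-size and
// avcodec_fill_audio_frame() bookkeeping used above. A minimal sketch under
// that assumption, using the same pre-5.1 channel_layout API as the code
// above; the helper name and the FLT/stereo/48 kHz target are illustrative.

extern "C" {
#include <libavutil/channel_layout.h>
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
}

static AVFrame *resampleToFltStereo48k(SwrContext *&context, const AVFrame *iFrame)
{
    AVFrame *oFrame = av_frame_alloc();

    if (!oFrame)
        return nullptr;

    // Only the target parameters are set; swr_convert_frame() chooses
    // nb_samples and allocates the data planes with av_frame_get_buffer().
    oFrame->format = AV_SAMPLE_FMT_FLT;
    oFrame->channel_layout = AV_CH_LAYOUT_STEREO;
    oFrame->sample_rate = 48000;

    if (!context)
        context = swr_alloc();

    // An uninitialized context is configured from the two frames on the
    // first call; on failure the half-built output frame is released.
    if (swr_convert_frame(context, oFrame, iFrame) < 0) {
        av_frame_free(&oFrame);

        return nullptr;
    }

    return oFrame;
}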