/**
 * Encode one captured audio buffer and mux it into the output file.
 *
 * The buffer is assumed to hold 2048 interleaved stereo S16 samples
 * (hard-coded below); every 16-bit sample word is byte-swapped in place
 * before encoding — presumably the capture byte order differs from the
 * encoder's (TODO confirm against the capture source).
 *
 * @param buffer raw interleaved S16 audio, 2 channels x 2048 samples
 */
void video_recording_state_t::write_audio_frame(uint8 *buffer)
{
    AVCodecContext *c = audio_stream->codec;

    av_samples_fill_arrays(audio_frame->data, audio_frame->linesize, buffer,
                           2, 2048, AV_SAMPLE_FMT_S16, 0);

    /* Byte-swap all samples: 2 channels * 2048 samples = 4096 words. */
    uint16 *base = (uint16 *)audio_frame->data[0];
    for (int i = 0; i < 4096; ++i) {
        *base = bswap_16(*base);
        ++base;
    }

    AVPacket pkt;
    int got_packet = 0;
    av_init_packet(&pkt);
    pkt.data = NULL; /* encoder allocates the packet payload */
    pkt.size = 0;

    if (avcodec_encode_audio2(c, &pkt, audio_frame, &got_packet) < 0) {
        fprintf(stderr, "Error encoding an audio frame\n");
    }

    if (got_packet) {
        /* One frame per packet; advance the running pts afterwards. */
        pkt.pts = pkt.dts = audio_frame->pts++;
        pkt.stream_index = audio_stream->index;
        if (av_interleaved_write_frame(output_context, &pkt) < 0) {
            fprintf(stderr, "Error writing an audio frame\n");
        }
        /* BUG FIX: the original additionally called
         * av_interleaved_write_frame(output_context, NULL) here, which
         * flushes the muxer's interleaving queue after EVERY audio packet
         * and defeats audio/video interleaving. Per the FFmpeg API, the
         * NULL flush is meant for end-of-stream; av_write_trailer()
         * drains the queue when recording stops. */
    }
}
/**
 * (Re)size the decoder's flat sample buffer and split it into one
 * S32 plane per channel, stored in s->decoded.
 *
 * @param s FLAC decoder context
 * @return >= 0 on success, a negative AVERROR code on failure
 */
static int allocate_buffers(FLACContext *s)
{
    const int buf_size = av_samples_get_buffer_size(NULL, s->channels,
                                                    s->max_blocksize,
                                                    AV_SAMPLE_FMT_S32P, 0);
    if (buf_size < 0)
        return buf_size;

    /* av_fast_malloc only reallocates when the requested size grew */
    av_fast_malloc(&s->decoded_buffer, &s->decoded_buffer_size, buf_size);
    if (!s->decoded_buffer)
        return AVERROR(ENOMEM);

    /* point the per-channel plane pointers into the flat buffer */
    return av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
                                  s->decoded_buffer, s->channels,
                                  s->max_blocksize, AV_SAMPLE_FMT_S32P, 0);
}
/**
 * Wrap a flat interleaved audio buffer into per-plane pointers and feed
 * it to the audio source filter.
 *
 * @param ctx            the audio source filter context
 * @param buf            flat audio buffer
 * @param buf_size       size of buf in bytes
 * @param sample_rate    sample rate of the audio
 * @param sample_fmt     sample format (AVSampleFormat value)
 * @param channel_layout channel layout of the audio
 * @param planar         non-zero if the samples are planar
 * @param pts            presentation timestamp
 * @param flags          unused
 * @return >= 0 on success, a negative AVERROR code on failure
 */
int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
                              uint8_t *buf, int buf_size, int sample_rate,
                              int sample_fmt, int64_t channel_layout,
                              int planar, int64_t pts, int av_unused flags)
{
    uint8_t *data[8] = {0};
    int linesize[8];
    int ret;
    int nb_channels = av_get_channel_layout_nb_channels(channel_layout);
    int nb_samples;

    /* BUG FIX: an unknown/empty layout yields 0 channels; the original
     * divided by it unconditionally (division by zero). */
    if (nb_channels <= 0)
        return AVERROR(EINVAL);

    nb_samples = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);

    /* BUG FIX: the return value of av_samples_fill_arrays() was ignored,
     * so invalid parameters went undetected. */
    ret = av_samples_fill_arrays(data, linesize, buf, nb_channels,
                                 nb_samples, sample_fmt, 16);
    if (ret < 0)
        return ret;

    return av_asrc_buffer_add_samples(ctx, data, linesize, nb_samples,
                                      sample_rate, sample_fmt,
                                      channel_layout, planar, pts, flags);
}
/**
 * Allocate a zero-initialized sample buffer for nb_samples samples and
 * fill audio_data/linesize to describe it.
 *
 * @param audio_data  array to be filled with plane pointers
 * @param linesize    receives the aligned plane size (may be NULL)
 * @param nb_channels number of audio channels
 * @param nb_samples  number of samples per channel
 * @param sample_fmt  the sample format
 * @param align       buffer size alignment (0 = default)
 * @return 0 on success, a negative AVERROR code on failure
 */
int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
                     int nb_samples, enum AVSampleFormat sample_fmt, int align)
{
    int ret = av_samples_get_buffer_size(NULL, nb_channels, nb_samples,
                                         sample_fmt, align);
    if (ret < 0)
        return ret;

    uint8_t *buf = (uint8_t *)av_mallocz(ret);
    if (!buf)
        return AVERROR(ENOMEM);

    ret = av_samples_fill_arrays(audio_data, linesize, buf,
                                 nb_channels, nb_samples, sample_fmt, align);
    if (ret < 0) {
        /* setting up the plane pointers failed: release the buffer */
        av_free(buf);
        return ret;
    }

    return 0;
}
/** @internal @This is called by avcodec when allocating a new audio buffer
 * Used with audio decoders.
 * @param context current avcodec context
 * @param frame avframe handler entering avcodec black magic box
 */
static int upipe_avcdec_get_buffer_audio(struct AVCodecContext *context,
                                         AVFrame *frame)
{
    struct upipe *upipe = context->opaque;
    struct upipe_avcdec *upipe_avcdec = upipe_avcdec_from_upipe(upipe);
    struct ubuf *ubuf_samples;
    uint8_t *buf;
    int size;

    /* duplicate the uref currently being decoded and keep it with the
     * frame so it can be retrieved when the frame is output */
    frame->opaque = uref_dup(upipe_avcdec->uref);

    /* direct rendering - allocate ubuf for audio */
    if (upipe_avcdec->context->codec->capabilities & CODEC_CAP_DR1) {
        /* allocate a block ubuf sized for the full (aligned) sample
         * buffer of this frame */
        ubuf_samples = ubuf_block_alloc(upipe_avcdec->ubuf_mgr,
            av_samples_get_buffer_size(NULL, context->channels,
                                       frame->nb_samples,
                                       context->sample_fmt, 1));
        if (likely(ubuf_samples)) {
            /* map the ubuf for writing and attach it to the duplicated
             * uref; NOTE(review): the ubuf_block_write() return value is
             * not checked here — confirm it cannot fail for a freshly
             * allocated ubuf */
            ubuf_block_write(ubuf_samples, 0, &size, &buf);
            uref_attach_ubuf(frame->opaque, ubuf_samples);
            /* point the frame's planes into the mapped ubuf memory */
            av_samples_fill_arrays(frame->data, frame->linesize, buf,
                                   context->channels, frame->nb_samples,
                                   context->sample_fmt, 1);
            frame->extended_data = frame->data;
            /* tell avcodec the buffer is user-owned (not its own pool) */
            frame->type = FF_BUFFER_TYPE_USER;
            return 1; /* success */
        } else {
            upipe_dbg_va(upipe, "ubuf allocation failed, fallback");
        }
    }

    /* default : DR failed or not available */
    return avcodec_default_get_buffer(context, frame);
}
int COMXAudioCodecOMX::GetData(BYTE** dst, double &dts, double &pts) { if (!m_bGotFrame) return 0; int inLineSize, outLineSize; /* input audio is aligned */ int inputSize = av_samples_get_buffer_size(&inLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_pCodecContext->sample_fmt, 0); /* output audio will be packed */ int outputSize = av_samples_get_buffer_size(&outLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1); if (!m_bNoConcatenate && m_iBufferOutputUsed && (int)m_frameSize != outputSize) { LOG_TRACE_2 << "COMXAudioCodecOMX::GetData Unexpected change of size (" << m_frameSize <<" ->" << outputSize << ")"; m_bNoConcatenate = true; } // if this buffer won't fit then flush out what we have int desired_size = AUDIO_DECODE_OUTPUT_BUFFER * (m_pCodecContext->channels * GetBitsPerSample()) >> (rounded_up_channels_shift[m_pCodecContext->channels] + 4); if (m_iBufferOutputUsed && (m_iBufferOutputUsed + outputSize > desired_size || m_bNoConcatenate)) { int ret = m_iBufferOutputUsed; m_iBufferOutputUsed = 0; m_bNoConcatenate = false; dts = m_dts; pts = m_pts; *dst = m_pBufferOutput; return ret; } m_frameSize = outputSize; if (m_iBufferOutputAlloced < m_iBufferOutputUsed + outputSize) { m_pBufferOutput = (BYTE*)av_realloc(m_pBufferOutput, m_iBufferOutputUsed + outputSize + FF_INPUT_BUFFER_PADDING_SIZE); m_iBufferOutputAlloced = m_iBufferOutputUsed + outputSize; } /* need to convert format */ if(m_pCodecContext->sample_fmt != m_desiredSampleFormat) { if(m_pConvert && (m_pCodecContext->sample_fmt != m_iSampleFormat || m_channels != m_pCodecContext->channels)) { swr_free(&m_pConvert); m_channels = m_pCodecContext->channels; } if(!m_pConvert) { m_iSampleFormat = m_pCodecContext->sample_fmt; m_pConvert = swr_alloc_set_opts(NULL, av_get_default_channel_layout(m_pCodecContext->channels), m_desiredSampleFormat, m_pCodecContext->sample_rate, av_get_default_channel_layout(m_pCodecContext->channels), m_pCodecContext->sample_fmt, 
m_pCodecContext->sample_rate, 0, NULL); if(!m_pConvert || swr_init(m_pConvert) < 0) { LOG_TRACE_2 << "COMXAudioCodecOMX::Decode - Unable to initialise convert format " << m_pCodecContext->sample_fmt << " to " << m_desiredSampleFormat; return 0; } } /* use unaligned flag to keep output packed */ uint8_t *out_planes[m_pCodecContext->channels]; if(av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 || swr_convert(m_pConvert, out_planes, m_pFrame1->nb_samples, (const uint8_t **)m_pFrame1->data, m_pFrame1->nb_samples) < 0) { LOG_TRACE_2 << "COMXAudioCodecOMX::Decode - Unable to convert format " << (int)m_pCodecContext->sample_fmt << " to " << m_desiredSampleFormat; outputSize = 0; } } else { /* copy to a contiguous buffer */ uint8_t *out_planes[m_pCodecContext->channels]; if (av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 || av_samples_copy(out_planes, m_pFrame1->data, 0, 0, m_pFrame1->nb_samples, m_pCodecContext->channels, m_desiredSampleFormat) < 0 ) { outputSize = 0; } } m_bGotFrame = false; if (m_bFirstFrame) { char log_buf[512]; sprintf(log_buf, "COMXAudioCodecOMX::GetData size=%d/%d line=%d/%d buf=%p, desired=%d", inputSize, outputSize, inLineSize, outLineSize, m_pBufferOutput, desired_size); LOG_TRACE_2 << log_buf; m_bFirstFrame = false; } m_iBufferOutputUsed += outputSize; return 0; }
/**
 * Resample/convert an audio packet to the requested caps using swresample.
 *
 * @param packet input audio packet
 * @param oCaps  desired output caps (layout, format, rate, channels)
 * @return the converted packet, or an empty QbPacket on failure
 */
QbPacket ConvertAudio::convert(const QbAudioPacket &packet, const QbCaps &oCaps)
{
    QbAudioCaps oAudioCaps(oCaps);

    // Input format, taken from the incoming packet's caps.
    int64_t iSampleLayout = channelLayouts->value(packet.caps().layout(), 0);
    AVSampleFormat iSampleFormat = sampleFormats->value(packet.caps().format(), AV_SAMPLE_FMT_NONE);
    int iSampleRate = packet.caps().rate();
    int iNChannels = packet.caps().channels();
    int iNSamples = packet.caps().samples();

    // Output format, taken from the requested caps (with fallbacks).
    int64_t oSampleLayout = channelLayouts->value(oAudioCaps.layout(), AV_CH_LAYOUT_STEREO);
    AVSampleFormat oSampleFormat = sampleFormats->value(oAudioCaps.format(), AV_SAMPLE_FMT_FLT);
    int oSampleRate = oAudioCaps.rate();
    int oNChannels = oAudioCaps.channels();

    this->m_resampleContext = swr_alloc_set_opts(this->m_resampleContext,
                                                 oSampleLayout, oSampleFormat, oSampleRate,
                                                 iSampleLayout, iSampleFormat, iSampleRate,
                                                 0, NULL);
    if (!this->m_resampleContext)
        return QbPacket();

    if (!swr_is_initialized(this->m_resampleContext))
        if (swr_init(this->m_resampleContext) < 0)
            return QbPacket();

    // Create input audio frame.
    // BUG FIX: 'iFrame' was declared 'static', making this method
    // non-reentrant and unsafe to call from more than one thread. It was
    // memset on every call anyway, so a local frame is equivalent.
    AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));

    if (av_samples_fill_arrays(iFrame.data, iFrame.linesize,
                               (const uint8_t *) packet.buffer().data(),
                               iNChannels, iNSamples, iSampleFormat, 1) < 0)
        return QbPacket();

    iFrame.channels = iNChannels;
    iFrame.channel_layout = iSampleLayout;
    iFrame.format = iSampleFormat;
    iFrame.sample_rate = iSampleRate;
    iFrame.nb_samples = iNSamples;
    iFrame.pts = iFrame.pkt_pts = packet.pts();

    // Create output audio packet, with headroom for resampler delay.
    int oNSamples = swr_get_delay(this->m_resampleContext, oSampleRate)
                    + iFrame.nb_samples * (int64_t) oSampleRate / iSampleRate
                    + 3;

    int oLineSize;
    int oBufferSize = av_samples_get_buffer_size(&oLineSize, oNChannels,
                                                 oNSamples, oSampleFormat, 1);
    QByteArray oBuffer(oBufferSize, Qt::Uninitialized);

    int oNPlanes = av_sample_fmt_is_planar(oSampleFormat)? oNChannels: 1;
    QVector<uint8_t *> oData(oNPlanes);

    if (av_samples_fill_arrays(&oData.data()[0], &oLineSize,
                               (const uint8_t *) oBuffer.data(),
                               oNChannels, oNSamples, oSampleFormat, 1) < 0)
        return QbPacket();

    int64_t oPts = swr_next_pts(this->m_resampleContext, iFrame.pts);

    // convert to destination format
    int outputSamples = swr_convert(this->m_resampleContext,
                                    oData.data(), oNSamples,
                                    (const uint8_t **) iFrame.data,
                                    iFrame.nb_samples);
    if (outputSamples < 1)
        return QbPacket();

    // Shrink to the bytes actually produced.
    // NOTE(review): this linear scaling is only exact for packed formats;
    // for planar output the plane offsets assume oNSamples — confirm
    // downstream consumers only read packed data.
    oBufferSize = oBufferSize * outputSamples / oNSamples;

    QbBufferPtr buffer(new char[oBufferSize]);
    memcpy(buffer.data(), oBuffer.data(), oBufferSize);

    QbAudioPacket oAudioPacket;
    oAudioPacket.caps() = oAudioCaps;
    oAudioPacket.caps().samples() = outputSamples;
    oAudioPacket.buffer() = buffer;
    oAudioPacket.bufferSize() = oBufferSize;
    oAudioPacket.pts() = oPts;
    oAudioPacket.timeBase() = QbFrac(1, oAudioCaps.rate());
    oAudioPacket.index() = packet.index();
    oAudioPacket.id() = packet.id();

    return oAudioPacket.toPacket();
}
void ACapsConvertElement::iStream(const QbPacket &packet) { if (!packet.caps().isValid() || packet.caps().mimeType() != "audio/x-raw" || this->state() != ElementStatePlaying) return; // Input Format AVSampleFormat iSampleFormat = av_get_sample_fmt(packet.caps().property("format").toString().toStdString().c_str()); int iNChannels = packet.caps().property("channels").toInt(); int64_t iChannelLayout = av_get_channel_layout(packet.caps().property("layout").toString().toStdString().c_str()); int iNPlanes = av_sample_fmt_is_planar(iSampleFormat)? iNChannels: 1; int iSampleRate = packet.caps().property("rate").toInt(); int iNSamples = packet.caps().property("samples").toInt(); if (iNSamples < 1) iNSamples = 1024; bool sameMimeType = packet.caps().mimeType() == this->m_caps.mimeType(); // Output Format AVSampleFormat oSampleFormat = (sameMimeType && this->m_caps.dynamicPropertyNames().contains("format"))? av_get_sample_fmt(this->m_caps.property("format").toString().toStdString().c_str()): iSampleFormat; int oNChannels = (sameMimeType && this->m_caps.dynamicPropertyNames().contains("channels"))? this->m_caps.property("channels").toInt(): iNChannels; int64_t oChannelLayout = (sameMimeType && this->m_caps.dynamicPropertyNames().contains("layout"))? av_get_channel_layout(this->m_caps.property("layout").toString().toStdString().c_str()): iChannelLayout; int oSampleRate = (sameMimeType && this->m_caps.dynamicPropertyNames().contains("rate"))? 
this->m_caps.property("rate").toInt(): iSampleRate; QVector<uint8_t *> iData(iNPlanes); int iLineSize; if (av_samples_fill_arrays(&iData.data()[0], &iLineSize, (const uint8_t *) packet.buffer().data(), iNChannels, iNSamples, iSampleFormat, 1) < 0) return; QbCaps caps1(packet.caps()); QbCaps caps2(this->m_curInputCaps); caps1.setProperty("samples", QVariant()); caps2.setProperty("samples", QVariant()); if (caps1 != caps2) { // create resampler context this->m_resampleContext = SwrContextPtr(swr_alloc(), this->deleteSwrContext); if (!this->m_resampleContext) return; // set options av_opt_set_int(this->m_resampleContext.data(), "in_channel_layout", iChannelLayout, 0); av_opt_set_int(this->m_resampleContext.data(), "in_sample_rate", iSampleRate, 0); av_opt_set_sample_fmt(this->m_resampleContext.data(), "in_sample_fmt", iSampleFormat, 0); av_opt_set_int(this->m_resampleContext.data(), "out_channel_layout", oChannelLayout, 0); av_opt_set_int(this->m_resampleContext.data(), "out_sample_rate", oSampleRate, 0); av_opt_set_sample_fmt(this->m_resampleContext.data(), "out_sample_fmt", oSampleFormat, 0); // initialize the resampling context if (swr_init(this->m_resampleContext.data()) < 0) return; this->m_curInputCaps = packet.caps(); } // compute destination number of samples int oNSamples = av_rescale_rnd(swr_get_delay(this->m_resampleContext.data(), iSampleRate) + iNSamples, oSampleRate, iSampleRate, AV_ROUND_UP); // buffer is going to be directly written to a rawaudio file, no alignment int oNPlanes = av_sample_fmt_is_planar(oSampleFormat)? 
oNChannels: 1; QVector<uint8_t *> oData(oNPlanes); int oLineSize; int oBufferSize = av_samples_get_buffer_size(&oLineSize, oNChannels, oNSamples, oSampleFormat, 1); QSharedPointer<uchar> oBuffer(new uchar[oBufferSize]); if (!oBuffer) return; if (av_samples_fill_arrays(&oData.data()[0], &oLineSize, (const uint8_t *) oBuffer.data(), oNChannels, oNSamples, oSampleFormat, 1) < 0) return; // convert to destination format if (swr_convert(this->m_resampleContext.data(), oData.data(), oNSamples, (const uint8_t **) iData.data(), iNSamples) < 0) return; const char *format = av_get_sample_fmt_name(oSampleFormat); char layout[256]; av_get_channel_layout_string(layout, sizeof(layout), oNChannels, oChannelLayout); QString caps = QString("audio/x-raw," "format=%1," "channels=%2," "rate=%3," "layout=%4," "samples=%5").arg(format) .arg(oNChannels) .arg(oSampleRate) .arg(layout) .arg(oNSamples); QbPacket oPacket(caps, oBuffer, oBufferSize); oPacket.setPts(packet.pts()); oPacket.setDuration(packet.duration()); oPacket.setTimeBase(packet.timeBase()); oPacket.setIndex(packet.index()); emit this->oStream(oPacket); }
/**
 * Return the decoded frame's audio as packed samples in the desired
 * format. When the decoded frame is already contiguous and packed, the
 * frame's own buffer is returned without copying.
 *
 * @param dst receives a pointer to the output samples
 * @return size of the output in bytes, or 0 on failure / no frame
 */
int COMXAudioCodecOMX::GetData(BYTE** dst)
{
  if (!m_bGotFrame)
    return 0;

  int inLineSize, outLineSize;
  /* input audio is aligned */
  int inputSize = av_samples_get_buffer_size(&inLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_pCodecContext->sample_fmt, 0);
  /* output audio will be packed */
  int outputSize = av_samples_get_buffer_size(&outLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1);

  /* frame data is contiguous when it is single-plane, or when the planes
   * are laid out back-to-back with no padding */
  bool cont = !m_pFrame1->data[1] ||
              (m_pFrame1->data[1] == m_pFrame1->data[0] + inLineSize &&
               inLineSize == outLineSize &&
               inLineSize * m_pCodecContext->channels == inputSize);

  if (m_iBufferOutputAlloced < outputSize)
  {
    av_free(m_pBufferOutput);
    m_pBufferOutput = (BYTE*)av_malloc(outputSize + FF_INPUT_BUFFER_PADDING_SIZE);
    /* BUG FIX: av_malloc can fail; the NULL result was previously used
     * unchecked while m_iBufferOutputAlloced kept a stale size. */
    if (!m_pBufferOutput)
    {
      m_iBufferOutputAlloced = 0;
      return 0;
    }
    m_iBufferOutputAlloced = outputSize;
  }
  *dst = m_pBufferOutput;

  /* need to convert format */
  if(m_pCodecContext->sample_fmt != m_desiredSampleFormat)
  {
    /* drop the converter if the source format or channel count changed */
    if(m_pConvert && (m_pCodecContext->sample_fmt != m_iSampleFormat || m_channels != m_pCodecContext->channels))
    {
      swr_free(&m_pConvert);
      m_channels = m_pCodecContext->channels;
    }
    if(!m_pConvert)
    {
      m_iSampleFormat = m_pCodecContext->sample_fmt;
      m_pConvert = swr_alloc_set_opts(NULL,
                     av_get_default_channel_layout(m_pCodecContext->channels),
                     m_desiredSampleFormat, m_pCodecContext->sample_rate,
                     av_get_default_channel_layout(m_pCodecContext->channels),
                     m_pCodecContext->sample_fmt, m_pCodecContext->sample_rate,
                     0, NULL);
      if(!m_pConvert || swr_init(m_pConvert) < 0)
      {
        CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to initialise convert format %d to %d", m_pCodecContext->sample_fmt, m_desiredSampleFormat);
        return 0;
      }
    }

    /* use unaligned flag to keep output packed */
    uint8_t *out_planes[m_pCodecContext->channels];
    if(av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
       swr_convert(m_pConvert, out_planes, m_pFrame1->nb_samples, (const uint8_t **)m_pFrame1->data, m_pFrame1->nb_samples) < 0)
    {
      CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to convert format %d to %d", (int)m_pCodecContext->sample_fmt, m_desiredSampleFormat);
      outputSize = 0;
    }
  }
  else
  {
    /* if it is already contiguous, just return decoded frame */
    if (cont)
    {
      *dst = m_pFrame1->data[0];
    }
    else
    {
      /* copy to a contiguous buffer */
      uint8_t *out_planes[m_pCodecContext->channels];
      if (av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
          av_samples_copy(out_planes, m_pFrame1->data, 0, 0, m_pFrame1->nb_samples, m_pCodecContext->channels, m_desiredSampleFormat) < 0)
      {
        outputSize = 0;
      }
    }
  }

  if (m_bFirstFrame)
  {
    CLog::Log(LOGDEBUG, "COMXAudioCodecOMX::GetData size=%d/%d line=%d/%d cont=%d buf=%p", inputSize, outputSize, inLineSize, outLineSize, cont, *dst);
    m_bFirstFrame = false;
  }
  return outputSize;
}
int COMXAudioCodecOMX::GetData(BYTE** dst, double &dts, double &pts) { if (!m_bGotFrame) return 0; int inLineSize, outLineSize; /* input audio is aligned */ int inputSize = av_samples_get_buffer_size(&inLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_pCodecContext->sample_fmt, 0); /* output audio will be packed */ int outputSize = av_samples_get_buffer_size(&outLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1); if (m_iBufferOutputAlloced < m_iBufferOutputUsed + outputSize) { m_pBufferOutput = (BYTE*)av_realloc(m_pBufferOutput, m_iBufferOutputUsed + outputSize + FF_INPUT_BUFFER_PADDING_SIZE); m_iBufferOutputAlloced = m_iBufferOutputUsed + outputSize; } *dst = m_pBufferOutput; /* need to convert format */ if(m_pCodecContext->sample_fmt != m_desiredSampleFormat) { if(m_pConvert && (m_pCodecContext->sample_fmt != m_iSampleFormat || m_channels != m_pCodecContext->channels)) { swr_free(&m_pConvert); m_channels = m_pCodecContext->channels; } if(!m_pConvert) { m_iSampleFormat = m_pCodecContext->sample_fmt; m_pConvert = swr_alloc_set_opts(NULL, av_get_default_channel_layout(m_pCodecContext->channels), m_desiredSampleFormat, m_pCodecContext->sample_rate, av_get_default_channel_layout(m_pCodecContext->channels), m_pCodecContext->sample_fmt, m_pCodecContext->sample_rate, 0, NULL); if(!m_pConvert || swr_init(m_pConvert) < 0) { CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to initialise convert format %d to %d", m_pCodecContext->sample_fmt, m_desiredSampleFormat); return 0; } } /* use unaligned flag to keep output packed */ uint8_t *out_planes[m_pCodecContext->channels]; if(av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 || swr_convert(m_pConvert, out_planes, m_pFrame1->nb_samples, (const uint8_t **)m_pFrame1->data, m_pFrame1->nb_samples) < 0) { CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to 
convert format %d to %d", (int)m_pCodecContext->sample_fmt, m_desiredSampleFormat); outputSize = 0; } } else { /* copy to a contiguous buffer */ uint8_t *out_planes[m_pCodecContext->channels]; if (av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 || av_samples_copy(out_planes, m_pFrame1->data, 0, 0, m_pFrame1->nb_samples, m_pCodecContext->channels, m_desiredSampleFormat) < 0 ) { outputSize = 0; } } int desired_size = AUDIO_DECODE_OUTPUT_BUFFER * (m_pCodecContext->channels * GetBitsPerSample()) >> (rounded_up_channels_shift[m_pCodecContext->channels] + 4); if (m_bFirstFrame) { CLog::Log(LOGDEBUG, "COMXAudioCodecOMX::GetData size=%d/%d line=%d/%d buf=%p, desired=%d", inputSize, outputSize, inLineSize, outLineSize, *dst, desired_size); m_bFirstFrame = false; } m_iBufferOutputUsed += outputSize; if (!m_bNoConcatenate && m_pCodecContext->sample_fmt == AV_SAMPLE_FMT_FLTP && m_frameSize && (int)m_frameSize != outputSize) CLog::Log(LOGERROR, "COMXAudioCodecOMX::GetData Unexpected change of size (%d->%d)", m_frameSize, outputSize); m_frameSize = outputSize; // if next buffer submitted won't fit then flush it out if (m_iBufferOutputUsed + outputSize > desired_size || m_bNoConcatenate) { int ret = m_iBufferOutputUsed; m_bGotFrame = false; m_iBufferOutputUsed = 0; dts = m_dts; pts = m_pts; return ret; } return 0; }