Example #1
0
/* Filter entry point: resample the incoming mp_audio block with lavr.
 * Returns the converted audio (which aliases *data on return) or NULL on
 * conversion error. */
static struct mp_audio *play(struct af_instance *af, struct mp_audio *data)
{
    struct af_resample *s = af->priv;
    struct mp_audio *in   = data;
    struct mp_audio *out  = af->data;

    /* Upper bound on output size: samples already buffered inside lavr plus
     * the rescaled size of (resampler delay + new input), rounded up. */
    out->samples = avresample_available(s->avrctx) +
        av_rescale_rnd(get_delay(s) + in->samples,
                       s->ctx.out_rate, s->ctx.in_rate, AV_ROUND_UP);

    mp_audio_realloc_min(out, out->samples);

    /* Latency buffered in the resampler, in seconds of input. */
    af->delay = get_delay(s) / (double)s->ctx.in_rate;

#if !USE_SET_CHANNEL_MAPPING
    /* Without lavr channel maps, reorder the input channels by hand. */
    do_reorder(in, s->reorder_in);
#endif

    if (out->samples) {
        out->samples = avresample_convert(s->avrctx,
            (uint8_t **) out->planes, out->samples * out->sstride, out->samples,
            (uint8_t **) in->planes,  in->samples  * in->sstride,  in->samples);
        if (out->samples < 0)
            return NULL; // error
    }

    /* From here on *data describes the converted output buffer. */
    *data = *out;

#if USE_SET_CHANNEL_MAPPING
    if (needs_reorder(s->reorder_out, out->nch)) {
        if (af_fmt_is_planar(out->format)) {
            /* Planar: reordering is just permuting the plane pointers. */
            reorder_planes(data, s->reorder_out);
        } else {
            /* Packed: run a second pass-through lavr context (avrctx_out)
             * that only applies the output channel map, into scratch space. */
            int out_size = out->samples * out->sstride;
            if (talloc_get_size(s->reorder_buffer) < out_size)
                s->reorder_buffer = talloc_realloc_size(s, s->reorder_buffer, out_size);
            data->planes[0] = s->reorder_buffer;
            int out_samples = avresample_convert(s->avrctx_out,
                    (uint8_t **) data->planes, out_size, out->samples,
                    (uint8_t **) out->planes, out_size, out->samples);
            assert(out_samples == data->samples);
        }
    }
#else
    do_reorder(data, s->reorder_out);
#endif

    return data;
}
Example #2
0
//resample - see http://ffmpeg.org/pipermail/libav-user/2012-June/002164.html
/* Resample the decoded frame into freshly allocated planes and point the
 * frame's extended_data at them. Returns 0 on success, -1 on failure (all
 * planes allocated here are released on the error paths). */
static int resample_audio(AudioOutputFile *audio_output_file, AVCodecContext *audio_codec_ctx, int *num_planes_out)
{
	int i, linesize;
	uint8_t **output;
	/* Planar formats carry one plane per channel; packed formats one
	 * interleaved plane. */
	*num_planes_out = av_sample_fmt_is_planar(audio_output_file->codec->sample_fmts[0]) ? audio_output_file->codec_ctx->channels : 1;
	linesize = audio_output_file->codec_ctx->frame_size * av_get_bytes_per_sample(audio_output_file->codec->sample_fmts[0]) * audio_output_file->codec_ctx->channels / *num_planes_out;
	output = (uint8_t**)av_malloc(*num_planes_out*sizeof(uint8_t*));
	if (!output)
		return -1;
	for (i=0; i<*num_planes_out; i++) {
		output[i] = (uint8_t*)av_malloc(linesize);
		if (!output[i]) {
			/* Release planes allocated so far plus the pointer array. */
			while (--i >= 0)
				av_free(output[i]);
			av_free(output);
			return -1;
		}
	}

	if (avresample_convert(audio_output_file->aresampler, output, linesize, audio_output_file->aframe->nb_samples, audio_output_file->aframe->extended_data, audio_output_file->aframe->linesize[0], audio_output_file->aframe->nb_samples) < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not resample audio frame. Aborting.\n"));
		/* Bug fix: the output planes used to leak on this path. */
		for (i=0; i<*num_planes_out; i++)
			av_free(output[i]);
		av_free(output);
		return -1;
	}

	audio_output_file->aframe->extended_data = output;
	for (i=0; i<*num_planes_out; i++) {
		audio_output_file->aframe->linesize[i] = linesize;
	}
	/* Mirror the (possibly changed) frame parameters into the codec ctx. */
	audio_codec_ctx->channel_layout = audio_output_file->aframe->channel_layout;
	audio_codec_ctx->sample_fmt = audio_output_file->aframe->format;
	audio_codec_ctx->sample_rate = audio_output_file->aframe->sample_rate;
#ifndef GPAC_USE_LIBAV
	audio_codec_ctx->channels = audio_output_file->aframe->channels;
#endif

	return 0;
}
Example #3
0
/* Open the SILK resampler and prime it with the codec-mandated amount of
 * silence so the first real samples come out correctly aligned.
 * Returns 0 on success or a negative AVERROR. */
static int opus_init_resample(OpusStreamContext *s)
{
    /* Silence used to pre-fill the resampler; shared by both channels. */
    static const float delay[16] = { 0.0 };
    const uint8_t *delayptr[2] = { (uint8_t*)delay, (uint8_t*)delay };
    int ret;

    /* NOTE(review): exactly one of CONFIG_SWRESAMPLE/CONFIG_AVRESAMPLE is
     * presumably enabled at build time, otherwise ret is never set — confirm
     * against the build configuration. */
#if CONFIG_SWRESAMPLE
    av_opt_set_int(s->swr, "in_sample_rate", s->silk_samplerate, 0);
    ret = swr_init(s->swr);
#elif CONFIG_AVRESAMPLE
    av_opt_set_int(s->avr, "in_sample_rate", s->silk_samplerate, 0);
    ret = avresample_open(s->avr);
#endif
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Error opening the resampler.\n");
        return ret;
    }

    /* Feed the bandwidth-dependent number of silent samples; no output is
     * requested (NULL / 0 output arguments). */
#if CONFIG_SWRESAMPLE
    ret = swr_convert(s->swr,
                      NULL, 0,
                      delayptr, silk_resample_delay[s->packet.bandwidth]);
#elif CONFIG_AVRESAMPLE
    ret = avresample_convert(s->avr, NULL, 0, 0, delayptr, sizeof(delay),
                             silk_resample_delay[s->packet.bandwidth]);
#endif
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Error feeding initial silence to the resampler.\n");
        return ret;
    }

    return 0;
}
Example #4
0
/* Output request: keep pulling frames from the input until one is emitted;
 * on EOF, drain any samples still buffered in the resampler as a final
 * frame. Returns 0/AVERROR like the other lavfi request_frame callbacks. */
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    ASyncContext      *s = ctx->priv;
    int ret = 0;
    int nb_samples;

    s->got_output = 0;
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);

    /* flush the fifo */
    if (ret == AVERROR_EOF) {
        if (s->first_pts != AV_NOPTS_VALUE)
            handle_trimming(ctx);

        /* Assignment split out of the condition: the original
         * "if (nb_samples = get_delay(s))" reads as a typo'd comparison and
         * trips -Wparentheses. */
        nb_samples = get_delay(s);
        if (nb_samples) {
            AVFrame *buf = ff_get_audio_buffer(link, nb_samples);
            if (!buf)
                return AVERROR(ENOMEM);
            /* NULL input == drain lavr's internal FIFO. */
            ret = avresample_convert(s->avr, buf->extended_data,
                                     buf->linesize[0], nb_samples, NULL, 0, 0);
            if (ret <= 0) {
                av_frame_free(&buf);
                return (ret < 0) ? ret : AVERROR_EOF;
            }

            buf->pts = s->pts;
            return ff_filter_frame(link, buf);
        }
    }

    return ret;
}
Example #5
0
/* Output request callback: forward the request upstream; once the input
 * signals EOF, flush whatever the lavr resampler still holds as one last
 * buffer. Behavior matches the surrounding lavfi conventions. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext   *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = avfilter_request_frame(inlink);

    /* Nothing to flush unless upstream hit EOF and a resampler exists. */
    if (ret != AVERROR_EOF || !s->avr)
        return ret;

    /* Worst-case count of samples still buffered inside lavr, rescaled to
     * the output rate. */
    int pending = av_rescale_rnd(avresample_get_delay(s->avr),
                                 outlink->sample_rate,
                                 inlink->sample_rate,
                                 AV_ROUND_UP);
    if (!pending)
        return ret;

    AVFilterBufferRef *flushed =
        ff_get_audio_buffer(outlink, AV_PERM_WRITE, pending);
    if (!flushed)
        return AVERROR(ENOMEM);

    /* NULL input drains the internal FIFO. */
    ret = avresample_convert(s->avr, (void**)flushed->extended_data,
                             flushed->linesize[0], pending,
                             NULL, 0, 0);
    if (ret <= 0) {
        avfilter_unref_buffer(flushed);
        return (ret == 0) ? AVERROR_EOF : ret;
    }

    flushed->pts = s->next_pts;
    ff_filter_samples(outlink, flushed);
    return 0;
}
Example #6
0
// Remix packed input (in_layout, in_sf) into float output (out_layout).
// Returns E_ABORT when layouts already match (no work), E_INVALIDARG for
// unsupported sample formats, E_FAIL on context/convert failure, else S_OK.
HRESULT CMixer::Mixing(float* pOutput, WORD out_ch, DWORD out_layout, BYTE* pInput, int samples, WORD in_ch, DWORD in_layout, enum AVSampleFormat in_sf)
{
    if (in_layout == out_layout) {
        return E_ABORT;
    }
    // Only the packed (non-planar) formats U8..DBL are accepted here.
    if (in_sf < AV_SAMPLE_FMT_U8 || in_sf > AV_SAMPLE_FMT_DBL) {
        return E_INVALIDARG;
    }

    // Rebuild the avresample context whenever layouts or format change.
    if (!m_pAVRCxt || in_layout != last_in_layout || out_layout != last_out_layout || in_sf != last_in_sf) {
        Init(out_layout, in_layout, in_sf);
    }

    if (!m_pAVRCxt) {
        return E_FAIL;
    }

    // NOTE(review): the plane-size arguments are sample counts, not byte
    // sizes as avresample_convert documents — appears tolerated for packed
    // data, but verify against the libavresample version in use.
    int ret = avresample_convert(m_pAVRCxt, (void**)&pOutput, samples * out_ch, samples, (void**)&pInput, samples * in_ch, samples);
    if (ret < 0) {
        TRACE(_T("Mixer: avresample_convert failed\n"));
        return E_FAIL;
    }

    return S_OK;
}
Example #7
0
/* Push a frame's samples into lavr's internal FIFO (NULL output buffers
 * request no conversion output) and release the frame. Returns lavr's
 * return value (>= 0 on success). Takes ownership of buf. */
static int write_to_fifo(ASyncContext *s, AVFrame *buf)
{
    int ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
                                 buf->linesize[0], buf->nb_samples);
    av_frame_free(&buf);
    return ret;
}
Example #8
0
/* Fill out->data with up to out->sample_count resampled samples: first drain
 * anything already buffered inside the resampler, then convert the new input.
 * Advances *out->data past what was written and returns the number of bytes
 * produced, or 0 on error. */
int resample_audio( AVAudioResampleContext *avr, audio_samples_t *out, audio_samples_t *in )
{
    /* Don't call this function over different block aligns. */
    uint8_t *out_orig = *out->data;
    int out_channels = get_channel_layout_nb_channels( out->channel_layout );
    int block_align = av_get_bytes_per_sample( out->sample_format ) * out_channels;
    int request_sample_count = out->sample_count;
    if( avresample_available( avr ) > 0 )
    {
        /* Drain previously buffered output before converting new input. */
        int resampled_count = avresample_read( avr, out->data, request_sample_count );
        if( resampled_count < 0 )
            return 0;
        request_sample_count -= resampled_count;
        *out->data += resampled_count * block_align;
    }
    /* NULL input means "no new data" as far as lavr is concerned. */
    uint8_t **in_data = in->sample_count > 0 ? in->data : NULL;
    int in_channels  = get_channel_layout_nb_channels( in->channel_layout );
    int in_linesize  = get_linesize( in_channels, in->sample_count, in->sample_format );
    int out_linesize = get_linesize( out_channels, request_sample_count, out->sample_format );
    int resampled_count = avresample_convert( avr, out->data, out_linesize, request_sample_count,
                                                     in_data,  in_linesize,     in->sample_count );
    if( resampled_count < 0 )
        return 0;
    *out->data += resampled_count * block_align;
    return *out->data - out_orig;
}
Example #9
0
/**
 * Convert exactly frame_size input samples into the output sample format.
 * The sample rates of input and output are assumed equal: the sanity check
 * below rejects any samples the resampler keeps buffered.
 */
static int convert_samples(uint8_t **input_data,
                           uint8_t **converted_data, const int frame_size,
                           AVAudioResampleContext *resample_context)
{
    /* Run the conversion; plane sizes of 0 let lavr compute them. */
    int ret = avresample_convert(resample_context, converted_data, 0,
                                 frame_size, input_data, 0, frame_size);
    if (ret < 0) {
        fprintf(stderr, "Could not convert input samples (error '%s')\n",
                get_error_text(ret));
        return ret;
    }

    /* With equal rates nothing may remain buffered; leftovers indicate a
     * configuration mismatch and would need different handling. */
    if (avresample_available(resample_context)) {
        fprintf(stderr, "Converted samples left over\n");
        return AVERROR_EXIT;
    }

    return 0;
}
Example #10
0
/* Output request: pull input frames until one is produced; on upstream EOF,
 * drain the samples still buffered inside lavr as one final output frame. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext   *s = ctx->priv;
    int ret = 0;

    s->got_output = 0;
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);

    /* flush the lavr delay buffer */
    if (ret == AVERROR_EOF && s->avr) {
        AVFrame *frame;
        /* Ask lavr how many samples it can still deliver with no new input. */
        int nb_samples = avresample_get_out_samples(s->avr, 0);

        if (!nb_samples)
            return ret;

        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);

        /* NULL input drains the internal FIFO. */
        ret = avresample_convert(s->avr, frame->extended_data,
                                 frame->linesize[0], nb_samples,
                                 NULL, 0, 0);
        if (ret <= 0) {
            av_frame_free(&frame);
            return (ret == 0) ? AVERROR_EOF : ret;
        }

        frame->pts = s->next_pts;
        return ff_filter_frame(outlink, frame);
    }
    return ret;
}
Example #11
0
// Mix in_samples frames of pInput (m_in_layout / m_in_avsf) into the float
// buffer pOutput (m_out_layout). Non-float input is converted to float
// first. Returns the number of output samples, or 0 on failure.
int CMixer::Mixing(float* pOutput, int out_samples, BYTE* pInput, int in_samples)
{
    int in_ch  = av_popcount(m_in_layout);
    int out_ch = av_popcount(m_out_layout);

    float* buf  = NULL;
    if (m_in_avsf != m_in_avsf_used) { // need convert
        buf = new float[in_samples * in_ch];
        convert_to_float(m_in_avsf, (WORD)in_ch, in_samples, pInput, buf); // convert to float
        pInput = (BYTE*)buf;
    }

    // Plane sizes in bytes for the packed input and float output buffers.
    int in_plane_size  = in_samples * in_ch * av_get_bytes_per_sample(m_in_avsf_used);
    int out_plane_size = out_samples * out_ch * sizeof(float);

    // Bug fix: the output/input plane-size arguments were swapped — the
    // output buffer was described with in_plane_size and vice versa.
    out_samples = avresample_convert(m_pAVRCxt, (uint8_t**)&pOutput, out_plane_size, out_samples, (uint8_t**)&pInput, in_plane_size, in_samples);
    if (buf) {
        delete [] buf;
    }
    if (out_samples < 0) {
        TRACE(_T("Mixer: avresample_convert failed\n"));
        return 0;
    }

    return out_samples;
}
// Convert one audio frame between sample formats/rates using whichever
// resampler library this build was configured with. Throws on failure.
void AudioTransform::convert( const Frame& srcFrame, Frame& dstFrame )
{
	if( ! _isInit )
		_isInit = init( srcFrame, dstFrame );

	const unsigned char* srcData = srcFrame.getPtr();
	unsigned char* dstData = dstFrame.getPtr();

	int nbOutputSamplesPerChannel;
#ifdef AV_RESAMPLE_LIBRARY
	// NOTE(review): getSize() is passed where avresample_convert expects
	// sample counts — this assumes size and sample count coincide here;
	// confirm against Frame's contract.
	nbOutputSamplesPerChannel = avresample_convert( _audioConvertContext, (uint8_t**)&dstData, 0, dstFrame.getSize(), (uint8_t**)&srcData, 0, srcFrame.getSize() );
#else
	nbOutputSamplesPerChannel = swr_convert( _audioConvertContext, &dstData, dstFrame.getSize(), &srcData, srcFrame.getSize() );
#endif

	if( nbOutputSamplesPerChannel < 0 )
	{
		throw std::runtime_error( "unable to convert audio samples" );
	}

	// Grow the destination buffer if the conversion produced more samples
	// than it currently holds.
	int nbOutputSamples = nbOutputSamplesPerChannel * static_cast<const AudioFrame&>( dstFrame ).desc().getChannels();
	
	if( dstFrame.getSize() != nbOutputSamples )
		dstFrame.getBuffer().resize( nbOutputSamples, 0 );
	static_cast<AudioFrame&>( dstFrame ).setNbSamples( static_cast<const AudioFrame&>( srcFrame ).getNbSamples() );
}
Example #13
0
/* Filter entry point (older mp_audio API with a flat ->audio buffer and
 * ->len byte count): resample the incoming block with lavr and return it
 * via *data, which aliases af->data on return. */
static struct mp_audio *play(struct af_instance *af, struct mp_audio *data)
{
    struct af_resample *s = af->priv;
    struct mp_audio *in   = data;
    struct mp_audio *out  = af->data;


    int in_size     = data->len;
    int in_samples  = in_size / (data->bps * data->nch);
    /* Upper bound: samples buffered in lavr plus rescaled (delay + input). */
    int out_samples = avresample_available(s->avrctx) +
        av_rescale_rnd(get_delay(s) + in_samples,
                       s->ctx.out_rate, s->ctx.in_rate, AV_ROUND_UP);
    int out_size    = out->bps * out_samples * out->nch;

    if (talloc_get_size(out->audio) < out_size)
        out->audio = talloc_realloc_size(out, out->audio, out_size);

    /* Latency expressed in output bytes per the older filter API. */
    af->delay = out->bps * av_rescale_rnd(get_delay(s),
                                          s->ctx.out_rate, s->ctx.in_rate,
                                          AV_ROUND_UP);

#if !USE_SET_CHANNEL_MAPPING
    /* Without lavr channel maps, reorder input channels by hand. */
    reorder_channels(data->audio, s->reorder_in, data->bps, data->nch, in_samples);
#endif

    out_samples = avresample_convert(s->avrctx,
            (uint8_t **) &out->audio, out_size, out_samples,
            (uint8_t **) &in->audio,  in_size,  in_samples);

    /* From here on *data describes the converted output buffer. */
    *data = *out;

#if USE_SET_CHANNEL_MAPPING
    if (needs_reorder(s->reorder_out, out->nch)) {
        /* Second pass-through lavr context applies only the output channel
         * map, converting into a scratch buffer. */
        if (talloc_get_size(s->reorder_buffer) < out_size)
            s->reorder_buffer = talloc_realloc_size(s, s->reorder_buffer, out_size);
        data->audio = s->reorder_buffer;
        out_samples = avresample_convert(s->avrctx_out,
                (uint8_t **) &data->audio, out_size, out_samples,
                (uint8_t **) &out->audio, out_size, out_samples);
    }
#else
    reorder_channels(data->audio, s->reorder_out, out->bps, out->nch, out_samples);
#endif

    data->len = out->bps * out_samples * out->nch;
    return data;
}
Example #14
0
/* Accumulate one stereo sample pair; once the staging buffer is full,
 * resample it, and if enough resampled data is available, encode and mux
 * one audio frame. */
void _ffmpegPostAudioFrame(struct GBAAVStream* stream, int32_t left, int32_t right) {
	struct FFmpegEncoder* encoder = (struct FFmpegEncoder*) stream;
	if (!encoder->context || !encoder->audioCodec) {
		return;
	}

	/* Interleaved stereo staging: 2 samples (4 bytes) per frame. */
	encoder->audioBuffer[encoder->currentAudioSample * 2] = left;
	encoder->audioBuffer[encoder->currentAudioSample * 2 + 1] = right;

	++encoder->currentAudioFrame;
	++encoder->currentAudioSample;

	if ((encoder->currentAudioSample * 4) < encoder->audioBufferSize) {
		return;
	}
	encoder->currentAudioSample = 0;

	int channelSize = 2 * av_get_bytes_per_sample(encoder->audio->sample_fmt);
	/* Feed the staging buffer into lavr's FIFO (no output requested). */
	avresample_convert(encoder->resampleContext,
		0, 0, 0,
		(uint8_t**) &encoder->audioBuffer, 0, encoder->audioBufferSize / 4);
	if (avresample_available(encoder->resampleContext) < encoder->audioFrame->nb_samples) {
		return;
	}
#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_make_writable(encoder->audioFrame);
#endif
	avresample_read(encoder->resampleContext, encoder->audioFrame->data, encoder->postaudioBufferSize / channelSize);

	/* pts advances in units of the preferred sample rate, rescaled to the
	 * stream time base. */
	AVRational timeBase = { 1, PREFERRED_SAMPLE_RATE };
	encoder->audioFrame->pts = encoder->nextAudioPts;
	encoder->nextAudioPts = av_rescale_q(encoder->currentAudioFrame, timeBase, encoder->audioStream->time_base);

	AVPacket packet;
	av_init_packet(&packet);
	packet.data = 0;
	packet.size = 0;
	int gotData;
	avcodec_encode_audio2(encoder->audio, &packet, encoder->audioFrame, &gotData);
	if (gotData) {
		/* Optional bitstream filter (e.g. aac_adtstoasc) before muxing. */
		if (encoder->absf) {
			AVPacket tempPacket = packet;
			int success = av_bitstream_filter_filter(encoder->absf, encoder->audio, 0,
				&tempPacket.data, &tempPacket.size,
				packet.data, packet.size, 0);
			if (success > 0) {
#if LIBAVUTIL_VERSION_MAJOR >= 53
				tempPacket.buf = av_buffer_create(tempPacket.data, tempPacket.size, av_buffer_default_free, 0, 0);
#endif
				av_free_packet(&packet);
			}
			packet = tempPacket;
		}
		packet.stream_index = encoder->audioStream->index;
		av_interleaved_write_frame(encoder->context, &packet);
	}
	av_free_packet(&packet);
}
Example #15
0
static int resample_frame(struct AVAudioResampleContext *r,
                          struct mp_audio *out, struct mp_audio *in)
{
    return avresample_convert(r,
        out ? (uint8_t **)out->planes : NULL,
        out ? mp_audio_get_allocated_size(out) : 0,
        out ? out->samples : 0,
        in ? (uint8_t **)in->planes : NULL,
        in ? mp_audio_get_allocated_size(in) : 0,
        in ? in->samples : 0);
}
Example #16
0
/*
 * Resamples a frame if needed and pushes the audio data along
 * with a timestamp onto the framelist.
 */
static void
resampleframe(AVFrame *avframe)
{
	AVAudioResampleContext *avr;
	struct frame *frame;
	uint8_t *output;
	size_t outputlen;
	int linesize, nr, samples;

	if (avframe->format != out_sample_fmt ||
	    avframe->channel_layout != out_channel_layout ||
	    avframe->sample_rate != out_sample_rate) {
		/* NOTE(review): a fresh resampler is built and torn down per
		 * frame — correct but costly; caching it would avoid the churn. */
		avr = avresample_alloc_context();
		if (avr == NULL) {
			errx(1, "(%s:%d) avresample_alloc_context",
			    __FILE__, __LINE__);
		}
		av_opt_set_int(avr, "in_channel_layout",
		    avframe->channel_layout, 0);
		av_opt_set_int(avr, "out_channel_layout", out_channel_layout,
		    0);
		av_opt_set_int(avr, "in_sample_rate", avframe->sample_rate, 0);
		av_opt_set_int(avr, "out_sample_rate", out_sample_rate, 0);
		av_opt_set_int(avr, "in_sample_fmt", avframe->format, 0);
		av_opt_set_int(avr, "out_sample_fmt", out_sample_fmt, 0);
		nr = avresample_open(avr);
		if (nr < 0) {
			avresample_free(&avr);
			return;
		}
		outputlen = av_samples_get_buffer_size(&linesize, out_channels,
		    avframe->nb_samples, out_sample_fmt, 0);
		output = xmalloc(outputlen);
		/* NOTE(review): a negative return from avresample_convert is
		 * not checked; outputlen would underflow below — confirm
		 * upstream guarantees, or add a check. */
		samples = avresample_convert(avr, &output, linesize,
		    avframe->nb_samples, avframe->data, avframe->linesize[0],
		    avframe->nb_samples);
		outputlen = samples * out_channels *
		    av_get_bytes_per_sample(out_sample_fmt);
		avresample_close(avr);
		avresample_free(&avr);
	} else {
		/* Formats already match: copy the packed data verbatim. */
		outputlen = av_samples_get_buffer_size(NULL,
		    avfmt->streams[sti]->codec->channels,
		    avframe->nb_samples, avframe->format, 1);
		output = xmalloc(outputlen);
		memcpy(output, avframe->data[0], outputlen);
	}
	/* Hand ownership of the sample buffer to the frame list. */
	frame = xmalloc(sizeof(struct frame));
	frame->data = output;
	frame->size = outputlen;
	frame->pts = avframe->pkt_pts;
	flpush(frame);
}
Example #17
0
/* Per-buffer filter callback: resample buf through lavr (if configured),
 * derive the output timestamp from the input pts and the resampler delay,
 * and pass the result downstream. Pass-through when no resampler exists. */
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    AVFilterContext  *ctx = inlink->dst;
    ResampleContext    *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    if (s->avr) {
        AVFilterBufferRef *buf_out;
        int delay, nb_samples, ret;

        /* maximum possible samples lavr can output */
        delay      = avresample_get_delay(s->avr);
        nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
                                    outlink->sample_rate, inlink->sample_rate,
                                    AV_ROUND_UP);

        buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
        ret     = avresample_convert(s->avr, (void**)buf_out->extended_data,
                                     buf_out->linesize[0], nb_samples,
                                     (void**)buf->extended_data, buf->linesize[0],
                                     buf->audio->nb_samples);

        /* The output buffer was sized for the worst case, so lavr must not
         * have anything left buffered. */
        av_assert0(!avresample_available(s->avr));

        /* Establish the pts baseline from the first timestamped buffer. */
        if (s->next_pts == AV_NOPTS_VALUE) {
            if (buf->pts == AV_NOPTS_VALUE) {
                av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                       "assuming 0.\n");
                s->next_pts = 0;
            } else
                s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
                                           outlink->time_base);
        }

        if (ret > 0) {
            buf_out->audio->nb_samples = ret;
            if (buf->pts != AV_NOPTS_VALUE) {
                /* Compensate the output pts for the resampler's delay. */
                buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
                                            outlink->time_base) -
                               av_rescale(delay, outlink->sample_rate,
                                          inlink->sample_rate);
            } else
                buf_out->pts = s->next_pts;

            s->next_pts = buf_out->pts + buf_out->audio->nb_samples;

            ff_filter_samples(outlink, buf_out);
        }
        avfilter_unref_buffer(buf);
    } else
        ff_filter_samples(outlink, buf);
}
Example #18
0
/* Compatibility shim exposing the swresample entry point on top of
 * libavresample. Plane sizes of 0 let lavr derive them itself.
 * Returns the number of converted samples or a negative error. */
int swr_convert(
        AVAudioResampleContext *avr,
        uint8_t** output,
        int out_samples,
        const uint8_t** input,
        int in_samples)
{
    /* lavr takes non-const plane pointers; the data is not modified. */
    uint8_t **in_planes = (uint8_t **)input;
    return avresample_convert(avr, output, 0, out_samples,
                              in_planes, 0, in_samples);
}
Example #19
0
/* Drain exactly nb_samples from the resampler into s->out, mix in any
 * buffered CELT delay samples, apply a pending redundancy fade, and advance
 * the output cursors. Returns 0 or a negative AVERROR. */
static int opus_flush_resample(OpusStreamContext *s, int nb_samples)
{
    int celt_size = av_audio_fifo_size(s->celt_delay);
    int ret, i;

    /* NULL input == flush the resampler's internal buffer. */
#if CONFIG_SWRESAMPLE
    ret = swr_convert(s->swr,
                      (uint8_t**)s->out, nb_samples,
                      NULL, 0);
#elif CONFIG_AVRESAMPLE
    ret = avresample_convert(s->avr, (uint8_t**)s->out, s->out_size, nb_samples,
                             NULL, 0, 0);
#endif
    if (ret < 0)
        return ret;
    else if (ret != nb_samples) {
        av_log(s->avctx, AV_LOG_ERROR, "Wrong number of flushed samples: %d\n",
               ret);
        return AVERROR_BUG;
    }

    /* Mix the buffered CELT delay on top of the flushed SILK output. */
    if (celt_size) {
        if (celt_size != nb_samples) {
            av_log(s->avctx, AV_LOG_ERROR, "Wrong number of CELT delay samples.\n");
            return AVERROR_BUG;
        }
        av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, nb_samples);
        for (i = 0; i < s->output_channels; i++) {
            s->fdsp->vector_fmac_scalar(s->out[i],
                                        s->celt_output[i], 1.0,
                                        nb_samples);
        }
    }

    /* Cross-fade with redundancy data left over from a previous packet. */
    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
            opus_fade(s->out[i], s->out[i],
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
                      ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
        s->redundancy_idx = 0;
    }

    /* Advance the per-channel output pointers and remaining byte budget. */
    s->out[0]   += nb_samples;
    s->out[1]   += nb_samples;
    s->out_size -= nb_samples * sizeof(float);

    return 0;
}
Example #20
0
//resample - see http://ffmpeg.org/pipermail/libav-user/2012-June/002164.html
/* Allocate output planes and resample the captured frame into them.
 * Returns 0 on success, -1 on failure (planes allocated here are freed on
 * the allocation error path). */
static int resample_audio(AudioInputFile *audio_input_file, AudioInputData *audio_input_data, AVCodecContext *audio_codec_ctx, uint8_t ***output, int *num_planes_out, int num_channels, enum AVSampleFormat sample_format)
{
	int i;
	*num_planes_out = av_sample_fmt_is_planar(DC_AUDIO_SAMPLE_FORMAT) ? DC_AUDIO_NUM_CHANNELS : 1;
	*output = (uint8_t**)av_malloc(*num_planes_out*sizeof(uint8_t*));
	if (!*output)
		return -1;
	for (i=0; i<*num_planes_out; i++) {
		/* Bug fix: was "*output[i]", which parses as "*(output[i])" and
		 * indexes the output pointer itself (out of bounds for i > 0);
		 * "(*output)[i]" addresses the i-th plane as intended. */
		(*output)[i] = (uint8_t*)av_malloc(DC_AUDIO_MAX_CHUNCK_SIZE); //FIXME: fix using size below av_samples_get_buffer_size()
		if (!(*output)[i]) {
			while (--i >= 0)
				av_free((*output)[i]);
			av_free(*output);
			*output = NULL;
			return -1;
		}
	}

	if (avresample_convert(audio_input_file->aresampler, *output, DC_AUDIO_MAX_CHUNCK_SIZE, audio_input_data->aframe->nb_samples, audio_input_data->aframe->extended_data, audio_input_data->aframe->linesize[0], audio_input_data->aframe->nb_samples) < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Could not resample audio frame. Aborting.\n"));
		return -1;
	}

	return 0;
}
Example #21
0
// Resample decoded PCM to stereo S16 at the output sample rate, lazily
// creating the resample context on first use. Returns a freshly allocated
// AudioBuffer with the converted frames.
AudioBufferPtr AudioDecoderThread::resampleAudio(char* pDecodedData, int framesDecoded,
        int currentSampleFormat)
{
    if (!m_pResampleContext) {
#ifdef LIBAVRESAMPLE_VERSION
        m_pResampleContext = avresample_alloc_context();
        av_opt_set_int(m_pResampleContext, "in_channel_layout",
                av_get_default_channel_layout(m_pStream->codec->channels), 0);
        av_opt_set_int(m_pResampleContext, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_rate", m_InputSampleRate, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_rate", m_AP.m_SampleRate, 0);
        av_opt_set_int(m_pResampleContext, "in_sample_fmt",
                (AVSampleFormat)currentSampleFormat, 0);
        av_opt_set_int(m_pResampleContext, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        int err = avresample_open(m_pResampleContext);
        AVG_ASSERT(err >= 0);
#else
        // Legacy pre-lavr API path.
        m_pResampleContext = av_audio_resample_init(m_AP.m_Channels, 
                m_pStream->codec->channels, m_AP.m_SampleRate, m_InputSampleRate,
                AV_SAMPLE_FMT_S16, (AVSampleFormat)currentSampleFormat, 16, 10, 0, 0.8);
#endif
        AVG_ASSERT(m_pResampleContext);
    }
#ifdef LIBAVRESAMPLE_VERSION
    uint8_t *pResampledData;
    // Worst-case output frame count: leftovers already buffered plus the
    // rescaled size of (resampler delay + new input), rounded up.
    int leftoverSamples = avresample_available(m_pResampleContext);
    int framesAvailable = leftoverSamples +
            av_rescale_rnd(avresample_get_delay(m_pResampleContext) +
                    framesDecoded, m_AP.m_SampleRate, m_InputSampleRate, AV_ROUND_UP);
    av_samples_alloc(&pResampledData, 0, 2, framesAvailable,
            AV_SAMPLE_FMT_S16, 0);
    int framesResampled = avresample_convert(m_pResampleContext, &pResampledData, 0, 
            framesAvailable, (uint8_t**)&pDecodedData, 0, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
    av_freep(&pResampledData);
#else
    short pResampledData[AVCODEC_MAX_AUDIO_FRAME_SIZE/2];
    int framesResampled = audio_resample(m_pResampleContext, pResampledData,
            (short*)pDecodedData, framesDecoded);
    AudioBufferPtr pBuffer(new AudioBuffer(framesResampled, m_AP));
    memcpy(pBuffer->getData(), pResampledData, 
            framesResampled*m_AP.m_Channels*sizeof(short));
#endif
    return pBuffer;
}
// Resample `samples` input frames into `output`, dispatching to whichever
// resampler API this build provides (swresample, avresample, or the legacy
// audio_resample). Returns the number of output samples produced.
int
AudioResamplerFfmpeg::resample(boost::uint8_t** input, int plane_size,
    int samples, boost::uint8_t** output) {
#ifdef HAVE_SWRESAMPLE_H
    return swr_convert(_context,
        output, MAX_AUDIO_FRAME_SIZE,
        const_cast<const uint8_t**>(input), samples);
#elif HAVE_AVRESAMPLE_H
    return avresample_convert(_context,
        output, 0, MAX_AUDIO_FRAME_SIZE,
        input, plane_size, samples);
#else
    // Legacy API is packed S16 only and ignores plane_size.
    UNUSED( plane_size );
    return audio_resample(_context, reinterpret_cast<short*>(*output),
        reinterpret_cast<short*>(*input), samples); 
#endif
}
Example #23
0
// Convert the just-decoded frame into the cache's sample layout and insert
// it at pos. With avresample the conversion is a single call; otherwise the
// planar data is interleaved manually, sample by sample.
void FFMS_AudioSource::ResampleAndCache(CacheIterator pos) {
	AudioBlock& block = *Cache.insert(pos, AudioBlock(CurrentSample, DecodeFrame->nb_samples));
	block.Data.reserve(DecodeFrame->nb_samples * BytesPerSample);

#ifdef WITH_AVRESAMPLE
	block.Data.resize(block.Data.capacity());

	// Single packed output plane pointing into the cache block.
	uint8_t *OutPlanes[1] = { static_cast<uint8_t *>(&block.Data[0]) };
	avresample_convert(ResampleContext,
		OutPlanes, block.Data.size(), DecodeFrame->nb_samples,
		DecodeFrame->extended_data, DecodeFrame->nb_samples * av_get_bytes_per_sample(CodecContext->sample_fmt), DecodeFrame->nb_samples);
#else
	// Manual interleave: append each channel's bytes for every sample.
	int width = av_get_bytes_per_sample(CodecContext->sample_fmt);
	uint8_t **Data = DecodeFrame->extended_data;

	for (int s = 0; s < DecodeFrame->nb_samples; ++s) {
		for (int c = 0; c < CodecContext->channels; ++c)
			block.Data.insert(block.Data.end(), &Data[c][s * width], &Data[c][(s + 1) * width]);
	}
#endif
}
// Pump compressed input through the decoder until one frame is produced,
// feeding each decoded frame into the resampler's internal FIFO (NULL
// output planes == buffer only). Throws an OSStatus from feedInput() or a
// std::runtime_error on decode/resample failure.
void AudioConverter::feedDecoder(AudioConverterComplexInputDataProc dataProc, void* opaque, AVFrame* srcaudio)
{
	// Bug fix: gotFrame was uninitialized; initialize so the loop condition
	// is well-defined on every exit path.
	int gotFrame = 0, err;

	do
	{
		// Read input
		if (!m_avpkt.size)
		{
			UInt32 numDataPackets = 0;
			// Renamed from "err" — it shadowed the outer int err (-Wshadow).
			OSStatus inputStatus = feedInput(dataProc, opaque, numDataPackets);

			if (inputStatus != noErr)
				throw inputStatus;

			if (!m_avpkt.size) // numDataPackets cannot be trusted
				break;
		}

		err = avcodec_decode_audio4(m_decoder, srcaudio, &gotFrame, &m_avpkt);
		if (err < 0)
			throw std::runtime_error("avcodec_decode_audio4() failed");

		// Advance past the consumed portion of the packet.
		m_avpkt.size -= err;
		m_avpkt.data += err;

		if (gotFrame)
		{
			if (!m_resampler)
				setupResampler(srcaudio);

			// Resample PCM into the resampler FIFO; no output is pulled here.
			err = avresample_convert(m_resampler, nullptr, 0, 0, srcaudio->data, 0, srcaudio->nb_samples);
			if (err < 0)
				throw std::runtime_error("avresample_convert() failed");
		}
	}
	while (!gotFrame);
}
Example #25
0
int CMixer::Mixing(float* pOutput, int out_samples, BYTE* pInput, int in_samples)
{
    int in_ch  = av_popcount(m_in_layout);
    int out_ch = av_popcount(m_out_layout);

    int in_plane_nb   = av_sample_fmt_is_planar(m_in_avsf) ? in_ch : 1;
    int in_plane_size = in_samples * (av_sample_fmt_is_planar(m_in_avsf) ? 1 : in_ch) * av_get_bytes_per_sample(m_in_avsf);
    static BYTE* ppInput[AVRESAMPLE_MAX_CHANNELS];
    for (int i = 0; i < in_plane_nb; i++) {
        ppInput[i] = pInput + i * in_plane_size;
    }

    int out_plane_size = out_samples * out_ch * sizeof(float);

    out_samples = avresample_convert(m_pAVRCxt, (uint8_t**)&pOutput, out_plane_size, out_samples, ppInput, in_plane_size, in_samples);
    if (out_samples < 0) {
        TRACE(_T("Mixer: avresample_convert failed\n"));
        return 0;
    }

    return out_samples;
}
Example #26
0
/* Compatibility shim: forward the swresample signature to avresample.
 * Plane sizes of 0 let lavr compute them from the context's layout. */
int swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
                const uint8_t **in , int in_count)
{
    /* lavr's prototype lacks const; the input data is not modified. */
    uint8_t **in_planes = (uint8_t **)in;
    return avresample_convert(s, out, 0, out_count, in_planes, 0, in_count);
}
/* Resample one decoded frame to stereo S16 @ 44100 Hz, growing the shared
 * is->pResampledOut buffer as needed. Returns the number of output bytes,
 * or -1 on failure / when no resampler support was compiled in. */
long audio_tutorial_resample(VideoState *is, struct AVFrame *inframe) {

#ifdef __RESAMPLER__

#ifdef __LIBAVRESAMPLE__

// There is pre 1.0 libavresample and then there is above..
#if LIBAVRESAMPLE_VERSION_MAJOR == 0
    void **resample_input_bytes = (void **)inframe->extended_data;
#else
    uint8_t **resample_input_bytes = (uint8_t **)inframe->extended_data;
#endif

#else
    uint8_t **resample_input_bytes = (uint8_t **)inframe->extended_data;
#endif


    int resample_nblen = 0;
    long resample_long_bytes = 0;

    /* (Re)allocate the output buffer when missing or too small. */
    if( is->pResampledOut == NULL || inframe->nb_samples > is->resample_size) {
        
#if __LIBAVRESAMPLE__
        is->resample_size = av_rescale_rnd(avresample_get_delay(is->pSwrCtx) +
                                           inframe->nb_samples,
                                           44100,
                                           44100,
                                           AV_ROUND_UP);
#else
        is->resample_size = av_rescale_rnd(swr_get_delay(is->pSwrCtx,
                                           44100) +
                                           inframe->nb_samples,
                                           44100,
                                           44100,
                                           AV_ROUND_UP);
#endif

        if(is->pResampledOut != NULL) {
            av_free(is->pResampledOut);
            is->pResampledOut = NULL;
        }

        av_samples_alloc(&is->pResampledOut, &is->resample_lines, 2, is->resample_size,
                         AV_SAMPLE_FMT_S16, 0);

    }


#ifdef __LIBAVRESAMPLE__

// OLD API (0.0.3) ... still NEW API (1.0.0 and above).. very frustrating..
// USED IN FFMPEG 1.0 (LibAV SOMETHING!). New in FFMPEG 1.1 and libav 9
#if LIBAVRESAMPLE_VERSION_INT <= 3
    // AVResample OLD
    resample_nblen = avresample_convert(is->pSwrCtx, (void **)&is->pResampledOut, 0,
                                        is->resample_size,
                                        (void **)resample_input_bytes, 0, inframe->nb_samples);
#else
    //AVResample NEW
    resample_nblen = avresample_convert(is->pSwrCtx, (uint8_t **)&is->pResampledOut,
                                        0, is->resample_size,
                                        (uint8_t **)resample_input_bytes, 0, inframe->nb_samples);
#endif

#else
    // SWResample
    resample_nblen = swr_convert(is->pSwrCtx, (uint8_t **)&is->pResampledOut,
                                 is->resample_size,
                                 (const uint8_t **)resample_input_bytes, inframe->nb_samples);
#endif

    /* NOTE(review): computed before the error check below, so a negative
     * sample count would feed av_samples_get_buffer_size — harmless only
     * because the error path returns -1; confirm. */
    resample_long_bytes = av_samples_get_buffer_size(NULL, 2, resample_nblen,
                         AV_SAMPLE_FMT_S16, 1);

    if (resample_nblen < 0) {
        fprintf(stderr, "reSample to another sample format failed!\n");
        return -1;
    }

    return resample_long_bytes;

#else
    return -1;
#endif
}
Example #28
0
/**
 * Decode one audio media buffer and feed the decoded samples into the
 * libavresample context (or sleep for the frame's duration when no
 * resampler is configured).
 *
 * Handles, in order: raw PCM buffers (mb->mb_cw == NULL), codec
 * switches, SPDIF passthrough muxing, coded delivery, lazy decoder
 * open, PTS bookkeeping, and resampler (re)configuration whenever the
 * input sample format/rate/layout changes.
 */
static void
audio_process_audio(audio_decoder_t *ad, media_buf_t *mb)
{
  const audio_class_t *ac = ad->ad_ac;
  AVFrame *frame = ad->ad_frame;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  int r;
  int got_frame;

  /* Ignore buffers flagged for skipping or belonging to another stream. */
  if(mb->mb_skip || mb->mb_stream != mq->mq_stream)
    return;

  while(mb->mb_size) {

    if(mb->mb_cw == NULL) {
      /* Raw PCM: synthesize a frame directly from the buffer. */
      frame->sample_rate = mb->mb_rate;
      frame->format = AV_SAMPLE_FMT_S16;
      switch(mb->mb_channels) {
      case 1:
        frame->channel_layout = AV_CH_LAYOUT_MONO;
        frame->nb_samples = mb->mb_size / 2;   /* 2 bytes per S16 sample */
        break;
      case 2:
        frame->channel_layout = AV_CH_LAYOUT_STEREO;
        frame->nb_samples = mb->mb_size / 4;   /* 2 channels x 2 bytes */
        break;
      default:
        abort();
      }
      frame->data[0] = mb->mb_data;
      frame->linesize[0] = 0;
      r = mb->mb_size;
      got_frame = 1;

    } else {

      media_codec_t *mc = mb->mb_cw;

      AVCodecContext *ctx = mc->ctx;

      if(mc->codec_id != ad->ad_in_codec_id) {
        /* Codec switch: reset input state and re-select output mode. */
        AVCodec *codec = avcodec_find_decoder(mc->codec_id);
        TRACE(TRACE_DEBUG, "audio", "Codec changed to %s (0x%x)",
              codec ? codec->name : "???", mc->codec_id);
        ad->ad_in_codec_id = mc->codec_id;
        ad->ad_in_sample_rate = 0;

        audio_cleanup_spdif_muxer(ad);

        ad->ad_mode = ac->ac_get_mode != NULL ?
          ac->ac_get_mode(ad, mc->codec_id,
                          ctx ? ctx->extradata : NULL,
                          ctx ? ctx->extradata_size : 0) : AUDIO_MODE_PCM;

        if(ad->ad_mode == AUDIO_MODE_SPDIF) {
          audio_setup_spdif_muxer(ad, codec, mq);
        } else if(ad->ad_mode == AUDIO_MODE_CODED) {

          hts_mutex_lock(&mp->mp_mutex);

          ac->ac_deliver_coded_locked(ad, mb->mb_data, mb->mb_size,
                                      mb->mb_pts, mb->mb_epoch);
          hts_mutex_unlock(&mp->mp_mutex);
          return;
        }
      }

      if(ad->ad_spdif_muxer != NULL) {
        /* SPDIF passthrough: mux the compressed packet unmodified. */
        mb->mb_pkt.stream_index = 0;
        ad->ad_pts = mb->mb_pts;
        ad->ad_epoch = mb->mb_epoch;

        mb->mb_pts = AV_NOPTS_VALUE;
        mb->mb_dts = AV_NOPTS_VALUE;
        av_write_frame(ad->ad_spdif_muxer, &mb->mb_pkt);
        avio_flush(ad->ad_spdif_muxer->pb);
        return;
      }


      if(ad->ad_mode == AUDIO_MODE_CODED) {
        ad->ad_pts = mb->mb_pts;
        ad->ad_epoch = mb->mb_epoch;
      }


      if(ctx == NULL) {
        /* Lazily open the decoder on first use. */
        AVCodec *codec = avcodec_find_decoder(mc->codec_id);
        assert(codec != NULL); // Checked in libav.c

        ctx = mc->ctx = avcodec_alloc_context3(codec);

        if(ad->ad_stereo_downmix)
          ctx->request_channel_layout = AV_CH_LAYOUT_STEREO;

        if(avcodec_open2(mc->ctx, codec, NULL) < 0) {
          av_freep(&mc->ctx);
          return;
        }
      }

      r = avcodec_decode_audio4(ctx, frame, &got_frame, &mb->mb_pkt);
      if(r < 0)
        return;

      /* Fill in a missing sample rate from the codec, then the container. */
      if(frame->sample_rate == 0) {
        frame->sample_rate = ctx->sample_rate;

        if(frame->sample_rate == 0 && mb->mb_cw->fmt_ctx)
          frame->sample_rate = mb->mb_cw->fmt_ctx->sample_rate;

        if(frame->sample_rate == 0) {

          if(!ad->ad_sample_rate_fail) {
            ad->ad_sample_rate_fail = 1;   /* log the failure only once */
            TRACE(TRACE_ERROR, "Audio",
                  "Unable to determine sample rate");
          }
          return;
        }
      }

      if(frame->channel_layout == 0) {
        frame->channel_layout = av_get_default_channel_layout(ctx->channels);
        if(frame->channel_layout == 0) {

          if(!ad->ad_channel_layout_fail) {
            ad->ad_channel_layout_fail = 1;
            /* BUGFIX: the "%d" conversion previously had no matching
             * argument, which is undefined behavior in printf-style
             * varargs; pass the channel count. */
            TRACE(TRACE_ERROR, "Audio",
                  "Unable to map %d channels to channel layout",
                  ctx->channels);
          }
          return;
        }
      }

      if(mp->mp_stats)
        mp_set_mq_meta(mq, ctx->codec, ctx);

    }

    if(mb->mb_pts != PTS_UNSET) {

      int od = 0, id = 0;

      /* Compensate the PTS for samples still buffered in the resampler
       * (output side) and its internal delay (input side), in usec. */
      if(ad->ad_avr != NULL) {
        od = avresample_available(ad->ad_avr) *
          1000000LL / ad->ad_out_sample_rate;
        id = avresample_get_delay(ad->ad_avr) *
          1000000LL / frame->sample_rate;
      }
      ad->ad_pts = mb->mb_pts - od - id;
      ad->ad_epoch = mb->mb_epoch;

      if(mb->mb_drive_clock)
        mp_set_current_time(mp, mb->mb_pts - ad->ad_delay,
                            mb->mb_epoch, mb->mb_delta);
      mb->mb_pts = PTS_UNSET; // No longer valid
    }


    mb->mb_data += r;
    mb->mb_size -= r;

    if(got_frame) {

      /* Reconfigure the resampler whenever the input format changes. */
      if(frame->sample_rate    != ad->ad_in_sample_rate ||
         frame->format         != ad->ad_in_sample_format ||
         frame->channel_layout != ad->ad_in_channel_layout ||
         ad->ad_want_reconfig) {

        ad->ad_want_reconfig = 0;
        ad->ad_in_sample_rate    = frame->sample_rate;
        ad->ad_in_sample_format  = frame->format;
        ad->ad_in_channel_layout = frame->channel_layout;

        ac->ac_reconfig(ad);

        if(ad->ad_avr == NULL)
          ad->ad_avr = avresample_alloc_context();
        else
          avresample_close(ad->ad_avr);

        av_opt_set_int(ad->ad_avr, "in_sample_fmt",
                       ad->ad_in_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "in_sample_rate",
                       ad->ad_in_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "in_channel_layout",
                       ad->ad_in_channel_layout, 0);

        av_opt_set_int(ad->ad_avr, "out_sample_fmt",
                       ad->ad_out_sample_format, 0);
        av_opt_set_int(ad->ad_avr, "out_sample_rate",
                       ad->ad_out_sample_rate, 0);
        av_opt_set_int(ad->ad_avr, "out_channel_layout",
                       ad->ad_out_channel_layout, 0);

        char buf1[128];
        char buf2[128];

        av_get_channel_layout_string(buf1, sizeof(buf1),
                                     -1, ad->ad_in_channel_layout);
        av_get_channel_layout_string(buf2, sizeof(buf2),
                                     -1, ad->ad_out_channel_layout);

        TRACE(TRACE_DEBUG, "Audio",
              "Converting from [%s %dHz %s] to [%s %dHz %s]",
              buf1, ad->ad_in_sample_rate,
              av_get_sample_fmt_name(ad->ad_in_sample_format),
              buf2, ad->ad_out_sample_rate,
              av_get_sample_fmt_name(ad->ad_out_sample_format));

        if(avresample_open(ad->ad_avr)) {
          TRACE(TRACE_ERROR, "Audio", "Unable to open resampler");
          avresample_free(&ad->ad_avr);
        }

        prop_set(mp->mp_prop_ctrl, "canAdjustVolume", PROP_SET_INT, 1);

        if(ac->ac_set_volume != NULL)
          ac->ac_set_volume(ad, ad->ad_vol_scale);

      }
      if(ad->ad_avr != NULL) {
        avresample_convert(ad->ad_avr, NULL, 0, 0,
                           frame->data, frame->linesize[0],
                           frame->nb_samples);
      } else {
        /* No resampler available: pace real time for the frame duration. */
        int delay = 1000000LL * frame->nb_samples / frame->sample_rate;
        usleep(delay);
      }
    }
  }
}
Example #29
0
//
// SFML SoundStream callback: fill 'data' with decoded (and, when
// DO_RESAMPLE is enabled, format-converted) audio samples.
// Returns true while samples were produced, false at end of stream or
// on a decode/converter setup failure.
//
// NOTE(review): the loop bound implies m_audio->buffer must hold about
// one second of samples plus one decoded frame — confirm the buffer's
// sizing at its allocation site.
//
bool FeMedia::onGetData( Chunk &data )
{
	// Write position into m_audio->buffer, counted in sf::Int16 samples.
	int offset=0;

	data.samples = NULL;
	data.sampleCount = 0;

	if ( (!m_audio) || end_of_file() )
		return false;

	// Accumulate samples until roughly one second's worth is buffered.
	while ( offset < m_audio->codec_ctx->sample_rate )
	{
		// Pull the next queued packet, reading from the container as
		// needed until one appears or the file is exhausted.
		AVPacket *packet = m_audio->pop_packet();
		while (( packet == NULL ) && ( !end_of_file() ))
		{
			read_packet();
			packet = m_audio->pop_packet();
		}

		// No more packets: report what was gathered so far (if anything).
		if ( packet == NULL )
		{
			m_audio->at_end=true;
			if ( offset > 0 )
				return true;
			return false;
		}

#if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT( 53, 25, 0 ))
		// Legacy path (pre-avcodec 53.25): decode straight into the
		// shared sample buffer under the buffer lock.
		{
			sf::Lock l( m_audio->buffer_mutex );

			int bsize = MAX_AUDIO_FRAME_SIZE;
			if ( avcodec_decode_audio3(
						m_audio->codec_ctx,
						(m_audio->buffer + offset),
						&bsize, packet) < 0 )
			{
				std::cerr << "Error decoding audio." << std::endl;
				FeBaseStream::free_packet( packet );
				return false;
			}
			else
			{
				// bsize is in bytes; advance in sample units.
				offset += bsize / sizeof( sf::Int16 );
				data.sampleCount += bsize / sizeof(sf::Int16);
				data.samples = m_audio->buffer;
			}
		}
#else
 #if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 55, 45, 0 ))
		AVFrame *frame = av_frame_alloc();
		m_audio->codec_ctx->refcounted_frames = 1;
 #else
		AVFrame *frame = avcodec_alloc_frame();
 #endif
		//
		// TODO: avcodec_decode_audio4() can return multiple frames per packet depending on the codec.
		// We don't deal with this appropriately...
		//
		int got_frame( 0 );
		int len = avcodec_decode_audio4( m_audio->codec_ctx, frame, &got_frame, packet );
		if ( len < 0 )
		{
#ifdef FE_DEBUG
			char buff[256];
			av_strerror( len, buff, 256 );
			std::cerr << "Error decoding audio: " << buff << std::endl;
#endif
		}

		if ( got_frame )
		{
			// Size in bytes of the decoded frame in its native format.
			int data_size = av_samples_get_buffer_size(
				NULL,
				m_audio->codec_ctx->channels,
				frame->nb_samples,
				m_audio->codec_ctx->sample_fmt, 1);

#ifdef DO_RESAMPLE
			if ( m_audio->codec_ctx->sample_fmt == AV_SAMPLE_FMT_S16 )
#endif
			{
				// Already interleaved S16 (or resampling is compiled
				// out): copy the raw frame data directly.
				sf::Lock l( m_audio->buffer_mutex );

				memcpy( (m_audio->buffer + offset), frame->data[0], data_size );
				offset += data_size / sizeof( sf::Int16 );
				data.sampleCount += data_size / sizeof(sf::Int16);
				data.samples = m_audio->buffer;
			}
#ifdef DO_RESAMPLE
			else
			{
				sf::Lock l( m_audio->buffer_mutex );

				// Lazily create and configure the converter on the
				// first non-S16 frame.  Sample rate is left unchanged;
				// only the sample format is converted to S16.
				if ( !m_audio->resample_ctx )
				{
					m_audio->resample_ctx = resample_alloc();
					if ( !m_audio->resample_ctx )
					{
						std::cerr << "Error allocating audio format converter." << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						return false;
					}

					int64_t channel_layout = frame->channel_layout;
					if ( !channel_layout )
					{
						channel_layout = av_get_default_channel_layout(
								m_audio->codec_ctx->channels );
					}

					av_opt_set_int( m_audio->resample_ctx, "in_channel_layout", channel_layout, 0 );
					av_opt_set_int( m_audio->resample_ctx, "in_sample_fmt", frame->format, 0 );
					av_opt_set_int( m_audio->resample_ctx, "in_sample_rate", frame->sample_rate, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_channel_layout", channel_layout, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_sample_rate", frame->sample_rate, 0 );

#ifdef FE_DEBUG
					std::cout << "Initializing resampler: in_sample_fmt="
						<< av_get_sample_fmt_name( (AVSampleFormat)frame->format )
						<< ", in_sample_rate=" << frame->sample_rate
						<< ", out_sample_fmt=" << av_get_sample_fmt_name( AV_SAMPLE_FMT_S16 )
						<< ", out_sample_rate=" << frame->sample_rate << std::endl;
#endif
					if ( resample_init( m_audio->resample_ctx ) < 0 )
					{
						std::cerr << "Error initializing audio format converter, input format="
							<< av_get_sample_fmt_name( (AVSampleFormat)frame->format )
							<< ", input sample rate=" << frame->sample_rate << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						resample_free( &m_audio->resample_ctx );
						m_audio->resample_ctx = NULL;
						return false;
					}
				}
				if ( m_audio->resample_ctx )
				{
					// NOTE(review): out_linesize is only consumed by the
					// avresample branch below; swr_convert ignores it.
					int out_linesize;
					av_samples_get_buffer_size(
						&out_linesize,
						m_audio->codec_ctx->channels,
						frame->nb_samples,
						AV_SAMPLE_FMT_S16, 0 );

					// Convert directly into the shared buffer at 'offset'.
					uint8_t *tmp_ptr = (uint8_t *)(m_audio->buffer + offset);

#ifdef USE_SWRESAMPLE
					int out_samples = swr_convert(
								m_audio->resample_ctx,
								&tmp_ptr,
								frame->nb_samples,
								(const uint8_t **)frame->data,
								frame->nb_samples );
#else // USE_AVRESAMPLE
					int out_samples = avresample_convert(
								m_audio->resample_ctx,
								&tmp_ptr,
								out_linesize,
								frame->nb_samples,
								frame->data,
								frame->linesize[0],
								frame->nb_samples );
#endif
					if ( out_samples < 0 )
					{
						std::cerr << "Error performing audio conversion." << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						break;
					}
					// out_samples is per channel; advance by total samples.
					offset += out_samples * m_audio->codec_ctx->channels;
					data.sampleCount += out_samples * m_audio->codec_ctx->channels;
					data.samples = m_audio->buffer;
				}
			}
#endif
		}
		FeBaseStream::free_frame( frame );

#endif

		FeBaseStream::free_packet( packet );
	}

	return true;
}
Example #30
0
/**
 * Filter entry point: push one input audio frame through the lavr
 * resampler (when configured) and forward the result downstream.
 * Without a resampler the frame is passed through with only its
 * format label rewritten.  Consumes 'in' in the resampling path.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext  *ctx = inlink->dst;
    ResampleContext    *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int err;

    if (!s->avr) {
        /* Pass-through: no conversion needed, just relabel the format. */
        in->format    = outlink->format;
        err           = ff_filter_frame(outlink, in);
        s->got_output = 1;
        return err;
    }

    /* Upper bound on how many samples lavr may emit for this input. */
    const int delay   = avresample_get_delay(s->avr);
    const int max_out = avresample_get_out_samples(s->avr, in->nb_samples);

    AVFrame *out = ff_get_audio_buffer(outlink, max_out);
    if (!out) {
        err = AVERROR(ENOMEM);
        goto done;
    }

    err = avresample_convert(s->avr, out->extended_data, out->linesize[0],
                             max_out, in->extended_data, in->linesize[0],
                             in->nb_samples);
    if (err <= 0) {
        av_frame_free(&out);
        if (err < 0)
            goto done;
    }

    av_assert0(!avresample_available(s->avr));

    /* Establish the output timeline on the very first frame. */
    if (s->next_pts == AV_NOPTS_VALUE) {
        if (in->pts == AV_NOPTS_VALUE) {
            av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                   "assuming 0.\n");
            s->next_pts = 0;
        } else
            s->next_pts = av_rescale_q(in->pts, inlink->time_base,
                                       outlink->time_base);
    }

    if (err > 0) {
        out->nb_samples = err;

        err = av_frame_copy_props(out, in);
        if (err < 0) {
            av_frame_free(&out);
            goto done;
        }

        out->sample_rate = outlink->sample_rate;
        /* Only convert in->pts if there is a discontinuous jump.
           This ensures that out->pts tracks the number of samples actually
           output by the resampler in the absence of such a jump.
           Otherwise, the rounding in av_rescale_q() and av_rescale()
           causes off-by-1 errors. */
        if (in->pts != AV_NOPTS_VALUE && in->pts != s->next_in_pts) {
            out->pts = av_rescale_q(in->pts, inlink->time_base,
                                        outlink->time_base) -
                           av_rescale(delay, outlink->sample_rate,
                                      inlink->sample_rate);
        } else
            out->pts = s->next_pts;

        s->next_pts    = out->pts + out->nb_samples;
        s->next_in_pts = in->pts + in->nb_samples;

        err = ff_filter_frame(outlink, out);
        s->got_output = 1;
    }

done:
    av_frame_free(&in);
    return err;
}