void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
{
    if (link->type == AVMEDIA_TYPE_VIDEO) {
        ff_tlog(ctx,
                "link[%p s:%dx%d fmt:%s %s->%s]%s",
                link, link->w, link->h,
                av_pix_fmt_descriptors[link->format].name,
                link->src ? link->src->filter->name : "",
                link->dst ? link->dst->filter->name : "",
                end ? "\n" : "");
    } else {
        char buf[128];
        av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);

        ff_tlog(ctx,
                "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
                link, (int)link->sample_rate, buf,
                av_get_sample_fmt_name(link->format),
                link->src ? link->src->filter->name : "",
                link->dst ? link->dst->filter->name : "",
                end ? "\n" : "");
    }
}
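
The example above prints link parameters with ff_tlog(), an internal libavfilter helper. Below is a minimal, self-contained sketch of the two public helpers its audio branch leans on, av_get_sample_fmt_name() and av_get_channel_layout_string(), assuming the pre-5.0 channel-layout API (the function and variable names here are illustrative, not part of the example):

#include <stdio.h>
#include <stdint.h>
#include <libavutil/samplefmt.h>
#include <libavutil/channel_layout.h>

static void print_audio_params(enum AVSampleFormat fmt, uint64_t layout, int rate)
{
    char layout_str[128];

    /* passing -1 lets libavutil derive the channel count from the layout mask */
    av_get_channel_layout_string(layout_str, sizeof(layout_str), -1, layout);

    printf("r:%d cl:%s fmt:%s\n", rate, layout_str, av_get_sample_fmt_name(fmt));
}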
static int setup_mp3_audio_codec(AVFormatContext* out_fmt_ctx)
{
    AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MP3);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    AVStream *out_stream = avformat_new_stream(out_fmt_ctx, codec);
    if (!out_stream) {
        fprintf(stderr, "Could not allocate output stream\n");
        exit(1);
    }
    AVCodecContext *codecCtx = out_stream->codec;
    /* put sample parameters */
    codecCtx->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    codecCtx->sample_fmt = AV_SAMPLE_FMT_S16P;
    if (!check_sample_fmt(codec, codecCtx->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
            av_get_sample_fmt_name(codecCtx->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    codecCtx->sample_rate = 44100;
    codecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
    codecCtx->channels = av_get_channel_layout_nb_channels(codecCtx->channel_layout);

    /* open it */
    if (avcodec_open2(codecCtx, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    out_stream->codec->codec_tag = 0;
    if (out_fmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return 0;
}
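
The snippet above calls check_sample_fmt(), which is not shown here. A minimal sketch of that helper, following the usual pattern from FFmpeg's encoding examples and assuming the old AVCodec.sample_fmts array (terminated by AV_SAMPLE_FMT_NONE):

static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;

    /* walk the encoder's list of supported formats until the terminator */
    while (p && *p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}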
Example 3
void ff_audio_mix_set_func(AudioMix *am, enum AVSampleFormat fmt,
                           enum AVMixCoeffType coeff_type, int in_channels,
                           int out_channels, int ptr_align, int samples_align,
                           const char *descr, void *mix_func)
{
    if (fmt == am->fmt && coeff_type == am->coeff_type &&
        ( in_channels ==  am->in_matrix_channels ||  in_channels == 0) &&
        (out_channels == am->out_matrix_channels || out_channels == 0)) {
        char chan_str[16];
        am->mix           = mix_func;
        am->func_descr    = descr;
        am->ptr_align     = ptr_align;
        am->samples_align = samples_align;
        if (ptr_align == 1 && samples_align == 1) {
            am->mix_generic        = mix_func;
            am->func_descr_generic = descr;
        } else {
            am->has_optimized_func = 1;
        }
        if (in_channels) {
            if (out_channels)
                snprintf(chan_str, sizeof(chan_str), "[%d to %d] ",
                         in_channels, out_channels);
            else
                snprintf(chan_str, sizeof(chan_str), "[%d to any] ",
                         in_channels);
        } else if (out_channels) {
                snprintf(chan_str, sizeof(chan_str), "[any to %d] ",
                         out_channels);
        } else {
            snprintf(chan_str, sizeof(chan_str), "[any to any] ");
        }
        av_log(am->avr, AV_LOG_DEBUG, "audio_mix: found function: [fmt=%s] "
               "[c=%s] %s(%s)\n", av_get_sample_fmt_name(fmt),
               coeff_type_names[coeff_type], chan_str, descr);
    }
}
Example 4
DitherContext *ff_dither_alloc(AVAudioResampleContext *avr,
                               enum AVSampleFormat out_fmt,
                               enum AVSampleFormat in_fmt,
                               int channels, int sample_rate)
{
    AVLFG seed_gen;
    DitherContext *c;
    int ch;

    if (av_get_packed_sample_fmt(out_fmt) != AV_SAMPLE_FMT_S16 ||
            av_get_bytes_per_sample(in_fmt) <= 2) {
        av_log(avr, AV_LOG_ERROR, "dithering %s to %s is not supported\n",
               av_get_sample_fmt_name(in_fmt), av_get_sample_fmt_name(out_fmt));
        return NULL;
    }

    c = av_mallocz(sizeof(*c));
    if (!c)
        return NULL;

    if (avr->dither_method == AV_RESAMPLE_DITHER_TRIANGULAR_NS &&
            sample_rate != 48000 && sample_rate != 44100) {
        av_log(avr, AV_LOG_WARNING, "sample rate must be 48000 or 44100 Hz "
               "for triangular_ns dither. using triangular_hp instead.\n");
        avr->dither_method = AV_RESAMPLE_DITHER_TRIANGULAR_HP;
    }
    c->method = avr->dither_method;
    dither_init(&c->ddsp, c->method);

    if (c->method == AV_RESAMPLE_DITHER_TRIANGULAR_NS) {
        if (sample_rate == 48000) {
            c->ns_coef_b = ns_48_coef_b;
            c->ns_coef_a = ns_48_coef_a;
        } else {
            c->ns_coef_b = ns_44_coef_b;
            c->ns_coef_a = ns_44_coef_a;
        }
    }

    /* Either s16 or s16p output format is allowed, but s16p is used
       internally, so we need to use a temp buffer and interleave if the output
       format is s16 */
    if (out_fmt != AV_SAMPLE_FMT_S16P) {
        c->s16_data = ff_audio_data_alloc(channels, 1024, AV_SAMPLE_FMT_S16P,
                                          "dither s16 buffer");
        if (!c->s16_data)
            goto fail;

        c->ac_out = ff_audio_convert_alloc(avr, out_fmt, AV_SAMPLE_FMT_S16P,
                                           channels, sample_rate);
        if (!c->ac_out)
            goto fail;
    }

    if (in_fmt != AV_SAMPLE_FMT_FLTP) {
        c->flt_data = ff_audio_data_alloc(channels, 1024, AV_SAMPLE_FMT_FLTP,
                                          "dither flt buffer");
        if (!c->flt_data)
            goto fail;

        c->ac_in = ff_audio_convert_alloc(avr, AV_SAMPLE_FMT_FLTP, in_fmt,
                                          channels, sample_rate);
        if (!c->ac_in)
            goto fail;
    }

    c->state = av_mallocz(channels * sizeof(*c->state));
    if (!c->state)
        goto fail;
    c->channels = channels;

    /* calculate thresholds for turning off dithering during periods of
       silence to avoid replacing digital silence with quiet dither noise */
    c->mute_dither_threshold = lrintf(sample_rate * MUTE_THRESHOLD_SEC);
    c->mute_reset_threshold  = c->mute_dither_threshold * 4;

    /* initialize dither states */
    av_lfg_init(&seed_gen, 0xC0FFEE);
    for (ch = 0; ch < channels; ch++) {
        DitherState *state = &c->state[ch];
        state->mute = c->mute_reset_threshold + 1;
        state->seed = av_lfg_get(&seed_gen);
        generate_dither_noise(c, state, FFMAX(32768, sample_rate / 2));
    }

    return c;

fail:
    ff_dither_free(&c);
    return NULL;
}
Example 5
int decode_audio(AVCodecContext  *ctx, queue_t *qa)
{
    static struct SwrContext *swr_ctx;
    static int64_t src_layout;
    static int src_freq;
    static int src_channels;
    static enum AVSampleFormat src_fmt = AV_SAMPLE_FMT_NONE;
    static AVFrame *aFrame;

    AVPacket   pkt;
    AVPacket    pkt_tmp;
    int64_t dec_channel_layout;
    int len, len2;
    int got_frame;
    int data_size;


    if( astream.count > 192000*2)
        return -1;

    if( get_packet(qa, &pkt) == 0 )
        return 0;

 //          __asm__("int3");

    if (!aFrame)
    {
        if (!(aFrame = avcodec_alloc_frame()))
            return -1;
    } else
        avcodec_get_frame_defaults(aFrame);

    pkt_tmp = pkt;

    while(pkt_tmp.size > 0)
    {
        data_size = 192000;

//        len = avcodec_decode_audio3(ctx,(int16_t*)decoder_buffer,
//                                   &data_size, &pkt_tmp);
        got_frame = 0;
        len = avcodec_decode_audio4(ctx, aFrame, &got_frame, &pkt_tmp);

        if(len >= 0 && got_frame)
        {
            char *samples;
            int ch, plane_size;
            int planar    = av_sample_fmt_is_planar(ctx->sample_fmt);
            int data_size = av_samples_get_buffer_size(&plane_size, ctx->channels,
                                                   aFrame->nb_samples,
                                                   ctx->sample_fmt, 1);

//            if(audio_base == -1.0)
//            {
//                if (pkt.pts != AV_NOPTS_VALUE)
//                    audio_base = get_audio_base() * pkt.pts;
//                printf("audio base %f\n", audio_base);
//            };

            pkt_tmp.data += len;
            pkt_tmp.size -= len;

            dec_channel_layout =
                (aFrame->channel_layout && aFrame->channels == av_get_channel_layout_nb_channels(aFrame->channel_layout)) ?
                aFrame->channel_layout : av_get_default_channel_layout(aFrame->channels);

            if (aFrame->format          != src_fmt     ||
                dec_channel_layout      != src_layout  ||
                aFrame->sample_rate     != src_freq    ||
                !swr_ctx)
            {
                swr_free(&swr_ctx);
                swr_ctx = swr_alloc_set_opts(NULL, AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,
                                             aFrame->sample_rate, dec_channel_layout,aFrame->format,
                                             aFrame->sample_rate, 0, NULL);
                if (!swr_ctx || swr_init(swr_ctx) < 0)
                {
                    printf("Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                        aFrame->sample_rate,   av_get_sample_fmt_name(aFrame->format), (int)aFrame->channels,
                        aFrame->sample_rate, av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 2);
                    break;
                }

                src_layout   = dec_channel_layout;
                src_channels = aFrame->channels;
                src_freq     = aFrame->sample_rate;
                src_fmt      = aFrame->format;
            };

            if (swr_ctx)
            {
                const uint8_t **in = (const uint8_t **)aFrame->extended_data;
                uint8_t *out[] = {decoder_buffer};
                int out_count = 192000 * 3 / 2 / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
                len2 = swr_convert(swr_ctx, out, out_count, in, aFrame->nb_samples);
                if (len2 < 0) {
                    printf("swr_convert() failed\n");
                    break;
                }
                if (len2 == out_count) {
                    printf("warning: audio buffer is probably too small\n");
                    swr_init(swr_ctx);
                }
                data_size = len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);

                mutex_lock(&astream.lock);

                samples = astream.buffer+astream.count;

                memcpy(samples, decoder_buffer, data_size);
/*
            memcpy(samples, aFrame->extended_data[0], plane_size);

            if (planar && ctx->channels > 1)
            {
                uint8_t *out = ((uint8_t *)samples) + plane_size;
                for (ch = 1; ch < ctx->channels; ch++)
                {
                    memcpy(out, aFrame->extended_data[ch], plane_size);
                    out += plane_size;
                }
            }
*/
                astream.count += data_size;
                mutex_unlock(&astream.lock);
            };
       }
       else pkt_tmp.size = 0;
    }
    av_free_packet(&pkt);
    return 1;
}
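
The decoder above converts into a fixed 192000-byte buffer and only warns after the fact when swr_convert() fills it completely. A hedged sketch of sizing the destination from the input frame instead, using av_rescale_rnd() in the style of FFmpeg's resampling example (the helper name is illustrative):

#include <libavutil/mathematics.h>

/* Upper bound on output samples produced for in_nb_samples input samples;
   a stricter bound would also add swr_get_delay(swr_ctx, in_rate) to the
   input count to account for samples buffered inside the SwrContext. */
static int max_out_samples(int in_nb_samples, int in_rate, int out_rate)
{
    return (int)av_rescale_rnd(in_nb_samples, out_rate, in_rate, AV_ROUND_UP);
}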
Example 6
AudioMix *ff_audio_mix_alloc(AVAudioResampleContext *avr)
{
    AudioMix *am;
    int ret;

    am = av_mallocz(sizeof(*am));
    if (!am)
        return NULL;
    am->avr = avr;

    if (avr->internal_sample_fmt != AV_SAMPLE_FMT_S16P &&
        avr->internal_sample_fmt != AV_SAMPLE_FMT_FLTP) {
        av_log(avr, AV_LOG_ERROR, "Unsupported internal format for "
               "mixing: %s\n",
               av_get_sample_fmt_name(avr->internal_sample_fmt));
        goto error;
    }

    am->fmt          = avr->internal_sample_fmt;
    am->coeff_type   = avr->mix_coeff_type;
    am->in_layout    = avr->in_channel_layout;
    am->out_layout   = avr->out_channel_layout;
    am->in_channels  = avr->in_channels;
    am->out_channels = avr->out_channels;

    /* build matrix if the user did not already set one */
    if (avr->mix_matrix) {
        ret = ff_audio_mix_set_matrix(am, avr->mix_matrix, avr->in_channels);
        if (ret < 0)
            goto error;
        av_freep(&avr->mix_matrix);
    } else {
        int i, j;
        char in_layout_name[128];
        char out_layout_name[128];
        double *matrix_dbl = av_mallocz(avr->out_channels * avr->in_channels *
                                        sizeof(*matrix_dbl));
        if (!matrix_dbl)
            goto error;

        ret = avresample_build_matrix(avr->in_channel_layout,
                                      avr->out_channel_layout,
                                      avr->center_mix_level,
                                      avr->surround_mix_level,
                                      avr->lfe_mix_level,
                                      avr->normalize_mix_level,
                                      matrix_dbl,
                                      avr->in_channels,
                                      avr->matrix_encoding);
        if (ret < 0) {
            av_free(matrix_dbl);
            goto error;
        }

        av_get_channel_layout_string(in_layout_name, sizeof(in_layout_name),
                                     avr->in_channels, avr->in_channel_layout);
        av_get_channel_layout_string(out_layout_name, sizeof(out_layout_name),
                                     avr->out_channels, avr->out_channel_layout);
        av_log(avr, AV_LOG_DEBUG, "audio_mix: %s to %s\n",
               in_layout_name, out_layout_name);
        for (i = 0; i < avr->out_channels; i++) {
            for (j = 0; j < avr->in_channels; j++) {
                av_log(avr, AV_LOG_DEBUG, "  %0.3f ",
                       matrix_dbl[i * avr->in_channels + j]);
            }
            av_log(avr, AV_LOG_DEBUG, "\n");
        }

        ret = ff_audio_mix_set_matrix(am, matrix_dbl, avr->in_channels);
        if (ret < 0) {
            av_free(matrix_dbl);
            goto error;
        }
        av_free(matrix_dbl);
    }

    return am;

error:
    av_free(am);
    return NULL;
}
Example 7
AudioMix *ff_audio_mix_alloc(AVAudioResampleContext *avr)
{
    AudioMix *am;
    int ret;

    am = av_mallocz(sizeof(*am));
    if (!am)
        return NULL;
    am->avr = avr;

    if (avr->internal_sample_fmt != AV_SAMPLE_FMT_S16P &&
        avr->internal_sample_fmt != AV_SAMPLE_FMT_FLTP) {
        av_log(avr, AV_LOG_ERROR, "Unsupported internal format for "
               "mixing: %s\n",
               av_get_sample_fmt_name(avr->internal_sample_fmt));
        goto error;
    }

    am->fmt          = avr->internal_sample_fmt;
    am->coeff_type   = avr->mix_coeff_type;
    am->in_layout    = avr->in_channel_layout;
    am->out_layout   = avr->out_channel_layout;
    am->in_channels  = avr->in_channels;
    am->out_channels = avr->out_channels;

    /* build matrix if the user did not already set one */
    if (avr->mix_matrix) {
        ret = ff_audio_mix_set_matrix(am, avr->mix_matrix, avr->in_channels);
        if (ret < 0)
            goto error;
        av_freep(&avr->mix_matrix);
    } else {
        double *matrix_dbl = av_mallocz(avr->out_channels * avr->in_channels *
                                        sizeof(*matrix_dbl));
        if (!matrix_dbl)
            goto error;

        ret = avresample_build_matrix(avr->in_channel_layout,
                                      avr->out_channel_layout,
                                      avr->center_mix_level,
                                      avr->surround_mix_level,
                                      avr->lfe_mix_level,
                                      avr->normalize_mix_level,
                                      matrix_dbl,
                                      avr->in_channels,
                                      avr->matrix_encoding);
        if (ret < 0) {
            av_free(matrix_dbl);
            goto error;
        }

        ret = ff_audio_mix_set_matrix(am, matrix_dbl, avr->in_channels);
        if (ret < 0) {
            av_log(avr, AV_LOG_ERROR, "error setting mix matrix\n");
            av_free(matrix_dbl);
            goto error;
        }

        av_free(matrix_dbl);
    }

    return am;

error:
    av_free(am);
    return NULL;
}
Example 8
int swr_init(SwrContext *s){
    s->in_buffer_index= 0;
    s->in_buffer_count= 0;
    s->resample_in_constraint= 0;
    free_temp(&s->postin);
    free_temp(&s->midbuf);
    free_temp(&s->preout);
    free_temp(&s->in_buffer);
    swr_audio_convert_free(&s-> in_convert);
    swr_audio_convert_free(&s->out_convert);
    swr_audio_convert_free(&s->full_convert);

    s-> in.planar= s-> in_sample_fmt >= 0x100;
    s->out.planar= s->out_sample_fmt >= 0x100;
    s-> in_sample_fmt &= 0xFF;
    s->out_sample_fmt &= 0xFF;

    if(s-> in_sample_fmt >= AV_SAMPLE_FMT_NB){
        av_log(s, AV_LOG_ERROR, "Requested sample format %s is invalid\n", av_get_sample_fmt_name(s->in_sample_fmt));
        return AVERROR(EINVAL);
    }
    if(s->out_sample_fmt >= AV_SAMPLE_FMT_NB){
        av_log(s, AV_LOG_ERROR, "Requested sample format %s is invalid\n", av_get_sample_fmt_name(s->out_sample_fmt));
        return AVERROR(EINVAL);
    }

    if(   s->int_sample_fmt != AV_SAMPLE_FMT_S16
        &&s->int_sample_fmt != AV_SAMPLE_FMT_FLT){
        av_log(s, AV_LOG_ERROR, "Requested sample format %s is not supported internally, only float & S16 is supported\n", av_get_sample_fmt_name(s->int_sample_fmt));
        return AVERROR(EINVAL);
    }

    //FIXME should we allow/support using FLT on material that doesn't need it ?
    if(s->in_sample_fmt <= AV_SAMPLE_FMT_S16 || s->int_sample_fmt==AV_SAMPLE_FMT_S16){
        s->int_sample_fmt= AV_SAMPLE_FMT_S16;
    }else
        s->int_sample_fmt= AV_SAMPLE_FMT_FLT;


    if (s->out_sample_rate!=s->in_sample_rate || (s->flags & SWR_FLAG_RESAMPLE)){
        s->resample = swr_resample_init(s->resample, s->out_sample_rate, s->in_sample_rate, 16, 10, 0, 0.8);
    }else
        swr_resample_free(&s->resample);
    if(s->int_sample_fmt != AV_SAMPLE_FMT_S16 && s->resample){
        av_log(s, AV_LOG_ERROR, "Resampling only supported with internal s16 currently\n"); //FIXME
        return -1;
    }

    if(s-> in.ch_count && s-> in_ch_layout && s->in.ch_count != av_get_channel_layout_nb_channels(s-> in_ch_layout)){
        av_log(s, AV_LOG_WARNING, "Input channel layout has a different number of channels than there actually is, ignoring layout\n");
        s-> in_ch_layout= 0;
    }

    if(!s-> in_ch_layout)
        s-> in_ch_layout= av_get_default_channel_layout(s->in.ch_count);
    if(!s->out_ch_layout)
        s->out_ch_layout= av_get_default_channel_layout(s->out.ch_count);

    s->rematrix= s->out_ch_layout  !=s->in_ch_layout;

#define RSC 1 //FIXME finetune
    if(!s-> in.ch_count)
        s-> in.ch_count= av_get_channel_layout_nb_channels(s-> in_ch_layout);
    if(!s->out.ch_count)
        s->out.ch_count= av_get_channel_layout_nb_channels(s->out_ch_layout);

av_assert0(s-> in.ch_count);
av_assert0(s->out.ch_count);
    s->resample_first= RSC*s->out.ch_count/s->in.ch_count - RSC < s->out_sample_rate/(float)s-> in_sample_rate - 1.0;

    s-> in.bps= av_get_bits_per_sample_fmt(s-> in_sample_fmt)/8;
    s->int_bps= av_get_bits_per_sample_fmt(s->int_sample_fmt)/8;
    s->out.bps= av_get_bits_per_sample_fmt(s->out_sample_fmt)/8;

    if(!s->resample && !s->rematrix){
        s->full_convert = swr_audio_convert_alloc(s->out_sample_fmt,
                                                  s-> in_sample_fmt, s-> in.ch_count, 0);
        return 0;
    }

    s->in_convert = swr_audio_convert_alloc(s->int_sample_fmt,
                                            s-> in_sample_fmt, s-> in.ch_count, 0);
    s->out_convert= swr_audio_convert_alloc(s->out_sample_fmt,
                                            s->int_sample_fmt, s->out.ch_count, 0);


    s->postin= s->in;
    s->preout= s->out;
    s->midbuf= s->in;
    s->in_buffer= s->in;
    if(!s->resample_first){
        s->midbuf.ch_count= s->out.ch_count;
        s->in_buffer.ch_count = s->out.ch_count;
    }

    s->in_buffer.bps = s->postin.bps = s->midbuf.bps = s->preout.bps =  s->int_bps;
    s->in_buffer.planar = s->postin.planar = s->midbuf.planar = s->preout.planar =  1;


    if(s->rematrix && swr_rematrix_init(s)<0)
        return -1;

    return 0;
}
int decode_thread(void *arg) {

    VideoState *is = (VideoState *)arg;
    AVFormatContext *pFormatCtx = NULL;
    AVPacket pkt1, *packet = &pkt1;

    AVDictionary *io_dict = NULL;
    AVIOInterruptCB callback;

    int video_index = -1;
    int audio_index = -1;
    int i;

    is->videoStream = -1;
    is->audioStream = -1;
    is->audio_need_resample = 0;

    global_video_state = is;
    // will interrupt blocking functions if we quit!
    callback.callback = decode_interrupt_cb;
    callback.opaque = is;

    if(avio_open2(&is->io_context, is->filename, 0, &callback, &io_dict)) {
        fprintf(stderr, "Unable to open I/O for %s\n", is->filename);
        return -1;
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, is->filename, NULL, NULL) != 0) {
        return -1;    // Couldn't open file
    }

    is->pFormatCtx = pFormatCtx;

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        return -1;    // Couldn't find stream information
    }

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, is->filename, 0);

    // Find the first video stream
    for(i = 0; i < pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                video_index < 0) {
            video_index = i;
        }

        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                audio_index < 0) {
            audio_index = i;
        }
    }

    if(audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if(video_index >= 0) {
        stream_component_open(is, video_index);
    }

    if(is->videoStream < 0 && is->audioStream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        goto fail;
    }

#ifdef __RESAMPLER__

    if( audio_index >= 0
            && pFormatCtx->streams[audio_index]->codec->sample_fmt != AV_SAMPLE_FMT_S16) {
        is->audio_need_resample = 1;
        is->pResampledOut = NULL;
        is->pSwrCtx = NULL;

        printf("Configure resampler: ");

#ifdef __LIBAVRESAMPLE__
        printf("libAvResample\n");
        is->pSwrCtx = avresample_alloc_context();
#endif

#ifdef __LIBSWRESAMPLE__
        printf("libSwResample\n");
        is->pSwrCtx = swr_alloc();
#endif

        // Some MP3/WAV files don't report a channel layout, so assume
        // they are stereo rather than 5.1
        if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                && pFormatCtx->streams[audio_index]->codec->channels == 2) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;

        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                   && pFormatCtx->streams[audio_index]->codec->channels == 1) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_MONO;

        } else if (pFormatCtx->streams[audio_index]->codec->channel_layout == 0
                   && pFormatCtx->streams[audio_index]->codec->channels == 0) {
            pFormatCtx->streams[audio_index]->codec->channel_layout = AV_CH_LAYOUT_STEREO;
            pFormatCtx->streams[audio_index]->codec->channels = 2;
        }

        av_opt_set_int(is->pSwrCtx, "in_channel_layout",
                       pFormatCtx->streams[audio_index]->codec->channel_layout, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_fmt",
                       pFormatCtx->streams[audio_index]->codec->sample_fmt, 0);
        av_opt_set_int(is->pSwrCtx, "in_sample_rate",
                       pFormatCtx->streams[audio_index]->codec->sample_rate, 0);

        av_opt_set_int(is->pSwrCtx, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int(is->pSwrCtx, "out_sample_rate", 44100, 0);

#ifdef __LIBAVRESAMPLE__

        if (avresample_open(is->pSwrCtx) < 0) {
#else

        if (swr_init(is->pSwrCtx) < 0) {
#endif
            fprintf(stderr, " ERROR!! From Samplert: %d Hz Sample format: %s\n",
                    pFormatCtx->streams[audio_index]->codec->sample_rate,
                    av_get_sample_fmt_name(pFormatCtx->streams[audio_index]->codec->sample_fmt));
            fprintf(stderr, "         To 44100 Sample format: s16\n");
            is->audio_need_resample = 0;
            is->pSwrCtx = NULL;
        }

    }

#endif

    // main decode loop

    for(;;) {
        if(is->quit) {
            break;
        }

        // seek stuff goes here
        if(is->seek_req) {
            int stream_index = -1;
            int64_t seek_target = is->seek_pos;

            if(is->videoStream >= 0) {
                stream_index = is->videoStream;

            } else if(is->audioStream >= 0) {
                stream_index = is->audioStream;
            }

            if(stream_index >= 0) {
                seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatCtx->streams[stream_index]->time_base);
            }

            if(av_seek_frame(is->pFormatCtx, stream_index, seek_target, is->seek_flags) < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->pFormatCtx->filename);

            } else {
                if(is->audioStream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }

                if(is->videoStream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }

            is->seek_req = 0;
        }

        if(is->audioq.size > MAX_AUDIOQ_SIZE ||
                is->videoq.size > MAX_VIDEOQ_SIZE) {
            SDL_Delay(10);
            continue;
        }

        if(av_read_frame(is->pFormatCtx, packet) < 0) {
            if(is->pFormatCtx->pb->error == 0) {
                SDL_Delay(100); /* no error; wait for user input */
                continue;

            } else {
                break;
            }
        }

        // Is this a packet from the video stream?
        if(packet->stream_index == is->videoStream) {
            packet_queue_put(&is->videoq, packet);

        } else if(packet->stream_index == is->audioStream) {
            packet_queue_put(&is->audioq, packet);

        } else {
            av_free_packet(packet);
        }
    }

    /* all done - wait for it */
    while(!is->quit) {
        SDL_Delay(100);
    }

fail: {
        SDL_Event event;
        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

void stream_seek(VideoState *is, int64_t pos, int rel) {

    if(!is->seek_req) {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        is->seek_req = 1;
    }
}

int main(int argc, char *argv[]) {

    SDL_Event       event;
    //double          pts;
    VideoState      *is;

    is = av_mallocz(sizeof(VideoState));

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }

    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Make a screen to put our video
#ifndef __DARWIN__
    screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
    screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif

    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    av_strlcpy(is->filename, argv[1], 1024);

    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    schedule_refresh(is, 40);

    is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
    is->parse_tid = SDL_CreateThread(decode_thread, is);

    if(!is->parse_tid) {
        av_free(is);
        return -1;
    }

    av_init_packet(&flush_pkt);
    flush_pkt.data = (unsigned char *)"FLUSH";

    for(;;) {
        double incr, pos;
        SDL_WaitEvent(&event);

        switch(event.type) {
            case SDL_KEYDOWN:
                switch(event.key.keysym.sym) {
                    case SDLK_LEFT:
                        incr = -10.0;
                        goto do_seek;

                    case SDLK_RIGHT:
                        incr = 10.0;
                        goto do_seek;

                    case SDLK_UP:
                        incr = 60.0;
                        goto do_seek;

                    case SDLK_DOWN:
                        incr = -60.0;
                        goto do_seek;
do_seek:

                        if(global_video_state) {
                            pos = get_master_clock(global_video_state);
                            pos += incr;
                            stream_seek(global_video_state, (int64_t)(pos * AV_TIME_BASE), incr);
                        }

                        break;

                    default:
                        break;
                }

                break;

            case FF_QUIT_EVENT:
            case SDL_QUIT:
                is->quit = 1;
                /*
                 * If the video has finished playing, then both the picture and
                 * audio queues are waiting for more data.  Make them stop
                 * waiting and terminate normally.
                 */
                SDL_CondSignal(is->audioq.cond);
                SDL_CondSignal(is->videoq.cond);
                SDL_Quit();
                exit(0);
                break;

            case FF_ALLOC_EVENT:
                alloc_picture(event.user.data1);
                break;

            case FF_REFRESH_EVENT:
                video_refresh_timer(event.user.data1);
                break;

            default:
                break;
        }
    }

    return 0;
}
Example 10
bool FeMedia::onGetData( Chunk &data )
{
	int offset=0;

	data.samples = NULL;
	data.sampleCount = 0;

	if ( (!m_audio) || end_of_file() )
		return false;

	while ( offset < m_audio->codec_ctx->sample_rate )
	{
		AVPacket *packet = m_audio->pop_packet();
		while (( packet == NULL ) && ( !end_of_file() ))
		{
			read_packet();
			packet = m_audio->pop_packet();
		}

		if ( packet == NULL )
		{
			m_audio->at_end=true;
			if ( offset > 0 )
				return true;
			return false;
		}

#if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT( 53, 25, 0 ))
		{
			sf::Lock l( m_audio->buffer_mutex );

			int bsize = MAX_AUDIO_FRAME_SIZE;
			if ( avcodec_decode_audio3(
						m_audio->codec_ctx,
						(m_audio->buffer + offset),
						&bsize, packet) < 0 )
			{
				std::cerr << "Error decoding audio." << std::endl;
				FeBaseStream::free_packet( packet );
				return false;
			}
			else
			{
				offset += bsize / sizeof( sf::Int16 );
				data.sampleCount += bsize / sizeof(sf::Int16);
				data.samples = m_audio->buffer;
			}
		}
#else
 #if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 55, 45, 0 ))
		AVFrame *frame = av_frame_alloc();
		m_audio->codec_ctx->refcounted_frames = 1;
 #else
		AVFrame *frame = avcodec_alloc_frame();
 #endif
		//
		// TODO: avcodec_decode_audio4() can return multiple frames per packet depending on the codec.
		// We don't deal with this appropriately...
		//
		int got_frame( 0 );
		int len = avcodec_decode_audio4( m_audio->codec_ctx, frame, &got_frame, packet );
		if ( len < 0 )
		{
#ifdef FE_DEBUG
			char buff[256];
			av_strerror( len, buff, 256 );
			std::cerr << "Error decoding audio: " << buff << std::endl;
#endif
		}

		if ( got_frame )
		{
			int data_size = av_samples_get_buffer_size(
				NULL,
				m_audio->codec_ctx->channels,
				frame->nb_samples,
				m_audio->codec_ctx->sample_fmt, 1);

#ifdef DO_RESAMPLE
			if ( m_audio->codec_ctx->sample_fmt == AV_SAMPLE_FMT_S16 )
#endif
			{
				sf::Lock l( m_audio->buffer_mutex );

				memcpy( (m_audio->buffer + offset), frame->data[0], data_size );
				offset += data_size / sizeof( sf::Int16 );
				data.sampleCount += data_size / sizeof(sf::Int16);
				data.samples = m_audio->buffer;
			}
#ifdef DO_RESAMPLE
			else
			{
				sf::Lock l( m_audio->buffer_mutex );

				if ( !m_audio->resample_ctx )
				{
					m_audio->resample_ctx = resample_alloc();
					if ( !m_audio->resample_ctx )
					{
						std::cerr << "Error allocating audio format converter." << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						return false;
					}

					int64_t channel_layout = frame->channel_layout;
					if ( !channel_layout )
					{
						channel_layout = av_get_default_channel_layout(
								m_audio->codec_ctx->channels );
					}

					av_opt_set_int( m_audio->resample_ctx, "in_channel_layout", channel_layout, 0 );
					av_opt_set_int( m_audio->resample_ctx, "in_sample_fmt", frame->format, 0 );
					av_opt_set_int( m_audio->resample_ctx, "in_sample_rate", frame->sample_rate, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_channel_layout", channel_layout, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_sample_rate", frame->sample_rate, 0 );

#ifdef FE_DEBUG
					std::cout << "Initializing resampler: in_sample_fmt="
						<< av_get_sample_fmt_name( (AVSampleFormat)frame->format )
						<< ", in_sample_rate=" << frame->sample_rate
						<< ", out_sample_fmt=" << av_get_sample_fmt_name( AV_SAMPLE_FMT_S16 )
						<< ", out_sample_rate=" << frame->sample_rate << std::endl;
#endif
					if ( resample_init( m_audio->resample_ctx ) < 0 )
					{
						std::cerr << "Error initializing audio format converter, input format="
							<< av_get_sample_fmt_name( (AVSampleFormat)frame->format )
							<< ", input sample rate=" << frame->sample_rate << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						resample_free( &m_audio->resample_ctx );
						m_audio->resample_ctx = NULL;
						return false;
					}
				}
				if ( m_audio->resample_ctx )
				{
					int out_linesize;
					av_samples_get_buffer_size(
						&out_linesize,
						m_audio->codec_ctx->channels,
						frame->nb_samples,
						AV_SAMPLE_FMT_S16, 0 );

					uint8_t *tmp_ptr = (uint8_t *)(m_audio->buffer + offset);

#ifdef USE_SWRESAMPLE
					int out_samples = swr_convert(
								m_audio->resample_ctx,
								&tmp_ptr,
								frame->nb_samples,
								(const uint8_t **)frame->data,
								frame->nb_samples );
#else // USE_AVRESAMPLE
					int out_samples = avresample_convert(
								m_audio->resample_ctx,
								&tmp_ptr,
								out_linesize,
								frame->nb_samples,
								frame->data,
								frame->linesize[0],
								frame->nb_samples );
#endif
					if ( out_samples < 0 )
					{
						std::cerr << "Error performing audio conversion." << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						break;
					}
					offset += out_samples * m_audio->codec_ctx->channels;
					data.sampleCount += out_samples * m_audio->codec_ctx->channels;
					data.samples = m_audio->buffer;
				}
			}
#endif
		}
		FeBaseStream::free_frame( frame );

#endif

		FeBaseStream::free_packet( packet );
	}

	return true;
}
Example 11
int swr_init(struct SwrContext *s){
    s->in_buffer_index= 0;
    s->in_buffer_count= 0;
    s->resample_in_constraint= 0;
    free_temp(&s->postin);
    free_temp(&s->midbuf);
    free_temp(&s->preout);
    free_temp(&s->in_buffer);
    free_temp(&s->dither);
    swri_audio_convert_free(&s-> in_convert);
    swri_audio_convert_free(&s->out_convert);
    swri_audio_convert_free(&s->full_convert);

    s->flushed = 0;

    s-> in.planar= av_sample_fmt_is_planar(s-> in_sample_fmt);
    s->out.planar= av_sample_fmt_is_planar(s->out_sample_fmt);
    s-> in_sample_fmt= av_get_alt_sample_fmt(s-> in_sample_fmt, 0);
    s->out_sample_fmt= av_get_alt_sample_fmt(s->out_sample_fmt, 0);

    if(s-> in_sample_fmt >= AV_SAMPLE_FMT_NB){
        av_log(s, AV_LOG_ERROR, "Requested input sample format %d is invalid\n", s->in_sample_fmt);
        return AVERROR(EINVAL);
    }
    if(s->out_sample_fmt >= AV_SAMPLE_FMT_NB){
        av_log(s, AV_LOG_ERROR, "Requested output sample format %d is invalid\n", s->out_sample_fmt);
        return AVERROR(EINVAL);
    }

    //FIXME should we allow/support using FLT on material that doesn't need it ?
    if(s->in_sample_fmt <= AV_SAMPLE_FMT_S16 || s->int_sample_fmt==AV_SAMPLE_FMT_S16){
        s->int_sample_fmt= AV_SAMPLE_FMT_S16;
    }else
        s->int_sample_fmt= AV_SAMPLE_FMT_FLT;

    if(   s->int_sample_fmt != AV_SAMPLE_FMT_S16
        &&s->int_sample_fmt != AV_SAMPLE_FMT_S32
        &&s->int_sample_fmt != AV_SAMPLE_FMT_FLT){
        av_log(s, AV_LOG_ERROR, "Requested sample format %s is not supported internally, S16/S32/FLT is supported\n", av_get_sample_fmt_name(s->int_sample_fmt));
        return AVERROR(EINVAL);
    }

    if (s->out_sample_rate!=s->in_sample_rate || (s->flags & SWR_FLAG_RESAMPLE)){
        s->resample = swri_resample_init(s->resample, s->out_sample_rate, s->in_sample_rate, 16, 10, 0, 0.8, s->int_sample_fmt);
    }else
        swri_resample_free(&s->resample);
    if(    s->int_sample_fmt != AV_SAMPLE_FMT_S16
        && s->int_sample_fmt != AV_SAMPLE_FMT_S32
        && s->int_sample_fmt != AV_SAMPLE_FMT_FLT
        && s->resample){
        av_log(s, AV_LOG_ERROR, "Resampling only supported with internal s16/s32/flt\n");
        return -1;
    }

    if(!s->used_ch_count)
        s->used_ch_count= s->in.ch_count;

    if(s->used_ch_count && s-> in_ch_layout && s->used_ch_count != av_get_channel_layout_nb_channels(s-> in_ch_layout)){
        av_log(s, AV_LOG_WARNING, "Input channel layout has a different number of channels than the number of used channels, ignoring layout\n");
        s-> in_ch_layout= 0;
    }

    if(!s-> in_ch_layout)
        s-> in_ch_layout= av_get_default_channel_layout(s->used_ch_count);
    if(!s->out_ch_layout)
        s->out_ch_layout= av_get_default_channel_layout(s->out.ch_count);

    s->rematrix= s->out_ch_layout  !=s->in_ch_layout || s->rematrix_volume!=1.0 ||
                 s->rematrix_custom;

#define RSC 1 //FIXME finetune
    if(!s-> in.ch_count)
        s-> in.ch_count= av_get_channel_layout_nb_channels(s-> in_ch_layout);
    if(!s->used_ch_count)
        s->used_ch_count= s->in.ch_count;
    if(!s->out.ch_count)
        s->out.ch_count= av_get_channel_layout_nb_channels(s->out_ch_layout);

    if(!s-> in.ch_count){
        av_assert0(!s->in_ch_layout);
        av_log(s, AV_LOG_ERROR, "Input channel count and layout are unset\n");
        return -1;
    }

    if ((!s->out_ch_layout || !s->in_ch_layout) && s->used_ch_count != s->out.ch_count && !s->rematrix_custom) {
        av_log(s, AV_LOG_ERROR, "Rematrix is needed but there is not enough information to do it\n");
        return -1;
    }

av_assert0(s->used_ch_count);
av_assert0(s->out.ch_count);
    s->resample_first= RSC*s->out.ch_count/s->in.ch_count - RSC < s->out_sample_rate/(float)s-> in_sample_rate - 1.0;

    s-> in.bps= av_get_bytes_per_sample(s-> in_sample_fmt);
    s->int_bps= av_get_bytes_per_sample(s->int_sample_fmt);
    s->out.bps= av_get_bytes_per_sample(s->out_sample_fmt);
    s->in_buffer= s->in;

    if(!s->resample && !s->rematrix && !s->channel_map){
        s->full_convert = swri_audio_convert_alloc(s->out_sample_fmt,
                                                   s-> in_sample_fmt, s-> in.ch_count, NULL, 0);
        return 0;
    }

    s->in_convert = swri_audio_convert_alloc(s->int_sample_fmt,
                                             s-> in_sample_fmt, s->used_ch_count, s->channel_map, 0);
    s->out_convert= swri_audio_convert_alloc(s->out_sample_fmt,
                                             s->int_sample_fmt, s->out.ch_count, NULL, 0);


    s->postin= s->in;
    s->preout= s->out;
    s->midbuf= s->in;

    if(s->channel_map){
        s->postin.ch_count=
        s->midbuf.ch_count= s->used_ch_count;
        if(s->resample)
            s->in_buffer.ch_count= s->used_ch_count;
    }
    if(!s->resample_first){
        s->midbuf.ch_count= s->out.ch_count;
        if(s->resample)
            s->in_buffer.ch_count = s->out.ch_count;
    }

    s->postin.bps    = s->midbuf.bps    = s->preout.bps    =  s->int_bps;
    s->postin.planar = s->midbuf.planar = s->preout.planar =  1;

    if(s->resample){
        s->in_buffer.bps    = s->int_bps;
        s->in_buffer.planar = 1;
    }

    s->dither = s->preout;

    if(s->rematrix)
        return swri_rematrix_init(s);

    return 0;
}
Example 12
int AudioDecoder::audio_thread(void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVStreamsParser* ps = is->getAVStreamsParser();
    AVFrame *frame = av_frame_alloc();
    Frame *af;
#if CONFIG_AVFILTER
    int last_serial = -1;
    int64_t dec_channel_layout;
    int reconfigure;
#endif
    int got_frame = 0;
    AVRational tb;
    int ret = 0;

    if (!frame)
        return AVERROR(ENOMEM);

    do {
        if ((got_frame = is->auddec().decode_frame(frame)) < 0)
            goto the_end;

        if (got_frame) {
                tb = (AVRational){1, frame->sample_rate};

#if CONFIG_AVFILTER
                dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));

                reconfigure =
                    cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
                            (AVSampleFormat)frame->format, av_frame_get_channels(frame))    ||
                    is->audio_filter_src.channel_layout != dec_channel_layout ||
                    is->audio_filter_src.freq           != frame->sample_rate ||
                    is->auddec().pkt_serial               != last_serial;

                if (reconfigure) {
                    char buf1[1024], buf2[1024];
                    av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
                    av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
                    av_log(NULL, AV_LOG_DEBUG,
                           "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
                           is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
                           frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name((AVSampleFormat)frame->format), buf2, is->auddec().pkt_serial);

                    is->audio_filter_src.fmt            = (AVSampleFormat)frame->format;
                    is->audio_filter_src.channels       = av_frame_get_channels(frame);
                    is->audio_filter_src.channel_layout = dec_channel_layout;
                    is->audio_filter_src.freq           = frame->sample_rate;
                    last_serial                         = is->auddec().pkt_serial;

                    if ((ret = configure_audio_filters(is, gOptions.afilters, 1)) < 0)
                        goto the_end;
                }

            if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
                goto the_end;

            while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
                tb = is->out_audio_filter->inputs[0]->time_base;
#endif
                if (!(af = is->sampq().peek_writable()))
                    goto the_end;

                af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
                af->pos = av_frame_get_pkt_pos(frame);
                af->serial = is->auddec().pkt_serial;
                af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});

                av_frame_move_ref(af->frame, frame);
                is->sampq().push();

#if CONFIG_AVFILTER
                if (ps->audioq.serial != is->auddec().pkt_serial)
                    break;
            }
            if (ret == AVERROR_EOF)
                is->auddec().finished = is->auddec().pkt_serial;
#endif
        }
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&is->agraph);
#endif
    av_frame_free(&frame);
    return ret;
}
Example 13
int avresample_open(AVAudioResampleContext *avr)
{
    int ret;

    if (avresample_is_open(avr)) {
        av_log(avr, AV_LOG_ERROR, "The resampling context is already open.\n");
        return AVERROR(EINVAL);
    }

    /* set channel mixing parameters */
    avr->in_channels = av_get_channel_layout_nb_channels(avr->in_channel_layout);
    if (avr->in_channels <= 0 || avr->in_channels > AVRESAMPLE_MAX_CHANNELS) {
        av_log(avr, AV_LOG_ERROR, "Invalid input channel layout: %"PRIu64"\n",
               avr->in_channel_layout);
        return AVERROR(EINVAL);
    }
    avr->out_channels = av_get_channel_layout_nb_channels(avr->out_channel_layout);
    if (avr->out_channels <= 0 || avr->out_channels > AVRESAMPLE_MAX_CHANNELS) {
        av_log(avr, AV_LOG_ERROR, "Invalid output channel layout: %"PRIu64"\n",
               avr->out_channel_layout);
        return AVERROR(EINVAL);
    }
    avr->resample_channels = FFMIN(avr->in_channels, avr->out_channels);
    avr->downmix_needed    = avr->in_channels  > avr->out_channels;
    avr->upmix_needed      = avr->out_channels > avr->in_channels ||
                             (!avr->downmix_needed && (avr->mix_matrix ||
                              avr->in_channel_layout != avr->out_channel_layout));
    avr->mixing_needed     = avr->downmix_needed || avr->upmix_needed;

    /* set resampling parameters */
    avr->resample_needed   = avr->in_sample_rate != avr->out_sample_rate ||
                             avr->force_resampling;

    /* select internal sample format if not specified by the user */
    if (avr->internal_sample_fmt == AV_SAMPLE_FMT_NONE &&
        (avr->mixing_needed || avr->resample_needed)) {
        enum AVSampleFormat  in_fmt = av_get_planar_sample_fmt(avr->in_sample_fmt);
        enum AVSampleFormat out_fmt = av_get_planar_sample_fmt(avr->out_sample_fmt);
        int max_bps = FFMAX(av_get_bytes_per_sample(in_fmt),
                            av_get_bytes_per_sample(out_fmt));
        if (max_bps <= 2) {
            avr->internal_sample_fmt = AV_SAMPLE_FMT_S16P;
        } else if (avr->mixing_needed) {
            avr->internal_sample_fmt = AV_SAMPLE_FMT_FLTP;
        } else {
            if (max_bps <= 4) {
                if (in_fmt  == AV_SAMPLE_FMT_S32P ||
                    out_fmt == AV_SAMPLE_FMT_S32P) {
                    if (in_fmt  == AV_SAMPLE_FMT_FLTP ||
                        out_fmt == AV_SAMPLE_FMT_FLTP) {
                        /* if one is s32 and the other is flt, use dbl */
                        avr->internal_sample_fmt = AV_SAMPLE_FMT_DBLP;
                    } else {
                        /* if one is s32 and the other is s32, s16, or u8, use s32 */
                        avr->internal_sample_fmt = AV_SAMPLE_FMT_S32P;
                    }
                } else {
                    /* if one is flt and the other is flt, s16 or u8, use flt */
                    avr->internal_sample_fmt = AV_SAMPLE_FMT_FLTP;
                }
            } else {
                /* if either is dbl, use dbl */
                avr->internal_sample_fmt = AV_SAMPLE_FMT_DBLP;
            }
        }
        av_log(avr, AV_LOG_DEBUG, "Using %s as internal sample format\n",
               av_get_sample_fmt_name(avr->internal_sample_fmt));
    }

    /* treat all mono as planar for easier comparison */
    if (avr->in_channels == 1)
        avr->in_sample_fmt = av_get_planar_sample_fmt(avr->in_sample_fmt);
    if (avr->out_channels == 1)
        avr->out_sample_fmt = av_get_planar_sample_fmt(avr->out_sample_fmt);

    /* we may need to add an extra conversion in order to remap channels if
       the output format is not planar */
    if (avr->use_channel_map && !avr->mixing_needed && !avr->resample_needed &&
        !av_sample_fmt_is_planar(avr->out_sample_fmt)) {
        avr->internal_sample_fmt = av_get_planar_sample_fmt(avr->out_sample_fmt);
    }

    /* set sample format conversion parameters */
    if (avr->resample_needed || avr->mixing_needed)
        avr->in_convert_needed = avr->in_sample_fmt != avr->internal_sample_fmt;
    else
        avr->in_convert_needed = avr->use_channel_map &&
                                 !av_sample_fmt_is_planar(avr->out_sample_fmt);

    if (avr->resample_needed || avr->mixing_needed || avr->in_convert_needed)
        avr->out_convert_needed = avr->internal_sample_fmt != avr->out_sample_fmt;
    else
        avr->out_convert_needed = avr->in_sample_fmt != avr->out_sample_fmt;

    avr->in_copy_needed = !avr->in_convert_needed && (avr->mixing_needed ||
                          (avr->use_channel_map && avr->resample_needed));

    if (avr->use_channel_map) {
        if (avr->in_copy_needed) {
            avr->remap_point = REMAP_IN_COPY;
            av_dlog(avr, "remap channels during in_copy\n");
        } else if (avr->in_convert_needed) {
            avr->remap_point = REMAP_IN_CONVERT;
            av_dlog(avr, "remap channels during in_convert\n");
        } else if (avr->out_convert_needed) {
            avr->remap_point = REMAP_OUT_CONVERT;
            av_dlog(avr, "remap channels during out_convert\n");
        } else {
            avr->remap_point = REMAP_OUT_COPY;
            av_dlog(avr, "remap channels during out_copy\n");
        }

#ifdef DEBUG
        {
            int ch;
            av_dlog(avr, "output map: ");
            if (avr->ch_map_info.do_remap)
                for (ch = 0; ch < avr->in_channels; ch++)
                    av_dlog(avr, " % 2d", avr->ch_map_info.channel_map[ch]);
            else
                av_dlog(avr, "n/a");
            av_dlog(avr, "\n");
            av_dlog(avr, "copy map:   ");
            if (avr->ch_map_info.do_copy)
                for (ch = 0; ch < avr->in_channels; ch++)
                    av_dlog(avr, " % 2d", avr->ch_map_info.channel_copy[ch]);
            else
                av_dlog(avr, "n/a");
            av_dlog(avr, "\n");
            av_dlog(avr, "zero map:   ");
            if (avr->ch_map_info.do_zero)
                for (ch = 0; ch < avr->in_channels; ch++)
                    av_dlog(avr, " % 2d", avr->ch_map_info.channel_zero[ch]);
            else
                av_dlog(avr, "n/a");
            av_dlog(avr, "\n");
            av_dlog(avr, "input map:  ");
            for (ch = 0; ch < avr->in_channels; ch++)
                av_dlog(avr, " % 2d", avr->ch_map_info.input_map[ch]);
            av_dlog(avr, "\n");
        }
#endif
    } else
        avr->remap_point = REMAP_NONE;

    /* allocate buffers */
    if (avr->in_copy_needed || avr->in_convert_needed) {
        avr->in_buffer = ff_audio_data_alloc(FFMAX(avr->in_channels, avr->out_channels),
                                             0, avr->internal_sample_fmt,
                                             "in_buffer");
        if (!avr->in_buffer) {
            ret = AVERROR(EINVAL);
            goto error;
        }
    }
    if (avr->resample_needed) {
        avr->resample_out_buffer = ff_audio_data_alloc(avr->out_channels,
                                                       1024, avr->internal_sample_fmt,
                                                       "resample_out_buffer");
        if (!avr->resample_out_buffer) {
            ret = AVERROR(EINVAL);
            goto error;
        }
    }
    if (avr->out_convert_needed) {
        avr->out_buffer = ff_audio_data_alloc(avr->out_channels, 0,
                                              avr->out_sample_fmt, "out_buffer");
        if (!avr->out_buffer) {
            ret = AVERROR(EINVAL);
            goto error;
        }
    }
    avr->out_fifo = av_audio_fifo_alloc(avr->out_sample_fmt, avr->out_channels,
                                        1024);
    if (!avr->out_fifo) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    /* setup contexts */
    if (avr->in_convert_needed) {
        avr->ac_in = ff_audio_convert_alloc(avr, avr->internal_sample_fmt,
                                            avr->in_sample_fmt, avr->in_channels,
                                            avr->in_sample_rate,
                                            avr->remap_point == REMAP_IN_CONVERT);
        if (!avr->ac_in) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }
    if (avr->out_convert_needed) {
        enum AVSampleFormat src_fmt;
        if (avr->in_convert_needed)
            src_fmt = avr->internal_sample_fmt;
        else
            src_fmt = avr->in_sample_fmt;
        avr->ac_out = ff_audio_convert_alloc(avr, avr->out_sample_fmt, src_fmt,
                                             avr->out_channels,
                                             avr->out_sample_rate,
                                             avr->remap_point == REMAP_OUT_CONVERT);
        if (!avr->ac_out) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }
    if (avr->resample_needed) {
        avr->resample = ff_audio_resample_init(avr);
        if (!avr->resample) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }
    if (avr->mixing_needed) {
        avr->am = ff_audio_mix_alloc(avr);
        if (!avr->am) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }

    return 0;

error:
    avresample_close(avr);
    return ret;
}
Example 14
static void
audio_process_audio(audio_decoder_t *ad, media_buf_t *mb)
{
  const audio_class_t *ac = ad->ad_ac;
  AVFrame *frame = ad->ad_frame;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  int r;
  int got_frame;
  AVPacket avpkt;
  int offset = 0;

  if(mb->mb_skip || mb->mb_stream != mq->mq_stream) 
    return;

  while(offset < mb->mb_size) {

    if(mb->mb_cw == NULL) {
      frame->sample_rate = mb->mb_rate;
      frame->format = AV_SAMPLE_FMT_S16;
      switch(mb->mb_channels) {
      case 1:
	frame->channel_layout = AV_CH_LAYOUT_MONO;
	frame->nb_samples = mb->mb_size / 2;
	break;
      case 2:
	frame->channel_layout = AV_CH_LAYOUT_STEREO;
	frame->nb_samples = mb->mb_size / 4;
	break;
      default:
	abort();
      }
      frame->data[0] = mb->mb_data;
      frame->linesize[0] = 0;
      r = mb->mb_size;
      got_frame = 1;

    } else {

      av_init_packet(&avpkt);
      avpkt.data = mb->mb_data + offset;
      avpkt.size = mb->mb_size - offset;
      
      r = avcodec_decode_audio4(mb->mb_cw->codec_ctx, frame,
				&got_frame, &avpkt);
      if(r < 0)
	return;

      if(frame->sample_rate == 0)
	frame->sample_rate = mb->mb_cw->codec_ctx->sample_rate;
    
      if(frame->sample_rate == 0)
	return;

      if(mp->mp_stats)
	mp_set_mq_meta(mq, mb->mb_cw->codec, mb->mb_cw->codec_ctx);

    }

    if(offset == 0 && mb->mb_pts != AV_NOPTS_VALUE) {
        
      int od = 0, id = 0;
          
      if(ad->ad_avr != NULL) {
	od = avresample_available(ad->ad_avr) *
	  1000000LL / ad->ad_out_sample_rate;
	id = avresample_get_delay(ad->ad_avr) *
	  1000000LL / frame->sample_rate;
      }
      ad->ad_pts = mb->mb_pts - od - id;
      ad->ad_epoch = mb->mb_epoch;
      //        printf("od=%-20d id=%-20d PTS=%-20ld oPTS=%-20ld\n",
      // od, id, mb->mb_pts, pts);
        
      if(mb->mb_drive_clock)
	mp_set_current_time(mp, mb->mb_pts - ad->ad_delay,
			    mb->mb_epoch, mb->mb_delta);
    }

    offset += r;

    if(got_frame) {

      if(frame->sample_rate    != ad->ad_in_sample_rate ||
	 frame->format         != ad->ad_in_sample_format ||
	 frame->channel_layout != ad->ad_in_channel_layout) {
          
	ad->ad_in_sample_rate    = frame->sample_rate;
	ad->ad_in_sample_format  = frame->format;
	ad->ad_in_channel_layout = frame->channel_layout;

	ac->ac_reconfig(ad);

	if(ad->ad_avr == NULL)
	  ad->ad_avr = avresample_alloc_context();
	else
	  avresample_close(ad->ad_avr);
          
	av_opt_set_int(ad->ad_avr, "in_sample_fmt",
		       ad->ad_in_sample_format, 0);
	av_opt_set_int(ad->ad_avr, "in_sample_rate", 
		       ad->ad_in_sample_rate, 0);
	av_opt_set_int(ad->ad_avr, "in_channel_layout",
		       ad->ad_in_channel_layout, 0);

	av_opt_set_int(ad->ad_avr, "out_sample_fmt",
		       ad->ad_out_sample_format, 0);
	av_opt_set_int(ad->ad_avr, "out_sample_rate",
		       ad->ad_out_sample_rate, 0);
	av_opt_set_int(ad->ad_avr, "out_channel_layout",
		       ad->ad_out_channel_layout, 0);
          
	char buf1[128];
	char buf2[128];

	av_get_channel_layout_string(buf1, sizeof(buf1), 
				     -1, ad->ad_in_channel_layout);
	av_get_channel_layout_string(buf2, sizeof(buf2), 
				     -1, ad->ad_out_channel_layout);

	TRACE(TRACE_DEBUG, "Audio",
	      "Converting from [%s %dHz %s] to [%s %dHz %s]",
	      buf1, ad->ad_in_sample_rate,
	      av_get_sample_fmt_name(ad->ad_in_sample_format),
	      buf2, ad->ad_out_sample_rate,
	      av_get_sample_fmt_name(ad->ad_out_sample_format));

	if(avresample_open(ad->ad_avr)) {
	  TRACE(TRACE_ERROR, "AudioQueue", "Unable to open resampler");
	  avresample_free(&ad->ad_avr);
	}
      }
      if(ad->ad_avr != NULL)
	avresample_convert(ad->ad_avr, NULL, 0, 0,
			   frame->data, frame->linesize[0],
			   frame->nb_samples);
    }
  }
}
int main (int argc, char **argv)
{
    int ret = 0, got_frame;
    if (argc != 4 && argc != 5) {
        fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n\n"
                "If the -refcount option is specified, the program use the\n"
                "reference counting frame system which allows keeping a copy of\n"
                "the data for longer than one decode call.\n"
                "\n", argv[0]);
        exit(1);
    }
    if (argc == 5 && !strcmp(argv[1], "-refcount")) {
        refcount = 1;
        argv++;
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];
    /* register all formats and codecs */
    av_register_all();
    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }
    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }
    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;
        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }
        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
    }
    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }
    }
    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);
    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }
    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_packet_unref(&orig_pkt);
    }
    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);
    printf("Demuxing succeeded.\n");
    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height,
               video_dst_filename);
    }
    if (audio_stream) {
        enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
        int n_channels = audio_dec_ctx->channels;
        const char *fmt;
        if (av_sample_fmt_is_planar(sfmt)) {
            const char *packed = av_get_sample_fmt_name(sfmt);
            printf("Warning: the sample format the decoder produced is planar "
                   "(%s). This example will output the first channel only.\n",
                   packed ? packed : "?");
            sfmt = av_get_packed_sample_fmt(sfmt);
            n_channels = 1;
        }
        if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
            goto end;
        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, n_channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }
end:
    avcodec_close(video_dec_ctx);
    avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_frame_free(&frame);
    av_free(video_dst_data[0]);
    return ret < 0;
}
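The main() above relies on helpers that are not reproduced here (open_codec_context, decode_packet, get_format_from_sample_fmt). As an illustration, here is a minimal sketch of get_format_from_sample_fmt(), modelled on FFmpeg's demuxing example; it maps the packed sample format to the raw format name passed to ffplay (the table in the original program may differ, and big-endian hosts would need the *be names):

static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    /* little-endian raw format names understood by ffplay */
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt;
        const char *fmt_le;
    } entries[] = {
        { AV_SAMPLE_FMT_U8,  "u8"    },
        { AV_SAMPLE_FMT_S16, "s16le" },
        { AV_SAMPLE_FMT_S32, "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64le" },
    };
    size_t i;

    *fmt = NULL;
    for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
        if (sample_fmt == entries[i].sample_fmt) {
            *fmt = entries[i].fmt_le;
            return 0;
        }
    }
    fprintf(stderr, "Sample format %s is not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return AVERROR(EINVAL);
}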
Example n. 16
hb_audio_resample_t* hb_audio_resample_init(enum AVSampleFormat sample_fmt,
                                            int hb_amixdown, int normalize_mix)
{
    hb_audio_resample_t *resample = calloc(1, sizeof(hb_audio_resample_t));
    if (resample == NULL)
    {
        hb_error("hb_audio_resample_init: failed to allocate resample");
        goto fail;
    }

    // avresample context, initialized in hb_audio_resample_update()
    resample->avresample = NULL;

    // we don't support planar output yet
    if (av_sample_fmt_is_planar(sample_fmt))
    {
        hb_error("hb_audio_resample_init: planar output not supported ('%s')",
                 av_get_sample_fmt_name(sample_fmt));
        goto fail;
    }

    // convert mixdown to channel_layout/matrix_encoding combo
    int matrix_encoding;
    uint64_t channel_layout = hb_ff_mixdown_xlat(hb_amixdown, &matrix_encoding);

    /*
     * When downmixing, Dual Mono to Mono is a special case:
     * the audio must remain 2-channel until all conversions are done.
     */
    if (hb_amixdown == HB_AMIXDOWN_LEFT || hb_amixdown == HB_AMIXDOWN_RIGHT)
    {
        channel_layout                 = AV_CH_LAYOUT_STEREO;
        resample->dual_mono_downmix    = 1;
        resample->dual_mono_right_only = (hb_amixdown == HB_AMIXDOWN_RIGHT);
    }
    else
    {
        resample->dual_mono_downmix = 0;
    }

    // requested output channel_layout, sample_fmt
    resample->out.channels = av_get_channel_layout_nb_channels(channel_layout);
    resample->out.channel_layout      = channel_layout;
    resample->out.matrix_encoding     = matrix_encoding;
    resample->out.normalize_mix_level = normalize_mix;
    resample->out.sample_fmt          = sample_fmt;
    resample->out.sample_size         = av_get_bytes_per_sample(sample_fmt);

    // set default input characteristics
    resample->in.sample_fmt         = resample->out.sample_fmt;
    resample->in.channel_layout     = resample->out.channel_layout;
    resample->in.center_mix_level   = HB_MIXLEV_DEFAULT;
    resample->in.surround_mix_level = HB_MIXLEV_DEFAULT;

    // by default, no conversion needed
    resample->resample_needed = 0;
    return resample;

fail:
    hb_audio_resample_free(resample);
    return NULL;
}
Example n. 17
int AudioLoader::decode_audio_frame(AVCodecContext* audioCtx,
                                    float* output,
                                    int* outputSize,
                                    AVPacket* packet) {

    // *outputSize on input  = number of bytes available for writing into 'output'
    //             on output = number of bytes actually written (interleaved FLT data)
    //E_DEBUG(EAlgorithm, "decode_audio_frame, available bytes in buffer = " << _dataSize);
    int gotFrame = 0;
    av_frame_unref(_decodedFrame); //avcodec_get_frame_defaults(_decodedFrame);

    int len = avcodec_decode_audio4(audioCtx, _decodedFrame, &gotFrame, packet);

    if (len < 0) return len; // error handling should be done outside

    if (gotFrame) {
        int inputSamples = _decodedFrame->nb_samples;
        int inputPlaneSize = av_samples_get_buffer_size(NULL, _nChannels, inputSamples,
                                                        audioCtx->sample_fmt, 1);
        int outputPlaneSize = av_samples_get_buffer_size(NULL, _nChannels, inputSamples,
                                                        AV_SAMPLE_FMT_FLT, 1);
        // the size of the output buffer in samples
        int outputBufferSamples = *outputSize / 
                (av_get_bytes_per_sample(AV_SAMPLE_FMT_FLT) * _nChannels);

        if (outputBufferSamples < inputSamples) { 
            // this should never happen, throw exception here
            throw EssentiaException("AudioLoader: Insufficient buffer size for format conversion");
        }

        if (audioCtx->sample_fmt == AV_SAMPLE_FMT_FLT) {
            // TODO: is this check even needed? Few common codecs decode directly to FLT
            // no conversion needed, direct copy from our frame to output buffer
            memcpy(output, _decodedFrame->data[0], inputPlaneSize);
        }
        else {
          int samplesWritten = avresample_convert(_convertCtxAv,
                                          (uint8_t**) &output, 
                                          outputPlaneSize,
                                          outputBufferSamples, 
                                          (uint8_t**)_decodedFrame->data,               
                                          inputPlaneSize, 
                                          inputSamples);

          if (samplesWritten < inputSamples) {
              // TODO: there may be data remaining in the internal FIFO buffer
              // to get this data: call avresample_convert() with NULL input 
              // Test if this happens in practice
              ostringstream msg;
              msg << "AudioLoader: Incomplete format conversion (some samples missing)"
                  << " from " << av_get_sample_fmt_name(_audioCtx->sample_fmt)
                  << " to "   << av_get_sample_fmt_name(AV_SAMPLE_FMT_FLT);
              throw EssentiaException(msg);
          }
        }
        *outputSize = outputPlaneSize;
    }
    else {
      E_DEBUG(EAlgorithm, "AudioLoader: tried to decode packet but didn't get any frame...");
      *outputSize = 0;
    }

    return len;
}
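The conversion branch above assumes _convertCtxAv was already configured to turn the codec's native format into interleaved AV_SAMPLE_FMT_FLT. Below is a minimal setup sketch using the public libavresample API; the member names mirror the ones used above, but the real loader may organize this differently (for instance it must fall back to a default channel layout when the codec reports none):

    // hypothetical fragment from the loader's open path
    AVAudioResampleContext *convertCtx = avresample_alloc_context();
    if (!convertCtx) {
        throw EssentiaException("AudioLoader: could not allocate resample context");
    }

    av_opt_set_int(convertCtx, "in_channel_layout",  audioCtx->channel_layout, 0);
    av_opt_set_int(convertCtx, "out_channel_layout", audioCtx->channel_layout, 0);
    av_opt_set_int(convertCtx, "in_sample_rate",     audioCtx->sample_rate,    0);
    av_opt_set_int(convertCtx, "out_sample_rate",    audioCtx->sample_rate,    0);
    av_opt_set_int(convertCtx, "in_sample_fmt",      audioCtx->sample_fmt,     0);
    av_opt_set_int(convertCtx, "out_sample_fmt",     AV_SAMPLE_FMT_FLT,        0);

    if (avresample_open(convertCtx) < 0) {
        avresample_free(&convertCtx);
        throw EssentiaException("AudioLoader: could not initialize sample format converter");
    }
    _convertCtxAv = convertCtx;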
Example n. 18
int avresample_open(AVAudioResampleContext *avr)
{
    int ret;

    /* set channel mixing parameters */
    avr->in_channels = av_get_channel_layout_nb_channels(avr->in_channel_layout);
    if (avr->in_channels <= 0 || avr->in_channels > AVRESAMPLE_MAX_CHANNELS) {
        av_log(avr, AV_LOG_ERROR, "Invalid input channel layout: %"PRIu64"\n",
               avr->in_channel_layout);
        return AVERROR(EINVAL);
    }
    avr->out_channels = av_get_channel_layout_nb_channels(avr->out_channel_layout);
    if (avr->out_channels <= 0 || avr->out_channels > AVRESAMPLE_MAX_CHANNELS) {
        av_log(avr, AV_LOG_ERROR, "Invalid output channel layout: %"PRIu64"\n",
               avr->out_channel_layout);
        return AVERROR(EINVAL);
    }
    avr->resample_channels = FFMIN(avr->in_channels, avr->out_channels);
    avr->downmix_needed    = avr->in_channels  > avr->out_channels;
    avr->upmix_needed      = avr->out_channels > avr->in_channels ||
                             (!avr->downmix_needed && (avr->am->matrix ||
                              avr->in_channel_layout != avr->out_channel_layout));
    avr->mixing_needed     = avr->downmix_needed || avr->upmix_needed;

    /* set resampling parameters */
    avr->resample_needed   = avr->in_sample_rate != avr->out_sample_rate ||
                             avr->force_resampling;

    /* select internal sample format if not specified by the user */
    if (avr->internal_sample_fmt == AV_SAMPLE_FMT_NONE &&
        (avr->mixing_needed || avr->resample_needed)) {
        enum AVSampleFormat  in_fmt = av_get_planar_sample_fmt(avr->in_sample_fmt);
        enum AVSampleFormat out_fmt = av_get_planar_sample_fmt(avr->out_sample_fmt);
        int max_bps = FFMAX(av_get_bytes_per_sample(in_fmt),
                            av_get_bytes_per_sample(out_fmt));
        if (max_bps <= 2) {
            avr->internal_sample_fmt = AV_SAMPLE_FMT_S16P;
        } else if (avr->mixing_needed) {
            avr->internal_sample_fmt = AV_SAMPLE_FMT_FLTP;
        } else {
            if (max_bps <= 4) {
                if (in_fmt  == AV_SAMPLE_FMT_S32P ||
                    out_fmt == AV_SAMPLE_FMT_S32P) {
                    if (in_fmt  == AV_SAMPLE_FMT_FLTP ||
                        out_fmt == AV_SAMPLE_FMT_FLTP) {
                        /* if one is s32 and the other is flt, use dbl */
                        avr->internal_sample_fmt = AV_SAMPLE_FMT_DBLP;
                    } else {
                        /* if one is s32 and the other is s32, s16, or u8, use s32 */
                        avr->internal_sample_fmt = AV_SAMPLE_FMT_S32P;
                    }
                } else {
                    /* if one is flt and the other is flt, s16 or u8, use flt */
                    avr->internal_sample_fmt = AV_SAMPLE_FMT_FLTP;
                }
            } else {
                /* if either is dbl, use dbl */
                avr->internal_sample_fmt = AV_SAMPLE_FMT_DBLP;
            }
        }
        av_log(avr, AV_LOG_DEBUG, "Using %s as internal sample format\n",
               av_get_sample_fmt_name(avr->internal_sample_fmt));
    }

    /* set sample format conversion parameters */
    if (avr->in_channels == 1)
        avr->in_sample_fmt = av_get_planar_sample_fmt(avr->in_sample_fmt);
    if (avr->out_channels == 1)
        avr->out_sample_fmt = av_get_planar_sample_fmt(avr->out_sample_fmt);
    avr->in_convert_needed = (avr->resample_needed || avr->mixing_needed) &&
                              avr->in_sample_fmt != avr->internal_sample_fmt;
    if (avr->resample_needed || avr->mixing_needed)
        avr->out_convert_needed = avr->internal_sample_fmt != avr->out_sample_fmt;
    else
        avr->out_convert_needed = avr->in_sample_fmt != avr->out_sample_fmt;

    /* allocate buffers */
    if (avr->mixing_needed || avr->in_convert_needed) {
        avr->in_buffer = ff_audio_data_alloc(FFMAX(avr->in_channels, avr->out_channels),
                                             0, avr->internal_sample_fmt,
                                             "in_buffer");
        if (!avr->in_buffer) {
            ret = AVERROR(EINVAL);
            goto error;
        }
    }
    if (avr->resample_needed) {
        avr->resample_out_buffer = ff_audio_data_alloc(avr->out_channels,
                                                       0, avr->internal_sample_fmt,
                                                       "resample_out_buffer");
        if (!avr->resample_out_buffer) {
            ret = AVERROR(EINVAL);
            goto error;
        }
    }
    if (avr->out_convert_needed) {
        avr->out_buffer = ff_audio_data_alloc(avr->out_channels, 0,
                                              avr->out_sample_fmt, "out_buffer");
        if (!avr->out_buffer) {
            ret = AVERROR(EINVAL);
            goto error;
        }
    }
    avr->out_fifo = av_audio_fifo_alloc(avr->out_sample_fmt, avr->out_channels,
                                        1024);
    if (!avr->out_fifo) {
        ret = AVERROR(ENOMEM);
        goto error;
    }

    /* setup contexts */
    if (avr->in_convert_needed) {
        avr->ac_in = ff_audio_convert_alloc(avr, avr->internal_sample_fmt,
                                            avr->in_sample_fmt, avr->in_channels);
        if (!avr->ac_in) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }
    if (avr->out_convert_needed) {
        enum AVSampleFormat src_fmt;
        if (avr->in_convert_needed)
            src_fmt = avr->internal_sample_fmt;
        else
            src_fmt = avr->in_sample_fmt;
        avr->ac_out = ff_audio_convert_alloc(avr, avr->out_sample_fmt, src_fmt,
                                             avr->out_channels);
        if (!avr->ac_out) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }
    if (avr->resample_needed) {
        avr->resample = ff_audio_resample_init(avr);
        if (!avr->resample) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
    }
    if (avr->mixing_needed) {
        ret = ff_audio_mix_init(avr);
        if (ret < 0)
            goto error;
    }

    return 0;

error:
    avresample_close(avr);
    return ret;
}
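For context, the fields that avresample_open() validates above are plain AVOptions set by the caller before opening the context. A minimal usage sketch of the public libavresample API (error handling trimmed):

    AVAudioResampleContext *avr = avresample_alloc_context();

    av_opt_set_int(avr, "in_channel_layout",  AV_CH_LAYOUT_5POINT1, 0);
    av_opt_set_int(avr, "out_channel_layout", AV_CH_LAYOUT_STEREO,  0);
    av_opt_set_int(avr, "in_sample_rate",     48000,                0);
    av_opt_set_int(avr, "out_sample_rate",    44100,                0);
    av_opt_set_int(avr, "in_sample_fmt",      AV_SAMPLE_FMT_FLTP,   0);
    av_opt_set_int(avr, "out_sample_fmt",     AV_SAMPLE_FMT_S16,    0);

    if (avresample_open(avr) < 0) {
        /* mixing/resampling/conversion setup failed, see the checks above */
    }

    /* conversion then goes through:
     * out_samples = avresample_convert(avr, out_data, out_plane_size, out_nb_samples,
     *                                  in_data, in_plane_size, in_nb_samples); */

    avresample_close(avr);
    avresample_free(&avr);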
Example n. 19
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ResampleContext   *s = ctx->priv;
    char buf1[64], buf2[64];
    int ret;

    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }

    if (inlink->channel_layout == outlink->channel_layout &&
        inlink->sample_rate    == outlink->sample_rate    &&
        (inlink->format        == outlink->format ||
        (av_get_channel_layout_nb_channels(inlink->channel_layout)  == 1 &&
         av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
         av_get_planar_sample_fmt(inlink->format) ==
         av_get_planar_sample_fmt(outlink->format))))
        return 0;

    if (!(s->avr = avresample_alloc_context()))
        return AVERROR(ENOMEM);

    if (s->options) {
        int ret;
        AVDictionaryEntry *e = NULL;
        while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
            av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);

        ret = av_opt_set_dict(s->avr, &s->options);
        if (ret < 0)
            return ret;
    }

    av_opt_set_int(s->avr,  "in_channel_layout", inlink ->channel_layout, 0);
    av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
    av_opt_set_int(s->avr,  "in_sample_fmt",     inlink ->format,         0);
    av_opt_set_int(s->avr, "out_sample_fmt",     outlink->format,         0);
    av_opt_set_int(s->avr,  "in_sample_rate",    inlink ->sample_rate,    0);
    av_opt_set_int(s->avr, "out_sample_rate",    outlink->sample_rate,    0);

    if ((ret = avresample_open(s->avr)) < 0)
        return ret;

    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts        = AV_NOPTS_VALUE;
    s->next_in_pts     = AV_NOPTS_VALUE;

    av_get_channel_layout_string(buf1, sizeof(buf1),
                                 -1, inlink ->channel_layout);
    av_get_channel_layout_string(buf2, sizeof(buf2),
                                 -1, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE,
           "fmt:%s srate:%d cl:%s -> fmt:%s srate:%d cl:%s\n",
           av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2);

    return 0;
}
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
    AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *bufferSrc = NULL;
    AVFilter *bufferSink = NULL;
    AVFilterContext* bufferSrcCtx = NULL;
    AVFilterContext* bufferSinkCtx = NULL;
    AVFilterInOut* outputs = avfilter_inout_alloc();
    AVFilterInOut* inputs = avfilter_inout_alloc();
    AVFilterGraph* filterGraph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filterGraph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        bufferSrc = avfilter_get_by_name("buffer");
        bufferSink = avfilter_get_by_name("buffersink");
        if (!bufferSrc || !bufferSink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
            dec_ctx->time_base.num, dec_ctx->time_base.den,
            dec_ctx->sample_aspect_ratio.num,
            dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&bufferSrcCtx, bufferSrc, "in",
            args, NULL, filterGraph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&bufferSinkCtx, bufferSink, "out",
            NULL, NULL, filterGraph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(bufferSinkCtx, "pix_fmts",
            (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    }
    else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        bufferSrc = avfilter_get_by_name("abuffer");
        bufferSink = avfilter_get_by_name("abuffersink");
        if (!bufferSrc || !bufferSink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout) {
            dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
        }
        snprintf(args, sizeof(args),
            "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
            dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
            av_get_sample_fmt_name(dec_ctx->sample_fmt),
            dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&bufferSrcCtx, bufferSrc, "in", args, NULL, filterGraph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&bufferSinkCtx, bufferSink, "out", NULL, NULL, filterGraph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(bufferSinkCtx, "sample_fmts", (uint8_t*)&enc_ctx->sample_fmt,
            sizeof(enc_ctx->sample_fmt), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(bufferSinkCtx, "channel_layouts", (uint8_t*)&enc_ctx->channel_layout,
            sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(bufferSinkCtx, "sample_rates", (uint8_t*)&enc_ctx->sample_rate,
            sizeof(enc_ctx->sample_rate), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    }
    else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = bufferSrcCtx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = bufferSinkCtx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filterGraph, filter_spec,
        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filterGraph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->BuffersrcCtx = bufferSrcCtx;
    fctx->BuffersinkCtx = bufferSinkCtx;
    fctx->FilterGraph = filterGraph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}
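Once init_filter() has filled the FilteringContext, decoded frames are pushed into BuffersrcCtx and filtered frames are pulled from BuffersinkCtx. A minimal sketch of that loop using the public buffersrc/buffersink API (the encode step belongs to the rest of the transcoding program and is only hinted at):

static int filter_and_drain(FilteringContext *fctx, AVFrame *dec_frame,
                            AVFrame *filt_frame)
{
    int ret;

    /* hand the decoded frame to the graph's source */
    ret = av_buffersrc_add_frame_flags(fctx->BuffersrcCtx, dec_frame, 0);
    if (ret < 0)
        return ret;

    /* drain every frame the graph has ready */
    while (1) {
        ret = av_buffersink_get_frame(fctx->BuffersinkCtx, filt_frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;           /* need more input, or graph fully flushed */
        if (ret < 0)
            return ret;

        /* ... encode and write filt_frame here ... */
        av_frame_unref(filt_frame);
    }
}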
Example n. 21
static void
audio_process_audio(audio_decoder_t *ad, media_buf_t *mb)
{
  const audio_class_t *ac = ad->ad_ac;
  AVFrame *frame = ad->ad_frame;
  media_pipe_t *mp = ad->ad_mp;
  media_queue_t *mq = &mp->mp_audio;
  int r;
  int got_frame;

  if(mb->mb_skip || mb->mb_stream != mq->mq_stream) 
    return;

  while(mb->mb_size) {

    if(mb->mb_cw == NULL) {
      frame->sample_rate = mb->mb_rate;
      frame->format = AV_SAMPLE_FMT_S16;
      switch(mb->mb_channels) {
      case 1:
	frame->channel_layout = AV_CH_LAYOUT_MONO;
	frame->nb_samples = mb->mb_size / 2;
	break;
      case 2:
	frame->channel_layout = AV_CH_LAYOUT_STEREO;
	frame->nb_samples = mb->mb_size / 4;
	break;
      default:
	abort();
      }
      frame->data[0] = mb->mb_data;
      frame->linesize[0] = 0;
      r = mb->mb_size;
      got_frame = 1;

    } else {

      media_codec_t *mc = mb->mb_cw;

      AVCodecContext *ctx = mc->ctx;

      if(mc->codec_id != ad->ad_in_codec_id) {
	AVCodec *codec = avcodec_find_decoder(mc->codec_id);
	TRACE(TRACE_DEBUG, "audio", "Codec changed to %s (0x%x)",
	      codec ? codec->name : "???", mc->codec_id);
	ad->ad_in_codec_id = mc->codec_id;
	ad->ad_in_sample_rate = 0;

	audio_cleanup_spdif_muxer(ad);

	ad->ad_mode = ac->ac_get_mode != NULL ?
	  ac->ac_get_mode(ad, mc->codec_id,
			  ctx ? ctx->extradata : NULL,
			  ctx ? ctx->extradata_size : 0) : AUDIO_MODE_PCM;

	if(ad->ad_mode == AUDIO_MODE_SPDIF) {
	  audio_setup_spdif_muxer(ad, codec, mq);
	} else if(ad->ad_mode == AUDIO_MODE_CODED) {
	  
	  hts_mutex_lock(&mp->mp_mutex);
	  
	  ac->ac_deliver_coded_locked(ad, mb->mb_data, mb->mb_size,
				      mb->mb_pts, mb->mb_epoch);
	  hts_mutex_unlock(&mp->mp_mutex);
	  return;
	}
      }

      if(ad->ad_spdif_muxer != NULL) {
	mb->mb_pkt.stream_index = 0;
	ad->ad_pts = mb->mb_pts;
	ad->ad_epoch = mb->mb_epoch;

	mb->mb_pts = AV_NOPTS_VALUE;
	mb->mb_dts = AV_NOPTS_VALUE;
	av_write_frame(ad->ad_spdif_muxer, &mb->mb_pkt);
	avio_flush(ad->ad_spdif_muxer->pb);
	return;
      }


      if(ad->ad_mode == AUDIO_MODE_CODED) {
	ad->ad_pts = mb->mb_pts;
	ad->ad_epoch = mb->mb_epoch;
      }


      if(ctx == NULL) {

	AVCodec *codec = avcodec_find_decoder(mc->codec_id);
	assert(codec != NULL); // Checked in libav.c

	ctx = mc->ctx = avcodec_alloc_context3(codec);

	if(ad->ad_stereo_downmix)
          ctx->request_channel_layout = AV_CH_LAYOUT_STEREO;

	if(avcodec_open2(mc->ctx, codec, NULL) < 0) {
	  av_freep(&mc->ctx);
	  return;
	}
      }

      r = avcodec_decode_audio4(ctx, frame, &got_frame, &mb->mb_pkt);
      if(r < 0)
	return;

      if(frame->sample_rate == 0) {
	frame->sample_rate = ctx->sample_rate;

	if(frame->sample_rate == 0 && mb->mb_cw->fmt_ctx)
	  frame->sample_rate = mb->mb_cw->fmt_ctx->sample_rate;

	if(frame->sample_rate == 0) {

          if(!ad->ad_sample_rate_fail) {
            ad->ad_sample_rate_fail = 1;
            TRACE(TRACE_ERROR, "Audio",
                  "Unable to determine sample rate");
          }
	  return;
        }
      }

      if(frame->channel_layout == 0) {
        frame->channel_layout = av_get_default_channel_layout(ctx->channels);
        if(frame->channel_layout == 0) {

          if(!ad->ad_channel_layout_fail) {
            ad->ad_channel_layout_fail = 1;
              TRACE(TRACE_ERROR, "Audio",
                    "Unable to map %d channels to channel layout");
          }
	  return;
	}
      }

      if(mp->mp_stats)
	mp_set_mq_meta(mq, ctx->codec, ctx);

    }

    if(mb->mb_pts != PTS_UNSET) {

      int od = 0, id = 0;

      if(ad->ad_avr != NULL) {
	od = avresample_available(ad->ad_avr) *
	  1000000LL / ad->ad_out_sample_rate;
	id = avresample_get_delay(ad->ad_avr) *
	  1000000LL / frame->sample_rate;
      }
      ad->ad_pts = mb->mb_pts - od - id;
      ad->ad_epoch = mb->mb_epoch;

      if(mb->mb_drive_clock)
	mp_set_current_time(mp, mb->mb_pts - ad->ad_delay,
			    mb->mb_epoch, mb->mb_delta);
      mb->mb_pts = PTS_UNSET; // No longer valid
    }


    mb->mb_data += r;
    mb->mb_size -= r;

    if(got_frame) {

      if(frame->sample_rate    != ad->ad_in_sample_rate ||
	 frame->format         != ad->ad_in_sample_format ||
	 frame->channel_layout != ad->ad_in_channel_layout ||
	 ad->ad_want_reconfig) {

	ad->ad_want_reconfig = 0;
	ad->ad_in_sample_rate    = frame->sample_rate;
	ad->ad_in_sample_format  = frame->format;
	ad->ad_in_channel_layout = frame->channel_layout;

	ac->ac_reconfig(ad);

	if(ad->ad_avr == NULL)
	  ad->ad_avr = avresample_alloc_context();
	else
	  avresample_close(ad->ad_avr);

	av_opt_set_int(ad->ad_avr, "in_sample_fmt",
		       ad->ad_in_sample_format, 0);
	av_opt_set_int(ad->ad_avr, "in_sample_rate", 
		       ad->ad_in_sample_rate, 0);
	av_opt_set_int(ad->ad_avr, "in_channel_layout",
		       ad->ad_in_channel_layout, 0);

	av_opt_set_int(ad->ad_avr, "out_sample_fmt",
		       ad->ad_out_sample_format, 0);
	av_opt_set_int(ad->ad_avr, "out_sample_rate",
		       ad->ad_out_sample_rate, 0);
	av_opt_set_int(ad->ad_avr, "out_channel_layout",
		       ad->ad_out_channel_layout, 0);

	char buf1[128];
	char buf2[128];

	av_get_channel_layout_string(buf1, sizeof(buf1), 
				     -1, ad->ad_in_channel_layout);
	av_get_channel_layout_string(buf2, sizeof(buf2), 
				     -1, ad->ad_out_channel_layout);

	TRACE(TRACE_DEBUG, "Audio",
	      "Converting from [%s %dHz %s] to [%s %dHz %s]",
	      buf1, ad->ad_in_sample_rate,
	      av_get_sample_fmt_name(ad->ad_in_sample_format),
	      buf2, ad->ad_out_sample_rate,
	      av_get_sample_fmt_name(ad->ad_out_sample_format));

	if(avresample_open(ad->ad_avr)) {
	  TRACE(TRACE_ERROR, "Audio", "Unable to open resampler");
	  avresample_free(&ad->ad_avr);
	}

        prop_set(mp->mp_prop_ctrl, "canAdjustVolume", PROP_SET_INT, 1);

	if(ac->ac_set_volume != NULL)
	  ac->ac_set_volume(ad, ad->ad_vol_scale);

      }
      if(ad->ad_avr != NULL) {
	avresample_convert(ad->ad_avr, NULL, 0, 0,
			   frame->data, frame->linesize[0],
			   frame->nb_samples);
      } else {
	int delay = 1000000LL * frame->nb_samples / frame->sample_rate;
	usleep(delay);
      }
    }
  }
}
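Note that avresample_convert() is called with a NULL output above, so converted samples only accumulate in the resampler's internal FIFO; a separate playback path is expected to drain them. A rough sketch of that drain step (the output buffer setup is elided and depends on the audio backend):

    int avail = avresample_available(ad->ad_avr);
    while (avail > 0) {
        uint8_t *out_planes[8];
        int n = FFMIN(avail, 1024);

        /* out_planes[] must point at storage for n samples in the
         * configured output sample format and channel layout */
        /* ... set up out_planes for the audio backend here ... */

        avresample_read(ad->ad_avr, out_planes, n);
        /* hand the n converted samples to the output device */

        avail -= n;
    }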
Example n. 22
int getAVAudioInfo(StreamPtr stream, ALuint *rate, ALenum *channels, ALenum *type)
{
    if(!stream || stream->CodecCtx->codec_type != AVMEDIA_TYPE_AUDIO)
        return 1;

    /* Get the sample type for OpenAL given the format detected by ffmpeg. */
    if(stream->CodecCtx->sample_fmt == AV_SAMPLE_FMT_U8)
        *type = AL_UNSIGNED_BYTE_SOFT;
    else if(stream->CodecCtx->sample_fmt == AV_SAMPLE_FMT_S16)
        *type = AL_SHORT_SOFT;
    else if(stream->CodecCtx->sample_fmt == AV_SAMPLE_FMT_S32)
        *type = AL_INT_SOFT;
    else if(stream->CodecCtx->sample_fmt == AV_SAMPLE_FMT_FLT)
        *type = AL_FLOAT_SOFT;
    else if(stream->CodecCtx->sample_fmt == AV_SAMPLE_FMT_DBL)
        *type = AL_DOUBLE_SOFT;
    else
    {
        fprintf(stderr, "Unsupported ffmpeg sample format: %s\n",
                av_get_sample_fmt_name(stream->CodecCtx->sample_fmt));
        return 1;
    }

    /* Get the OpenAL channel configuration using the channel layout detected
     * by ffmpeg. NOTE: some file types may not specify a channel layout. In
     * that case, one must be guessed based on the channel count. */
    if(stream->CodecCtx->channel_layout == AV_CH_LAYOUT_MONO)
        *channels = AL_MONO_SOFT;
    else if(stream->CodecCtx->channel_layout == AV_CH_LAYOUT_STEREO)
        *channels = AL_STEREO_SOFT;
    else if(stream->CodecCtx->channel_layout == AV_CH_LAYOUT_QUAD)
        *channels = AL_QUAD_SOFT;
    else if(stream->CodecCtx->channel_layout == AV_CH_LAYOUT_5POINT1_BACK)
        *channels = AL_5POINT1_SOFT;
    else if(stream->CodecCtx->channel_layout == AV_CH_LAYOUT_7POINT1)
        *channels = AL_7POINT1_SOFT;
    else if(stream->CodecCtx->channel_layout == 0)
    {
        /* Unknown channel layout. Try to guess. */
        if(stream->CodecCtx->channels == 1)
            *channels = AL_MONO_SOFT;
        else if(stream->CodecCtx->channels == 2)
            *channels = AL_STEREO_SOFT;
        else
        {
            fprintf(stderr, "Unsupported ffmpeg raw channel count: %d\n",
                    stream->CodecCtx->channels);
            return 1;
        }
    }
    else
    {
        char str[1024];
        av_get_channel_layout_string(str, sizeof(str), stream->CodecCtx->channels,
                                     stream->CodecCtx->channel_layout);
        fprintf(stderr, "Unsupported ffmpeg channel layout: %s\n", str);
        return 1;
    }

    *rate = stream->CodecCtx->sample_rate;

    return 0;
}
Example n. 23
/*
 * Audio encoding example
 */
static void audio_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    AVFrame *frame;
    AVPacket pkt;
    int i, j, k, ret, got_output;
    int buffer_size;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    printf("Encode audio file %s\n", filename);

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* frame containing input raw audio */
    frame = avcodec_alloc_frame();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples     = c->frame_size;
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* the codec gives us the frame size, in samples,
     * we calculate the size of the samples buffer in bytes */
    buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
                                             c->sample_fmt, 0);
    samples = av_malloc(buffer_size);
    if (!samples) {
        fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
                buffer_size);
        exit(1);
    }
    /* setup the data pointers in the AVFrame */
    ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                                   (const uint8_t*)samples, buffer_size, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not setup audio frame\n");
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for(i=0;i<200;i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        for (j = 0; j < c->frame_size; j++) {
            samples[2*j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2*j + k] = samples[2*j];
            t += tincr;
        }
        /* encode the samples */
        ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }
        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }
    fclose(f);

    av_freep(&samples);
    avcodec_free_frame(&frame);
    avcodec_close(c);
    av_free(c);
}
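audio_encode_example() depends on check_sample_fmt() and the select_* helpers, which are not shown. A sketch of the first two, following the pattern of FFmpeg's audio encoding example (the originals may differ in detail):

/* return 1 if the encoder supports the given sample format */
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;

    while (p && *p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}

/* pick the supported sample rate closest to 44100 Hz */
static int select_sample_rate(AVCodec *codec)
{
    const int *p;
    int best_samplerate = 0;

    if (!codec->supported_samplerates)
        return 44100;

    for (p = codec->supported_samplerates; *p; p++) {
        if (!best_samplerate || abs(44100 - *p) < abs(44100 - best_samplerate))
            best_samplerate = *p;
    }
    return best_samplerate;
}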
Example n. 24
av_cold int swr_init(struct SwrContext *s){
    int ret;
    char l1[1024], l2[1024];

    clear_context(s);

    if(s-> in_sample_fmt >= AV_SAMPLE_FMT_NB){
        av_log(s, AV_LOG_ERROR, "Requested input sample format %d is invalid\n", s->in_sample_fmt);
        return AVERROR(EINVAL);
    }
    if(s->out_sample_fmt >= AV_SAMPLE_FMT_NB){
        av_log(s, AV_LOG_ERROR, "Requested output sample format %d is invalid\n", s->out_sample_fmt);
        return AVERROR(EINVAL);
    }

    s->out.ch_count  = s-> user_out_ch_count;
    s-> in.ch_count  = s->  user_in_ch_count;
    s->used_ch_count = s->user_used_ch_count;

    s-> in_ch_layout = s-> user_in_ch_layout;
    s->out_ch_layout = s->user_out_ch_layout;

    s->int_sample_fmt= s->user_int_sample_fmt;

    if(av_get_channel_layout_nb_channels(s-> in_ch_layout) > SWR_CH_MAX) {
        av_log(s, AV_LOG_WARNING, "Input channel layout 0x%"PRIx64" is invalid or unsupported.\n", s-> in_ch_layout);
        s->in_ch_layout = 0;
    }

    if(av_get_channel_layout_nb_channels(s->out_ch_layout) > SWR_CH_MAX) {
        av_log(s, AV_LOG_WARNING, "Output channel layout 0x%"PRIx64" is invalid or unsupported.\n", s->out_ch_layout);
        s->out_ch_layout = 0;
    }

    switch(s->engine){
#if CONFIG_LIBSOXR
        extern struct Resampler const soxr_resampler;
        case SWR_ENGINE_SOXR: s->resampler = &soxr_resampler; break;
#endif
        case SWR_ENGINE_SWR : s->resampler = &swri_resampler; break;
        default:
            av_log(s, AV_LOG_ERROR, "Requested resampling engine is unavailable\n");
            return AVERROR(EINVAL);
    }

    if(!s->used_ch_count)
        s->used_ch_count= s->in.ch_count;

    if(s->used_ch_count && s-> in_ch_layout && s->used_ch_count != av_get_channel_layout_nb_channels(s-> in_ch_layout)){
        av_log(s, AV_LOG_WARNING, "Input channel layout has a different number of channels than the number of used channels, ignoring layout\n");
        s-> in_ch_layout= 0;
    }

    if(!s-> in_ch_layout)
        s-> in_ch_layout= av_get_default_channel_layout(s->used_ch_count);
    if(!s->out_ch_layout)
        s->out_ch_layout= av_get_default_channel_layout(s->out.ch_count);

    s->rematrix= s->out_ch_layout  !=s->in_ch_layout || s->rematrix_volume!=1.0 ||
                 s->rematrix_custom;

    if(s->int_sample_fmt == AV_SAMPLE_FMT_NONE){
        if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_S16P){
            s->int_sample_fmt= AV_SAMPLE_FMT_S16P;
        }else if(   av_get_planar_sample_fmt(s-> in_sample_fmt) == AV_SAMPLE_FMT_S32P
                 && av_get_planar_sample_fmt(s->out_sample_fmt) == AV_SAMPLE_FMT_S32P
                 && !s->rematrix
                 && s->engine != SWR_ENGINE_SOXR){
            s->int_sample_fmt= AV_SAMPLE_FMT_S32P;
        }else if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_FLTP){
            s->int_sample_fmt= AV_SAMPLE_FMT_FLTP;
        }else{
            av_log(s, AV_LOG_DEBUG, "Using double precision mode\n");
            s->int_sample_fmt= AV_SAMPLE_FMT_DBLP;
        }
    }

    if(   s->int_sample_fmt != AV_SAMPLE_FMT_S16P
        &&s->int_sample_fmt != AV_SAMPLE_FMT_S32P
        &&s->int_sample_fmt != AV_SAMPLE_FMT_FLTP
        &&s->int_sample_fmt != AV_SAMPLE_FMT_DBLP){
        av_log(s, AV_LOG_ERROR, "Requested sample format %s is not supported internally, S16/S32/FLT/DBL is supported\n", av_get_sample_fmt_name(s->int_sample_fmt));
        return AVERROR(EINVAL);
    }

    set_audiodata_fmt(&s-> in, s-> in_sample_fmt);
    set_audiodata_fmt(&s->out, s->out_sample_fmt);

    if (s->firstpts_in_samples != AV_NOPTS_VALUE) {
        if (!s->async && s->min_compensation >= FLT_MAX/2)
            s->async = 1;
        s->firstpts =
        s->outpts   = s->firstpts_in_samples * s->out_sample_rate;
    } else
        s->firstpts = AV_NOPTS_VALUE;

    if (s->async) {
        if (s->min_compensation >= FLT_MAX/2)
            s->min_compensation = 0.001;
        if (s->async > 1.0001) {
            s->max_soft_compensation = s->async / (double) s->in_sample_rate;
        }
    }

    if (s->out_sample_rate!=s->in_sample_rate || (s->flags & SWR_FLAG_RESAMPLE)){
        s->resample = s->resampler->init(s->resample, s->out_sample_rate, s->in_sample_rate, s->filter_size, s->phase_shift, s->linear_interp, s->cutoff, s->int_sample_fmt, s->filter_type, s->kaiser_beta, s->precision, s->cheby);
        if (!s->resample) {
            av_log(s, AV_LOG_ERROR, "Failed to initilaize resampler\n");
            return AVERROR(ENOMEM);
        }
    }else
        s->resampler->free(&s->resample);
    if(    s->int_sample_fmt != AV_SAMPLE_FMT_S16P
        && s->int_sample_fmt != AV_SAMPLE_FMT_S32P
        && s->int_sample_fmt != AV_SAMPLE_FMT_FLTP
        && s->int_sample_fmt != AV_SAMPLE_FMT_DBLP
        && s->resample){
        av_log(s, AV_LOG_ERROR, "Resampling only supported with internal s16/s32/flt/dbl\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

#define RSC 1 //FIXME finetune
    if(!s-> in.ch_count)
        s-> in.ch_count= av_get_channel_layout_nb_channels(s-> in_ch_layout);
    if(!s->used_ch_count)
        s->used_ch_count= s->in.ch_count;
    if(!s->out.ch_count)
        s->out.ch_count= av_get_channel_layout_nb_channels(s->out_ch_layout);

    if(!s-> in.ch_count){
        av_assert0(!s->in_ch_layout);
        av_log(s, AV_LOG_ERROR, "Input channel count and layout are unset\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    av_get_channel_layout_string(l1, sizeof(l1), s-> in.ch_count, s-> in_ch_layout);
    av_get_channel_layout_string(l2, sizeof(l2), s->out.ch_count, s->out_ch_layout);
    if (s->out_ch_layout && s->out.ch_count != av_get_channel_layout_nb_channels(s->out_ch_layout)) {
        av_log(s, AV_LOG_ERROR, "Output channel layout %s mismatches specified channel count %d\n", l2, s->out.ch_count);
        ret = AVERROR(EINVAL);
        goto fail;
    }
    if (s->in_ch_layout && s->used_ch_count != av_get_channel_layout_nb_channels(s->in_ch_layout)) {
        av_log(s, AV_LOG_ERROR, "Input channel layout %s mismatches specified channel count %d\n", l1, s->used_ch_count);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if ((!s->out_ch_layout || !s->in_ch_layout) && s->used_ch_count != s->out.ch_count && !s->rematrix_custom) {
        av_log(s, AV_LOG_ERROR, "Rematrix is needed between %s and %s "
               "but there is not enough information to do it\n", l1, l2);
        ret = AVERROR(EINVAL);
        goto fail;
    }

av_assert0(s->used_ch_count);
av_assert0(s->out.ch_count);
    s->resample_first= RSC*s->out.ch_count/s->in.ch_count - RSC < s->out_sample_rate/(float)s-> in_sample_rate - 1.0;

    s->in_buffer= s->in;
    s->silence  = s->in;
    s->drop_temp= s->out;

    if(!s->resample && !s->rematrix && !s->channel_map && !s->dither.method){
        s->full_convert = swri_audio_convert_alloc(s->out_sample_fmt,
                                                   s-> in_sample_fmt, s-> in.ch_count, NULL, 0);
        return 0;
    }

    s->in_convert = swri_audio_convert_alloc(s->int_sample_fmt,
                                             s-> in_sample_fmt, s->used_ch_count, s->channel_map, 0);
    s->out_convert= swri_audio_convert_alloc(s->out_sample_fmt,
                                             s->int_sample_fmt, s->out.ch_count, NULL, 0);

    if (!s->in_convert || !s->out_convert) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    s->postin= s->in;
    s->preout= s->out;
    s->midbuf= s->in;

    if(s->channel_map){
        s->postin.ch_count=
        s->midbuf.ch_count= s->used_ch_count;
        if(s->resample)
            s->in_buffer.ch_count= s->used_ch_count;
    }
    if(!s->resample_first){
        s->midbuf.ch_count= s->out.ch_count;
        if(s->resample)
            s->in_buffer.ch_count = s->out.ch_count;
    }

    set_audiodata_fmt(&s->postin, s->int_sample_fmt);
    set_audiodata_fmt(&s->midbuf, s->int_sample_fmt);
    set_audiodata_fmt(&s->preout, s->int_sample_fmt);

    if(s->resample){
        set_audiodata_fmt(&s->in_buffer, s->int_sample_fmt);
    }

    if ((ret = swri_dither_init(s, s->out_sample_fmt, s->int_sample_fmt)) < 0)
        goto fail;

    if(s->rematrix || s->dither.method) {
        ret = swri_rematrix_init(s);
        if (ret < 0)
            goto fail;
    }

    return 0;
fail:
    swr_close(s);
    return ret;

}
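For comparison with the internals above, the caller-side setup normally goes through swr_alloc_set_opts() before swr_init(). A minimal usage sketch of the public libswresample API:

    SwrContext *swr = swr_alloc_set_opts(NULL,
                                         AV_CH_LAYOUT_STEREO,  /* out layout */
                                         AV_SAMPLE_FMT_S16,    /* out format */
                                         44100,                /* out rate   */
                                         AV_CH_LAYOUT_5POINT1, /* in layout  */
                                         AV_SAMPLE_FMT_FLTP,   /* in format  */
                                         48000,                /* in rate    */
                                         0, NULL);

    if (!swr || swr_init(swr) < 0) {
        /* invalid parameter combination, see the checks in swr_init() above */
    }

    /* converted = swr_convert(swr, out_data, out_nb_samples,
     *                         (const uint8_t **)in_data, in_nb_samples); */

    swr_free(&swr);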
Example n. 25
int swr_init(struct SwrContext *s){
    s->in_buffer_index= 0;
    s->in_buffer_count= 0;
    s->resample_in_constraint= 0;
    free_temp(&s->postin);
    free_temp(&s->midbuf);
    free_temp(&s->preout);
    free_temp(&s->in_buffer);
    free_temp(&s->dither);
    swri_audio_convert_free(&s-> in_convert);
    swri_audio_convert_free(&s->out_convert);
    swri_audio_convert_free(&s->full_convert);
    swri_rematrix_free(s);

    s->flushed = 0;

    if(s-> in_sample_fmt >= AV_SAMPLE_FMT_NB){
        av_log(s, AV_LOG_ERROR, "Requested input sample format %d is invalid\n", s->in_sample_fmt);
        return AVERROR(EINVAL);
    }
    if(s->out_sample_fmt >= AV_SAMPLE_FMT_NB){
        av_log(s, AV_LOG_ERROR, "Requested output sample format %d is invalid\n", s->out_sample_fmt);
        return AVERROR(EINVAL);
    }

    if(s->int_sample_fmt == AV_SAMPLE_FMT_NONE){
        if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_S16P){
            s->int_sample_fmt= AV_SAMPLE_FMT_S16P;
        }else if(av_get_planar_sample_fmt(s->in_sample_fmt) <= AV_SAMPLE_FMT_FLTP){
            s->int_sample_fmt= AV_SAMPLE_FMT_FLTP;
        }else{
            av_log(s, AV_LOG_DEBUG, "Using double precision mode\n");
            s->int_sample_fmt= AV_SAMPLE_FMT_DBLP;
        }
    }

    if(   s->int_sample_fmt != AV_SAMPLE_FMT_S16P
        &&s->int_sample_fmt != AV_SAMPLE_FMT_S32P
        &&s->int_sample_fmt != AV_SAMPLE_FMT_FLTP
        &&s->int_sample_fmt != AV_SAMPLE_FMT_DBLP){
        av_log(s, AV_LOG_ERROR, "Requested sample format %s is not supported internally, S16/S32/FLT/DBL is supported\n", av_get_sample_fmt_name(s->int_sample_fmt));
        return AVERROR(EINVAL);
    }

    set_audiodata_fmt(&s-> in, s-> in_sample_fmt);
    set_audiodata_fmt(&s->out, s->out_sample_fmt);

    if (s->out_sample_rate!=s->in_sample_rate || (s->flags & SWR_FLAG_RESAMPLE)){
        s->resample = swri_resample_init(s->resample, s->out_sample_rate, s->in_sample_rate, s->filter_size, s->phase_shift, s->linear_interp, s->cutoff, s->int_sample_fmt);
    }else
        swri_resample_free(&s->resample);
    if(    s->int_sample_fmt != AV_SAMPLE_FMT_S16P
        && s->int_sample_fmt != AV_SAMPLE_FMT_S32P
        && s->int_sample_fmt != AV_SAMPLE_FMT_FLTP
        && s->int_sample_fmt != AV_SAMPLE_FMT_DBLP
        && s->resample){
        av_log(s, AV_LOG_ERROR, "Resampling only supported with internal s16/s32/flt/dbl\n");
        return -1;
    }

    if(!s->used_ch_count)
        s->used_ch_count= s->in.ch_count;

    if(s->used_ch_count && s-> in_ch_layout && s->used_ch_count != av_get_channel_layout_nb_channels(s-> in_ch_layout)){
        av_log(s, AV_LOG_WARNING, "Input channel layout has a different number of channels than the number of used channels, ignoring layout\n");
        s-> in_ch_layout= 0;
    }

    if(!s-> in_ch_layout)
        s-> in_ch_layout= av_get_default_channel_layout(s->used_ch_count);
    if(!s->out_ch_layout)
        s->out_ch_layout= av_get_default_channel_layout(s->out.ch_count);

    s->rematrix= s->out_ch_layout  !=s->in_ch_layout || s->rematrix_volume!=1.0 ||
                 s->rematrix_custom;

#define RSC 1 //FIXME finetune
    if(!s-> in.ch_count)
        s-> in.ch_count= av_get_channel_layout_nb_channels(s-> in_ch_layout);
    if(!s->used_ch_count)
        s->used_ch_count= s->in.ch_count;
    if(!s->out.ch_count)
        s->out.ch_count= av_get_channel_layout_nb_channels(s->out_ch_layout);

    if(!s-> in.ch_count){
        av_assert0(!s->in_ch_layout);
        av_log(s, AV_LOG_ERROR, "Input channel count and layout are unset\n");
        return -1;
    }

    if ((!s->out_ch_layout || !s->in_ch_layout) && s->used_ch_count != s->out.ch_count && !s->rematrix_custom) {
        av_log(s, AV_LOG_ERROR, "Rematrix is needed but there is not enough information to do it\n");
        return -1;
    }

av_assert0(s->used_ch_count);
av_assert0(s->out.ch_count);
    s->resample_first= RSC*s->out.ch_count/s->in.ch_count - RSC < s->out_sample_rate/(float)s-> in_sample_rate - 1.0;

    s->in_buffer= s->in;

    if(!s->resample && !s->rematrix && !s->channel_map && !s->dither_method){
        s->full_convert = swri_audio_convert_alloc(s->out_sample_fmt,
                                                   s-> in_sample_fmt, s-> in.ch_count, NULL, 0);
        return 0;
    }

    s->in_convert = swri_audio_convert_alloc(s->int_sample_fmt,
                                             s-> in_sample_fmt, s->used_ch_count, s->channel_map, 0);
    s->out_convert= swri_audio_convert_alloc(s->out_sample_fmt,
                                             s->int_sample_fmt, s->out.ch_count, NULL, 0);


    s->postin= s->in;
    s->preout= s->out;
    s->midbuf= s->in;

    if(s->channel_map){
        s->postin.ch_count=
        s->midbuf.ch_count= s->used_ch_count;
        if(s->resample)
            s->in_buffer.ch_count= s->used_ch_count;
    }
    if(!s->resample_first){
        s->midbuf.ch_count= s->out.ch_count;
        if(s->resample)
            s->in_buffer.ch_count = s->out.ch_count;
    }

    set_audiodata_fmt(&s->postin, s->int_sample_fmt);
    set_audiodata_fmt(&s->midbuf, s->int_sample_fmt);
    set_audiodata_fmt(&s->preout, s->int_sample_fmt);

    if(s->resample){
        set_audiodata_fmt(&s->in_buffer, s->int_sample_fmt);
    }

    s->dither = s->preout;

    if(s->rematrix || s->dither_method)
        return swri_rematrix_init(s);

    return 0;
}
Example n. 26
unsigned int EncoderFfmpegResample::reSampleMixxx(AVFrame *inframe, quint8 **outbuffer) {
    quint8 *l_ptrBuf = NULL;
    quint8 *l_ptrDataSrc1 = NULL;
    quint8 *l_ptrDataSrc2 = NULL;
    bool l_bSupported = false;
    qint64 l_lInReadBytes = av_samples_get_buffer_size(NULL, m_pCodecCtx->channels,
                            inframe->nb_samples,
                            m_pCodecCtx->sample_fmt, 1);

    // Force stereo
    qint64 l_lOutReadBytes = av_samples_get_buffer_size(NULL, m_pCodecCtx->channels,
                             inframe->nb_samples,
                             m_pOutSampleFmt, 1);

    if (m_pCodecCtx->channels == 1) {
        l_ptrDataSrc1 = inframe->data[0];
        l_ptrDataSrc2 = inframe->data[0];
    } else {
        l_ptrDataSrc1 = inframe->data[0];
        l_ptrDataSrc2 = inframe->data[1];
    }
    // This is a gap frame or the data is badly broken,
    // so return before anything goes wrong.
    if (inframe->nb_samples <= 0) {
        qDebug() << "EncoderFfmpegResample::reSample: nb_samples is zero";
        return 0;
    }

    if (l_lInReadBytes < 0) {
        return 0;
    }
    l_ptrBuf = (quint8 *)av_malloc(l_lOutReadBytes);

    switch (m_pCodecCtx->sample_fmt) {
#if LIBAVCODEC_VERSION_INT >= 3482368
    case AV_SAMPLE_FMT_FLTP: {
        if (m_pCodecCtx->channels > 1) {
            SampleUtil::interleaveBuffer((CSAMPLE *)l_ptrBuf, (CSAMPLE *)l_ptrDataSrc1,
                                         (CSAMPLE *)l_ptrDataSrc2, l_lOutReadBytes / 8);
        } else {
            memcpy(l_ptrBuf, l_ptrDataSrc1, l_lInReadBytes);
        }
        outbuffer[0] = l_ptrBuf;
        l_bSupported = true;
    }
    break;
    case AV_SAMPLE_FMT_S16P: {
        if (m_pCodecCtx->channels > 1) {
            quint8 *l_ptrConversion = (quint8 *)av_malloc(l_lInReadBytes);
            quint16 *l_ptrDest = (quint16 *) l_ptrConversion;
            quint16 *l_ptrSrc1 = (quint16 *) l_ptrDataSrc1;
            quint16 *l_ptrSrc2 = (quint16 *) l_ptrDataSrc2;
            // De-Interleave (a.k.a remove planar to PCM)
            for (int i = 0; i < (l_lInReadBytes / 4); ++i) {
                l_ptrDest[2 * i] = l_ptrSrc1[i];
                l_ptrDest[2 * i + 1] = l_ptrSrc2[i];
            }
            SampleUtil::convertS16ToFloat32((CSAMPLE *)l_ptrBuf, (SAMPLE *)l_ptrConversion, l_lInReadBytes / 2);
        } else {
            SampleUtil::convertS16ToFloat32((CSAMPLE *)l_ptrBuf, (SAMPLE *)l_ptrDataSrc1, l_lInReadBytes / 2);
        }
        outbuffer[0] = l_ptrBuf;
        l_bSupported = true;
    }
    break;
#endif
    case AV_SAMPLE_FMT_FLT: {
        memcpy(l_ptrBuf, l_ptrDataSrc1, l_lInReadBytes);
        outbuffer[0] = l_ptrBuf;
        l_bSupported = true;
    }
    break;
    case AV_SAMPLE_FMT_S16: {
        SampleUtil::convertS16ToFloat32((CSAMPLE *)l_ptrBuf, (SAMPLE *)l_ptrDataSrc1, l_lInReadBytes / 2);
        outbuffer[0] = l_ptrBuf;
        l_bSupported = true;
    }
    break;

    case AV_SAMPLE_FMT_NONE:
    case AV_SAMPLE_FMT_U8:
    case AV_SAMPLE_FMT_S32:
    case AV_SAMPLE_FMT_DBL:
    case AV_SAMPLE_FMT_U8P:
#if LIBAVCODEC_VERSION_INT >= 3482368
    case AV_SAMPLE_FMT_S32P:
    case AV_SAMPLE_FMT_DBLP:
#endif
    case AV_SAMPLE_FMT_NB:
    default:
        qDebug() << "Unsupported sample format:" << av_get_sample_fmt_name(m_pCodecCtx->sample_fmt);
        break;
    }

    if (l_bSupported == true) {
        return l_lOutReadBytes;
    }

    // If conversion is unsupported still return silence to prevent crash
    memset(l_ptrBuf, 0x00, l_lOutReadBytes);
    outbuffer[0] = l_ptrBuf;
    return l_lOutReadBytes;
}
Example n. 27
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *last_filter;
    const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
    InputStream *ist = ifilter->ist;
    InputFile     *f = input_files[ist->file_index];
    char args[255], name[255];
    int ret, pad_idx = 0;

    snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
             ":channel_layout=0x%"PRIx64,
             1, ist->dec_ctx->sample_rate,
             ist->dec_ctx->sample_rate,
             av_get_sample_fmt_name(ist->dec_ctx->sample_fmt),
             ist->dec_ctx->channel_layout);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
                                            name, args, NULL,
                                            fg->graph)) < 0)
        return ret;
    last_filter = ifilter->filter;

    if (audio_sync_method > 0) {
        AVFilterContext *async;
        int  len = 0;

        av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Use the "
               "asyncts audio filter instead.\n");

        if (audio_sync_method > 1)
            len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
                            "max_comp=%d:", audio_sync_method);
        snprintf(args + len, sizeof(args) - len, "min_delta=%f",
                 audio_drift_threshold);

        snprintf(name, sizeof(name), "graph %d audio sync for input stream %d:%d",
                 fg->index, ist->file_index, ist->st->index);
        ret = avfilter_graph_create_filter(&async,
                                           avfilter_get_by_name("asyncts"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, 0, async, 0);
        if (ret < 0)
            return ret;

        last_filter = async;
    }
    if (audio_volume != 256) {
        AVFilterContext *volume;

        av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
               "audio filter instead.\n");

        snprintf(args, sizeof(args), "volume=%f", audio_volume / 256.0);

        snprintf(name, sizeof(name), "graph %d volume for input stream %d:%d",
                 fg->index, ist->file_index, ist->st->index);
        ret = avfilter_graph_create_filter(&volume,
                                           avfilter_get_by_name("volume"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, 0, volume, 0);
        if (ret < 0)
            return ret;

        last_filter = volume;
    }

    snprintf(name, sizeof(name), "trim for input stream %d:%d",
             ist->file_index, ist->st->index);
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;

    return 0;
}
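
For reference, with a typical 44.1 kHz stereo stream decoding to signed 16-bit samples, the args string assembled above for the abuffer source would expand roughly as follows (the concrete values are illustrative):

/* Illustrative expansion of the abuffer argument string built above:
 *   time_base=1/44100:sample_rate=44100:sample_fmt=s16:channel_layout=0x3
 * where "s16" is what av_get_sample_fmt_name(AV_SAMPLE_FMT_S16) returns
 * and 0x3 is AV_CH_LAYOUT_STEREO (front left | front right). */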
Example No. 28
int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in)
{
    int use_generic = 1;
    int len         = in->nb_samples;
    int p;

    if (ac->dc) {
        /* dithered conversion */
        av_dlog(ac->avr, "%d samples - audio_convert: %s to %s (dithered)\n",
                len, av_get_sample_fmt_name(ac->in_fmt),
                av_get_sample_fmt_name(ac->out_fmt));

        return ff_convert_dither(ac->dc, out, in);
    }

    /* determine whether to use the optimized function based on pointer and
       samples alignment in both the input and output */
    if (ac->has_optimized_func) {
        int ptr_align     = FFMIN(in->ptr_align,     out->ptr_align);
        int samples_align = FFMIN(in->samples_align, out->samples_align);
        int aligned_len   = FFALIGN(len, ac->samples_align);
        if (!(ptr_align % ac->ptr_align) && samples_align >= aligned_len) {
            len = aligned_len;
            use_generic = 0;
        }
    }
    av_dlog(ac->avr, "%d samples - audio_convert: %s to %s (%s)\n", len,
            av_get_sample_fmt_name(ac->in_fmt),
            av_get_sample_fmt_name(ac->out_fmt),
            use_generic ? ac->func_descr_generic : ac->func_descr);

    if (ac->apply_map) {
        ChannelMapInfo *map = &ac->avr->ch_map_info;

        if (!av_sample_fmt_is_planar(ac->out_fmt)) {
            av_log(ac->avr, AV_LOG_ERROR, "cannot remap packed format during conversion\n");
            return AVERROR(EINVAL);
        }

        if (map->do_remap) {
            if (av_sample_fmt_is_planar(ac->in_fmt)) {
                conv_func_flat *convert = use_generic ? ac->conv_flat_generic :
                                                        ac->conv_flat;

                for (p = 0; p < ac->planes; p++)
                    if (map->channel_map[p] >= 0)
                        convert(out->data[p], in->data[map->channel_map[p]], len);
            } else {
                uint8_t *data[AVRESAMPLE_MAX_CHANNELS];
                conv_func_deinterleave *convert = use_generic ?
                                                  ac->conv_deinterleave_generic :
                                                  ac->conv_deinterleave;

                for (p = 0; p < ac->channels; p++)
                    data[map->input_map[p]] = out->data[p];

                convert(data, in->data[0], len, ac->channels);
            }
        }
        if (map->do_copy || map->do_zero) {
            for (p = 0; p < ac->planes; p++) {
                if (map->channel_copy[p])
                    memcpy(out->data[p], out->data[map->channel_copy[p]],
                           len * out->stride);
                else if (map->channel_zero[p])
                    av_samples_set_silence(&out->data[p], 0, len, 1, ac->out_fmt);
            }
        }
    } else {
        switch (ac->func_type) {
        case CONV_FUNC_TYPE_FLAT: {
            if (!in->is_planar)
                len *= in->channels;
            if (use_generic) {
                for (p = 0; p < ac->planes; p++)
                    ac->conv_flat_generic(out->data[p], in->data[p], len);
            } else {
                for (p = 0; p < ac->planes; p++)
                    ac->conv_flat(out->data[p], in->data[p], len);
            }
            break;
        }
        case CONV_FUNC_TYPE_INTERLEAVE:
            if (use_generic)
                ac->conv_interleave_generic(out->data[0], in->data, len,
                                            ac->channels);
            else
                ac->conv_interleave(out->data[0], in->data, len, ac->channels);
            break;
        case CONV_FUNC_TYPE_DEINTERLEAVE:
            if (use_generic)
                ac->conv_deinterleave_generic(out->data, in->data[0], len,
                                              ac->channels);
            else
                ac->conv_deinterleave(out->data, in->data[0], len,
                                      ac->channels);
            break;
        }
    }

    out->nb_samples = in->nb_samples;
    return 0;
}
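
The optimized-path decision above hinges only on pointer and sample-count alignment. Below is a small standalone sketch of that check with illustrative numbers; FFMIN and FFALIGN are the libavutil macros, everything else is made up for the example.

#include <libavutil/common.h>   /* FFMIN, FFALIGN */
#include <stdio.h>

int main(void)
{
    int len                = 1000;            /* nb_samples, illustrative */
    int ptr_align          = FFMIN(32, 16);   /* min of input/output pointer alignment */
    int samples_align      = FFMIN(1024, 1024);
    int required_ptr_align = 16;              /* alignment the optimized kernel needs */
    int aligned_len        = FFALIGN(len, 8); /* round len up to the kernel's block size */

    /* Take the optimized path only when the pointers are sufficiently aligned and
       both buffers are long enough to process the rounded-up sample count. */
    if (!(ptr_align % required_ptr_align) && samples_align >= aligned_len)
        printf("optimized path, processing %d samples\n", aligned_len);
    else
        printf("generic path, processing %d samples\n", len);
    return 0;
}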
Example No. 29
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AShowInfoContext *s  = ctx->priv;
    char chlayout_str[128];
    uint32_t checksum = 0;
    int channels    = inlink->channels;
    int planar      = av_sample_fmt_is_planar(buf->format);
    int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
    int data_size   = buf->nb_samples * block_align;
    int planes      = planar ? channels : 1;
    int i;
    void *tmp_ptr = av_realloc_array(s->plane_checksums, channels, sizeof(*s->plane_checksums));

    if (!tmp_ptr)
        return AVERROR(ENOMEM);
    s->plane_checksums = tmp_ptr;

    for (i = 0; i < planes; i++) {
        uint8_t *data = buf->extended_data[i];

        s->plane_checksums[i] = av_adler32_update(0, data, data_size);
        checksum = i ? av_adler32_update(checksum, data, data_size) :
                       s->plane_checksums[0];
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
                                 buf->channel_layout);

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
           "checksum:%08"PRIX32" ",
           inlink->frame_count_out,
           av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
           av_frame_get_pkt_pos(buf),
           av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
           buf->sample_rate, buf->nb_samples,
           checksum);

    av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
    for (i = 0; i < planes; i++)
        av_log(ctx, AV_LOG_INFO, "%08"PRIX32" ", s->plane_checksums[i]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    for (i = 0; i < buf->nb_side_data; i++) {
        AVFrameSideData *sd = buf->side_data[i];

        av_log(ctx, AV_LOG_INFO, "  side data - ");
        switch (sd->type) {
        case AV_FRAME_DATA_MATRIXENCODING: dump_matrixenc (ctx, sd); break;
        case AV_FRAME_DATA_DOWNMIX_INFO:   dump_downmix   (ctx, sd); break;
        case AV_FRAME_DATA_REPLAYGAIN:     dump_replaygain(ctx, sd); break;
        case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: dump_audio_service_type(ctx, sd); break;
        default:                           dump_unknown   (ctx, sd); break;
        }

        av_log(ctx, AV_LOG_INFO, "\n");
    }

    return ff_filter_frame(inlink->dst->outputs[0], buf);
}
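
As a side note, the running checksum above is just the per-plane Adler-32 values chained together. A minimal standalone sketch of that chaining, seeded with 0 exactly as in filter_frame() (the buffer contents are arbitrary):

#include <libavutil/adler32.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t plane0[16] = { 0 }, plane1[16] = { 1 };

    /* checksum each plane on its own, seeded with 0 as in filter_frame() */
    unsigned long c0 = av_adler32_update(0, plane0, sizeof(plane0));
    unsigned long c1 = av_adler32_update(0, plane1, sizeof(plane1));

    /* the overall checksum starts from the first plane's value and folds in the rest */
    unsigned long all = av_adler32_update(c0, plane1, sizeof(plane1));

    printf("plane0:%08lX plane1:%08lX overall:%08lX\n", c0, c1, all);
    return 0;
}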
Example No. 30
void ff_audio_convert_set_func(AudioConvert *ac, enum AVSampleFormat out_fmt,
                               enum AVSampleFormat in_fmt, int channels,
                               int ptr_align, int samples_align,
                               const char *descr, void *conv)
{
    int found = 0;

    switch (ac->func_type) {
    case CONV_FUNC_TYPE_FLAT:
        if (av_get_packed_sample_fmt(ac->in_fmt)  == in_fmt &&
            av_get_packed_sample_fmt(ac->out_fmt) == out_fmt) {
            ac->conv_flat     = conv;
            ac->func_descr    = descr;
            ac->ptr_align     = ptr_align;
            ac->samples_align = samples_align;
            if (ptr_align == 1 && samples_align == 1) {
                ac->conv_flat_generic  = conv;
                ac->func_descr_generic = descr;
            } else {
                ac->has_optimized_func = 1;
            }
            found = 1;
        }
        break;
    case CONV_FUNC_TYPE_INTERLEAVE:
        if (ac->in_fmt == in_fmt && ac->out_fmt == out_fmt &&
            (!channels || ac->channels == channels)) {
            ac->conv_interleave = conv;
            ac->func_descr      = descr;
            ac->ptr_align       = ptr_align;
            ac->samples_align   = samples_align;
            if (ptr_align == 1 && samples_align == 1) {
                ac->conv_interleave_generic = conv;
                ac->func_descr_generic      = descr;
            } else {
                ac->has_optimized_func = 1;
            }
            found = 1;
        }
        break;
    case CONV_FUNC_TYPE_DEINTERLEAVE:
        if (ac->in_fmt == in_fmt && ac->out_fmt == out_fmt &&
            (!channels || ac->channels == channels)) {
            ac->conv_deinterleave = conv;
            ac->func_descr        = descr;
            ac->ptr_align         = ptr_align;
            ac->samples_align     = samples_align;
            if (ptr_align == 1 && samples_align == 1) {
                ac->conv_deinterleave_generic = conv;
                ac->func_descr_generic        = descr;
            } else {
                ac->has_optimized_func = 1;
            }
            found = 1;
        }
        break;
    }
    if (found) {
        av_log(ac->avr, AV_LOG_DEBUG, "audio_convert: found function: %-4s "
               "to %-4s (%s)\n", av_get_sample_fmt_name(ac->in_fmt),
               av_get_sample_fmt_name(ac->out_fmt), descr);
    }
}
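
For illustration, a registration call for an optimized flat conversion would typically look like the sketch below. The kernel name, description string, and alignment values are hypothetical and not taken from an actual libavresample DSP file; only the ff_audio_convert_set_func signature and the flat conversion callback shape (dst, src, len) come from the code above.

/* hypothetical SIMD kernel: s16 -> flt conversion over one flat buffer */
void conv_s16_to_flt_example(uint8_t *dst, const uint8_t *src, int len);

static void register_example_conversion(AudioConvert *ac)
{
    /* 16-byte pointer alignment and 8-sample blocks: anything stricter than 1/1
       also marks the context as having an optimized (non-generic) function */
    ff_audio_convert_set_func(ac, AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, 0,
                              16, 8, "example SIMD", conv_s16_to_flt_example);
}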