Example #1
File: af_volume.c Project: AVLeo/libav
static av_cold void volume_init(VolumeContext *vol)
{
    vol->samples_align = 1;

    switch (av_get_packed_sample_fmt(vol->sample_fmt)) {
    case AV_SAMPLE_FMT_U8:
        if (vol->volume_i < 0x1000000)
            vol->scale_samples = scale_samples_u8_small;
        else
            vol->scale_samples = scale_samples_u8;
        break;
    case AV_SAMPLE_FMT_S16:
        if (vol->volume_i < 0x10000)
            vol->scale_samples = scale_samples_s16_small;
        else
            vol->scale_samples = scale_samples_s16;
        break;
    case AV_SAMPLE_FMT_S32:
        vol->scale_samples = scale_samples_s32;
        break;
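    /* float and double input is scaled through the AVFloatDSPContext set up
     * below; samples_align mirrors the length constraints of
     * vector_fmul_scalar() (multiple of 4) and vector_dmul_scalar()
     * (multiple of 8) */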
    case AV_SAMPLE_FMT_FLT:
        avpriv_float_dsp_init(&vol->fdsp, 0);
        vol->samples_align = 4;
        break;
    case AV_SAMPLE_FMT_DBL:
        avpriv_float_dsp_init(&vol->fdsp, 0);
        vol->samples_align = 8;
        break;
    }

    if (ARCH_X86)
        ff_volume_init_x86(vol);
}
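A minimal usage sketch (not taken from any of the projects above, and assuming access to FFmpeg's internal libavutil/float_dsp.h header, since avpriv_float_dsp_init() is not public API): the context filled in by avpriv_float_dsp_init() is used through its function pointers. vector_fmul_scalar() expects 16-byte-aligned buffers and a length that is a multiple of 4, which is why volume_init() above sets samples_align = 4 for AV_SAMPLE_FMT_FLT. The helper name scale_floats is hypothetical.

#include "libavutil/float_dsp.h"

static void scale_floats(float *dst, const float *src, float gain, int len)
{
    AVFloatDSPContext fdsp;

    /* second argument 0: arch-optimized (possibly non bit-exact) routines allowed */
    avpriv_float_dsp_init(&fdsp, 0);

    /* dst and src must be 16-byte aligned; len must be a multiple of 4 */
    fdsp.vector_fmul_scalar(dst, src, gain, len);
}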
Example #2
static av_cold int decode_init(AVCodecContext * avctx) {
    NellyMoserDecodeContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->imdct_out = s->imdct_buf[0];
    s->imdct_prev = s->imdct_buf[1];
    av_lfg_init(&s->random_state, 0);
    ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);

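    /* request bit-exact DSP routines when the caller asks for bit-exact output */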
    avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);

    s->scale_bias = 1.0/(32768*8);
    avctx->sample_fmt = AV_SAMPLE_FMT_FLT;

    /* Generate overlap window */
    if (!ff_sine_128[127])
        ff_init_ff_sine_windows(7);

    avctx->channels       = 1;
    avctx->channel_layout = AV_CH_LAYOUT_MONO;

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    return 0;
}
Example #3
static av_cold int decode_init(AVCodecContext *avctx)
{
    ATRAC3PContext *ctx = avctx->priv_data;
    int i, ch, ret;

    ff_atrac3p_init_vlcs();

    avpriv_float_dsp_init(&ctx->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);

    /* initialize IPQF */
    ff_mdct_init(&ctx->ipqf_dct_ctx, 5, 1, 32.0 / 32768.0);

    ff_atrac3p_init_imdct(avctx, &ctx->mdct_ctx);

    ff_atrac_init_gain_compensation(&ctx->gainc_ctx, 6, 2);

    ff_atrac3p_init_wave_synth();

    if ((ret = set_channel_params(ctx, avctx)) < 0)
        return ret;

    ctx->my_channel_layout = avctx->channel_layout;

    ctx->ch_units = av_mallocz(sizeof(*ctx->ch_units) *
                               ctx->num_channel_blocks);
    if (!ctx->ch_units) {
        decode_close(avctx);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < ctx->num_channel_blocks; i++) {
        for (ch = 0; ch < 2; ch++) {
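            /* point the "current" and "previous" parameter pointers at the
             * two rows of each per-channel history array */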
            ctx->ch_units[i].channels[ch].ch_num          = ch;
            ctx->ch_units[i].channels[ch].wnd_shape       = &ctx->ch_units[i].channels[ch].wnd_shape_hist[0][0];
            ctx->ch_units[i].channels[ch].wnd_shape_prev  = &ctx->ch_units[i].channels[ch].wnd_shape_hist[1][0];
            ctx->ch_units[i].channels[ch].gain_data       = &ctx->ch_units[i].channels[ch].gain_data_hist[0][0];
            ctx->ch_units[i].channels[ch].gain_data_prev  = &ctx->ch_units[i].channels[ch].gain_data_hist[1][0];
            ctx->ch_units[i].channels[ch].tones_info      = &ctx->ch_units[i].channels[ch].tones_info_hist[0][0];
            ctx->ch_units[i].channels[ch].tones_info_prev = &ctx->ch_units[i].channels[ch].tones_info_hist[1][0];

            /* clear IMDCT overlapping buffer */
            memset(&ctx->ch_units[i].prev_buf[ch][0], 0,
                   sizeof(ctx->ch_units[i].prev_buf[ch][0]) *
                   ATRAC3P_FRAME_SAMPLES);
            /* clear IPQF history */
            memset(&ctx->ch_units[i].ipqf_ctx[ch], 0,
                   sizeof(ctx->ch_units[i].ipqf_ctx[ch]));
        }

        ctx->ch_units[i].waves_info      = &ctx->ch_units[i].wave_synth_hist[0];
        ctx->ch_units[i].waves_info_prev = &ctx->ch_units[i].wave_synth_hist[1];
    }

    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

    return 0;
}
Example #4
static av_cold int atrac3p_decode_init(AVCodecContext *avctx)
{
    ATRAC3PContext *ctx = avctx->priv_data;
    int i, ch, ret;

    if (!avctx->block_align) {
        av_log(avctx, AV_LOG_ERROR, "block_align is not set\n");
        return AVERROR(EINVAL);
    }

    avpriv_float_dsp_init(&ctx->fdsp, avctx->flags & AV_CODEC_FLAG_BITEXACT);

    /* initialize IPQF */
    ff_mdct_init(&ctx->ipqf_dct_ctx, 5, 1, 32.0 / 32768.0);

    ff_atrac3p_init_imdct(avctx, &ctx->mdct_ctx);

    ff_atrac_init_gain_compensation(&ctx->gainc_ctx, 6, 2);

    ff_atrac3p_init_wave_synth();

    if ((ret = set_channel_params(ctx, avctx)) < 0)
        return ret;

    ctx->my_channel_layout = avctx->channel_layout;

    ctx->ch_units = av_mallocz(sizeof(*ctx->ch_units) *
                               ctx->num_channel_blocks);
    if (!ctx->ch_units) {
        atrac3p_decode_close(avctx);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < ctx->num_channel_blocks; i++) {
        for (ch = 0; ch < 2; ch++) {
            ctx->ch_units[i].channels[ch].ch_num          = ch;
            ctx->ch_units[i].channels[ch].wnd_shape       = &ctx->ch_units[i].channels[ch].wnd_shape_hist[0][0];
            ctx->ch_units[i].channels[ch].wnd_shape_prev  = &ctx->ch_units[i].channels[ch].wnd_shape_hist[1][0];
            ctx->ch_units[i].channels[ch].gain_data       = &ctx->ch_units[i].channels[ch].gain_data_hist[0][0];
            ctx->ch_units[i].channels[ch].gain_data_prev  = &ctx->ch_units[i].channels[ch].gain_data_hist[1][0];
            ctx->ch_units[i].channels[ch].tones_info      = &ctx->ch_units[i].channels[ch].tones_info_hist[0][0];
            ctx->ch_units[i].channels[ch].tones_info_prev = &ctx->ch_units[i].channels[ch].tones_info_hist[1][0];
        }

        ctx->ch_units[i].waves_info      = &ctx->ch_units[i].wave_synth_hist[0];
        ctx->ch_units[i].waves_info_prev = &ctx->ch_units[i].wave_synth_hist[1];
    }

    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

    return 0;
}
Example #5
File: aacenc.c Project: 0xFFeng/ffmpeg
static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
{
    int ret = 0;

    avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);

    // window init
    ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
    ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
    ff_init_ff_sine_windows(10);
    ff_init_ff_sine_windows(7);

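    /* nbits = 11 -> 2048-point MDCT for long windows,
     * nbits =  8 ->  256-point MDCT for short windows */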
    if ((ret = ff_mdct_init(&s->mdct1024, 11, 0, 32768.0)))
        return ret;
    if ((ret = ff_mdct_init(&s->mdct128,   8, 0, 32768.0)))
        return ret;

    return 0;
}
Example #6
av_cold int ff_twinvq_decode_init(AVCodecContext *avctx)
{
    int ret;
    TwinVQContext *tctx = avctx->priv_data;

    tctx->avctx       = avctx;
    avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;

    avpriv_float_dsp_init(&tctx->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
    if ((ret = init_mdct_win(tctx))) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n");
        ff_twinvq_decode_close(avctx);
        return ret;
    }
    init_bitstream_params(tctx);

    twinvq_memset_float(tctx->bark_hist[0][0], 0.1,
                        FF_ARRAY_ELEMS(tctx->bark_hist));

    return 0;
}
Example #7
static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    int i;

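    /* create one audio input pad per configured input */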
    for (i = 0; i < s->nb_inputs; i++) {
        char name[32];
        AVFilterPad pad = { 0 };

        snprintf(name, sizeof(name), "input%d", i);
        pad.type           = AVMEDIA_TYPE_AUDIO;
        pad.name           = av_strdup(name);
        pad.filter_frame   = filter_frame;

        ff_insert_inpad(ctx, i, &pad);
    }

    avpriv_float_dsp_init(&s->fdsp, 0);

    return 0;
}
Example #8
File: opusdec.c Project: KonradIT/FFmpeg
static av_cold int opus_decode_init(AVCodecContext *avctx)
{
    OpusContext *c = avctx->priv_data;
    int ret, i, j;

    avctx->sample_fmt  = AV_SAMPLE_FMT_FLTP;
    avctx->sample_rate = 48000;

    avpriv_float_dsp_init(&c->fdsp, 0);

    /* find out the channel configuration */
    ret = ff_opus_parse_extradata(avctx, c);
    if (ret < 0)
        return ret;

    /* allocate and init each independent decoder */
    c->streams = av_mallocz_array(c->nb_streams, sizeof(*c->streams));
    if (!c->streams) {
        c->nb_streams = 0;
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        uint64_t layout;

        s->output_channels = (i < c->nb_stereo_streams) ? 2 : 1;

        s->avctx = avctx;

        for (j = 0; j < s->output_channels; j++) {
            s->silk_output[j]       = s->silk_buf[j];
            s->celt_output[j]       = s->celt_buf[j];
            s->redundancy_output[j] = s->redundancy_buf[j];
        }

        s->fdsp = &c->fdsp;

        /* per-stream resampler; the input rate is set later from the coded
         * bandwidth, the output is always 48 kHz */
        s->swr = swr_alloc();
        if (!s->swr)
            goto fail;

        layout = (s->output_channels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
        av_opt_set_int(s->swr, "in_sample_fmt",      avctx->sample_fmt,  0);
        av_opt_set_int(s->swr, "out_sample_fmt",     avctx->sample_fmt,  0);
        av_opt_set_int(s->swr, "in_channel_layout",  layout,             0);
        av_opt_set_int(s->swr, "out_channel_layout", layout,             0);
        av_opt_set_int(s->swr, "out_sample_rate",    avctx->sample_rate, 0);
        av_opt_set_int(s->swr, "filter_size",        16,                 0);

        ret = ff_silk_init(avctx, &s->silk, s->output_channels);
        if (ret < 0)
            goto fail;

        ret = ff_celt_init(avctx, &s->celt, s->output_channels);
        if (ret < 0)
            goto fail;

        s->celt_delay = av_audio_fifo_alloc(avctx->sample_fmt,
                                            s->output_channels, 1024);
        if (!s->celt_delay) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    return 0;
fail:
    opus_decode_close(avctx);
    return ret;
}
Example #9
av_cold int ff_ac3_float_encode_init(AVCodecContext *avctx)
{
    AC3EncodeContext *s = avctx->priv_data;
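    /* only the float DSP context is specific to the float encoder; the rest
     * of the setup is shared with the fixed-point variant via
     * ff_ac3_encode_init() */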
    avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
    return ff_ac3_encode_init(avctx);
}