/**
 * Initialize the AAC encoder: validate sample rate, channel count,
 * profile and bitrate, set up DSP/MDCTs/windows, allocate the sample
 * and channel-element buffers, write the audio-specific config into
 * extradata, and initialize the psychoacoustic model.
 *
 * FIX: the original used the results of av_malloc()/av_mallocz()
 * unchecked; a failed allocation now returns AVERROR(ENOMEM).
 */
static av_cold int aac_encode_init(AVCodecContext *avctx)
{
    AACEncContext *s = avctx->priv_data;
    int i;
    const uint8_t *sizes[2];
    int lengths[2];

    avctx->frame_size = 1024;

    /* find the sample-rate index in the MPEG-4 audio table */
    for (i = 0; i < 16; i++)
        if (avctx->sample_rate == ff_mpeg4audio_sample_rates[i])
            break;
    if (i == 16) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n", avctx->sample_rate);
        return -1;
    }
    if (avctx->channels > AAC_MAX_CHANNELS) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", avctx->channels);
        return -1;
    }
    if (avctx->profile != FF_PROFILE_UNKNOWN && avctx->profile != FF_PROFILE_AAC_LOW) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported profile %d\n", avctx->profile);
        return -1;
    }
    /* a 1024-sample frame can carry at most 6144 bits per channel */
    if (1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "Too many bits per frame requested\n");
        return -1;
    }
    s->samplerate_index = i;

    dsputil_init(&s->dsp, avctx);
    ff_mdct_init(&s->mdct1024, 11, 0, 1.0);
    ff_mdct_init(&s->mdct128,   8, 0, 1.0);
    // window init
    ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
    ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
    ff_init_ff_sine_windows(10);
    ff_init_ff_sine_windows(7);

    s->chan_map = aac_chan_configs[avctx->channels-1];
    /* 2 * 1024 samples per channel — presumably double-buffered input */
    s->samples  = av_malloc(2 * 1024 * avctx->channels * sizeof(s->samples[0]));
    s->cpe      = av_mallocz(sizeof(ChannelElement) * s->chan_map[0]);
    avctx->extradata = av_mallocz(5 + FF_INPUT_BUFFER_PADDING_SIZE);
    /* FIX: all three allocations were previously unchecked */
    if (!s->samples || !s->cpe || !avctx->extradata)
        return AVERROR(ENOMEM);
    avctx->extradata_size = 5;
    put_audio_specific_config(avctx);

    sizes[0]   = swb_size_1024[i];
    sizes[1]   = swb_size_128[i];
    lengths[0] = ff_aac_num_swb_1024[i];
    lengths[1] = ff_aac_num_swb_128[i];
    ff_psy_init(&s->psy, avctx, 2, sizes, lengths, s->chan_map[0], &s->chan_map[1]);
    s->psypp = ff_psy_preprocess_init(avctx);
    s->coder = &ff_aac_coders[2];

    s->lambda = avctx->global_quality ? avctx->global_quality : 120;

    ff_aac_tableinit();
    return 0;
}
/*
 * Initialize the NellyMoser decoder (legacy API): seed the PRNG used
 * for noise, set up the inverse MDCT and DSP helpers, pick the
 * float->int16 bias/scale, and lazily build the shared sine overlap
 * window.
 */
static av_cold int decode_init(AVCodecContext * avctx) {
    NellyMoserDecodeContext *s = avctx->priv_data;
    int i;

    s->avctx = avctx;
    av_init_random(0, &s->random_state);
    ff_mdct_init(&s->imdct_ctx, 8, 1);
    dsputil_init(&s->dsp, avctx);

    /* the 385 rounding bias accompanies the plain C float->int16
       conversion; the other branch applies no bias */
    if(s->dsp.float_to_int16 == ff_float_to_int16_c) {
        s->add_bias = 385;
        s->scale_bias = 1.0/(8*32768);
    } else {
        s->add_bias = 0;
        s->scale_bias = 1.0/(1*8);
    }

    /* Generate overlap window */
    /* sine_window is shared state; element 0 doubles as the
       "already initialized" flag */
    if (!sine_window[0])
        for (i=0 ; i<128; i++) {
            sine_window[i] = sin((i + 0.5) / 256.0 * M_PI);
        }
    return 0;
}
/*
 * Initialize the NellyMoser decoder: seed the lagged-Fibonacci
 * generator, set up the inverse MDCT and DSP helpers, pick the output
 * bias/scale, and lazily build the shared 128-point sine window.
 * Output is signed 16-bit mono.
 */
static av_cold int decode_init(AVCodecContext * avctx) {
    NellyMoserDecodeContext *s = avctx->priv_data;

    s->avctx = avctx;
    av_lfg_init(&s->random_state, ff_random_get_seed());
    ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);
    dsputil_init(&s->dsp, avctx);

    /* the 385 rounding bias accompanies the plain C float->int16
       conversion; the other branch applies no bias */
    if(s->dsp.float_to_int16 == ff_float_to_int16_c) {
        s->add_bias = 385;
        s->scale_bias = 1.0/(8*32768);
    } else {
        s->add_bias = 0;
        s->scale_bias = 1.0/(1*8);
    }

    /* Generate overlap window */
    /* ff_sine_128 is shared; the last element doubles as the
       "already initialized" flag */
    if (!ff_sine_128[127])
        ff_sine_window_init(ff_sine_128, 128);

    avctx->sample_fmt = SAMPLE_FMT_S16;
    avctx->channel_layout = CH_LAYOUT_MONO;
    return 0;
}
/*
 * Initialize the NellyMoser decoder.  Output is native float when the
 * caller requested AV_SAMPLE_FMT_FLT; otherwise samples are decoded
 * into a scratch float buffer and converted to int16.  Output is mono.
 */
static av_cold int decode_init(AVCodecContext * avctx) {
    NellyMoserDecodeContext *s = avctx->priv_data;

    s->avctx = avctx;
    av_lfg_init(&s->random_state, 0);
    ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);
    dsputil_init(&s->dsp, avctx);

    if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
        /* native float path: full scale folded into scale_bias */
        s->scale_bias = 1.0/(32768*8);
        avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
    } else {
        /* int16 path: decode to float_buf, then format-convert */
        s->scale_bias = 1.0/(1*8);
        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
        ff_fmt_convert_init(&s->fmt_conv, avctx);
        s->float_buf = av_mallocz(NELLY_SAMPLES * sizeof(*s->float_buf));
        if (!s->float_buf) {
            av_log(avctx, AV_LOG_ERROR, "error allocating float buffer\n");
            return AVERROR(ENOMEM);
        }
    }

    /* Generate overlap window */
    /* ff_sine_128 is shared; the last element doubles as the
       "already initialized" flag */
    if (!ff_sine_128[127])
        ff_init_ff_sine_windows(7);

    avctx->channel_layout = AV_CH_LAYOUT_MONO;

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    return 0;
}
/* Initialize the Id CIN video decoder: configure PAL8 output, validate
 * that extradata carries exactly the Huffman histograms, and build the
 * 256 per-context Huffman decode trees from them. */
static int idcin_decode_init(AVCodecContext *avctx)
{
    IdcinContext *s = avctx->priv_data;
    unsigned char *histograms;
    int tree, token, hist_pos = 0;

    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_PAL8;
    avctx->has_b_frames = 0;
    dsputil_init(&s->dsp, avctx);

    /* make sure the Huffman tables make it */
    if (s->avctx->extradata_size != HUFFMAN_TABLE_SIZE) {
        av_log(s->avctx, AV_LOG_ERROR, " Id CIN video: expected extradata size of %d\n", HUFFMAN_TABLE_SIZE);
        return -1;
    }

    /* build the 256 Huffman decode trees */
    histograms = (unsigned char *)s->avctx->extradata;
    for (tree = 0; tree < 256; tree++) {
        for (token = 0; token < HUF_TOKENS; token++)
            s->huff_nodes[tree][token].count = histograms[hist_pos++];
        huff_build_tree(s, tree);
    }

    s->frame.data[0] = NULL;

    return 0;
}
int H264DecWrapper::Initialize() { codec = &h264_decoder; avcodec_init(); c = avcodec_alloc_context(); picture = avcodec_alloc_frame(); if(codec->capabilities&CODEC_CAP_TRUNCATED) { c->flags |= CODEC_FLAG_TRUNCATED; } if (avcodec_open(c, codec) < 0) { fprintf(stderr, "could not open codec\n"); return -1; } h = (H264Context*)c->priv_data; s = &h->s; s->dsp.idct_permutation_type = 1; dsputil_init(&s->dsp, c); return 0; }
/**
 * Minimal MpegEncContext setup: derive macroblock geometry and strides
 * from the frame dimensions and allocate the picture table.
 * Returns 0 on success, -1 on allocation failure.
 *
 * FIX: the original never checked the av_mallocz() result.
 */
int MPV_common_init(MpegEncContext *s)
{
    /* NOTE(review): several of these derived sizes are computed but not
       used in this (possibly trimmed) version — kept for parity. */
    int y_size, c_size, yc_size, mb_array_size, mv_table_size;

    dsputil_init(&s->dsp, s->avctx);

    s->mb_width  = (s->width  + 15) / 16;
    s->mb_height = (s->height + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set default edge pos, will be overriden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    s->picture= av_mallocz( MAX_PICTURE_COUNT * sizeof(Picture));
    /* FIX: allocation was previously unchecked */
    if (!s->picture)
        return -1;

    s->context_initialized = 1;
    return 0;
}
/* Initialize the DV video decoder.  On the first call, build the shared
 * run/level VLC table from dv_vlc_{len,bits,run,level}; then borrow the
 * IDCT and scantables from a throwaway MpegEncContext.
 *
 * FIX: the dv_rl_vlc[0] allocation was previously unchecked, and the
 * "done" flag was set before the tables were actually built. */
static int dvvideo_decode_init(AVCodecContext *avctx)
{
    DVVideoDecodeContext *s = avctx->priv_data;
    MpegEncContext s2;
    /* NOTE(review): this lazy-init flag is not thread-safe — confirm
       codec opens are serialized before relying on it */
    static int done=0;

    if (!done) {
        int i;

        /* NOTE: as a trick, we use the fact the no codes are unused
           to accelerate the parsing of partial codes */
        init_vlc(&dv_vlc, TEX_VLC_BITS, NB_DV_VLC,
                 dv_vlc_len, 1, 1, dv_vlc_bits, 2, 2);

        dv_rl_vlc[0] = av_malloc(dv_vlc.table_size * sizeof(RL_VLC_ELEM));
        /* FIX: allocation was previously unchecked */
        if (!dv_rl_vlc[0])
            return -1;
        for(i = 0; i < dv_vlc.table_size; i++){
            int code= dv_vlc.table[i][0];
            int len = dv_vlc.table[i][1];
            int level, run;

            if(len<0){ //more bits needed
                run= 0;
                level= code;
            } else if (code == (NB_DV_VLC - 1)) {
                /* EOB */
                run = 0;
                level = 256;
            } else {
                run= dv_vlc_run[code] + 1;
                level= dv_vlc_level[code];
            }
            dv_rl_vlc[0][i].len = len;
            dv_rl_vlc[0][i].level = level;
            dv_rl_vlc[0][i].run = run;
        }
        /* FIX: only mark the shared tables ready once they really are */
        done = 1;
    }

    /* ugly way to get the idct & scantable */
    /* XXX: fix it */
    memset(&s2, 0, sizeof(MpegEncContext));
    s2.avctx = avctx;
    dsputil_init(&s2.dsp, avctx);
    if (DCT_common_init(&s2) < 0)
        return -1;

    s->idct_put[0] = s2.dsp.idct_put;
    memcpy(s->idct_permutation, s2.dsp.idct_permutation, 64);
    memcpy(s->dv_zigzag[0], s2.intra_scantable.permutated, 64);

    /* XXX: use MMX also for idct248 */
    s->idct_put[1] = simple_idct248_put;
    memcpy(s->dv_zigzag[1], dv_248_zigzag, 64);

    /* XXX: do it only for constant case */
    dv_build_unquantize_tables(s);

    return 0;
}
/*
 * Initialize the DCA (DTS) decoder: build VLC tables, set up DSP and
 * the inverse MDCT, wire up the per-channel sample pointers, pick the
 * float->int16 bias/scale, and honour a request to downmix to stereo.
 *
 * FIX: a misplaced closing brace put the downmix check inside the else
 * branch of the float_to_int16_interleave dispatch, so downmixing was
 * only allowed when the non-C conversion was in use.  It now applies
 * unconditionally.
 */
static av_cold int dca_decode_init(AVCodecContext * avctx)
{
    DCAContext *s = avctx->priv_data;
    int i;

    s->avctx = avctx;
    dca_init_vlcs();

    dsputil_init(&s->dsp, avctx);
    ff_mdct_init(&s->imdct, 6, 1, 1.0);

    for(i = 0; i < 6; i++)
        s->samples_chanptr[i] = s->samples + i * 256;
    avctx->sample_fmt = SAMPLE_FMT_S16;

    /* the 385 rounding bias accompanies the plain C conversion;
       the other branch applies no bias */
    if(s->dsp.float_to_int16_interleave == ff_float_to_int16_interleave_c) {
        s->add_bias = 385.0f;
        s->scale_bias = 1.0 / 32768.0;
    } else {
        s->add_bias = 0.0f;
        s->scale_bias = 1.0;
    }

    /* allow downmixing to stereo */
    if (avctx->channels > 0 && avctx->request_channels < avctx->channels &&
        avctx->request_channels == 2) {
        avctx->channels = avctx->request_channels;
    }
    return 0;
}
/* Configure the select filter for a new input link: reset the frame
 * counters, mark every "previous"/"start" variable as not yet seen
 * (NAN), expose the symbolic picture-type and interlace-type constants
 * to the expression evaluator, and, when scene detection is enabled,
 * allocate a codec context for dsputil. */
static int config_input(AVFilterLink *inlink)
{
    SelectContext *select = inlink->dst->priv;
    double *v = select->var_values;

    v[VAR_N]          = 0.0;
    v[VAR_SELECTED_N] = 0.0;
    v[VAR_TB]         = av_q2d(inlink->time_base);

    /* nothing seen yet */
    v[VAR_PREV_PTS]          = NAN;
    v[VAR_PREV_SELECTED_PTS] = NAN;
    v[VAR_PREV_SELECTED_T]   = NAN;
    v[VAR_START_PTS]         = NAN;
    v[VAR_START_T]           = NAN;

    v[VAR_PICT_TYPE_I]  = AV_PICTURE_TYPE_I;
    v[VAR_PICT_TYPE_P]  = AV_PICTURE_TYPE_P;
    v[VAR_PICT_TYPE_B]  = AV_PICTURE_TYPE_B;
    v[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
    v[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;

    v[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
    v[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
    v[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;

    if (select->do_scene_detect) {
        select->avctx = avcodec_alloc_context3(NULL);
        if (!select->avctx)
            return AVERROR(ENOMEM);
        dsputil_init(&select->c, select->avctx);
    }
    return 0;
}
/*
 * Initialize the IMC decoder: precompute the sqrt(2)-normalized MDCT
 * sine window, the pre/post rotation coefficient tables, a small
 * square-root lookup table, and the shared static Huffman VLC tables.
 * Output is signed 16-bit.
 */
static av_cold int imc_decode_init(AVCodecContext * avctx)
{
    int i, j;
    IMCContext *q = avctx->priv_data;
    double r1, r2;

    q->decoder_reset = 1;

    for(i = 0; i < BANDS; i++)
        q->old_floor[i] = 1.0;

    /* Build mdct window, a simple sine window normalized with sqrt(2) */
    ff_sine_window_init(q->mdct_sine_window, COEFFS);
    for(i = 0; i < COEFFS; i++)
        q->mdct_sine_window[i] *= sqrt(2.0);

    for(i = 0; i < COEFFS/2; i++){
        q->post_cos[i] = cos(i / 256.0 * M_PI);
        q->post_sin[i] = sin(i / 256.0 * M_PI);

        r1 = sin((i * 4.0 + 1.0) / 1024.0 * M_PI);
        r2 = cos((i * 4.0 + 1.0) / 1024.0 * M_PI);

        /* sign pattern alternates with index parity */
        if (i & 0x1) {
            q->pre_coef1[i] = (r1 + r2) * sqrt(2.0);
            q->pre_coef2[i] = -(r1 - r2) * sqrt(2.0);
        } else {
            q->pre_coef1[i] = -(r1 + r2) * sqrt(2.0);
            q->pre_coef2[i] = (r1 - r2) * sqrt(2.0);
        }

        q->last_fft_im[i] = 0;
    }

    /* Generate a square root table */
    for(i = 0; i < 30; i++) {
        q->sqrt_tab[i] = sqrt(i);
    }

    /* initialize the VLC tables */
    /* the VLCs share one static backing array (vlc_tables), carved up
       by vlc_offsets; INIT_VLC_USE_NEW_STATIC makes init_vlc fill the
       preassigned slice in place */
    for(i = 0; i < 4 ; i++) {
        for(j = 0; j < 4; j++) {
            huffman_vlc[i][j].table = vlc_tables[vlc_offsets[i * 4 + j]];
            huffman_vlc[i][j].table_allocated = vlc_offsets[i * 4 + j + 1] - vlc_offsets[i * 4 + j];
            init_vlc(&huffman_vlc[i][j], 9, imc_huffman_sizes[i],
                     imc_huffman_lens[i][j], 1, 1,
                     imc_huffman_bits[i][j], 2, 2, INIT_VLC_USE_NEW_STATIC);
        }
    }

    q->one_div_log2 = 1/log(2);

    ff_fft_init(&q->fft, 7, 1);
    dsputil_init(&q->dsp, avctx);
    avctx->sample_fmt = SAMPLE_FMT_S16;
    return 0;
}
int main(int argc, char **argv) { AVCodecContext *ctx; int c; DSPContext cctx, mmxctx; int flags[2] = { AV_CPU_FLAG_MMX, AV_CPU_FLAG_MMX2 }; int flags_size = HAVE_MMX2 ? 2 : 1; for(;;) { c = getopt(argc, argv, "h"); if (c == -1) break; switch(c) { case 'h': help(); break; } } printf("ffmpeg motion test\n"); ctx = avcodec_alloc_context(); ctx->dsp_mask = AV_CPU_FLAG_FORCE; dsputil_init(&cctx, ctx); for (c = 0; c < flags_size; c++) { int x; ctx->dsp_mask = AV_CPU_FLAG_FORCE | flags[c]; dsputil_init(&mmxctx, ctx); for (x = 0; x < 2; x++) { printf("%s for %dx%d pixels\n", c ? "mmx2" : "mmx", x ? 8 : 16, x ? 8 : 16); test_motion("mmx", mmxctx.pix_abs[x][0], cctx.pix_abs[x][0]); test_motion("mmx_x2", mmxctx.pix_abs[x][1], cctx.pix_abs[x][1]); test_motion("mmx_y2", mmxctx.pix_abs[x][2], cctx.pix_abs[x][2]); test_motion("mmx_xy2", mmxctx.pix_abs[x][3], cctx.pix_abs[x][3]); } } av_free(ctx); return 0; }
/**
 * Initialize the AAC encoder (older variant): validate sample rate and
 * channel count, set up DSP/MDCTs/windows, allocate sample and
 * channel-element buffers, write the audio-specific config, and
 * initialize the psychoacoustic model and coder.
 *
 * FIX: the original used the results of av_malloc()/av_mallocz()
 * unchecked; a failed allocation now returns AVERROR(ENOMEM).
 */
static av_cold int aac_encode_init(AVCodecContext *avctx)
{
    AACEncContext *s = avctx->priv_data;
    int i;
    const uint8_t *sizes[2];
    int lengths[2];

    avctx->frame_size = 1024;

    /* find the sample-rate index in the MPEG-4 audio table */
    for(i = 0; i < 16; i++)
        if(avctx->sample_rate == ff_mpeg4audio_sample_rates[i])
            break;
    if(i == 16){
        av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n", avctx->sample_rate);
        return -1;
    }
    if(avctx->channels > 6){
        av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", avctx->channels);
        return -1;
    }
    s->samplerate_index = i;

    dsputil_init(&s->dsp, avctx);
    ff_mdct_init(&s->mdct1024, 11, 0, 1.0);
    ff_mdct_init(&s->mdct128,   8, 0, 1.0);
    // window init
    ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
    ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
    ff_sine_window_init(ff_sine_1024, 1024);
    ff_sine_window_init(ff_sine_128, 128);

    /* 2 * 1024 samples per channel — presumably double-buffered input */
    s->samples = av_malloc(2 * 1024 * avctx->channels * sizeof(s->samples[0]));
    s->cpe = av_mallocz(sizeof(ChannelElement) * aac_chan_configs[avctx->channels-1][0]);
    avctx->extradata = av_malloc(2);
    /* FIX: all three allocations were previously unchecked */
    if (!s->samples || !s->cpe || !avctx->extradata)
        return AVERROR(ENOMEM);
    avctx->extradata_size = 2;
    put_audio_specific_config(avctx);

    sizes[0] = swb_size_1024[i];
    sizes[1] = swb_size_128[i];
    lengths[0] = ff_aac_num_swb_1024[i];
    lengths[1] = ff_aac_num_swb_128[i];
    ff_psy_init(&s->psy, avctx, 2, sizes, lengths);
    s->psypp = ff_psy_preprocess_init(avctx);
    s->coder = &ff_aac_coders[0];

    s->lambda = avctx->global_quality ? avctx->global_quality : 120;
#if !CONFIG_HARDCODED_TABLES
    /* scalefactor table: 2^((i-200)/4) */
    for (i = 0; i < 428; i++)
        ff_aac_pow2sf_tab[i] = pow(2, (i - 200)/4.);
#endif /* CONFIG_HARDCODED_TABLES */

    if (avctx->channels > 5)
        av_log(avctx, AV_LOG_ERROR, "This encoder does not yet enforce the restrictions on LFEs. "
               "The output will most likely be an illegal bitstream.\n");
    return 0;
}
/* Bind the DNXHD decoder context to the codec context, set up the DSP
 * helpers, and publish the output picture (marked as an I-frame). */
static av_cold int dnxhd_decode_init(AVCodecContext *avctx)
{
    DNXHDContext *ctx = avctx->priv_data;

    ctx->avctx = avctx;
    dsputil_init(&ctx->dsp, avctx);

    ctx->picture.type  = FF_I_TYPE;
    avctx->coded_frame = &ctx->picture;

    return 0;
}
/*
 * Initialize the IMC decoder (older variant): precompute the
 * sqrt(2)-normalized MDCT sine window, the pre/post rotation
 * coefficient tables, a small square-root lookup table, and the
 * per-context Huffman VLC tables.
 */
static int imc_decode_init(AVCodecContext * avctx)
{
    int i, j;
    IMCContext *q = avctx->priv_data;
    double r1, r2;

    q->decoder_reset = 1;

    for(i = 0; i < BANDS; i++)
        q->old_floor[i] = 1.0;

    /* Build mdct window, a simple sine window normalized with sqrt(2) */
    for(i = 0; i < COEFFS; i++)
        q->mdct_sine_window[i] = sin((i + 0.5) / 512.0 * M_PI) * sqrt(2.0);

    for(i = 0; i < COEFFS/2; i++){
        q->post_cos[i] = cos(i / 256.0 * M_PI);
        q->post_sin[i] = sin(i / 256.0 * M_PI);

        r1 = sin((i * 4.0 + 1.0) / 1024.0 * M_PI);
        r2 = cos((i * 4.0 + 1.0) / 1024.0 * M_PI);

        /* sign pattern alternates with index parity */
        if (i & 0x1) {
            q->pre_coef1[i] = (r1 + r2) * sqrt(2.0);
            q->pre_coef2[i] = -(r1 - r2) * sqrt(2.0);
        } else {
            q->pre_coef1[i] = -(r1 + r2) * sqrt(2.0);
            q->pre_coef2[i] = (r1 - r2) * sqrt(2.0);
        }

        q->last_fft_im[i] = 0;
    }

    /* Generate a square root table */
    for(i = 0; i < 30; i++) {
        q->sqrt_tab[i] = sqrt(i);
    }

    /* initialize the VLC tables */
    for(i = 0; i < 4 ; i++) {
        for(j = 0; j < 4; j++) {
            init_vlc (&q->huffman_vlc[i][j], 9, imc_huffman_sizes[i],
                      imc_huffman_lens[i][j], 1, 1,
                      imc_huffman_bits[i][j], 2, 2, 1);
        }
    }

    q->one_div_log2 = 1/log(2);

    ff_fft_init(&q->fft, 7, 1);
    dsputil_init(&q->dsp, avctx);
    return 0;
}
/* Initialize the EA TGQ decoder: default the IDCT to the EA variant,
 * set up DSP and the zig-zag scantable, and declare a fixed 1/15 time
 * base with YUV420P output. */
static av_cold int tgq_decode_init(AVCodecContext *avctx)
{
    TgqContext *ctx = avctx->priv_data;

    ctx->avctx = avctx;

    if (avctx->idct_algo == FF_IDCT_AUTO)
        avctx->idct_algo = FF_IDCT_EA;

    dsputil_init(&ctx->dsp, avctx);
    ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, ff_zigzag_direct);

    avctx->time_base = (AVRational){1, 15};
    avctx->pix_fmt   = PIX_FMT_YUV420P;
    return 0;
}
/* Initialize the EA MAD decoder: default the IDCT to the EA variant,
 * set up DSP, the intra zig-zag scantable and the shared MPEG-1/2
 * VLC tables; output is YUV420P. */
static av_cold int decode_init(AVCodecContext *avctx)
{
    MadContext *mad = avctx->priv_data;
    MpegEncContext *m = &mad->s;

    m->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_YUV420P;

    if (avctx->idct_algo == FF_IDCT_AUTO)
        avctx->idct_algo = FF_IDCT_EA;

    dsputil_init(&m->dsp, avctx);
    ff_init_scantable(m->dsp.idct_permutation, &m->intra_scantable, ff_zigzag_direct);
    ff_mpeg12_init_vlcs();
    return 0;
}
/* Shared ASV1 setup: DSP helpers, macroblock counts (rounded up and
 * truncated variants), and the published coded frame. */
static void common_init(AVCodecContext *avctx)
{
    ASV1Context * const ctx = avctx->priv_data;

    dsputil_init(&ctx->dsp, avctx);

    /* number of 16x16 macroblocks, rounding partial blocks up... */
    ctx->mb_width   = (avctx->width  + 15) / 16;
    ctx->mb_height  = (avctx->height + 15) / 16;
    /* ...and counting only the fully covered ones */
    ctx->mb_width2  = (avctx->width  + 0) / 16;
    ctx->mb_height2 = (avctx->height + 0) / 16;

    avctx->coded_frame = (AVFrame*)&ctx->picture;
    ctx->avctx = avctx;
}
/* Shared HuffYUV setup: stash context/flags, init DSP, and cache the
 * (asserted positive) frame dimensions. */
static av_cold int common_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->flags = avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width  = avctx->width;
    s->height = avctx->height;
    assert(s->width > 0 && s->height > 0);

    return 0;
}
/*
 * Initialize the Motion Pixels decoder: build the static tables, set up
 * DSP, and allocate the change map plus the vertical/horizontal pixel
 * tables.  Output is RGB555.
 *
 * FIX: the original used all three av_mallocz() results unchecked.
 */
static av_cold int mp_decode_init(AVCodecContext *avctx)
{
    MotionPixelsContext *mp = avctx->priv_data;

    motionpixels_tableinit();
    mp->avctx = avctx;
    dsputil_init(&mp->dsp, avctx);

    mp->changes_map = av_mallocz(avctx->width * avctx->height);
    mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
    mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel));
    mp->hpt = av_mallocz(avctx->height * avctx->width / 16 * sizeof(YuvPixel));
    /* FIX: allocations were previously unchecked */
    if (!mp->changes_map || !mp->vpt || !mp->hpt) {
        av_freep(&mp->changes_map);
        av_freep(&mp->vpt);
        av_freep(&mp->hpt);
        return AVERROR(ENOMEM);
    }

    avctx->pix_fmt = PIX_FMT_RGB555;
    return 0;
}
/*
 * Initialize per-link deshake state and allocate the codec context used
 * by the dsputil helpers.
 *
 * FIX: the avcodec_alloc_context3() result was previously passed to
 * dsputil_init() unchecked; on allocation failure we now return
 * AVERROR(ENOMEM) instead of dereferencing NULL downstream.
 */
static int config_props(AVFilterLink *link)
{
    DeshakeContext *deshake = link->dst->priv;

    deshake->ref           = NULL;
    deshake->last.vector.x = 0;
    deshake->last.vector.y = 0;
    deshake->last.angle    = 0;
    deshake->last.zoom     = 0;

    deshake->avctx = avcodec_alloc_context3(NULL);
    if (!deshake->avctx)
        return AVERROR(ENOMEM);
    dsputil_init(&deshake->c, deshake->avctx);

    return 0;
}
/*
 * Initialize the NUV decoder: reset state, detect the RJPG frame-header
 * variant, read quant tables from extradata when present, and size the
 * internal buffers for the coded dimensions.
 *
 * FIX: on codec_reinit() failure the original returned 1, which
 * avcodec_open() treats as SUCCESS (only negative values are errors).
 * Return a proper negative error code instead.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    NuvContext *c = avctx->priv_data;

    avctx->pix_fmt = PIX_FMT_YUV420P;
    c->pic.data[0] = NULL;
    c->decomp_buf  = NULL;
    c->quality     = -1;
    c->width       = 0;
    c->height      = 0;

    c->codec_frameheader = avctx->codec_tag == MKTAG('R', 'J', 'P', 'G');

    if (avctx->extradata_size)
        get_quant(avctx, c, avctx->extradata, avctx->extradata_size);

    dsputil_init(&c->dsp, avctx);

    /* codec_reinit presumably fails on allocation — TODO confirm the
       best-fitting error code */
    if (!codec_reinit(avctx, avctx->width, avctx->height, -1))
        return AVERROR(ENOMEM);
    return 0;
}
/* Initialize the Mimic decoder: set the initial buffer indices, build
 * the Huffman VLC table, and prepare DSP plus the column zig-zag
 * scantable. */
static av_cold int mimic_decode_init(AVCodecContext *avctx)
{
    MimicContext *ctx = avctx->priv_data;
    int ret;

    ctx->prev_index = 0;
    ctx->cur_index  = 15;

    ret = init_vlc(&ctx->vlc, 11, sizeof(huffbits)/sizeof(huffbits[0]),
                   huffbits, 1, 1, huffcodes, 4, 4, 0);
    if (ret) {
        av_log(avctx, AV_LOG_ERROR, "error initializing vlc table\n");
        return -1;
    }

    dsputil_init(&ctx->dsp, avctx);
    ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, col_zag);

    return 0;
}
/* Initialize the TrueSpeech decoder: only a single channel is accepted;
 * output is signed 16-bit. */
static av_cold int truespeech_decode_init(AVCodecContext * avctx)
{
    TSContext *ts = avctx->priv_data;

    if (avctx->channels != 1) {
        av_log_ask_for_sample(avctx, "Unsupported channel count: %d\n", avctx->channels);
        return AVERROR(EINVAL);
    }

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;

    dsputil_init(&ts->dsp, avctx);

    avcodec_get_frame_defaults(&ts->frame);
    avctx->coded_frame = &ts->frame;

    return 0;
}
/*
 * Shared HuffYUV setup: stash context/flags, init DSP, cache the frame
 * dimensions, and allocate three scratch line buffers (width + 16
 * bytes of slack each).
 *
 * FIX: the av_malloc() results in the loop were previously unchecked.
 */
static int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;
    int i;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    for(i=0; i<3; i++){
        s->temp[i]= av_malloc(avctx->width + 16);
        /* FIX: allocation was previously unchecked */
        if (!s->temp[i]) {
            while (--i >= 0)
                av_freep(&s->temp[i]);
            return AVERROR(ENOMEM);
        }
    }
    return 0;
}
/*
 * Initialize the DCA (DTS) decoder (older variant): build the VLC
 * tables, precompute the cosine-modulation tables (pre_calc_cosmod),
 * set up the DSP helpers, and honour a request to downmix to stereo.
 */
static av_cold int dca_decode_init(AVCodecContext * avctx)
{
    DCAContext *s = avctx->priv_data;
    s->avctx = avctx;
    dca_init_vlcs();
    pre_calc_cosmod(s);
    dsputil_init(&s->dsp, avctx);

    /* allow downmixing to stereo */
    if (avctx->channels > 0 && avctx->request_channels < avctx->channels &&
            avctx->request_channels == 2) {
        avctx->channels = avctx->request_channels;
    }
    return 0;
}
/* Prepare the AAC encoder's DSP helpers, KBD/sine windows, and the two
 * forward MDCTs (long 2048-point, short 256-point).  Returns 0 on
 * success or the ff_mdct_init() error code. */
static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
{
    int ret;

    dsputil_init(&s->dsp, avctx);

    // window init
    ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
    ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
    ff_init_ff_sine_windows(10);
    ff_init_ff_sine_windows(7);

    if ((ret = ff_mdct_init(&s->mdct1024, 11, 0, 32768.0)))
        return ret;
    if ((ret = ff_mdct_init(&s->mdct128,   8, 0, 32768.0)))
        return ret;

    return 0;
}
/*
 * Initialize the AAC encoder (early variant): validate sample rate and
 * channel count, set up DSP/MDCTs/windows, allocate sample and
 * channel-element buffers, initialize the 3GPP psychoacoustic model,
 * and write the audio-specific config into extradata.
 *
 * NOTE(review): the av_malloc()/av_mallocz() results are used
 * unchecked here — confirm whether OOM handling is required.
 */
static av_cold int aac_encode_init(AVCodecContext *avctx)
{
    AACEncContext *s = avctx->priv_data;
    int i;

    avctx->frame_size = 1024;

    /* find the sample-rate index in the MPEG-4 audio table */
    for(i = 0; i < 16; i++)
        if(avctx->sample_rate == ff_mpeg4audio_sample_rates[i])
            break;
    if(i == 16){
        av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate %d\n", avctx->sample_rate);
        return -1;
    }
    if(avctx->channels > 6){
        av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels: %d\n", avctx->channels);
        return -1;
    }
    s->samplerate_index = i;

    dsputil_init(&s->dsp, avctx);
    ff_mdct_init(&s->mdct1024, 11, 0);
    ff_mdct_init(&s->mdct128, 8, 0);
    // window init
    ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
    ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
    ff_sine_window_init(ff_sine_1024, 1024);
    ff_sine_window_init(ff_sine_128, 128);

    /* 2 * 1024 samples per channel — presumably double-buffering */
    s->samples = av_malloc(2 * 1024 * avctx->channels * sizeof(s->samples[0]));
    s->cpe = av_mallocz(sizeof(ChannelElement) * aac_chan_configs[avctx->channels-1][0]);
    if(ff_aac_psy_init(&s->psy, avctx, AAC_PSY_3GPP,
                       aac_chan_configs[avctx->channels-1][0], 0,
                       swb_size_1024[i], ff_aac_num_swb_1024[i],
                       swb_size_128[i], ff_aac_num_swb_128[i]) < 0){
        av_log(avctx, AV_LOG_ERROR, "Cannot initialize selected model.\n");
        return -1;
    }
    avctx->extradata = av_malloc(2);
    avctx->extradata_size = 2;
    put_audio_specific_config(avctx);
    return 0;
}
/* Initialize the NellyMoser decoder (int16 output): seed the LFG, set
 * up the inverse MDCT, DSP and format-conversion helpers, and lazily
 * build the shared 128-point sine overlap window.  Output is mono. */
static av_cold int decode_init(AVCodecContext * avctx)
{
    NellyMoserDecodeContext *s = avctx->priv_data;

    s->avctx = avctx;
    av_lfg_init(&s->random_state, 0);
    ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);

    dsputil_init(&s->dsp, avctx);
    ff_fmt_convert_init(&s->fmt_conv, avctx);

    s->scale_bias = 1.0/(1*8);

    /* Generate overlap window */
    /* ff_sine_128 is shared; the last element doubles as the
       "already initialized" flag */
    if (!ff_sine_128[127])
        ff_init_ff_sine_windows(7);

    avctx->sample_fmt     = AV_SAMPLE_FMT_S16;
    avctx->channel_layout = AV_CH_LAYOUT_MONO;
    return 0;
}
void *CreateDecoder() { struct decoder_handle *hdec; InitDecoder(); hdec = malloc(sizeof(struct decoder_handle)); if(!hdec) return NULL; hdec->tag = 'HDEC'; hdec->c = avcodec_alloc_context(); if(!hdec->c) goto c2; //if(h264_decoder.capabilities & CODEC_CAP_TRUNCATED) // hdec->c->flags |= CODEC_FLAG_TRUNCATED; if(avcodec_open(hdec->c, &h264_decoder) < 0) { av_free(hdec->c); return NULL; } { H264Context *h = hdec->c->priv_data; MpegEncContext *s = &h->s; s->dsp.idct_permutation_type = 1; dsputil_init(&s->dsp, hdec->c); } hdec->picture = avcodec_alloc_frame(); if(!hdec->picture) goto c1; return hdec; //av_free(picture); c1: av_free(hdec->c); c2: free(hdec); return NULL; }