Example no. 1
static av_cold int mp_decode_init(AVCodecContext *avctx)
{
    MotionPixelsContext *mp = avctx->priv_data;
    int w4 = (avctx->width  + 3) & ~3;
    int h4 = (avctx->height + 3) & ~3;

    if(avctx->extradata_size < 2) {
        av_log(avctx, AV_LOG_ERROR, "extradata too small\n");
        return AVERROR_INVALIDDATA;
    }

    motionpixels_tableinit();
    mp->avctx = avctx;
    ff_dsputil_init(&mp->dsp, avctx);
    mp->changes_map = av_mallocz(avctx->width * h4);
    mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
    mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel));
    mp->hpt = av_mallocz(h4 * w4 / 16 * sizeof(YuvPixel));
    if (!mp->changes_map || !mp->vpt || !mp->hpt) {
        av_freep(&mp->changes_map);
        av_freep(&mp->vpt);
        av_freep(&mp->hpt);
        return AVERROR(ENOMEM);
    }
    avctx->pix_fmt = AV_PIX_FMT_RGB555;

    mp->frame = av_frame_alloc();
    if (!mp->frame) {
        mp_decode_end(avctx);
        return AVERROR(ENOMEM);
    }

    return 0;
}
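The size handling above leans on two small arithmetic idioms: (dim + 3) & ~3 rounds a dimension up to the next multiple of 4, and av_log2(w * h) + 1 gives the number of bits needed to address any pixel offset in the frame. A minimal standalone sketch of both (plain C, no FFmpeg headers; ilog2 is a local stand-in for av_log2, not an FFmpeg function):

#include <stdio.h>

/* Local stand-in for av_log2(): index of the highest set bit. */
static int ilog2(unsigned v)
{
    int n = -1;
    while (v) {
        v >>= 1;
        n++;
    }
    return n;
}

int main(void)
{
    int width = 318, height = 239;      /* arbitrary sample dimensions */
    int w4 = (width  + 3) & ~3;         /* 320: rounded up to a multiple of 4 */
    int h4 = (height + 3) & ~3;         /* 240 */
    int offset_bits_len = ilog2(width * height) + 1;   /* 17 bits for 76002 offsets */

    printf("w4=%d h4=%d offset_bits_len=%d\n", w4, h4, offset_bits_len);
    return 0;
}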
Example no. 2
av_cold int ffv1_common_init(AVCodecContext *avctx)
{
    FFV1Context *s = avctx->priv_data;

    if (!avctx->width || !avctx->height)
        return AVERROR_INVALIDDATA;

    s->avctx = avctx;
    s->flags = avctx->flags;

    s->picture.f = avcodec_alloc_frame();
    s->last_picture.f = av_frame_alloc();
    if (!s->picture.f || !s->last_picture.f)
        return AVERROR(ENOMEM);
    ff_dsputil_init(&s->dsp, avctx);

    s->width  = avctx->width;
    s->height = avctx->height;

    // defaults
    s->num_h_slices = 1;
    s->num_v_slices = 1;

    return 0;
}
Example no. 3
static av_cold int decode_init(AVCodecContext * avctx) {
    NellyMoserDecodeContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->imdct_out = s->imdct_buf[0];
    s->imdct_prev = s->imdct_buf[1];
    av_lfg_init(&s->random_state, 0);
    ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);

    ff_dsputil_init(&s->dsp, avctx);

    s->scale_bias = 1.0/(32768*8);
    avctx->sample_fmt = AV_SAMPLE_FMT_FLT;

    /* Generate overlap window */
    if (!ff_sine_128[127])
        ff_init_ff_sine_windows(7);

    avctx->channels       = 1;
    avctx->channel_layout = AV_CH_LAYOUT_MONO;

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    return 0;
}
Example no. 4
static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
{
    int ret;

    ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
                    avpriv_mjpeg_val_dc, 12, 0);
    if (ret)
        return ret;
    ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
                    avpriv_mjpeg_val_dc, 12, 0);
    if (ret)
        return ret;
    ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
                    avpriv_mjpeg_val_ac_luminance, 251, 1);
    if (ret)
        return ret;
    ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
                    avpriv_mjpeg_val_ac_chrominance, 251, 1);
    if (ret)
        return ret;

    ff_dsputil_init(&c->dsp, avctx);
    ff_init_scantable(c->dsp.idct_permutation, &c->scantable,
                      ff_zigzag_direct);

    return 0;
}
Example no. 5
int main(int argc, char **argv)
{
    AVCodecContext *ctx;
    int c;
    DSPContext cctx, mmxctx;
    int flags[2] = { AV_CPU_FLAG_MMX, AV_CPU_FLAG_MMX2 };
    int flags_size = HAVE_MMX2 ? 2 : 1;

    for(;;) {
        c = getopt(argc, argv, "h");
        if (c == -1)
            break;
        switch(c) {
        case 'h':
            help();
            return 1;
        }
    }

    printf("Libav motion test\n");

    ctx = avcodec_alloc_context3(NULL);
    ctx->dsp_mask = AV_CPU_FLAG_FORCE;
    ff_dsputil_init(&cctx, ctx);
    for (c = 0; c < flags_size; c++) {
        int x;
        ctx->dsp_mask = AV_CPU_FLAG_FORCE | flags[c];
        ff_dsputil_init(&mmxctx, ctx);

        for (x = 0; x < 2; x++) {
            printf("%s for %dx%d pixels\n", c ? "mmx2" : "mmx",
                   x ? 8 : 16, x ? 8 : 16);
            test_motion("mmx",     mmxctx.pix_abs[x][0], cctx.pix_abs[x][0]);
            test_motion("mmx_x2",  mmxctx.pix_abs[x][1], cctx.pix_abs[x][1]);
            test_motion("mmx_y2",  mmxctx.pix_abs[x][2], cctx.pix_abs[x][2]);
            test_motion("mmx_xy2", mmxctx.pix_abs[x][3], cctx.pix_abs[x][3]);
        }
    }
    av_free(ctx);

    return 0;
}
Example no. 6
static av_cold int decode_init(AVCodecContext *avctx)
{
    MadContext *s = avctx->priv_data;
    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    ff_dsputil_init(&s->dsp, avctx);
    ff_init_scantable_permutation(s->dsp.idct_permutation, FF_NO_IDCT_PERM);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
    ff_mpeg12_init_vlcs();
    return 0;
}
Example no. 7
av_cold void ff_asv_common_init(AVCodecContext *avctx) {
    ASV1Context * const a = avctx->priv_data;

    ff_dsputil_init(&a->dsp, avctx);

    a->mb_width   = (avctx->width  + 15) / 16;
    a->mb_height  = (avctx->height + 15) / 16;
    a->mb_width2  = (avctx->width  + 0) / 16;
    a->mb_height2 = (avctx->height + 0) / 16;

    a->avctx= avctx;
}
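The two pairs of fields differ only in rounding: (dim + 15) / 16 counts macroblocks including a partial one at the right or bottom edge, while dim / 16 counts complete 16x16 macroblocks only. A standalone illustration of the difference (variable names are mine, not part of the codec):

#include <stdio.h>

int main(void)
{
    int width = 100, height = 64;           /* arbitrary sample dimensions */
    int mb_width   = (width  + 15) / 16;    /* 7: includes the partial right column */
    int mb_height  = (height + 15) / 16;    /* 4 */
    int mb_width2  = width  / 16;           /* 6: complete macroblocks only */
    int mb_height2 = height / 16;           /* 4 */

    printf("%dx%d -> %dx%d macroblocks rounded up, %dx%d complete\n",
           width, height, mb_width, mb_height, mb_width2, mb_height2);
    return 0;
}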
Example no. 8
File: eamad.c Project: Arcen/libav
static av_cold int decode_init(AVCodecContext *avctx)
{
    MadContext *t = avctx->priv_data;
    MpegEncContext *s = &t->s;
    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_YUV420P;
    if (avctx->idct_algo == FF_IDCT_AUTO)
        avctx->idct_algo = FF_IDCT_EA;
    ff_dsputil_init(&s->dsp, avctx);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    ff_mpeg12_init_vlcs();
    return 0;
}
Example no. 9
int main(int argc, char **argv)
{
    AVCodecContext *ctx;
    int c;
    DSPContext cctx, mmxctx;
    int flags[2] = { AV_CPU_FLAG_MMX, AV_CPU_FLAG_MMXEXT };
    int flags_size = HAVE_MMXEXT ? 2 : 1;

    if (argc > 1) {
        help();
        return 1;
    }

    printf("ffmpeg motion test\n");

    ctx = avcodec_alloc_context3(NULL);
    ctx->dsp_mask = AV_CPU_FLAG_FORCE;
    memset(&cctx, 0, sizeof(cctx));
    ff_dsputil_init(&cctx, ctx);
    for (c = 0; c < flags_size; c++) {
        int x;
        ctx->dsp_mask = AV_CPU_FLAG_FORCE | flags[c];
        memset(&mmxctx, 0, sizeof(mmxctx));
        ff_dsputil_init(&mmxctx, ctx);

        for (x = 0; x < 2; x++) {
            printf("%s for %dx%d pixels\n", c ? "mmx2" : "mmx",
                   x ? 8 : 16, x ? 8 : 16);
            test_motion("mmx",     mmxctx.pix_abs[x][0], cctx.pix_abs[x][0]);
            test_motion("mmx_x2",  mmxctx.pix_abs[x][1], cctx.pix_abs[x][1]);
            test_motion("mmx_y2",  mmxctx.pix_abs[x][2], cctx.pix_abs[x][2]);
            test_motion("mmx_xy2", mmxctx.pix_abs[x][3], cctx.pix_abs[x][3]);
        }
    }
    av_free(ctx);

    return 0;
}
Example no. 10
File: eatqi.c Project: AVbin/libav
static av_cold int tqi_decode_init(AVCodecContext *avctx)
{
    TqiContext *t = avctx->priv_data;
    MpegEncContext *s = &t->s;
    s->avctx = avctx;
    ff_dsputil_init(&s->dsp, avctx);
    ff_init_scantable_permutation(s->dsp.idct_permutation, FF_NO_IDCT_PERM);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    s->qscale = 1;
    avctx->time_base = (AVRational){1, 15};
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    ff_mpeg12_init_vlcs();
    return 0;
}
Example no. 11
static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    ff_dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}
Example no. 12
av_cold int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
                                  int flip, int has_alpha)
{
    int i;

    s->avctx = avctx;
    avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;

    ff_dsputil_init(&s->dsp, avctx);
    ff_h264chroma_init(&s->h264chroma, 8);
    ff_videodsp_init(&s->vdsp, 8);
    ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
    ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);
    ff_init_scantable_permutation(s->dsp.idct_permutation, s->vp3dsp.idct_perm);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);

    for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
        s->frames[i] = av_frame_alloc();
        if (!s->frames[i]) {
            ff_vp56_free(avctx);
            return AVERROR(ENOMEM);
        }
    }
    s->edge_emu_buffer_alloc = NULL;

    s->above_blocks = NULL;
    s->macroblocks = NULL;
    s->quantizer = -1;
    s->deblock_filtering = 1;
    s->golden_frame = 0;

    s->filter = NULL;

    s->has_alpha = has_alpha;

    s->modelp = &s->model;

    if (flip) {
        s->flip = -1;
        s->frbi = 2;
        s->srbi = 0;
    } else {
        s->flip = 1;
        s->frbi = 0;
        s->srbi = 2;
    }

    return 0;
}
Example no. 13
av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->flags = avctx->flags;

    ff_dsputil_init(&s->dsp, avctx);
    ff_llviddsp_init(&s->llviddsp, avctx);

    s->width = avctx->width;
    s->height = avctx->height;

    av_assert1(s->width > 0 && s->height > 0);
}
Example no. 14
static av_cold int decode_init(AVCodecContext *avctx) {
    NuvContext *c = avctx->priv_data;
    avctx->pix_fmt = PIX_FMT_YUV420P;
    c->pic.data[0] = NULL;
    c->decomp_buf = NULL;
    c->quality = -1;
    c->width = 0;
    c->height = 0;
    c->codec_frameheader = avctx->codec_tag == MKTAG('R', 'J', 'P', 'G');
    if (avctx->extradata_size)
        get_quant(avctx, c, avctx->extradata, avctx->extradata_size);
    ff_dsputil_init(&c->dsp, avctx);
    if (codec_reinit(avctx, avctx->width, avctx->height, -1) < 0)
        return 1;
    return 0;
}
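The codec_frameheader check compares the container's codec tag against a FourCC: MKTAG('R', 'J', 'P', 'G') packs the four characters into one 32-bit value with the first character in the lowest byte. A self-contained sketch of that packing (the macro is redefined here so the snippet stands alone; it mirrors FFmpeg's MKTAG layout):

#include <inttypes.h>
#include <stdio.h>

/* Same byte layout as FFmpeg's MKTAG: first character in the lowest byte. */
#define MKTAG(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
                           ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

int main(void)
{
    uint32_t tag = MKTAG('R', 'J', 'P', 'G');
    printf("RJPG -> 0x%08" PRIX32 "\n", tag);   /* prints 0x47504A52 */
    return 0;
}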
Example no. 15
static av_cold int mp_decode_init(AVCodecContext *avctx)
{
    MotionPixelsContext *mp = avctx->priv_data;
    int w4 = (avctx->width  + 3) & ~3;
    int h4 = (avctx->height + 3) & ~3;

    motionpixels_tableinit();
    mp->avctx = avctx;
    ff_dsputil_init(&mp->dsp, avctx);
    mp->changes_map = av_mallocz(avctx->width * h4);
    mp->offset_bits_len = av_log2(avctx->width * avctx->height) + 1;
    mp->vpt = av_mallocz(avctx->height * sizeof(YuvPixel));
    mp->hpt = av_mallocz(h4 * w4 / 16 * sizeof(YuvPixel));
    avctx->pix_fmt = PIX_FMT_RGB555;
    return 0;
}
Example no. 16
av_cold void ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
                                  int flip, int has_alpha)
{
    int i;

    s->avctx = avctx;
    avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;

    ff_dsputil_init(&s->dsp, avctx);
    ff_videodsp_init(&s->vdsp, 8);
    ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
    ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);
    ff_init_scantable_permutation(s->dsp.idct_permutation, s->vp3dsp.idct_perm);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable,ff_zigzag_direct);

    for (i=0; i<4; i++) {
        s->framep[i] = &s->frames[i];
        avcodec_get_frame_defaults(&s->frames[i]);
    }
    s->framep[VP56_FRAME_UNUSED] = s->framep[VP56_FRAME_GOLDEN];
    s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];
    s->edge_emu_buffer_alloc = NULL;

    s->above_blocks = NULL;
    s->macroblocks = NULL;
    s->quantizer = -1;
    s->deblock_filtering = 1;
    s->golden_frame = 0;

    s->filter = NULL;

    s->has_alpha = has_alpha;

    s->modelp = &s->model;

    if (flip) {
        s->flip = -1;
        s->frbi = 2;
        s->srbi = 0;
    } else {
        s->flip = 1;
        s->frbi = 0;
        s->srbi = 2;
    }
}
Example no. 17
static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    uint8_t idct_permutation[64];

    avctx->bits_per_raw_sample = 10;

    ff_dsputil_init(&ctx->dsp, avctx);
    ff_proresdsp_init(&ctx->prodsp, avctx);

    ff_init_scantable_permutation(idct_permutation,
                                  ctx->prodsp.idct_permutation_type);

    permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
    permute(ctx->interlaced_scan, ff_prores_interlaced_scan, idct_permutation);

    return 0;
}
Example no. 18
static av_cold int decode_init(AVCodecContext *avctx)
{
    MadContext *s = avctx->priv_data;
    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    ff_blockdsp_init(&s->bdsp, avctx);
    ff_bswapdsp_init(&s->bbdsp);
    ff_dsputil_init(&s->dsp, avctx);
    ff_init_scantable_permutation(s->dsp.idct_permutation, FF_NO_IDCT_PERM);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
    ff_mpeg12_init_vlcs();

    s->last_frame = av_frame_alloc();
    if (!s->last_frame)
        return AVERROR(ENOMEM);

    return 0;
}
Example no. 19
static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
{
    int ret = 0;

    ff_dsputil_init(&s->dsp, avctx);

    // window init
    ff_kbd_window_init(ff_aac_kbd_long_1024, 4.0, 1024);
    ff_kbd_window_init(ff_aac_kbd_short_128, 6.0, 128);
    ff_init_ff_sine_windows(10);
    ff_init_ff_sine_windows(7);

    if (ret = ff_mdct_init(&s->mdct1024, 11, 0, 32768.0))
        return ret;
    if (ret = ff_mdct_init(&s->mdct128,   8, 0, 32768.0))
        return ret;

    return 0;
}
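In FFmpeg's FFT code the second argument of ff_mdct_init is the log2 of the transform size, so the two calls above set up 2048- and 256-point MDCTs, producing the 1024 and 128 spectral coefficients that match the long and short windows initialized just before them. A trivial standalone check of that size relationship (assuming the nbits interpretation just stated):

#include <stdio.h>

int main(void)
{
    int nbits[2] = { 11, 8 };   /* the values passed to ff_mdct_init() above */

    for (int i = 0; i < 2; i++)
        printf("nbits=%2d -> %4d-point MDCT, %4d spectral coefficients\n",
               nbits[i], 1 << nbits[i], 1 << (nbits[i] - 1));
    return 0;
}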
Example no. 20
void init_common(AVCodecContext *avctx, MscCodecContext *mscContext) {

	ff_dsputil_init(&mscContext->dsp, avctx);

	mscContext->mb_width   = (avctx->width  + 15) / 16;
	mscContext->mb_height  = (avctx->height + 15) / 16;

	initialize_model(&mscContext->arithModelIndexCodingModel, 4);
	initialize_model(&mscContext->lastZeroCodingModel, 6);
	/*
	 * Initialize arithmetic coder models. First model: arithModels[0],
	 * can store 0, 1 values (2^1). Second one arithModels[1], 0, 1, 2, 3
	 * (2^2) and so on.
	 */
	for (int i = 0; i < 10; ++i) {
		initialize_model(&mscContext->arithModels[i], i + 1);
		mscContext->arithModelAddValue[i] = pow(2, i) - 1;
	}

	mscContext->referenceFrame = avcodec_alloc_frame();
	if (!mscContext->referenceFrame) {
		av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
	}
}
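Per the comment in the loop above, model i is set up with i + 1 bits, so it codes 2^(i+1) symbol values, and arithModelAddValue[i] comes out to 2^i - 1. A small standalone table of those values (this assumes, as the comment suggests, that initialize_model's second argument is a bit width; the codec's own meaning of addValue is not spelled out here):

#include <stdio.h>

int main(void)
{
    for (int i = 0; i < 10; i++) {
        int bits      = i + 1;
        int symbols   = 1 << bits;       /* model stores values 0 .. symbols-1 */
        int add_value = (1 << i) - 1;    /* integer form of pow(2, i) - 1 */

        printf("arithModels[%d]: %2d bits, %4d symbols, addValue=%3d\n",
               i, bits, symbols, add_value);
    }
    return 0;
}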
Example no. 21
static av_cold int decode_init(AVCodecContext * avctx) {
    NellyMoserDecodeContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->imdct_out = s->imdct_buf[0];
    s->imdct_prev = s->imdct_buf[1];
    av_lfg_init(&s->random_state, 0);
    ff_mdct_init(&s->imdct_ctx, 8, 1, 1.0);

    ff_dsputil_init(&s->dsp, avctx);

    if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
        s->scale_bias = 1.0/(32768*8);
        avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
    } else {
        s->scale_bias = 1.0/(1*8);
        avctx->sample_fmt = AV_SAMPLE_FMT_S16;
        ff_fmt_convert_init(&s->fmt_conv, avctx);
        s->float_buf = av_mallocz(NELLY_SAMPLES * sizeof(*s->float_buf));
        if (!s->float_buf) {
            av_log(avctx, AV_LOG_ERROR, "error allocating float buffer\n");
            return AVERROR(ENOMEM);
        }
    }

    /* Generate overlap window */
    if (!ff_sine_128[127])
        ff_init_ff_sine_windows(7);

    avctx->channel_layout = AV_CH_LAYOUT_MONO;

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = &s->frame;

    return 0;
}
Example no. 22
static av_cold int mpc8_decode_init(AVCodecContext * avctx)
{
    int i;
    MPCContext *c = avctx->priv_data;
    GetBitContext gb;
    static int vlc_initialized = 0;
    int channels;

    static VLC_TYPE band_table[542][2];
    static VLC_TYPE q1_table[520][2];
    static VLC_TYPE q9up_table[524][2];
    static VLC_TYPE scfi0_table[1 << MPC8_SCFI0_BITS][2];
    static VLC_TYPE scfi1_table[1 << MPC8_SCFI1_BITS][2];
    static VLC_TYPE dscf0_table[560][2];
    static VLC_TYPE dscf1_table[598][2];
    static VLC_TYPE q3_0_table[512][2];
    static VLC_TYPE q3_1_table[516][2];
    static VLC_TYPE codes_table[5708][2];

    if(avctx->extradata_size < 2){
        av_log(avctx, AV_LOG_ERROR, "Too small extradata size (%i)!\n", avctx->extradata_size);
        return -1;
    }
    memset(c->oldDSCF, 0, sizeof(c->oldDSCF));
    av_lfg_init(&c->rnd, 0xDEADBEEF);
    ff_dsputil_init(&c->dsp, avctx);
    ff_mpadsp_init(&c->mpadsp);

    ff_mpc_init();

    init_get_bits(&gb, avctx->extradata, 16);

    skip_bits(&gb, 3);//sample rate
    c->maxbands = get_bits(&gb, 5) + 1;
    if (c->maxbands >= BANDS) {
        av_log(avctx,AV_LOG_ERROR, "maxbands %d too high\n", c->maxbands);
        return AVERROR_INVALIDDATA;
    }
    channels = get_bits(&gb, 4) + 1;
    if (channels > 2) {
        av_log_missing_feature(avctx, "Multichannel MPC SV8", 1);
        return -1;
    }
    c->MSS = get_bits1(&gb);
    c->frames = 1 << (get_bits(&gb, 3) * 2);

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;
    avctx->channel_layout = (channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
    avctx->channels = channels;

    avcodec_get_frame_defaults(&c->frame);
    avctx->coded_frame = &c->frame;

    if(vlc_initialized) return 0;
    av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");

    band_vlc.table = band_table;
    band_vlc.table_allocated = 542;
    init_vlc(&band_vlc, MPC8_BANDS_BITS, MPC8_BANDS_SIZE,
             mpc8_bands_bits,  1, 1,
             mpc8_bands_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    q1_vlc.table = q1_table;
    q1_vlc.table_allocated = 520;
    init_vlc(&q1_vlc, MPC8_Q1_BITS, MPC8_Q1_SIZE,
             mpc8_q1_bits,  1, 1,
             mpc8_q1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
    q9up_vlc.table = q9up_table;
    q9up_vlc.table_allocated = 524;
    init_vlc(&q9up_vlc, MPC8_Q9UP_BITS, MPC8_Q9UP_SIZE,
             mpc8_q9up_bits,  1, 1,
             mpc8_q9up_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    scfi_vlc[0].table = scfi0_table;
    scfi_vlc[0].table_allocated = 1 << MPC8_SCFI0_BITS;
    init_vlc(&scfi_vlc[0], MPC8_SCFI0_BITS, MPC8_SCFI0_SIZE,
             mpc8_scfi0_bits,  1, 1,
             mpc8_scfi0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
    scfi_vlc[1].table = scfi1_table;
    scfi_vlc[1].table_allocated = 1 << MPC8_SCFI1_BITS;
    init_vlc(&scfi_vlc[1], MPC8_SCFI1_BITS, MPC8_SCFI1_SIZE,
             mpc8_scfi1_bits,  1, 1,
             mpc8_scfi1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    dscf_vlc[0].table = dscf0_table;
    dscf_vlc[0].table_allocated = 560;
    init_vlc(&dscf_vlc[0], MPC8_DSCF0_BITS, MPC8_DSCF0_SIZE,
             mpc8_dscf0_bits,  1, 1,
             mpc8_dscf0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
    dscf_vlc[1].table = dscf1_table;
    dscf_vlc[1].table_allocated = 598;
    init_vlc(&dscf_vlc[1], MPC8_DSCF1_BITS, MPC8_DSCF1_SIZE,
             mpc8_dscf1_bits,  1, 1,
             mpc8_dscf1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

    q3_vlc[0].table = q3_0_table;
    q3_vlc[0].table_allocated = 512;
    ff_init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE,
             mpc8_q3_bits,  1, 1,
             mpc8_q3_codes, 1, 1,
             mpc8_q3_syms,  1, 1, INIT_VLC_USE_NEW_STATIC);
    q3_vlc[1].table = q3_1_table;
    q3_vlc[1].table_allocated = 516;
    ff_init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE,
             mpc8_q4_bits,  1, 1,
             mpc8_q4_codes, 1, 1,
             mpc8_q4_syms,  1, 1, INIT_VLC_USE_NEW_STATIC);

    for(i = 0; i < 2; i++){
        res_vlc[i].table = &codes_table[vlc_offsets[0+i]];
        res_vlc[i].table_allocated = vlc_offsets[1+i] - vlc_offsets[0+i];
        init_vlc(&res_vlc[i], MPC8_RES_BITS, MPC8_RES_SIZE,
                 &mpc8_res_bits[i],  1, 1,
                 &mpc8_res_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

        q2_vlc[i].table = &codes_table[vlc_offsets[2+i]];
        q2_vlc[i].table_allocated = vlc_offsets[3+i] - vlc_offsets[2+i];
        init_vlc(&q2_vlc[i], MPC8_Q2_BITS, MPC8_Q2_SIZE,
                 &mpc8_q2_bits[i],  1, 1,
                 &mpc8_q2_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

        quant_vlc[0][i].table = &codes_table[vlc_offsets[4+i]];
        quant_vlc[0][i].table_allocated = vlc_offsets[5+i] - vlc_offsets[4+i];
        init_vlc(&quant_vlc[0][i], MPC8_Q5_BITS, MPC8_Q5_SIZE,
                 &mpc8_q5_bits[i],  1, 1,
                 &mpc8_q5_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
        quant_vlc[1][i].table = &codes_table[vlc_offsets[6+i]];
        quant_vlc[1][i].table_allocated = vlc_offsets[7+i] - vlc_offsets[6+i];
        init_vlc(&quant_vlc[1][i], MPC8_Q6_BITS, MPC8_Q6_SIZE,
                 &mpc8_q6_bits[i],  1, 1,
                 &mpc8_q6_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
        quant_vlc[2][i].table = &codes_table[vlc_offsets[8+i]];
        quant_vlc[2][i].table_allocated = vlc_offsets[9+i] - vlc_offsets[8+i];
        init_vlc(&quant_vlc[2][i], MPC8_Q7_BITS, MPC8_Q7_SIZE,
                 &mpc8_q7_bits[i],  1, 1,
                 &mpc8_q7_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
        quant_vlc[3][i].table = &codes_table[vlc_offsets[10+i]];
        quant_vlc[3][i].table_allocated = vlc_offsets[11+i] - vlc_offsets[10+i];
        init_vlc(&quant_vlc[3][i], MPC8_Q8_BITS, MPC8_Q8_SIZE,
                 &mpc8_q8_bits[i],  1, 1,
                 &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
    }
    vlc_initialized = 1;

    return 0;
}
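The extradata header parsed above is only 16 bits, read MSB-first: 3 bits of sample-rate index, 5 bits giving maxbands - 1, 4 bits giving channels - 1, 1 bit for the mid/side (MSS) flag, and 3 bits whose doubled value is the log2 of frames per packet. A standalone sketch that unpacks the same layout with plain shifts (field names are mine and the two bytes are made up; this is not the FFmpeg bit reader):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t extradata[2] = { 0x6A, 0x1A };                      /* made-up sample header */
    uint16_t v = (uint16_t)(extradata[0] << 8) | extradata[1];  /* MSB-first, like get_bits() */

    int sample_rate_idx = (v >> 13) & 0x07;            /* 3 bits */
    int maxbands        = ((v >> 8) & 0x1F) + 1;       /* 5 bits */
    int channels        = ((v >> 4) & 0x0F) + 1;       /* 4 bits */
    int mss             = (v >> 3) & 0x01;             /* 1 bit  */
    int frames          = 1 << ((v & 0x07) * 2);       /* 3 bits -> 4^n frames per packet */

    printf("srate_idx=%d maxbands=%d channels=%d MSS=%d frames=%d\n",
           sample_rate_idx, maxbands, channels, mss, frames);
    return 0;
}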
Example no. 23
/**
 * Cook initialization
 *
 * @param avctx     pointer to the AVCodecContext
 */
static av_cold int cook_decode_init(AVCodecContext *avctx)
{
    COOKContext *q = avctx->priv_data;
    const uint8_t *edata_ptr = avctx->extradata;
    const uint8_t *edata_ptr_end = edata_ptr + avctx->extradata_size;
    int extradata_size = avctx->extradata_size;
    int s = 0;
    unsigned int channel_mask = 0;
    int samples_per_frame = 0;
    int ret;
    q->avctx = avctx;

    /* Take care of the codec specific extradata. */
    if (extradata_size <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Necessary extradata missing!\n");
        return AVERROR_INVALIDDATA;
    }
    av_log(avctx, AV_LOG_DEBUG, "codecdata_length=%d\n", avctx->extradata_size);

    /* Take data from the AVCodecContext (RM container). */
    if (!avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR_INVALIDDATA;
    }

    /* Initialize RNG. */
    av_lfg_init(&q->random_state, 0);

    ff_dsputil_init(&q->dsp, avctx);

    while (edata_ptr < edata_ptr_end) {
        /* 8 for mono, 16 for stereo, ? for multichannel
           Swap to right endianness so we don't need to care later on. */
        if (extradata_size >= 8) {
            q->subpacket[s].cookversion = bytestream_get_be32(&edata_ptr);
            samples_per_frame           = bytestream_get_be16(&edata_ptr);
            q->subpacket[s].subbands = bytestream_get_be16(&edata_ptr);
            extradata_size -= 8;
        }
        if (extradata_size >= 8) {
            bytestream_get_be32(&edata_ptr);    // Unknown unused
            q->subpacket[s].js_subband_start = bytestream_get_be16(&edata_ptr);
            q->subpacket[s].js_vlc_bits = bytestream_get_be16(&edata_ptr);
            extradata_size -= 8;
        }

        /* Initialize extradata related variables. */
        q->subpacket[s].samples_per_channel = samples_per_frame / avctx->channels;
        q->subpacket[s].bits_per_subpacket = avctx->block_align * 8;

        /* Initialize default data states. */
        q->subpacket[s].log2_numvector_size = 5;
        q->subpacket[s].total_subbands = q->subpacket[s].subbands;
        q->subpacket[s].num_channels = 1;

        /* Initialize version-dependent variables */

        av_log(avctx, AV_LOG_DEBUG, "subpacket[%i].cookversion=%x\n", s,
               q->subpacket[s].cookversion);
        q->subpacket[s].joint_stereo = 0;
        switch (q->subpacket[s].cookversion) {
        case MONO:
            if (avctx->channels != 1) {
                av_log_ask_for_sample(avctx, "Container channels != 1.\n");
                return AVERROR_PATCHWELCOME;
            }
            av_log(avctx, AV_LOG_DEBUG, "MONO\n");
            break;
        case STEREO:
            if (avctx->channels != 1) {
                q->subpacket[s].bits_per_subpdiv = 1;
                q->subpacket[s].num_channels = 2;
            }
            av_log(avctx, AV_LOG_DEBUG, "STEREO\n");
            break;
        case JOINT_STEREO:
            if (avctx->channels != 2) {
                av_log_ask_for_sample(avctx, "Container channels != 2.\n");
                return AVERROR_PATCHWELCOME;
            }
            av_log(avctx, AV_LOG_DEBUG, "JOINT_STEREO\n");
            if (avctx->extradata_size >= 16) {
                q->subpacket[s].total_subbands = q->subpacket[s].subbands +
                                                 q->subpacket[s].js_subband_start;
                q->subpacket[s].joint_stereo = 1;
                q->subpacket[s].num_channels = 2;
            }
            if (q->subpacket[s].samples_per_channel > 256) {
                q->subpacket[s].log2_numvector_size = 6;
            }
            if (q->subpacket[s].samples_per_channel > 512) {
                q->subpacket[s].log2_numvector_size = 7;
            }
            break;
        case MC_COOK:
            av_log(avctx, AV_LOG_DEBUG, "MULTI_CHANNEL\n");
            if (extradata_size >= 4)
                channel_mask |= q->subpacket[s].channel_mask = bytestream_get_be32(&edata_ptr);

            if (av_get_channel_layout_nb_channels(q->subpacket[s].channel_mask) > 1) {
                q->subpacket[s].total_subbands = q->subpacket[s].subbands +
                                                 q->subpacket[s].js_subband_start;
                q->subpacket[s].joint_stereo = 1;
                q->subpacket[s].num_channels = 2;
                q->subpacket[s].samples_per_channel = samples_per_frame >> 1;

                if (q->subpacket[s].samples_per_channel > 256) {
                    q->subpacket[s].log2_numvector_size = 6;
                }
                if (q->subpacket[s].samples_per_channel > 512) {
                    q->subpacket[s].log2_numvector_size = 7;
                }
            } else
                q->subpacket[s].samples_per_channel = samples_per_frame;

            break;
        default:
            av_log_ask_for_sample(avctx, "Unknown Cook version.\n");
            return AVERROR_PATCHWELCOME;
        }