/* EA MAD decoder init: set up the DSP helpers, the zigzag scan table,
 * the shared MPEG-1/2 VLC tables, and the reference-frame storage. */
static av_cold int decode_init(AVCodecContext *avctx)
{
    MadContext *s = avctx->priv_data;

    s->avctx        = avctx;
    avctx->pix_fmt  = AV_PIX_FMT_YUV420P;

    /* DSP contexts used by the block decoder. */
    ff_blockdsp_init(&s->bdsp, avctx);
    ff_bswapdsp_init(&s->bbdsp);
    ff_idctdsp_init(&s->idsp, avctx);

    /* Identity IDCT permutation, then the permuted zigzag order. */
    ff_init_scantable_permutation(s->idsp.idct_permutation, FF_IDCT_PERM_NONE);
    ff_init_scantable(s->idsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    ff_mpeg12_init_vlcs();

    s->last_frame = av_frame_alloc();
    if (!s->last_frame)
        return AVERROR(ENOMEM);

    return 0;
}
/* EA TQI decoder init (legacy dsputil variant): pick the EA IDCT by
 * default, build the intra scan table and the MPEG-1/2 VLCs. */
static av_cold int tqi_decode_init(AVCodecContext *avctx)
{
    TqiContext     *t = avctx->priv_data;
    MpegEncContext *s = &t->s;

    s->avctx = avctx;

    /* Honour a caller-forced IDCT; otherwise use the EA-specific one. */
    if (avctx->idct_algo == FF_IDCT_AUTO)
        avctx->idct_algo = FF_IDCT_EA;

    dsputil_init(&s->dsp, avctx);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);

    s->qscale        = 1;
    avctx->time_base = (AVRational){ 1, 15 }; /* fixed 15 fps stream */
    avctx->pix_fmt   = PIX_FMT_YUV420P;

    ff_mpeg12_init_vlcs();
    return 0;
}
/* Common VP5/VP6 decoder context setup.
 * flip:      nonzero when frames are stored bottom-up
 * has_alpha: nonzero for the alpha-plane (YUVA) variant */
av_cold void ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
                                  int flip, int has_alpha)
{
    int i;

    s->avctx       = avctx;
    avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;

    ff_dsputil_init(&s->dsp, avctx);
    ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
    ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);
    ff_init_scantable_permutation(s->dsp.idct_permutation, s->vp3dsp.idct_perm);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    for (i = 0; i < 4; i++) {
        s->framep[i] = &s->frames[i];
        avcodec_get_frame_defaults(&s->frames[i]);
    }
    /* The "unused" slots alias the golden frames until real ones exist. */
    s->framep[VP56_FRAME_UNUSED]  = s->framep[VP56_FRAME_GOLDEN];
    s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];

    s->edge_emu_buffer_alloc = NULL;
    s->above_blocks          = NULL;
    s->macroblocks           = NULL;
    s->quantizer             = -1; /* forces quantizer-dependent tables to be rebuilt */
    s->deblock_filtering     = 1;
    s->golden_frame          = 0;
    s->filter                = NULL;
    s->has_alpha             = has_alpha;
    s->modelp                = &s->model;

    /* Scan direction and first/second row block indices for flipped frames. */
    s->flip = flip ? -1 : 1;
    s->frbi = flip ?  2 : 0;
    s->srbi = flip ?  0 : 2;
}
/* EA TQI decoder init (idsp variant): set up block/bswap/IDCT DSP
 * contexts, the intra scan table and the MPEG-1/2 VLCs. */
static av_cold int tqi_decode_init(AVCodecContext *avctx)
{
    TqiContext     *t = avctx->priv_data;
    MpegEncContext *s = &t->s;

    s->avctx = avctx;

    ff_blockdsp_init(&s->bdsp, avctx);
    ff_bswapdsp_init(&t->bsdsp);
    ff_idctdsp_init(&s->idsp, avctx);

    /* Identity IDCT permutation, then the permuted zigzag order. */
    ff_init_scantable_permutation(s->idsp.idct_permutation, FF_IDCT_PERM_NONE);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);

    s->qscale = 1;
#ifdef IDE_COMPILE
    /* Some IDE toolchains reject compound literals; assign field-by-field. */
    avctx->time_base.num = 1;
    avctx->time_base.den = 15;
#else
    avctx->time_base = (AVRational){ 1, 15 }; /* fixed 15 fps stream */
#endif
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    ff_mpeg12_init_vlcs();
    return 0;
}
/* MSC decoder init: read the inverse quantizer scale from extradata,
 * pre-scale the quant matrices and allocate the coded frame plus its
 * per-macroblock qscale table.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 *
 * Fix: the original never checked the av_malloc() result for
 * p->qscale_table, so the following memset() dereferenced NULL on OOM. */
static int decode_init(AVCodecContext *avctx)
{
    MscDecoderContext *mscDecoderContext = avctx->priv_data;
    MscCodecContext   *mscContext        = &mscDecoderContext->mscContext;
    AVFrame *p;
    const int scale = 1;

    init_common(avctx, mscContext);
    ff_init_scantable(mscContext->dsp.idct_permutation, &mscContext->scantable, scantab);

    avctx->pix_fmt = PIX_FMT_YUV420P;

    /* Missing or zero qscale in extradata: warn and fall back to 8
     * (deliberate best-effort, not a hard failure). */
    if (avctx->extradata_size < 1 ||
        (mscContext->inv_qscale = avctx->extradata[0]) == 0) {
        av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n");
        mscContext->inv_qscale = 8;
    }

    /* Pre-scale the intra/inter quant matrices in zigzag order. */
    for (int i = 0; i < 64; i++) {
        int index = scantab[i];

        mscContext->intra_matrix[i]     = 64 * scale * quant_intra_matrix[index] /
                                          mscContext->inv_qscale;
        mscContext->non_intra_matrix[i] = 64 * scale * ff_mpeg1_default_non_intra_matrix[index] /
                                          mscContext->inv_qscale;
    }

    /* Allocate the reusable output frame. */
    p = avctx->coded_frame = avcodec_alloc_frame();
    if (!p) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    p->qstride = mscContext->mb_width;
    p->quality = (32 * scale + mscContext->inv_qscale / 2) / mscContext->inv_qscale;

    p->qscale_table = av_malloc(p->qstride * mscContext->mb_height);
    if (!p->qscale_table) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate qscale table.\n");
        return AVERROR(ENOMEM);
    }
    memset(p->qscale_table, p->quality, p->qstride * mscContext->mb_height);

    return 0;
}
int BIKPlayer::video_init(int w, int h) { int bw, bh, blocks; int i; if (!bink_trees[15].table) { for (i = 0; i < 16; i++) { const int maxbits = bink_tree_lens[i][15]; bink_trees[i].table = table + i*128; bink_trees[i].table_allocated = 1 << maxbits; bink_trees[i].init_vlc(maxbits, 16, bink_tree_lens[i], 1, 1, bink_tree_bits[i], 1, 1, INIT_VLC_LE); } } memset(&c_pic,0, sizeof(AVFrame)); memset(&c_last,0, sizeof(AVFrame)); if (w<(signed) header.width || h<(signed) header.height) { //movie dimensions are higher than available screen return 1; } ff_init_scantable(&c_scantable, bink_scan); bw = (header.width + 7) >> 3; bh = (header.height + 7) >> 3; blocks = bw * bh; for (i = 0; i < BINK_NB_SRC; i++) { c_bundle[i].data = (uint8_t *) av_malloc(blocks * 64); //not enough memory if(!c_bundle[i].data) { return 2; } c_bundle[i].data_end = (uint8_t *) c_bundle[i].data + blocks * 64; } return 0; }
/* Look up the CID table for this compression ID and (once) build the
 * AC/DC/run VLC tables and the zigzag scan table.
 * Returns 0 on success, -1 on unsupported cid or VLC build failure.
 *
 * Fix: the original ignored the return values of the three init_vlc()
 * calls; an allocation failure there left unusable (partially built)
 * tables that the decode path would then read. */
static int dnxhd_init_vlc(DNXHDContext *ctx, int cid)
{
    if (!ctx->cid_table) {
        int index;

        if ((index = ff_dnxhd_get_cid_table(cid)) < 0) {
            av_log(ctx->avctx, AV_LOG_ERROR, "unsupported cid %d\n", cid);
            return -1;
        }
        ctx->cid_table = &ff_dnxhd_cid_table[index];

        if (init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257,
                     ctx->cid_table->ac_bits, 1, 1,
                     ctx->cid_table->ac_codes, 2, 2, 0) < 0 ||
            init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS,
                     ctx->cid_table->bit_depth + 4,
                     ctx->cid_table->dc_bits, 1, 1,
                     ctx->cid_table->dc_codes, 1, 1, 0) < 0 ||
            init_vlc(&ctx->run_vlc, DNXHD_VLC_BITS, 62,
                     ctx->cid_table->run_bits, 1, 1,
                     ctx->cid_table->run_codes, 2, 2, 0) < 0) {
            av_log(ctx->avctx, AV_LOG_ERROR, "failed to build VLC tables\n");
            return -1;
        }

        ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, ff_zigzag_direct);
    }
    return 0;
}
/* NewTek SpeedHQ decoder init: run the one-time static tables setup,
 * initialize the DSP/scan contexts, and map the SHQn FOURCC to its
 * subsampling, alpha coding and output pixel format. */
static av_cold int speedhq_decode_init(AVCodecContext *avctx)
{
    /* FOURCC -> (subsampling, alpha coding, pixel format) table;
     * replaces the original switch with identical assignments. */
    static const struct {
        uint32_t tag;
        int subsampling;
        int alpha_type;
        enum AVPixelFormat pix_fmt;
    } variants[] = {
        { MKTAG('S','H','Q','0'), SHQ_SUBSAMPLING_420, SHQ_NO_ALPHA,  AV_PIX_FMT_YUV420P  },
        { MKTAG('S','H','Q','1'), SHQ_SUBSAMPLING_420, SHQ_RLE_ALPHA, AV_PIX_FMT_YUVA420P },
        { MKTAG('S','H','Q','2'), SHQ_SUBSAMPLING_422, SHQ_NO_ALPHA,  AV_PIX_FMT_YUV422P  },
        { MKTAG('S','H','Q','3'), SHQ_SUBSAMPLING_422, SHQ_RLE_ALPHA, AV_PIX_FMT_YUVA422P },
        { MKTAG('S','H','Q','4'), SHQ_SUBSAMPLING_444, SHQ_NO_ALPHA,  AV_PIX_FMT_YUV444P  },
        { MKTAG('S','H','Q','5'), SHQ_SUBSAMPLING_444, SHQ_RLE_ALPHA, AV_PIX_FMT_YUVA444P },
        { MKTAG('S','H','Q','7'), SHQ_SUBSAMPLING_422, SHQ_DCT_ALPHA, AV_PIX_FMT_YUVA422P },
        { MKTAG('S','H','Q','9'), SHQ_SUBSAMPLING_444, SHQ_DCT_ALPHA, AV_PIX_FMT_YUVA444P },
    };
    static AVOnce init_once = AV_ONCE_INIT;
    SHQContext *const s = avctx->priv_data;
    size_t i;

    s->avctx = avctx;

    if (ff_thread_once(&init_once, speedhq_static_init))
        return AVERROR_UNKNOWN;

    ff_blockdsp_init(&s->bdsp, avctx);
    ff_idctdsp_init(&s->idsp, avctx);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);

    for (i = 0; i < sizeof(variants) / sizeof(variants[0]); i++) {
        if (variants[i].tag == avctx->codec_tag) {
            s->subsampling = variants[i].subsampling;
            s->alpha_type  = variants[i].alpha_type;
            avctx->pix_fmt = variants[i].pix_fmt;
            break;
        }
    }
    if (i == sizeof(variants) / sizeof(variants[0])) {
        av_log(avctx, AV_LOG_ERROR, "Unknown NewTek SpeedHQ FOURCC provided (%08X)\n",
               avctx->codec_tag);
        return AVERROR_INVALIDDATA;
    }

    /* This matches what NDI's RGB -> Y'CbCr 4:2:2 converter uses. */
    avctx->colorspace = AVCOL_SPC_BT470BG;
    avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;

    return 0;
}