/**
 * Build the four MJPEG Huffman decoding tables (DC/AC x luma/chroma)
 * and set up the DSP contexts and zigzag scan table used for JPEG tiles.
 *
 * @return 0 on success, or the first non-zero error from build_vlc()
 */
static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
{
    int ret;

    /* The first failing build_vlc() short-circuits the chain and its
     * error code is propagated unchanged. */
    if ((ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
                         avpriv_mjpeg_val_dc, 12, 0)) ||
        (ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
                         avpriv_mjpeg_val_dc, 12, 0)) ||
        (ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
                         avpriv_mjpeg_val_ac_luminance, 251, 1)) ||
        (ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
                         avpriv_mjpeg_val_ac_chrominance, 251, 1)))
        return ret;

    ff_blockdsp_init(&c->bdsp, avctx);
    ff_idctdsp_init(&c->idsp, avctx);
    ff_init_scantable(c->idsp.idct_permutation, &c->scantable,
                      ff_zigzag_direct);

    return 0;
}
/**
 * Initialize the ASV1/ASV2 decoder: common state, DSP contexts, VLC and
 * scan tables, and the dequantization matrix derived from the qscale
 * byte stored in extradata.
 *
 * @return 0 (initialization cannot fail; bad extradata falls back to a
 *         per-codec default qscale)
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    ASV1Context *const a = avctx->priv_data;
    const int scale      = avctx->codec_id == AV_CODEC_ID_ASV1 ? 1 : 2;
    int i;

    if (avctx->extradata_size < 1)
        av_log(avctx, AV_LOG_WARNING, "No extradata provided\n");

    ff_asv_common_init(avctx);
    ff_blockdsp_init(&a->bdsp, avctx);
    ff_idctdsp_init(&a->idsp, avctx);
    init_vlcs(a);
    ff_init_scantable(a->idsp.idct_permutation, &a->scantable, ff_asv_scantab);
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    /* A missing or zero qscale byte is recovered from with a
     * codec-specific default instead of failing. */
    if (avctx->extradata_size < 1 ||
        (a->inv_qscale = avctx->extradata[0]) == 0) {
        av_log(avctx, AV_LOG_ERROR, "illegal qscale 0\n");
        a->inv_qscale = avctx->codec_id == AV_CODEC_ID_ASV1 ? 6 : 10;
    }

    /* Pre-scale the MPEG-1 intra matrix in scan order. */
    for (i = 0; i < 64; i++) {
        const int idx      = ff_asv_scantab[i];
        a->intra_matrix[i] = 64 * scale *
                             ff_mpeg1_default_intra_matrix[idx] /
                             a->inv_qscale;
    }

    return 0;
}
/**
 * Initialize the Electronic Arts TQI decoder: DSP helpers, a natural-order
 * scan table, the fixed 15 fps frame rate and YUV420P output format.
 */
static av_cold int tqi_decode_init(AVCodecContext *avctx)
{
    TqiContext *t = avctx->priv_data;

    /* DSP helpers used by the block decoding loop. */
    ff_blockdsp_init(&t->bdsp, avctx);
    ff_bswapdsp_init(&t->bsdsp);
    ff_idctdsp_init(&t->idsp, avctx);

    /* Coefficients are stored unpermuted, so use the identity permutation. */
    ff_init_scantable_permutation(t->idsp.idct_permutation, FF_IDCT_PERM_NONE);
    ff_init_scantable(t->idsp.idct_permutation, &t->intra_scantable,
                      ff_zigzag_direct);

    avctx->framerate.num = 15;
    avctx->framerate.den = 1;
    avctx->pix_fmt       = AV_PIX_FMT_YUV420P;

    ff_mpeg12_init_vlcs();

    return 0;
}
/**
 * Initialize the Electronic Arts TQI decoder (MpegEncContext-based
 * variant): DSP helpers, a natural-order scan table, fixed qscale and
 * the 15 fps time base with YUV420P output.
 */
static av_cold int tqi_decode_init(AVCodecContext *avctx)
{
    TqiContext *t     = avctx->priv_data;
    MpegEncContext *s = &t->s;

    s->avctx = avctx;

    ff_blockdsp_init(&s->bdsp, avctx);
    ff_bswapdsp_init(&t->bsdsp);
    ff_idctdsp_init(&s->idsp, avctx);

    /* Coefficients are stored unpermuted, so use the identity permutation. */
    ff_init_scantable_permutation(s->idsp.idct_permutation, FF_IDCT_PERM_NONE);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable,
                      ff_zigzag_direct);

    /* TQI streams carry no quantizer; a constant of 1 is used throughout. */
    s->qscale = 1;

    avctx->time_base.num = 1;
    avctx->time_base.den = 15;
    avctx->pix_fmt       = AV_PIX_FMT_YUV420P;

    ff_mpeg12_init_vlcs();

    return 0;
}
/**
 * Initialize the Electronic Arts MAD decoder: DSP contexts, a
 * natural-order scan table, MPEG-1/2 VLC tables, and the reference
 * frame used for inter prediction.
 *
 * @return 0 on success, AVERROR(ENOMEM) if the reference frame
 *         cannot be allocated
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    MadContext *s = avctx->priv_data;

    s->avctx       = avctx;
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    ff_blockdsp_init(&s->bdsp, avctx);
    ff_bswapdsp_init(&s->bbdsp);
    ff_idctdsp_init(&s->idsp, avctx);

    /* Coefficients are stored unpermuted, so use the identity permutation. */
    ff_init_scantable_permutation(s->idsp.idct_permutation, FF_IDCT_PERM_NONE);
    ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
                      ff_zigzag_direct);
    ff_mpeg12_init_vlcs();

    s->last_frame = av_frame_alloc();
    return s->last_frame ? 0 : AVERROR(ENOMEM);
}
/**
 * Initialize the ProRes decoder: DSP contexts and the IDCT-permuted
 * progressive/interlaced coefficient scan orders.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    uint8_t perm[64];

    avctx->bits_per_raw_sample = 10;

    ff_blockdsp_init(&ctx->bdsp, avctx);
    ff_proresdsp_init(&ctx->prodsp, avctx);

    /* Pre-permute both scan orders for the IDCT implementation in use. */
    ff_init_scantable_permutation(perm, ctx->prodsp.idct_permutation_type);
    permute(ctx->progressive_scan, ff_prores_progressive_scan, perm);
    permute(ctx->interlaced_scan,  ff_prores_interlaced_scan,  perm);

    return 0;
}
/**
 * Initialize the Bitmap Brothers JV decoder: validate the frame
 * dimensions, allocate the persistent frame and set the DSP context.
 *
 * @return 0 on success, AVERROR(EINVAL) for bad dimensions,
 *         AVERROR(ENOMEM) if the frame cannot be allocated
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    JvContext *s = avctx->priv_data;
    const int w  = avctx->width;
    const int h  = avctx->height;

    /* Decoding operates on 8x8 blocks, so both dimensions must be
     * non-zero multiples of 8. */
    if (!w || !h || (w & 7) || (h & 7)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid video dimensions: %dx%d\n", w, h);
        return AVERROR(EINVAL);
    }

    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);

    avctx->pix_fmt = AV_PIX_FMT_PAL8;
    ff_blockdsp_init(&s->bdsp, avctx);

    return 0;
}
/**
 * Initialize the ProRes decoder: DSP contexts, IDCT-permuted scan
 * orders, and the codec profile derived from the container FOURCC.
 *
 * Fix: AVCodecContext.codec_tag is an unsigned int, but the unknown-tag
 * warning formatted it with %d (a signed conversion specifier), which is
 * a mismatched printf specifier. It is now logged as %08X, matching how
 * the other ProRes init in this project reports unknown FOURCCs.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    uint8_t idct_permutation[64];

    avctx->bits_per_raw_sample = 10;

    ff_blockdsp_init(&ctx->bdsp, avctx);
    ff_proresdsp_init(&ctx->prodsp, avctx);

    /* Pre-permute both scan orders for the IDCT implementation in use. */
    ff_init_scantable_permutation(idct_permutation,
                                  ctx->prodsp.idct_permutation_type);
    permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
    permute(ctx->interlaced_scan,  ff_prores_interlaced_scan,  idct_permutation);

    switch (avctx->codec_tag) {
    case MKTAG('a','p','c','o'):
        avctx->profile = FF_PROFILE_PRORES_PROXY;
        break;
    case MKTAG('a','p','c','s'):
        avctx->profile = FF_PROFILE_PRORES_LT;
        break;
    case MKTAG('a','p','c','n'):
        avctx->profile = FF_PROFILE_PRORES_STANDARD;
        break;
    case MKTAG('a','p','c','h'):
        avctx->profile = FF_PROFILE_PRORES_HQ;
        break;
    case MKTAG('a','p','4','h'):
        avctx->profile = FF_PROFILE_PRORES_4444;
        break;
    case MKTAG('a','p','4','x'):
        avctx->profile = FF_PROFILE_PRORES_XQ;
        break;
    default:
        avctx->profile = FF_PROFILE_UNKNOWN;
        /* codec_tag is unsigned; log it in hex like a FOURCC. */
        av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %08X\n",
               avctx->codec_tag);
    }

    return 0;
}
/**
 * Initialize the ProRes decoder: DSP contexts, output pixel format
 * chosen from the container FOURCC, and the IDCT-permuted scan orders.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    uint8_t idct_permutation[64];

    avctx->bits_per_raw_sample = 10;

    ff_blockdsp_init(&ctx->bdsp, avctx);
    ff_proresdsp_init(&ctx->prodsp, avctx);

    switch (avctx->codec_tag) {
    /* All 4:2:2 variants (HQ, standard, LT, proxy) share one format. */
    case MKTAG('a', 'p', 'c', 'h'):
    case MKTAG('a', 'p', 'c', 'n'):
    case MKTAG('a', 'p', 'c', 's'):
    case MKTAG('a', 'p', 'c', 'o'):
        avctx->pix_fmt = PIX_FMT_YUV422P10;
        break;
    case MKTAG('a', 'p', '4', 'h'):
        avctx->pix_fmt = PIX_FMT_YUV444P10;
        break;
    default:
        av_log(avctx, AV_LOG_WARNING,
               "Unknown ProRes FOURCC provided (%08X)\n", avctx->codec_tag);
    }

    /* Pre-permute both scan orders for the IDCT implementation in use. */
    ff_init_scantable_permutation(idct_permutation,
                                  ctx->prodsp.idct_permutation_type);
    permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
    permute(ctx->interlaced_scan,  ff_prores_interlaced_scan,  idct_permutation);

    return 0;
}
/**
 * Initialize the NewTek SpeedHQ decoder: one-time static tables, DSP
 * contexts, and the subsampling/alpha/pixel-format configuration
 * selected by the stream's FOURCC.
 *
 * @return 0 on success, AVERROR_UNKNOWN if static init fails,
 *         AVERROR_INVALIDDATA for an unrecognized FOURCC
 */
static av_cold int speedhq_decode_init(AVCodecContext *avctx)
{
    /* Maps each known SpeedHQ FOURCC to its chroma subsampling, alpha
     * coding mode and output pixel format. */
    static const struct {
        uint32_t tag;
        int subsampling;
        int alpha_type;
        enum AVPixelFormat pix_fmt;
    } variants[] = {
        { MKTAG('S','H','Q','0'), SHQ_SUBSAMPLING_420, SHQ_NO_ALPHA,  AV_PIX_FMT_YUV420P  },
        { MKTAG('S','H','Q','1'), SHQ_SUBSAMPLING_420, SHQ_RLE_ALPHA, AV_PIX_FMT_YUVA420P },
        { MKTAG('S','H','Q','2'), SHQ_SUBSAMPLING_422, SHQ_NO_ALPHA,  AV_PIX_FMT_YUV422P  },
        { MKTAG('S','H','Q','3'), SHQ_SUBSAMPLING_422, SHQ_RLE_ALPHA, AV_PIX_FMT_YUVA422P },
        { MKTAG('S','H','Q','4'), SHQ_SUBSAMPLING_444, SHQ_NO_ALPHA,  AV_PIX_FMT_YUV444P  },
        { MKTAG('S','H','Q','5'), SHQ_SUBSAMPLING_444, SHQ_RLE_ALPHA, AV_PIX_FMT_YUVA444P },
        { MKTAG('S','H','Q','7'), SHQ_SUBSAMPLING_422, SHQ_DCT_ALPHA, AV_PIX_FMT_YUVA422P },
        { MKTAG('S','H','Q','9'), SHQ_SUBSAMPLING_444, SHQ_DCT_ALPHA, AV_PIX_FMT_YUVA444P },
    };
    static AVOnce init_once = AV_ONCE_INIT;
    SHQContext *const s = avctx->priv_data;
    size_t i;

    s->avctx = avctx;

    if (ff_thread_once(&init_once, speedhq_static_init))
        return AVERROR_UNKNOWN;

    ff_blockdsp_init(&s->bdsp, avctx);
    ff_idctdsp_init(&s->idsp, avctx);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable,
                      ff_zigzag_direct);

    /* Look the FOURCC up in the variant table; anything else is an error. */
    for (i = 0; i < sizeof(variants) / sizeof(variants[0]); i++)
        if (variants[i].tag == avctx->codec_tag)
            break;
    if (i == sizeof(variants) / sizeof(variants[0])) {
        av_log(avctx, AV_LOG_ERROR,
               "Unknown NewTek SpeedHQ FOURCC provided (%08X)\n",
               avctx->codec_tag);
        return AVERROR_INVALIDDATA;
    }

    s->subsampling = variants[i].subsampling;
    s->alpha_type  = variants[i].alpha_type;
    avctx->pix_fmt = variants[i].pix_fmt;

    /* This matches what NDI's RGB -> Y'CbCr 4:2:2 converter uses. */
    avctx->colorspace             = AVCOL_SPC_BT470BG;
    avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;

    return 0;
}