static av_cold int decode_init(AVCodecContext *avctx) { AnsiContext *s = avctx->priv_data; avctx->pix_fmt = AV_PIX_FMT_PAL8; s->frame = av_frame_alloc(); if (!s->frame) return AVERROR(ENOMEM); /* defaults */ s->font = avpriv_vga16_font; s->font_height = 16; s->fg = DEFAULT_FG_COLOR; s->bg = DEFAULT_BG_COLOR; if (!avctx->width || !avctx->height) { int ret = ff_set_dimensions(avctx, 80 << 3, 25 << 4); if (ret < 0) return ret; } else if (avctx->width % FONT_WIDTH || avctx->height % s->font_height) { av_log(avctx, AV_LOG_ERROR, "Invalid dimensions %d %d\n", avctx->width, avctx->height); return AVERROR(EINVAL); } return 0; }
static int ffmal_update_format(AVCodecContext *avctx) { MMALDecodeContext *ctx = avctx->priv_data; MMAL_STATUS_T status; int ret = 0; MMAL_COMPONENT_T *decoder = ctx->decoder; MMAL_ES_FORMAT_T *format_out = decoder->output[0]->format; ffmmal_poolref_unref(ctx->pool_out); if (!(ctx->pool_out = av_mallocz(sizeof(*ctx->pool_out)))) { ret = AVERROR(ENOMEM); goto fail; } ctx->pool_out->refcount = 1; if (!format_out) goto fail; if ((status = mmal_port_parameter_set_uint32(decoder->output[0], MMAL_PARAMETER_EXTRA_BUFFERS, ctx->extra_buffers))) goto fail; if ((status = mmal_port_parameter_set_boolean(decoder->output[0], MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS, 0))) goto fail; if (avctx->pix_fmt == AV_PIX_FMT_MMAL) { format_out->encoding = MMAL_ENCODING_OPAQUE; } else { format_out->encoding_variant = format_out->encoding = MMAL_ENCODING_I420; } if ((status = mmal_port_format_commit(decoder->output[0]))) goto fail; if ((ret = ff_set_dimensions(avctx, format_out->es->video.crop.x + format_out->es->video.crop.width, format_out->es->video.crop.y + format_out->es->video.crop.height)) < 0) goto fail; if (format_out->es->video.par.num && format_out->es->video.par.den) { avctx->sample_aspect_ratio.num = format_out->es->video.par.num; avctx->sample_aspect_ratio.den = format_out->es->video.par.den; } avctx->colorspace = ffmmal_csp_to_av_csp(format_out->es->video.color_space); decoder->output[0]->buffer_size = FFMAX(decoder->output[0]->buffer_size_min, decoder->output[0]->buffer_size_recommended); decoder->output[0]->buffer_num = FFMAX(decoder->output[0]->buffer_num_min, decoder->output[0]->buffer_num_recommended) + ctx->extra_buffers; ctx->pool_out->pool = mmal_pool_create(decoder->output[0]->buffer_num, decoder->output[0]->buffer_size); if (!ctx->pool_out->pool) { ret = AVERROR(ENOMEM); goto fail; } return 0; fail: return ret < 0 ? ret : AVERROR_UNKNOWN; }
/* XXX: make it use less memory */ static int mpeg4_decode_header(AVCodecParserContext *s1, AVCodecContext *avctx, const uint8_t *buf, int buf_size) { struct Mp4vParseContext *pc = s1->priv_data; Mpeg4DecContext *dec_ctx = &pc->dec_ctx; MpegEncContext *s = &dec_ctx->m; GetBitContext gb1, *gb = &gb1; int ret; s->avctx = avctx; s->current_picture_ptr = &s->current_picture; if (avctx->extradata_size && pc->first_picture) { init_get_bits(gb, avctx->extradata, avctx->extradata_size * 8); ret = ff_mpeg4_decode_picture_header(dec_ctx, gb); } init_get_bits(gb, buf, 8 * buf_size); ret = ff_mpeg4_decode_picture_header(dec_ctx, gb); if (s->width && (!avctx->width || !avctx->height || !avctx->coded_width || !avctx->coded_height)) { ret = ff_set_dimensions(avctx, s->width, s->height); if (ret < 0) return ret; } s1->pict_type = s->pict_type; pc->first_picture = 0; return ret; }
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; GifState *s = avctx->priv_data; AVFrame *picture = data; int ret; bytestream2_init(&s->gb, buf, buf_size); if ((ret = gif_read_header1(s)) < 0) return ret; avctx->pix_fmt = AV_PIX_FMT_PAL8; if ((ret = ff_set_dimensions(avctx, s->screen_width, s->screen_height)) < 0) return ret; if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } s->image_palette = (uint32_t *)picture->data[1]; ret = gif_parse_next_image(s, picture); if (ret < 0) return ret; *got_frame = 1; return bytestream2_tell(&s->gb); }
static int smvjpeg_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const AVPixFmtDescriptor *desc; SMVJpegDecodeContext *s = avctx->priv_data; AVFrame* mjpeg_data = s->picture[0]; int i, cur_frame = 0, ret = 0; cur_frame = avpkt->pts % s->frames_per_jpeg; /* Are we at the start of a block? */ if (!cur_frame) { av_frame_unref(mjpeg_data); ret = avcodec_decode_video2(s->avctx, mjpeg_data, &s->mjpeg_data_size, avpkt); if (ret < 0) { s->mjpeg_data_size = 0; return ret; } } else if (!s->mjpeg_data_size) return AVERROR(EINVAL); desc = av_pix_fmt_desc_get(s->avctx->pix_fmt); av_assert0(desc); if (mjpeg_data->height % (s->frames_per_jpeg << desc->log2_chroma_h)) { av_log(avctx, AV_LOG_ERROR, "Invalid height\n"); return AVERROR_INVALIDDATA; } /*use the last lot... */ *data_size = s->mjpeg_data_size; avctx->pix_fmt = s->avctx->pix_fmt; /* We shouldn't get here if frames_per_jpeg <= 0 because this was rejected in init */ ret = ff_set_dimensions(avctx, mjpeg_data->width, mjpeg_data->height / s->frames_per_jpeg); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Failed to set dimensions\n"); return ret; } if (*data_size) { s->picture[1]->extended_data = NULL; s->picture[1]->width = avctx->width; s->picture[1]->height = avctx->height; s->picture[1]->format = avctx->pix_fmt; /* ff_init_buffer_info(avctx, &s->picture[1]); */ smv_img_pnt(s->picture[1]->data, mjpeg_data->data, mjpeg_data->linesize, avctx->pix_fmt, avctx->width, avctx->height, cur_frame); for (i = 0; i < AV_NUM_DATA_POINTERS; i++) s->picture[1]->linesize[i] = mjpeg_data->linesize[i]; ret = av_frame_ref(data, s->picture[1]); } return ret; }
/* [DIRAC_STD] 10. Sequence Header. sequence_header() */ int avpriv_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb, dirac_source_params *source) { unsigned version_major; unsigned video_format, picture_coding_mode; int ret; /* [DIRAC_SPEC] 10.1 Parse Parameters. parse_parameters() */ version_major = svq3_get_ue_golomb(gb); svq3_get_ue_golomb(gb); /* version_minor */ avctx->profile = svq3_get_ue_golomb(gb); avctx->level = svq3_get_ue_golomb(gb); /* [DIRAC_SPEC] sequence_header() -> base_video_format as defined in * 10.2 Base Video Format, table 10.1 Dirac predefined video formats */ video_format = svq3_get_ue_golomb(gb); if (version_major < 2) av_log(avctx, AV_LOG_WARNING, "Stream is old and may not work\n"); else if (version_major > 2) av_log(avctx, AV_LOG_WARNING, "Stream may have unhandled features\n"); if (video_format > 20U) return AVERROR_INVALIDDATA; /* Fill in defaults for the source parameters. */ *source = dirac_source_parameters_defaults[video_format]; /* [DIRAC_STD] 10.3 Source Parameters * Override the defaults. */ if (ret = parse_source_parameters(avctx, gb, source)) return ret; ret = ff_set_dimensions(avctx, source->width, source->height); if (ret < 0) return ret; ff_set_sar(avctx, avctx->sample_aspect_ratio); /* [DIRAC_STD] picture_coding_mode shall be 0 for fields and 1 for frames * currently only used to signal field coding */ picture_coding_mode = svq3_get_ue_golomb(gb); if (picture_coding_mode != 0) { av_log(avctx, AV_LOG_ERROR, "Unsupported picture coding mode %d\n", picture_coding_mode); return AVERROR_INVALIDDATA; } return 0; }
static int tqi_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf+buf_size; TqiContext *t = avctx->priv_data; AVFrame *frame = data; int ret, w, h; w = AV_RL16(&buf[0]); h = AV_RL16(&buf[2]); tqi_calculate_qtable(t, buf[4]); buf += 8; ret = ff_set_dimensions(avctx, w, h); if (ret < 0) return ret; if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } av_fast_padded_malloc(&t->bitstream_buf, &t->bitstream_buf_size, buf_end - buf); if (!t->bitstream_buf) return AVERROR(ENOMEM); t->bsdsp.bswap_buf(t->bitstream_buf, (const uint32_t *) buf, (buf_end - buf) / 4); init_get_bits(&t->gb, t->bitstream_buf, 8 * (buf_end - buf)); t->last_dc[0] = t->last_dc[1] = t->last_dc[2] = 0; for (t->mb_y = 0; t->mb_y < (h + 15) / 16; t->mb_y++) { for (t->mb_x = 0; t->mb_x < (w + 15) / 16; t->mb_x++) { if (tqi_decode_mb(t, t->block) < 0) break; tqi_idct_put(avctx, frame, t->block); } } *got_frame = 1; return buf_size; }
static int tgq_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; TgqContext *s = avctx->priv_data; AVFrame *frame = data; int x, y, ret; int big_endian; if (buf_size < 16) { av_log(avctx, AV_LOG_WARNING, "truncated header\n"); return AVERROR_INVALIDDATA; } big_endian = AV_RL32(&buf[4]) > 0x000FFFFF; bytestream2_init(&s->gb, buf + 8, buf_size - 8); if (big_endian) { s->width = bytestream2_get_be16u(&s->gb); s->height = bytestream2_get_be16u(&s->gb); } else { s->width = bytestream2_get_le16u(&s->gb); s->height = bytestream2_get_le16u(&s->gb); } ret = ff_set_dimensions(s->avctx, s->width, s->height); if (ret < 0) return ret; tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb)); bytestream2_skip(&s->gb, 3); if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) return ret; frame->key_frame = 1; frame->pict_type = AV_PICTURE_TYPE_I; for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++) for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++) if (tgq_decode_mb(s, frame, y, x) < 0) return AVERROR_INVALIDDATA; *got_frame = 1; return avpkt->size; }
static int tqi_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf+buf_size; TqiContext *t = avctx->priv_data; MpegEncContext *s = &t->s; AVFrame *frame = data; int ret; s->width = AV_RL16(&buf[0]); s->height = AV_RL16(&buf[2]); tqi_calculate_qtable(s, buf[4]); buf += 8; ret = ff_set_dimensions(s->avctx, s->width, s->height); if (ret < 0) return ret; if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) return ret; av_fast_padded_malloc(&t->bitstream_buf, &t->bitstream_buf_size, buf_end - buf); if (!t->bitstream_buf) return AVERROR(ENOMEM); t->bsdsp.bswap_buf(t->bitstream_buf, (const uint32_t *) buf, (buf_end - buf) / 4); init_get_bits(&s->gb, t->bitstream_buf, 8*(buf_end-buf)); s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 0; for (s->mb_y=0; s->mb_y<(avctx->height+15)/16; s->mb_y++) for (s->mb_x=0; s->mb_x<(avctx->width+15)/16; s->mb_x++) { if (tqi_decode_mb(s, t->block) < 0) goto end; tqi_idct_put(t, frame, t->block); } end: *got_frame = 1; return buf_size; }
static int svc_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { SVCContext *s = avctx->priv_data; SBufferInfo info = { 0 }; uint8_t* ptrs[3]; int ret, linesize[3]; AVFrame *avframe = data; DECODING_STATE state; state = (*s->decoder)->DecodeFrame2(s->decoder, avpkt->data, avpkt->size, ptrs, &info); if (state != dsErrorFree) { av_log(avctx, AV_LOG_ERROR, "DecodeFrame2 failed\n"); return AVERROR_UNKNOWN; } if (info.iBufferStatus != 1) { av_log(avctx, AV_LOG_DEBUG, "No frame produced\n"); return avpkt->size; } ret = ff_set_dimensions(avctx, info.UsrData.sSystemBuffer.iWidth, info.UsrData.sSystemBuffer.iHeight); if (ret < 0) return ret; // The decoder doesn't (currently) support decoding into a user // provided buffer, so do a copy instead. if (ff_get_buffer(avctx, avframe, 0) < 0) { av_log(avctx, AV_LOG_ERROR, "Unable to allocate buffer\n"); return AVERROR(ENOMEM); } linesize[0] = info.UsrData.sSystemBuffer.iStride[0]; linesize[1] = linesize[2] = info.UsrData.sSystemBuffer.iStride[1]; av_image_copy(avframe->data, avframe->linesize, (const uint8_t **) ptrs, linesize, avctx->pix_fmt, avctx->width, avctx->height); avframe->pts = avpkt->pts; avframe->pkt_dts = avpkt->dts; #if FF_API_PKT_PTS FF_DISABLE_DEPRECATION_WARNINGS avframe->pkt_pts = avpkt->pts; FF_ENABLE_DEPRECATION_WARNINGS #endif *got_frame = 1; return avpkt->size; }
static int vp5_parse_header(VP56Context *s, const uint8_t *buf, int buf_size) { VP56RangeCoder *c = &s->c; int rows, cols; ff_vp56_init_range_decoder(&s->c, buf, buf_size); s->frames[VP56_FRAME_CURRENT]->key_frame = !vp56_rac_get(c); vp56_rac_get(c); ff_vp56_init_dequant(s, vp56_rac_gets(c, 6)); if (s->frames[VP56_FRAME_CURRENT]->key_frame) { vp56_rac_gets(c, 8); if(vp56_rac_gets(c, 5) > 5) return AVERROR_INVALIDDATA; vp56_rac_gets(c, 2); if (vp56_rac_get(c)) { av_log(s->avctx, AV_LOG_ERROR, "interlacing not supported\n"); return AVERROR_PATCHWELCOME; } rows = vp56_rac_gets(c, 8); /* number of stored macroblock rows */ cols = vp56_rac_gets(c, 8); /* number of stored macroblock cols */ if (!rows || !cols) { av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n", cols << 4, rows << 4); return AVERROR_INVALIDDATA; } vp56_rac_gets(c, 8); /* number of displayed macroblock rows */ vp56_rac_gets(c, 8); /* number of displayed macroblock cols */ vp56_rac_gets(c, 2); if (!s->macroblocks || /* first frame */ 16*cols != s->avctx->coded_width || 16*rows != s->avctx->coded_height) { int ret = ff_set_dimensions(s->avctx, 16 * cols, 16 * rows); if (ret < 0) return ret; return VP56_SIZE_CHANGE; } } else if (!s->macroblocks) return AVERROR_INVALIDDATA; return 0; }
static int vp56_size_changed(VP56Context *s) { AVCodecContext *avctx = s->avctx; int stride = s->frames[VP56_FRAME_CURRENT]->linesize[0]; int i; s->plane_width[0] = s->plane_width[3] = avctx->coded_width; s->plane_width[1] = s->plane_width[2] = avctx->coded_width/2; s->plane_height[0] = s->plane_height[3] = avctx->coded_height; s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2; for (i=0; i<4; i++) s->stride[i] = s->flip * s->frames[VP56_FRAME_CURRENT]->linesize[i]; s->mb_width = (avctx->coded_width +15) / 16; s->mb_height = (avctx->coded_height+15) / 16; if (s->mb_width > 1000 || s->mb_height > 1000) { ff_set_dimensions(avctx, 0, 0); av_log(avctx, AV_LOG_ERROR, "picture too big\n"); return -1; } av_reallocp_array(&s->above_blocks, 4*s->mb_width+6, sizeof(*s->above_blocks)); av_reallocp_array(&s->macroblocks, s->mb_width*s->mb_height, sizeof(*s->macroblocks)); av_free(s->edge_emu_buffer_alloc); s->edge_emu_buffer_alloc = av_malloc(16*stride); s->edge_emu_buffer = s->edge_emu_buffer_alloc; if (!s->above_blocks || !s->macroblocks || !s->edge_emu_buffer_alloc) return AVERROR(ENOMEM); if (s->flip < 0) s->edge_emu_buffer += 15 * stride; if (s->alpha_context) return vp56_size_changed(s->alpha_context); return 0; }
static av_cold int decode_init(AVCodecContext *avctx) { AnsiContext *s = avctx->priv_data; avctx->pix_fmt = AV_PIX_FMT_PAL8; s->frame = av_frame_alloc(); if (!s->frame) return AVERROR(ENOMEM); /* defaults */ s->font = ff_vga16_font; s->font_height = 16; s->fg = DEFAULT_FG_COLOR; s->bg = DEFAULT_BG_COLOR; if (!avctx->width || !avctx->height) { int ret = ff_set_dimensions(avctx, 80 << 3, 25 << 4); if (ret < 0) return ret; } return 0; }
/* XXX: make it use less memory */ static int mpeg4_decode_header(AVCodecParserContext *s1, AVCodecContext *avctx, const uint8_t *buf, int buf_size) { struct Mp4vParseContext *pc = s1->priv_data; Mpeg4DecContext *dec_ctx = &pc->dec_ctx; MpegEncContext *s = &dec_ctx->m; GetBitContext gb1, *gb = &gb1; int ret; s->avctx = avctx; s->current_picture_ptr = &s->current_picture; if (avctx->extradata_size && pc->first_picture) { init_get_bits(gb, avctx->extradata, avctx->extradata_size * 8); ret = ff_mpeg4_decode_picture_header(dec_ctx, gb); if (ret < -1) av_log(avctx, AV_LOG_WARNING, "Failed to parse extradata\n"); } init_get_bits(gb, buf, 8 * buf_size); ret = ff_mpeg4_decode_picture_header(dec_ctx, gb); if (s->width && (!avctx->width || !avctx->height || !avctx->coded_width || !avctx->coded_height)) { ret = ff_set_dimensions(avctx, s->width, s->height); if (ret < 0) return ret; } if((s1->flags & PARSER_FLAG_USE_CODEC_TS) && s->avctx->time_base.den>0 && ret>=0){ av_assert1(s1->pts == AV_NOPTS_VALUE); av_assert1(s1->dts == AV_NOPTS_VALUE); s1->pts = av_rescale_q(s->time, (AVRational){1, s->avctx->time_base.den}, (AVRational){1, 1200000}); } s1->pict_type = s->pict_type; pc->first_picture = 0; return ret; }
static int xbm_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *p = data; int ret, linesize, i, j; int width = 0; int height = 0; const uint8_t *end, *ptr = avpkt->data; const uint8_t *next; uint8_t *dst; avctx->pix_fmt = AV_PIX_FMT_MONOWHITE; end = avpkt->data + avpkt->size; width = parse_str_int(avpkt->data, avpkt->size, "_width"); height = parse_str_int(avpkt->data, avpkt->size, "_height"); if ((ret = ff_set_dimensions(avctx, width, height)) < 0) return ret; if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; // goto start of image data next = memchr(ptr, '{', avpkt->size); if (!next) next = memchr(ptr, '(', avpkt->size); if (!next) return AVERROR_INVALIDDATA; ptr = next + 1; linesize = (avctx->width + 7) / 8; for (i = 0; i < avctx->height; i++) { dst = p->data[0] + i * p->linesize[0]; for (j = 0; j < linesize; j++) { uint8_t val; while (ptr < end && *ptr != 'x' && *ptr != '$') ptr++; ptr ++; if (ptr < end && av_isxdigit(*ptr)) { val = convert(*ptr++); if (av_isxdigit(*ptr)) val = (val << 4) + convert(*ptr++); *dst++ = ff_reverse[val]; if (av_isxdigit(*ptr) && j+1 < linesize) { j++; val = convert(*ptr++); if (av_isxdigit(*ptr)) val = (val << 4) + convert(*ptr++); *dst++ = ff_reverse[val]; } } else { av_log(avctx, AV_LOG_ERROR, "Unexpected data at %.8s.\n", ptr); return AVERROR_INVALIDDATA; } } } p->key_frame = 1; p->pict_type = AV_PICTURE_TYPE_I; *got_frame = 1; return avpkt->size; }
/* Returns the number of bytes consumed from the bytestream. Returns -1 if * there was an error while decoding the header */ static int truemotion1_decode_header(TrueMotion1Context *s) { int i, ret; int width_shift = 0; int new_pix_fmt; struct frame_header header; uint8_t header_buffer[128] = { 0 }; /* logical maximum size of the header */ const uint8_t *sel_vector_table; header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f; if (s->buf[0] < 0x10 || header.header_size >= s->size) { av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]); return AVERROR_INVALIDDATA; } /* unscramble the header bytes with a XOR operation */ for (i = 1; i < header.header_size; i++) header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1]; header.compression = header_buffer[0]; header.deltaset = header_buffer[1]; header.vectable = header_buffer[2]; header.ysize = AV_RL16(&header_buffer[3]); header.xsize = AV_RL16(&header_buffer[5]); header.checksum = AV_RL16(&header_buffer[7]); header.version = header_buffer[9]; header.header_type = header_buffer[10]; header.flags = header_buffer[11]; header.control = header_buffer[12]; /* Version 2 */ if (header.version >= 2) { if (header.header_type > 3) { av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type); return AVERROR_INVALIDDATA; } else if ((header.header_type == 2) || (header.header_type == 3)) { s->flags = header.flags; if (!(s->flags & FLAG_INTERFRAME)) s->flags |= FLAG_KEYFRAME; } else s->flags = FLAG_KEYFRAME; } else /* Version 1 */ s->flags = FLAG_KEYFRAME; if (s->flags & FLAG_SPRITE) { avpriv_request_sample(s->avctx, "Frame with sprite"); /* FIXME header.width, height, xoffset and yoffset aren't initialized */ return AVERROR_PATCHWELCOME; } else { s->w = header.xsize; s->h = header.ysize; if (header.header_type < 2) { if ((s->w < 213) && (s->h >= 176)) { s->flags |= FLAG_INTERPOLATED; avpriv_request_sample(s->avctx, "Interpolated frame"); } } } if (header.compression >= 17) { av_log(s->avctx, AV_LOG_ERROR, "invalid compression type (%d)\n", header.compression); return AVERROR_INVALIDDATA; } if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable)) select_delta_tables(s, header.deltaset); if ((header.compression & 1) && header.header_type) sel_vector_table = pc_tbl2; else { if (header.vectable > 0 && header.vectable < 4) sel_vector_table = tables[header.vectable - 1]; else { av_log(s->avctx, AV_LOG_ERROR, "invalid vector table id (%d)\n", header.vectable); return AVERROR_INVALIDDATA; } } if (compression_types[header.compression].algorithm == ALGO_RGB24H) { new_pix_fmt = AV_PIX_FMT_RGB32; width_shift = 1; } else new_pix_fmt = AV_PIX_FMT_RGB555; // RGB565 is supported as well s->w >>= width_shift; if (s->w != s->avctx->width || s->h != s->avctx->height || new_pix_fmt != s->avctx->pix_fmt) { av_frame_unref(s->frame); s->avctx->sample_aspect_ratio = (AVRational){ 1 << width_shift, 1 }; s->avctx->pix_fmt = new_pix_fmt; if ((ret = ff_set_dimensions(s->avctx, s->w, s->h)) < 0) return ret; av_fast_malloc(&s->vert_pred, &s->vert_pred_size, s->avctx->width * sizeof(unsigned int)); if (!s->vert_pred) return AVERROR(ENOMEM); } /* There is 1 change bit per 4 pixels, so each change byte represents * 32 pixels; divide width by 4 to obtain the number of change bits and * then round up to the nearest byte. 
*/ s->mb_change_bits_row_size = ((s->avctx->width >> (2 - width_shift)) + 7) >> 3; if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable)) { if (compression_types[header.compression].algorithm == ALGO_RGB24H) gen_vector_table24(s, sel_vector_table); else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB555) gen_vector_table15(s, sel_vector_table); else gen_vector_table16(s, sel_vector_table); } /* set up pointers to the other key data chunks */ s->mb_change_bits = s->buf + header.header_size; if (s->flags & FLAG_KEYFRAME) { /* no change bits specified for a keyframe; only index bytes */ s->index_stream = s->mb_change_bits; } else { /* one change bit per 4x4 block */ s->index_stream = s->mb_change_bits + (s->mb_change_bits_row_size * (s->avctx->height >> 2)); } s->index_stream_size = s->size - (s->index_stream - s->buf); s->last_deltaset = header.deltaset; s->last_vectable = header.vectable; s->compression = header.compression; s->block_width = compression_types[header.compression].block_width; s->block_height = compression_types[header.compression].block_height; s->block_type = compression_types[header.compression].block_type; if (s->avctx->debug & FF_DEBUG_PICT_INFO) av_log(s->avctx, AV_LOG_INFO, "tables: %d / %d c:%d %dx%d t:%d %s%s%s%s\n", s->last_deltaset, s->last_vectable, s->compression, s->block_width, s->block_height, s->block_type, s->flags & FLAG_KEYFRAME ? " KEY" : "", s->flags & FLAG_INTERFRAME ? " INTER" : "", s->flags & FLAG_SPRITE ? " SPRITE" : "", s->flags & FLAG_INTERPOLATED ? " INTERPOL" : ""); return header.header_size; }
static int h261_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; H261Context *h = avctx->priv_data; MpegEncContext *s = &h->s; int ret; AVFrame *pict = data; av_dlog(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size); av_dlog(avctx, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]); s->flags = avctx->flags; s->flags2 = avctx->flags2; h->gob_start_code_skipped = 0; retry: init_get_bits(&s->gb, buf, buf_size * 8); if (!s->context_initialized) // we need the IDCT permutation for reading a custom matrix ff_mpv_idct_init(s); ret = h261_decode_picture_header(h); /* skip if the header was thrashed */ if (ret < 0) { av_log(s->avctx, AV_LOG_ERROR, "header damaged\n"); return -1; } if (s->width != avctx->coded_width || s->height != avctx->coded_height) { ParseContext pc = s->parse_context; // FIXME move this demuxing hack to libavformat s->parse_context.buffer = 0; ff_mpv_common_end(s); s->parse_context = pc; } if (!s->context_initialized) { if ((ret = ff_mpv_common_init(s)) < 0) return ret; ret = ff_set_dimensions(avctx, s->width, s->height); if (ret < 0) return ret; goto retry; } // for skipping the frame s->current_picture.f->pict_type = s->pict_type; s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) || (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) || avctx->skip_frame >= AVDISCARD_ALL) return get_consumed_bytes(s, buf_size); if (ff_mpv_frame_start(s, avctx) < 0) return -1; ff_mpeg_er_frame_start(s); /* decode each macroblock */ s->mb_x = 0; s->mb_y = 0; while (h->gob_number < (s->mb_height == 18 ? 12 : 5)) { if (h261_resync(h) < 0) break; h261_decode_gob(h); } ff_mpv_frame_end(s); av_assert0(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type); av_assert0(s->current_picture.f->pict_type == s->pict_type); if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0) return ret; ff_print_debug_info(s, s->current_picture_ptr, pict); *got_frame = 1; return get_consumed_bytes(s, buf_size); }
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { GifState *s = avctx->priv_data; int ret; bytestream2_init(&s->gb, avpkt->data, avpkt->size); s->frame->pts = avpkt->pts; s->frame->pkt_pts = avpkt->pts; s->frame->pkt_dts = avpkt->dts; av_frame_set_pkt_duration(s->frame, avpkt->duration); if (avpkt->size >= 6) { s->keyframe = memcmp(avpkt->data, gif87a_sig, 6) == 0 || memcmp(avpkt->data, gif89a_sig, 6) == 0; } else { s->keyframe = 0; } if (s->keyframe) { s->keyframe_ok = 0; s->gce_prev_disposal = GCE_DISPOSAL_NONE; if ((ret = gif_read_header1(s)) < 0) return ret; if ((ret = ff_set_dimensions(avctx, s->screen_width, s->screen_height)) < 0) return ret; av_frame_unref(s->frame); if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0) return ret; av_fast_malloc(&s->idx_line, &s->idx_line_size, s->screen_width); if (!s->idx_line) return AVERROR(ENOMEM); s->frame->pict_type = AV_PICTURE_TYPE_I; s->frame->key_frame = 1; s->keyframe_ok = 1; } else { if (!s->keyframe_ok) { av_log(avctx, AV_LOG_ERROR, "cannot decode frame without keyframe\n"); return AVERROR_INVALIDDATA; } if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; s->frame->pict_type = AV_PICTURE_TYPE_P; s->frame->key_frame = 0; } ret = gif_parse_next_image(s, s->frame); if (ret < 0) return ret; if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; *got_frame = 1; return bytestream2_tell(&s->gb); }
static int mediacodec_dec_parse_format(AVCodecContext *avctx, MediaCodecDecContext *s) { int width = 0; int height = 0; int32_t value = 0; char *format = NULL; if (!s->format) { av_log(avctx, AV_LOG_ERROR, "Output MediaFormat is not set\n"); return AVERROR(EINVAL); } format = ff_AMediaFormat_toString(s->format); if (!format) { return AVERROR_EXTERNAL; } av_log(avctx, AV_LOG_DEBUG, "Parsing MediaFormat %s\n", format); av_freep(&format); /* Mandatory fields */ if (!ff_AMediaFormat_getInt32(s->format, "width", &value)) { format = ff_AMediaFormat_toString(s->format); av_log(avctx, AV_LOG_ERROR, "Could not get %s from format %s\n", "width", format); av_freep(&format); return AVERROR_EXTERNAL; } s->width = value; if (!ff_AMediaFormat_getInt32(s->format, "height", &value)) { format = ff_AMediaFormat_toString(s->format); av_log(avctx, AV_LOG_ERROR, "Could not get %s from format %s\n", "height", format); av_freep(&format); return AVERROR_EXTERNAL; } s->height = value; if (!ff_AMediaFormat_getInt32(s->format, "stride", &value)) { format = ff_AMediaFormat_toString(s->format); av_log(avctx, AV_LOG_ERROR, "Could not get %s from format %s\n", "stride", format); av_freep(&format); return AVERROR_EXTERNAL; } s->stride = value > 0 ? value : s->width; if (!ff_AMediaFormat_getInt32(s->format, "slice-height", &value)) { format = ff_AMediaFormat_toString(s->format); av_log(avctx, AV_LOG_ERROR, "Could not get %s from format %s\n", "slice-height", format); av_freep(&format); return AVERROR_EXTERNAL; } s->slice_height = value > 0 ? value : s->height; if (strstr(s->codec_name, "OMX.Nvidia.")) { s->slice_height = FFALIGN(s->height, 16); } else if (strstr(s->codec_name, "OMX.SEC.avc.dec")) { s->slice_height = avctx->height; s->stride = avctx->width; } if (!ff_AMediaFormat_getInt32(s->format, "color-format", &value)) { format = ff_AMediaFormat_toString(s->format); av_log(avctx, AV_LOG_ERROR, "Could not get %s from format %s\n", "color-format", format); av_freep(&format); return AVERROR_EXTERNAL; } s->color_format = value; s->pix_fmt = avctx->pix_fmt = mcdec_map_color_format(avctx, s, value); if (avctx->pix_fmt == AV_PIX_FMT_NONE) { av_log(avctx, AV_LOG_ERROR, "Output color format is not supported\n"); return AVERROR(EINVAL); } /* Optional fields */ if (ff_AMediaFormat_getInt32(s->format, "crop-top", &value)) s->crop_top = value; if (ff_AMediaFormat_getInt32(s->format, "crop-bottom", &value)) s->crop_bottom = value; if (ff_AMediaFormat_getInt32(s->format, "crop-left", &value)) s->crop_left = value; if (ff_AMediaFormat_getInt32(s->format, "crop-right", &value)) s->crop_right = value; width = s->crop_right + 1 - s->crop_left; height = s->crop_bottom + 1 - s->crop_top; av_log(avctx, AV_LOG_INFO, "Output crop parameters top=%d bottom=%d left=%d right=%d, " "resulting dimensions width=%d height=%d\n", s->crop_top, s->crop_bottom, s->crop_left, s->crop_right, width, height); return ff_set_dimensions(avctx, width, height); }
static int vp6_parse_header(VP56Context *s, const uint8_t *buf, int buf_size) { VP56RangeCoder *c = &s->c; int parse_filter_info = 0; int coeff_offset = 0; int vrt_shift = 0; int sub_version; int rows, cols; int res = 0; int separated_coeff = buf[0] & 1; s->frames[VP56_FRAME_CURRENT]->key_frame = !(buf[0] & 0x80); ff_vp56_init_dequant(s, (buf[0] >> 1) & 0x3F); if (s->frames[VP56_FRAME_CURRENT]->key_frame) { sub_version = buf[1] >> 3; if (sub_version > 8) return AVERROR_INVALIDDATA; s->filter_header = buf[1] & 0x06; if (buf[1] & 1) { avpriv_report_missing_feature(s->avctx, "Interlacing"); return AVERROR_PATCHWELCOME; } if (separated_coeff || !s->filter_header) { coeff_offset = AV_RB16(buf+2) - 2; buf += 2; buf_size -= 2; } rows = buf[2]; /* number of stored macroblock rows */ cols = buf[3]; /* number of stored macroblock cols */ /* buf[4] is number of displayed macroblock rows */ /* buf[5] is number of displayed macroblock cols */ if (!rows || !cols) { av_log(s->avctx, AV_LOG_ERROR, "Invalid size %dx%d\n", cols << 4, rows << 4); return AVERROR_INVALIDDATA; } if (!s->macroblocks || /* first frame */ 16*cols != s->avctx->coded_width || 16*rows != s->avctx->coded_height) { if (s->avctx->extradata_size == 0 && FFALIGN(s->avctx->width, 16) == 16 * cols && FFALIGN(s->avctx->height, 16) == 16 * rows) { // We assume this is properly signalled container cropping, // in an F4V file. Just set the coded_width/height, don't // touch the cropped ones. s->avctx->coded_width = 16 * cols; s->avctx->coded_height = 16 * rows; } else { int ret = ff_set_dimensions(s->avctx, 16 * cols, 16 * rows); if (ret < 0) return ret; if (s->avctx->extradata_size == 1) { s->avctx->width -= s->avctx->extradata[0] >> 4; s->avctx->height -= s->avctx->extradata[0] & 0x0F; } } res = VP56_SIZE_CHANGE; }
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { SgiState *s = avctx->priv_data; AVFrame *p = data; unsigned int dimension, rle; int ret = 0; uint8_t *out_buf, *out_end; bytestream2_init(&s->g, avpkt->data, avpkt->size); if (bytestream2_get_bytes_left(&s->g) < SGI_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "buf_size too small (%d)\n", avpkt->size); return AVERROR_INVALIDDATA; } /* Test for SGI magic. */ if (bytestream2_get_be16(&s->g) != SGI_MAGIC) { av_log(avctx, AV_LOG_ERROR, "bad magic number\n"); return AVERROR_INVALIDDATA; } rle = bytestream2_get_byte(&s->g); s->bytes_per_channel = bytestream2_get_byte(&s->g); dimension = bytestream2_get_be16(&s->g); s->width = bytestream2_get_be16(&s->g); s->height = bytestream2_get_be16(&s->g); s->depth = bytestream2_get_be16(&s->g); if (s->bytes_per_channel != 1 && s->bytes_per_channel != 2) { av_log(avctx, AV_LOG_ERROR, "wrong channel number\n"); return -1; } /* Check for supported image dimensions. */ if (dimension != 2 && dimension != 3) { av_log(avctx, AV_LOG_ERROR, "wrong dimension number\n"); return -1; } if (s->depth == SGI_GRAYSCALE) { avctx->pix_fmt = s->bytes_per_channel == 2 ? AV_PIX_FMT_GRAY16BE : AV_PIX_FMT_GRAY8; } else if (s->depth == SGI_RGB) { avctx->pix_fmt = s->bytes_per_channel == 2 ? AV_PIX_FMT_RGB48BE : AV_PIX_FMT_RGB24; } else if (s->depth == SGI_RGBA) { avctx->pix_fmt = s->bytes_per_channel == 2 ? AV_PIX_FMT_RGBA64BE : AV_PIX_FMT_RGBA; } else { av_log(avctx, AV_LOG_ERROR, "wrong picture format\n"); return -1; } ret = ff_set_dimensions(avctx, s->width, s->height); if (ret < 0) return ret; if (ff_get_buffer(avctx, p, 0) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed.\n"); return -1; } p->pict_type = AV_PICTURE_TYPE_I; p->key_frame = 1; out_buf = p->data[0]; out_end = out_buf + p->linesize[0] * s->height; s->linesize = p->linesize[0]; /* Skip header. */ bytestream2_seek(&s->g, SGI_HEADER_SIZE, SEEK_SET); if (rle) { ret = read_rle_sgi(out_end, s); } else { ret = read_uncompressed_sgi(out_buf, s); } if (ret == 0) { *got_frame = 1; return avpkt->size; } else { return ret; } }
static int pix_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AVFrame *frame = data; int ret, i; GetByteContext gb; unsigned int bytes_pp; unsigned int magic[4]; unsigned int chunk_type; unsigned int data_len; unsigned int bytes_per_scanline; unsigned int bytes_left; PixHeader hdr; bytestream2_init(&gb, avpkt->data, avpkt->size); magic[0] = bytestream2_get_be32(&gb); magic[1] = bytestream2_get_be32(&gb); magic[2] = bytestream2_get_be32(&gb); magic[3] = bytestream2_get_be32(&gb); if (magic[0] != 0x12 || magic[1] != 0x08 || magic[2] != 0x02 || magic[3] != 0x02) { av_log(avctx, AV_LOG_ERROR, "Not a BRender PIX file.\n"); return AVERROR_INVALIDDATA; } chunk_type = bytestream2_get_be32(&gb); if (chunk_type != HEADER1_CHUNK && chunk_type != HEADER2_CHUNK) { av_log(avctx, AV_LOG_ERROR, "Invalid chunk type %d.\n", chunk_type); return AVERROR_INVALIDDATA; } ret = pix_decode_header(&hdr, &gb); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Invalid header length.\n"); return ret; } switch (hdr.format) { case 3: avctx->pix_fmt = AV_PIX_FMT_PAL8; bytes_pp = 1; break; case 4: avctx->pix_fmt = AV_PIX_FMT_RGB555BE; bytes_pp = 2; break; case 5: avctx->pix_fmt = AV_PIX_FMT_RGB565BE; bytes_pp = 2; break; case 6: avctx->pix_fmt = AV_PIX_FMT_RGB24; bytes_pp = 3; break; case 7: avctx->pix_fmt = AV_PIX_FMT_0RGB; bytes_pp = 4; break; case 8: // ARGB avctx->pix_fmt = AV_PIX_FMT_ARGB; bytes_pp = 4; break; case 18: avctx->pix_fmt = AV_PIX_FMT_Y400A; bytes_pp = 2; break; default: avpriv_request_sample(avctx, "Format %d", hdr.format); return AVERROR_PATCHWELCOME; } if ((ret = ff_set_dimensions(avctx, hdr.width, hdr.height)) < 0) return ret; if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) return ret; chunk_type = bytestream2_get_be32(&gb); if (avctx->pix_fmt == AV_PIX_FMT_PAL8 && (chunk_type == HEADER1_CHUNK || chunk_type == HEADER2_CHUNK)) { /* read palette data from data[1] */ PixHeader palhdr; uint32_t *pal_out = (uint32_t *)frame->data[1]; ret = pix_decode_header(&palhdr, &gb); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Invalid palette header length.\n"); return ret; } if (palhdr.format != 7) avpriv_request_sample(avctx, "Palette not in RGB format"); chunk_type = bytestream2_get_be32(&gb); data_len = bytestream2_get_be32(&gb); bytestream2_skip(&gb, 8); if (chunk_type != IMAGE_DATA_CHUNK || data_len != 1032 || bytestream2_get_bytes_left(&gb) < 1032) { av_log(avctx, AV_LOG_ERROR, "Invalid palette data.\n"); return AVERROR_INVALIDDATA; } // palette data is surrounded by 8 null bytes (both top and bottom) // convert 0RGB to machine endian format (ARGB32) for (i = 0; i < 256; ++i) *pal_out++ = (0xFFU << 24) | bytestream2_get_be32u(&gb); bytestream2_skip(&gb, 8); frame->palette_has_changed = 1; chunk_type = bytestream2_get_be32(&gb); } else if (avctx->pix_fmt == AV_PIX_FMT_PAL8) { /* no palette supplied, use the default one */ uint32_t *pal_out = (uint32_t *)frame->data[1]; // TODO: add an AVOption to load custom palette files av_log(avctx, AV_LOG_WARNING, "Using default palette, colors might be off.\n"); memcpy(pal_out, std_pal_table, sizeof(uint32_t) * 256); frame->palette_has_changed = 1; } data_len = bytestream2_get_be32(&gb); bytestream2_skip(&gb, 8); // read the image data to the buffer bytes_per_scanline = bytes_pp * hdr.width; bytes_left = bytestream2_get_bytes_left(&gb); if (chunk_type != IMAGE_DATA_CHUNK || data_len != bytes_left || bytes_left / bytes_per_scanline < hdr.height) { av_log(avctx, AV_LOG_ERROR, "Invalid image data.\n"); return AVERROR_INVALIDDATA; } 
av_image_copy_plane(frame->data[0], frame->linesize[0], avpkt->data + bytestream2_tell(&gb), bytes_per_scanline, bytes_per_scanline, hdr.height); frame->pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1; *got_frame = 1; return avpkt->size; }
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { PicContext *s = avctx->priv_data; AVFrame *frame = data; uint32_t *palette; int bits_per_plane, bpp, etype, esize, npal, pos_after_pal; int i, x, y, plane, tmp, ret; bytestream2_init(&s->g, avpkt->data, avpkt->size); if (bytestream2_get_bytes_left(&s->g) < 11) return AVERROR_INVALIDDATA; if (bytestream2_get_le16u(&s->g) != 0x1234) return AVERROR_INVALIDDATA; s->width = bytestream2_get_le16u(&s->g); s->height = bytestream2_get_le16u(&s->g); bytestream2_skip(&s->g, 4); tmp = bytestream2_get_byteu(&s->g); bits_per_plane = tmp & 0xF; s->nb_planes = (tmp >> 4) + 1; bpp = bits_per_plane * s->nb_planes; if (bits_per_plane > 8 || bpp < 1 || bpp > 32) { avpriv_request_sample(avctx, "Unsupported bit depth"); return AVERROR_PATCHWELCOME; } if (bytestream2_peek_byte(&s->g) == 0xFF) { bytestream2_skip(&s->g, 2); etype = bytestream2_get_le16(&s->g); esize = bytestream2_get_le16(&s->g); if (bytestream2_get_bytes_left(&s->g) < esize) return AVERROR_INVALIDDATA; } else { etype = -1; esize = 0; } avctx->pix_fmt = AV_PIX_FMT_PAL8; if (s->width != avctx->width || s->height != avctx->height) { ret = ff_set_dimensions(avctx, s->width, s->height); if (ret < 0) return ret; } if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } memset(frame->data[0], 0, s->height * frame->linesize[0]); frame->pict_type = AV_PICTURE_TYPE_I; frame->palette_has_changed = 1; pos_after_pal = bytestream2_tell(&s->g) + esize; palette = (uint32_t*)frame->data[1]; if (etype == 1 && esize > 1 && bytestream2_peek_byte(&s->g) < 6) { int idx = bytestream2_get_byte(&s->g); npal = 4; for (i = 0; i < npal; i++) palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ]; } else if (etype == 2) { npal = FFMIN(esize, 16); for (i = 0; i < npal; i++) { int pal_idx = bytestream2_get_byte(&s->g); palette[i] = ff_cga_palette[FFMIN(pal_idx, 15)]; } } else if (etype == 3) { npal = FFMIN(esize, 16); for (i = 0; i < npal; i++) { int pal_idx = bytestream2_get_byte(&s->g); palette[i] = ff_ega_palette[FFMIN(pal_idx, 63)]; } } else if (etype == 4 || etype == 5) { npal = FFMIN(esize / 3, 256); for (i = 0; i < npal; i++) palette[i] = bytestream2_get_be24(&s->g) << 2; } else { if (bpp == 1) { npal = 2; palette[0] = 0x000000; palette[1] = 0xFFFFFF; } else if (bpp == 2) { npal = 4; for (i = 0; i < npal; i++) palette[i] = ff_cga_palette[ cga_mode45_index[0][i] ]; } else { npal = 16; memcpy(palette, ff_cga_palette, npal * 4); } } // fill remaining palette entries memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4); // skip remaining palette bytes bytestream2_seek(&s->g, pos_after_pal, SEEK_SET); x = 0; y = s->height - 1; plane = 0; if (bytestream2_get_le16(&s->g)) { while (bytestream2_get_bytes_left(&s->g) >= 6) { int stop_size, marker, t1, t2; t1 = bytestream2_get_bytes_left(&s->g); t2 = bytestream2_get_le16(&s->g); stop_size = t1 - FFMIN(t1, t2); // ignore uncompressed block size bytestream2_skip(&s->g, 2); marker = bytestream2_get_byte(&s->g); while (plane < s->nb_planes && bytestream2_get_bytes_left(&s->g) > stop_size) { int run = 1; int val = bytestream2_get_byte(&s->g); if (val == marker) { run = bytestream2_get_byte(&s->g); if (run == 0) run = bytestream2_get_le16(&s->g); val = bytestream2_get_byte(&s->g); } if (!bytestream2_get_bytes_left(&s->g)) break; if (bits_per_plane == 8) { picmemset_8bpp(s, frame, val, run, &x, &y); if (y < 0) goto finish; } else { picmemset(s, frame, val, run, &x, &y, &plane,
bits_per_plane); } } } } else { avpriv_request_sample(avctx, "Uncompressed image"); return avpkt->size; } finish: *got_frame = 1; return avpkt->size; }
static int init_image(TiffContext *s, ThreadFrame *frame) { int i, ret; uint32_t *pal; switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) { case 11: if (!s->palette_is_set) { s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK; break; } case 21: case 41: case 81: s->avctx->pix_fmt = AV_PIX_FMT_PAL8; break; case 243: if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) { if (s->subsampling[0] == 1 && s->subsampling[1] == 1) { s->avctx->pix_fmt = AV_PIX_FMT_YUV444P; } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) { s->avctx->pix_fmt = AV_PIX_FMT_YUV422P; } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) { s->avctx->pix_fmt = AV_PIX_FMT_YUV411P; } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) { s->avctx->pix_fmt = AV_PIX_FMT_YUV440P; } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) { s->avctx->pix_fmt = AV_PIX_FMT_YUV420P; } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) { s->avctx->pix_fmt = AV_PIX_FMT_YUV410P; } else { av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n"); return AVERROR_PATCHWELCOME; } } else s->avctx->pix_fmt = AV_PIX_FMT_RGB24; break; case 161: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE; break; case 162: s->avctx->pix_fmt = AV_PIX_FMT_GRAY8A; break; case 324: s->avctx->pix_fmt = AV_PIX_FMT_RGBA; break; case 483: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE; break; case 644: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE; break; case 1243: s->avctx->pix_fmt = AV_PIX_FMT_GBRP; break; case 1324: s->avctx->pix_fmt = AV_PIX_FMT_GBRAP; break; case 1483: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE; break; case 1644: s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE; break; default: av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, bppcount=%d)\n", s->bpp, s->bppcount); return AVERROR_INVALIDDATA; } if (s->width != s->avctx->width || s->height != s->avctx->height) { ret = ff_set_dimensions(s->avctx, s->width, s->height); if (ret < 0) return ret; } if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0) return ret; if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) { if (s->palette_is_set) { memcpy(frame->f->data[1], s->palette, sizeof(s->palette)); } else { /* make default grayscale pal */ pal = (uint32_t *) frame->f->data[1]; for (i = 0; i < 1<<s->bpp; i++) pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101; } } return 0; }
/** * Execute ANSI escape code * @return 0 on success, negative on error */ static int execute_code(AVCodecContext * avctx, int c) { AnsiContext *s = avctx->priv_data; int ret, i; int width = 0; int height = 0; switch(c) { case 'A': //Cursor Up s->y = FFMAX(s->y - (s->nb_args > 0 ? s->args[0]*s->font_height : s->font_height), 0); break; case 'B': //Cursor Down s->y = FFMIN(s->y + (s->nb_args > 0 ? s->args[0]*s->font_height : s->font_height), avctx->height - s->font_height); break; case 'C': //Cursor Right s->x = FFMIN(s->x + (s->nb_args > 0 ? s->args[0]*FONT_WIDTH : FONT_WIDTH), avctx->width - FONT_WIDTH); break; case 'D': //Cursor Left s->x = FFMAX(s->x - (s->nb_args > 0 ? s->args[0]*FONT_WIDTH : FONT_WIDTH), 0); break; case 'H': //Cursor Position case 'f': //Horizontal and Vertical Position s->y = s->nb_args > 0 ? av_clip((s->args[0] - 1)*s->font_height, 0, avctx->height - s->font_height) : 0; s->x = s->nb_args > 1 ? av_clip((s->args[1] - 1)*FONT_WIDTH, 0, avctx->width - FONT_WIDTH) : 0; break; case 'h': //set screen mode case 'l': //reset screen mode if (s->nb_args < 2) s->args[0] = DEFAULT_SCREEN_MODE; switch(s->args[0]) { case 0: case 1: case 4: case 5: case 13: case 19: //320x200 (25 rows) s->font = ff_cga_font; s->font_height = 8; width = 40<<3; height = 25<<3; break; case 2: case 3: //640x400 (25 rows) s->font = ff_vga16_font; s->font_height = 16; width = 80<<3; height = 25<<4; break; case 6: case 14: //640x200 (25 rows) s->font = ff_cga_font; s->font_height = 8; width = 80<<3; height = 25<<3; break; case 7: //set line wrapping break; case 15: case 16: //640x350 (43 rows) s->font = ff_cga_font; s->font_height = 8; width = 80<<3; height = 43<<3; break; case 17: case 18: //640x480 (60 rows) s->font = ff_cga_font; s->font_height = 8; width = 80<<3; height = 60<<3; break; default: avpriv_request_sample(avctx, "Unsupported screen mode"); } if (width != 0 && height != 0 && (width != avctx->width || height != avctx->height)) { av_frame_unref(s->frame); ret = ff_set_dimensions(avctx, width, height); if (ret < 0) return ret; ret = ff_get_buffer(avctx, s->frame, AV_GET_BUFFER_FLAG_REF); if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return ret; } s->frame->pict_type = AV_PICTURE_TYPE_I; s->frame->palette_has_changed = 1; memcpy(s->frame->data[1], ff_cga_palette, 16 * 4); erase_screen(avctx); } else if (c == 'l') { erase_screen(avctx); } break; case 'J': //Erase in Page switch (s->args[0]) { case 0: erase_line(avctx, s->x, avctx->width - s->x); if (s->y < avctx->height - s->font_height) memset(s->frame->data[0] + (s->y + s->font_height)*s->frame->linesize[0], DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame->linesize[0]); break; case 1: erase_line(avctx, 0, s->x); if (s->y > 0) memset(s->frame->data[0], DEFAULT_BG_COLOR, s->y * s->frame->linesize[0]); break; case 2: erase_screen(avctx); } break; case 'K': //Erase in Line switch(s->args[0]) { case 0: erase_line(avctx, s->x, avctx->width - s->x); break; case 1: erase_line(avctx, 0, s->x); break; case 2: erase_line(avctx, 0, avctx->width); } break; case 'm': //Select Graphics Rendition if (s->nb_args == 0) { s->nb_args = 1; s->args[0] = 0; } for (i = 0; i < FFMIN(s->nb_args, MAX_NB_ARGS); i++) { int m = s->args[i]; if (m == 0) { s->attributes = 0; s->fg = DEFAULT_FG_COLOR; s->bg = DEFAULT_BG_COLOR; } else if (m == 1 || m == 2 || m == 4 || m == 5 || m == 7 || m == 8) { s->attributes |= 1 << (m - 1); } else if (m >= 30 && m <= 38) { s->fg = ansi_to_cga[m - 30]; } else if (m == 39) { s->fg =
ansi_to_cga[DEFAULT_FG_COLOR]; } else if (m >= 40 && m <= 47) { s->bg = ansi_to_cga[m - 40]; } else if (m == 49) { s->bg = ansi_to_cga[DEFAULT_BG_COLOR]; } else { avpriv_request_sample(avctx, "Unsupported rendition parameter"); } } break; case 'n': //Device Status Report case 'R': //report current line and column /* ignore */ break; case 's': //Save Cursor Position s->sx = s->x; s->sy = s->y; break; case 'u': //Restore Cursor Position s->x = av_clip(s->sx, 0, avctx->width - FONT_WIDTH); s->y = av_clip(s->sy, 0, avctx->height - s->font_height); break; default: avpriv_request_sample(avctx, "Unknown escape code"); break; } return 0; }
static int rv20_decode_picture_header(RVDecContext *rv) { MpegEncContext *s = &rv->m; int seq, mb_pos, i, ret; int rpr_bits; i = get_bits(&s->gb, 2); switch (i) { case 0: s->pict_type = AV_PICTURE_TYPE_I; break; case 1: s->pict_type = AV_PICTURE_TYPE_I; break; // hmm ... case 2: s->pict_type = AV_PICTURE_TYPE_P; break; case 3: s->pict_type = AV_PICTURE_TYPE_B; break; default: av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n"); return AVERROR_INVALIDDATA; } if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B) { av_log(s->avctx, AV_LOG_ERROR, "early B-frame\n"); return AVERROR_INVALIDDATA; } if (get_bits1(&s->gb)) { av_log(s->avctx, AV_LOG_ERROR, "reserved bit set\n"); return AVERROR_INVALIDDATA; } s->qscale = get_bits(&s->gb, 5); if (s->qscale == 0) { av_log(s->avctx, AV_LOG_ERROR, "Invalid qscale value: 0\n"); return AVERROR_INVALIDDATA; } if (RV_GET_MINOR_VER(rv->sub_id) >= 2) s->loop_filter = get_bits1(&s->gb); if (RV_GET_MINOR_VER(rv->sub_id) <= 1) seq = get_bits(&s->gb, 8) << 7; else seq = get_bits(&s->gb, 13) << 2; rpr_bits = s->avctx->extradata[1] & 7; if (rpr_bits) { int f, new_w, new_h; rpr_bits = FFMIN((rpr_bits >> 1) + 1, 3); f = get_bits(&s->gb, rpr_bits); if (f) { if (s->avctx->extradata_size < 8 + 2 * f) { av_log(s->avctx, AV_LOG_ERROR, "Extradata too small.\n"); return AVERROR_INVALIDDATA; } new_w = 4 * ((uint8_t *) s->avctx->extradata)[6 + 2 * f]; new_h = 4 * ((uint8_t *) s->avctx->extradata)[7 + 2 * f]; } else { new_w = s->orig_width; new_h = s->orig_height; } if (new_w != s->width || new_h != s->height) { av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h); ff_MPV_common_end(s); ret = ff_set_dimensions(s->avctx, new_w, new_h); if (ret < 0) return ret; s->width = new_w; s->height = new_h; if ((ret = ff_MPV_common_init(s)) < 0) return ret; } if (s->avctx->debug & FF_DEBUG_PICT_INFO) { av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, rpr_bits); } } else if (av_image_check_size(s->width, s->height, 0, s->avctx) < 0)
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; C93DecoderContext * const c93 = avctx->priv_data; AVFrame * const newpic = c93->pictures[c93->currentpic]; AVFrame * const oldpic = c93->pictures[c93->currentpic^1]; GetByteContext gb; uint8_t *out; int stride, ret, i, x, y, b, bt = 0; if ((ret = ff_set_dimensions(avctx, WIDTH, HEIGHT)) < 0) return ret; c93->currentpic ^= 1; if ((ret = ff_reget_buffer(avctx, newpic)) < 0) return ret; stride = newpic->linesize[0]; bytestream2_init(&gb, buf, buf_size); b = bytestream2_get_byte(&gb); if (b & C93_FIRST_FRAME) { newpic->pict_type = AV_PICTURE_TYPE_I; newpic->key_frame = 1; } else { newpic->pict_type = AV_PICTURE_TYPE_P; newpic->key_frame = 0; } for (y = 0; y < HEIGHT; y += 8) { out = newpic->data[0] + y * stride; for (x = 0; x < WIDTH; x += 8) { uint8_t *copy_from = oldpic->data[0]; unsigned int offset, j; uint8_t cols[4], grps[4]; C93BlockType block_type; if (!bt) bt = bytestream2_get_byte(&gb); block_type= bt & 0x0F; switch (block_type) { case C93_8X8_FROM_PREV: offset = bytestream2_get_le16(&gb); if ((ret = copy_block(avctx, out, copy_from, offset, 8, stride)) < 0) return ret; break; case C93_4X4_FROM_CURR: copy_from = newpic->data[0]; case C93_4X4_FROM_PREV: for (j = 0; j < 8; j += 4) { for (i = 0; i < 8; i += 4) { int offset = bytestream2_get_le16(&gb); int from_x = offset % WIDTH; int from_y = offset / WIDTH; if (block_type == C93_4X4_FROM_CURR && from_y == y+j && (FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) { avpriv_request_sample(avctx, "block overlap %d %d %d %d\n", from_x, x+i, from_y, y+j); return AVERROR_INVALIDDATA; } if ((ret = copy_block(avctx, &out[j*stride+i], copy_from, offset, 4, stride)) < 0) return ret; } } break; case C93_8X8_2COLOR: bytestream2_get_buffer(&gb, cols, 2); for (i = 0; i < 8; i++) { draw_n_color(out + i*stride, stride, 8, 1, 1, cols, NULL, bytestream2_get_byte(&gb)); } break; case C93_4X4_2COLOR: case C93_4X4_4COLOR: case C93_4X4_4COLOR_GRP: for (j = 0; j < 8; j += 4) { for (i = 0; i < 8; i += 4) { if (block_type == C93_4X4_2COLOR) { bytestream2_get_buffer(&gb, cols, 2); draw_n_color(out + i + j*stride, stride, 4, 4, 1, cols, NULL, bytestream2_get_le16(&gb)); } else if (block_type == C93_4X4_4COLOR) { bytestream2_get_buffer(&gb, cols, 4); draw_n_color(out + i + j*stride, stride, 4, 4, 2, cols, NULL, bytestream2_get_le32(&gb)); } else { bytestream2_get_buffer(&gb, grps, 4); draw_n_color(out + i + j*stride, stride, 4, 4, 1, cols, grps, bytestream2_get_le16(&gb)); } } } break; case C93_NOOP: break; case C93_8X8_INTRA: for (j = 0; j < 8; j++) bytestream2_get_buffer(&gb, out + j*stride, 8); break; default: av_log(avctx, AV_LOG_ERROR, "unexpected type %x at %dx%d\n", block_type, x, y); return AVERROR_INVALIDDATA; } bt >>= 4; out += 8; } } if (b & C93_HAS_PALETTE) { uint32_t *palette = (uint32_t *) newpic->data[1]; for (i = 0; i < 256; i++) { palette[i] = 0xFFU << 24 | bytestream2_get_be24(&gb); } newpic->palette_has_changed = 1; } else { if (oldpic->data[1]) memcpy(newpic->data[1], oldpic->data[1], 256 * 4); } if ((ret = av_frame_ref(data, newpic)) < 0) return ret; *got_frame = 1; return buf_size; }
static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt) { int size = 0, ret; const uint8_t *data; uint32_t flags; data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size); if (!data) return 0; if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) { av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter " "changes, but PARAM_CHANGE side data was sent to it.\n"); ret = AVERROR(EINVAL); goto fail2; } if (size < 4) goto fail; flags = bytestream_get_le32(&data); size -= 4; if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) { if (size < 4) goto fail; avctx->channels = bytestream_get_le32(&data); size -= 4; } if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) { if (size < 8) goto fail; avctx->channel_layout = bytestream_get_le64(&data); size -= 8; } if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) { if (size < 4) goto fail; avctx->sample_rate = bytestream_get_le32(&data); size -= 4; } if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) { if (size < 8) goto fail; avctx->width = bytestream_get_le32(&data); avctx->height = bytestream_get_le32(&data); size -= 8; ret = ff_set_dimensions(avctx, avctx->width, avctx->height); if (ret < 0) goto fail2; } return 0; fail: av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n"); ret = AVERROR_INVALIDDATA; fail2: if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n"); if (avctx->err_recognition & AV_EF_EXPLODE) return ret; } return 0; }
static int ffmal_update_format(AVCodecContext *avctx) { MMALDecodeContext *ctx = avctx->priv_data; MMAL_STATUS_T status; int ret = 0; MMAL_COMPONENT_T *decoder = ctx->decoder; MMAL_ES_FORMAT_T *format_out = decoder->output[0]->format; MMAL_PARAMETER_VIDEO_INTERLACE_TYPE_T interlace_type; ffmmal_poolref_unref(ctx->pool_out); if (!(ctx->pool_out = av_mallocz(sizeof(*ctx->pool_out)))) { ret = AVERROR(ENOMEM); goto fail; } ctx->pool_out->refcount = 1; if (!format_out) goto fail; if ((status = mmal_port_parameter_set_uint32(decoder->output[0], MMAL_PARAMETER_EXTRA_BUFFERS, ctx->extra_buffers))) goto fail; if ((status = mmal_port_parameter_set_boolean(decoder->output[0], MMAL_PARAMETER_VIDEO_INTERPOLATE_TIMESTAMPS, 0))) goto fail; if (avctx->pix_fmt == AV_PIX_FMT_MMAL) { format_out->encoding = MMAL_ENCODING_OPAQUE; } else { format_out->encoding_variant = format_out->encoding = MMAL_ENCODING_I420; } if ((status = mmal_port_format_commit(decoder->output[0]))) goto fail; interlace_type.hdr.id = MMAL_PARAMETER_VIDEO_INTERLACE_TYPE; interlace_type.hdr.size = sizeof(MMAL_PARAMETER_VIDEO_INTERLACE_TYPE_T); status = mmal_port_parameter_get(decoder->output[0], &interlace_type.hdr); if (status != MMAL_SUCCESS) { av_log(avctx, AV_LOG_ERROR, "Cannot read MMAL interlace information!\n"); } else { ctx->interlaced_frame = (interlace_type.eMode != MMAL_InterlaceProgressive); ctx->top_field_first = (interlace_type.eMode == MMAL_InterlaceFieldsInterleavedUpperFirst); } if ((ret = ff_set_dimensions(avctx, format_out->es->video.crop.x + format_out->es->video.crop.width, format_out->es->video.crop.y + format_out->es->video.crop.height)) < 0) goto fail; if (format_out->es->video.par.num && format_out->es->video.par.den) { avctx->sample_aspect_ratio.num = format_out->es->video.par.num; avctx->sample_aspect_ratio.den = format_out->es->video.par.den; } if (format_out->es->video.frame_rate.num && format_out->es->video.frame_rate.den) { avctx->framerate.num = format_out->es->video.frame_rate.num; avctx->framerate.den = format_out->es->video.frame_rate.den; } avctx->colorspace = ffmmal_csp_to_av_csp(format_out->es->video.color_space); decoder->output[0]->buffer_size = FFMAX(decoder->output[0]->buffer_size_min, decoder->output[0]->buffer_size_recommended); decoder->output[0]->buffer_num = FFMAX(decoder->output[0]->buffer_num_min, decoder->output[0]->buffer_num_recommended) + ctx->extra_buffers; ctx->pool_out->pool = mmal_pool_create(decoder->output[0]->buffer_num, decoder->output[0]->buffer_size); if (!ctx->pool_out->pool) { ret = AVERROR(ENOMEM); goto fail; } return 0; fail: return ret < 0 ? ret : AVERROR_UNKNOWN; }
static int cdxl_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *pkt) { CDXLVideoContext *c = avctx->priv_data; AVFrame * const p = data; int ret, w, h, encoding, aligned_width, buf_size = pkt->size; const uint8_t *buf = pkt->data; if (buf_size < 32) return AVERROR_INVALIDDATA; encoding = buf[1] & 7; c->format = buf[1] & 0xE0; w = AV_RB16(&buf[14]); h = AV_RB16(&buf[16]); c->bpp = buf[19]; c->palette_size = AV_RB16(&buf[20]); c->palette = buf + 32; c->video = c->palette + c->palette_size; c->video_size = buf_size - c->palette_size - 32; if (c->palette_size > 512) return AVERROR_INVALIDDATA; if (buf_size < c->palette_size + 32) return AVERROR_INVALIDDATA; if (c->bpp < 1) return AVERROR_INVALIDDATA; if (c->format != BIT_PLANAR && c->format != BIT_LINE) { avpriv_request_sample(avctx, "Pixel format 0x%0x", c->format); return AVERROR_PATCHWELCOME; } if ((ret = ff_set_dimensions(avctx, w, h)) < 0) return ret; aligned_width = FFALIGN(c->avctx->width, 16); c->padded_bits = aligned_width - c->avctx->width; if (c->video_size < aligned_width * avctx->height * c->bpp / 8) return AVERROR_INVALIDDATA; if (!encoding && c->palette_size && c->bpp <= 8) { avctx->pix_fmt = AV_PIX_FMT_PAL8; } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) { if (c->palette_size != (1 << (c->bpp - 1))) return AVERROR_INVALIDDATA; avctx->pix_fmt = AV_PIX_FMT_BGR24; } else { avpriv_request_sample(avctx, "Encoding %d and bpp %d", encoding, c->bpp); return AVERROR_PATCHWELCOME; } if ((ret = ff_get_buffer(avctx, p, 0)) < 0) return ret; p->pict_type = AV_PICTURE_TYPE_I; if (encoding) { av_fast_padded_malloc(&c->new_video, &c->new_video_size, h * w + FF_INPUT_BUFFER_PADDING_SIZE); if (!c->new_video) return AVERROR(ENOMEM); if (c->bpp == 8) cdxl_decode_ham8(c, p); else cdxl_decode_ham6(c, p); } else { cdxl_decode_rgb(c, p); } *got_frame = 1; return buf_size; }
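Taken together, the snippets above share one pattern around ff_set_dimensions(): read or derive the frame size from the bitstream, update the codec context only when needed, then allocate the output frame and check every return value. Below is a minimal sketch of that pattern in the same style; the foo_ name and the 4-byte big-endian width/height header are hypothetical stand-ins, not taken from any of the decoders above, while ff_set_dimensions() and ff_get_buffer() are the real libavcodec helpers used throughout this section.

#include "avcodec.h"
#include "internal.h"
#include "libavutil/intreadwrite.h"

static int foo_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    AVFrame *frame = data;
    int w, h, ret;

    if (avpkt->size < 4)
        return AVERROR_INVALIDDATA;

    /* hypothetical header: 16-bit big-endian width and height */
    w = AV_RB16(avpkt->data);
    h = AV_RB16(avpkt->data + 2);

    /* only touch the context when the size actually changed */
    if (w != avctx->width || h != avctx->height) {
        ret = ff_set_dimensions(avctx, w, h);
        if (ret < 0)
            return ret;
    }

    /* allocate the output frame with the (possibly updated) dimensions */
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;

    /* ... decode pixel data into frame->data[] here ... */

    *got_frame = 1;
    return avpkt->size;
}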