static int gif_read_header1(GifState *s) { uint8_t sig[6]; int v, n; int has_global_palette; if (bytestream2_get_bytes_left(&s->gb) < 13) return AVERROR_INVALIDDATA; /* read gif signature */ bytestream2_get_buffer(&s->gb, sig, 6); if (memcmp(sig, gif87a_sig, 6) != 0 && memcmp(sig, gif89a_sig, 6) != 0) return AVERROR_INVALIDDATA; /* read screen header */ s->transparent_color_index = -1; s->screen_width = bytestream2_get_le16(&s->gb); s->screen_height = bytestream2_get_le16(&s->gb); if( (unsigned)s->screen_width > 32767 || (unsigned)s->screen_height > 32767) { av_log(NULL, AV_LOG_ERROR, "picture size too large\n"); return AVERROR_INVALIDDATA; } v = bytestream2_get_byte(&s->gb); s->color_resolution = ((v & 0x70) >> 4) + 1; has_global_palette = (v & 0x80); s->bits_per_pixel = (v & 0x07) + 1; s->background_color_index = bytestream2_get_byte(&s->gb); bytestream2_get_byte(&s->gb); /* ignored */ av_dlog(s->avctx, "gif: screen_w=%d screen_h=%d bpp=%d global_palette=%d\n", s->screen_width, s->screen_height, s->bits_per_pixel, has_global_palette); if (has_global_palette) { n = 1 << s->bits_per_pixel; if (bytestream2_get_bytes_left(&s->gb) < n * 3) return AVERROR_INVALIDDATA; bytestream2_get_buffer(&s->gb, s->global_palette, n * 3); } return 0; }
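/*
 * Illustrative sketch (not part of gifdec.c): how the packed fields of the GIF
 * logical screen descriptor parsed by gif_read_header1() above map onto the
 * byte layout. The struct and function names below are hypothetical.
 */
#include <stdint.h>

typedef struct GifScreenDesc {
    uint16_t width, height;
    int      has_global_palette;  /* bit 7 of the packed flags byte */
    int      color_resolution;    /* bits 4-6, stored minus one     */
    int      bits_per_pixel;      /* bits 0-2, stored minus one     */
    uint8_t  background_index;
} GifScreenDesc;

/* buf must point at the 7 bytes that follow the 6-byte GIF signature */
static void parse_screen_desc(const uint8_t buf[7], GifScreenDesc *d)
{
    uint8_t flags = buf[4];
    d->width              = buf[0] | buf[1] << 8;   /* little-endian 16 bit */
    d->height             = buf[2] | buf[3] << 8;
    d->has_global_palette = flags >> 7;
    d->color_resolution   = ((flags & 0x70) >> 4) + 1;
    d->bits_per_pixel     = (flags & 0x07) + 1;
    d->background_index   = buf[5];
    /* buf[6] is the pixel aspect ratio byte, which the decoder above ignores */
}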
/** * @param half_horiz Half horizontal resolution (0 or 1) * @param half_vert Half vertical resolution (0 or 1) */ static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert) { int data_off = bytestream2_get_le16(&s->gb); int y = 0; GetByteContext data_ptr; if (bytestream2_get_bytes_left(&s->gb) < data_off) return AVERROR_INVALIDDATA; bytestream2_init(&data_ptr, s->gb.buffer + data_off, bytestream2_get_bytes_left(&s->gb) - data_off); while (s->gb.buffer < data_ptr.buffer_start) { int i, j; int length = bytestream2_get_byte(&s->gb); int x = bytestream2_get_byte(&s->gb) + ((length & 0x80) << 1); length &= 0x7F; if (length==0) { y += x; continue; } if (y + half_vert >= s->avctx->height) return 0; for(i=0; i<length; i++) { int replace_array = bytestream2_get_byte(&s->gb); for(j=0; j<8; j++) { int replace = (replace_array >> (7-j)) & 1; if (x + half_horiz >= s->avctx->width) return AVERROR_INVALIDDATA; if (replace) { int color = bytestream2_get_byte(&data_ptr); s->frame->data[0][y*s->frame->linesize[0] + x] = color; if (half_horiz) s->frame->data[0][y*s->frame->linesize[0] + x + 1] = color; if (half_vert) { s->frame->data[0][(y+1)*s->frame->linesize[0] + x] = color; if (half_horiz) s->frame->data[0][(y+1)*s->frame->linesize[0] + x + 1] = color; } } x += 1 + half_horiz; } } y += 1 + half_vert; } return 0; }
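/*
 * Illustrative note on mm_decode_inter() above (not part of mmvideo.c): each
 * run starts with a length byte whose top bit extends the following x
 * coordinate by 256 ((length & 0x80) << 1), leaving a 7-bit mask count.  Each
 * mask byte is then consumed MSB first, and every set bit pulls one
 * replacement pixel from the secondary data stream.  The tiny helper below
 * (hypothetical name) just expands one mask byte into its eight flags.
 */
#include <stdint.h>

static void expand_replace_mask(uint8_t mask, int flags[8])
{
    int j;
    for (j = 0; j < 8; j++)
        flags[j] = (mask >> (7 - j)) & 1;   /* bit 7 describes the leftmost pixel */
}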
static int gif_read_extension(GifState *s) { int ext_code, ext_len, i, gce_flags, gce_transparent_index; /* extension */ ext_code = bytestream2_get_byte(&s->gb); ext_len = bytestream2_get_byte(&s->gb); av_dlog(s->avctx, "gif: ext_code=0x%x len=%d\n", ext_code, ext_len); switch(ext_code) { case 0xf9: if (ext_len != 4) goto discard_ext; s->transparent_color_index = -1; gce_flags = bytestream2_get_byte(&s->gb); s->gce_delay = bytestream2_get_le16(&s->gb); gce_transparent_index = bytestream2_get_byte(&s->gb); if (gce_flags & 0x01) s->transparent_color_index = gce_transparent_index; else s->transparent_color_index = -1; s->gce_disposal = (gce_flags >> 2) & 0x7; av_dlog(s->avctx, "gif: gce_flags=%x delay=%d tcolor=%d disposal=%d\n", gce_flags, s->gce_delay, s->transparent_color_index, s->gce_disposal); ext_len = bytestream2_get_byte(&s->gb); break; } /* NOTE: many extension blocks can come after */ discard_ext: while (ext_len != 0) { for (i = 0; i < ext_len; i++) bytestream2_get_byte(&s->gb); ext_len = bytestream2_get_byte(&s->gb); av_dlog(s->avctx, "gif: ext_len1=%d\n", ext_len); } return 0; }
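/*
 * Illustrative sketch (not part of gifdec.c): layout of the 4-byte Graphic
 * Control Extension payload handled in the 0xf9 case of gif_read_extension()
 * above.  Struct and function names are hypothetical.
 */
#include <stdint.h>

typedef struct GifGCE {
    int disposal;           /* (flags >> 2) & 7                    */
    int has_transparency;   /* flags & 1                           */
    int delay;              /* little-endian 16-bit, in 1/100 s    */
    int transparent_index;  /* only meaningful if has_transparency */
} GifGCE;

static void parse_gce(const uint8_t payload[4], GifGCE *g)
{
    uint8_t flags        = payload[0];
    g->disposal          = (flags >> 2) & 0x7;
    g->has_transparency  = flags & 0x01;
    g->delay             = payload[1] | payload[2] << 8;
    g->transparent_index = payload[3];
}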
static int bethsoftvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { BethsoftvidContext * vid = avctx->priv_data; char block_type; uint8_t * dst; uint8_t * frame_end; int remaining = avctx->width; // number of bytes remaining on a line int wrap_to_next_line; int code, ret; int yoffset; if ((ret = ff_reget_buffer(avctx, vid->frame)) < 0) return ret; wrap_to_next_line = vid->frame->linesize[0] - avctx->width; if (avpkt->side_data_elems > 0 && avpkt->side_data[0].type == AV_PKT_DATA_PALETTE) { bytestream2_init(&vid->g, avpkt->side_data[0].data, avpkt->side_data[0].size); if ((ret = set_palette(vid)) < 0) return ret; } bytestream2_init(&vid->g, avpkt->data, avpkt->size); dst = vid->frame->data[0]; frame_end = vid->frame->data[0] + vid->frame->linesize[0] * avctx->height; switch(block_type = bytestream2_get_byte(&vid->g)){ case PALETTE_BLOCK: { *got_frame = 0; if ((ret = set_palette(vid)) < 0) { av_log(avctx, AV_LOG_ERROR, "error reading palette\n"); return ret; } return bytestream2_tell(&vid->g); } case VIDEO_YOFF_P_FRAME: yoffset = bytestream2_get_le16(&vid->g); if(yoffset >= avctx->height) return AVERROR_INVALIDDATA; dst += vid->frame->linesize[0] * yoffset; } // main code while((code = bytestream2_get_byte(&vid->g))){ int length = code & 0x7f; // copy any bytes starting at the current position, and ending at the frame width while(length > remaining){ if(code < 0x80) bytestream2_get_buffer(&vid->g, dst, remaining); else if(block_type == VIDEO_I_FRAME) memset(dst, bytestream2_peek_byte(&vid->g), remaining); length -= remaining; // decrement the number of bytes to be copied dst += remaining + wrap_to_next_line; // skip over extra bytes at end of frame remaining = avctx->width; if(dst == frame_end) goto end; } // copy any remaining bytes after / if line overflows if(code < 0x80) bytestream2_get_buffer(&vid->g, dst, length); else if(block_type == VIDEO_I_FRAME) memset(dst, bytestream2_get_byte(&vid->g), length); remaining -= length; dst += length; } end: if ((ret = av_frame_ref(data, vid->frame)) < 0) return ret; *got_frame = 1; return avpkt->size; }
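/*
 * Illustrative note on bethsoftvid_decode_frame() above (not part of
 * bethsoftvideo.c): the code byte driving the main loop packs the run length
 * into the low 7 bits and uses the high bit to select between a literal copy
 * and a single-value fill; for non-I frames the fill case consumes nothing
 * and simply leaves the previous frame's pixels in place.  Names below are
 * hypothetical.
 */
#include <stdint.h>

typedef enum { VID_RUN_COPY, VID_RUN_FILL } VidRunKind;

static VidRunKind classify_vid_code(uint8_t code, int *length)
{
    *length = code & 0x7f;  /* run length in pixels; a whole code byte of 0 ends the frame */
    return (code & 0x80) ? VID_RUN_FILL : VID_RUN_COPY;
}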
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; C93DecoderContext * const c93 = avctx->priv_data; AVFrame * const newpic = &c93->pictures[c93->currentpic]; AVFrame * const oldpic = &c93->pictures[c93->currentpic^1]; AVFrame *picture = data; GetByteContext gb; uint8_t *out; int stride, i, x, y, b, bt = 0; c93->currentpic ^= 1; newpic->reference = 3; newpic->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE; if (avctx->reget_buffer(avctx, newpic)) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } stride = newpic->linesize[0]; bytestream2_init(&gb, buf, buf_size); b = bytestream2_get_byte(&gb); if (b & C93_FIRST_FRAME) { newpic->pict_type = AV_PICTURE_TYPE_I; newpic->key_frame = 1; } else { newpic->pict_type = AV_PICTURE_TYPE_P; newpic->key_frame = 0; } for (y = 0; y < HEIGHT; y += 8) { out = newpic->data[0] + y * stride; for (x = 0; x < WIDTH; x += 8) { uint8_t *copy_from = oldpic->data[0]; unsigned int offset, j; uint8_t cols[4], grps[4]; C93BlockType block_type; if (!bt) bt = bytestream2_get_byte(&gb); block_type= bt & 0x0F; switch (block_type) { case C93_8X8_FROM_PREV: offset = bytestream2_get_le16(&gb); if (copy_block(avctx, out, copy_from, offset, 8, stride)) return -1; break; case C93_4X4_FROM_CURR: copy_from = newpic->data[0]; case C93_4X4_FROM_PREV: for (j = 0; j < 8; j += 4) { for (i = 0; i < 8; i += 4) { offset = bytestream2_get_le16(&gb); if (copy_block(avctx, &out[j*stride+i], copy_from, offset, 4, stride)) return -1; } } break; case C93_8X8_2COLOR: bytestream2_get_buffer(&gb, cols, 2); for (i = 0; i < 8; i++) { draw_n_color(out + i*stride, stride, 8, 1, 1, cols, NULL, bytestream2_get_byte(&gb)); } break; case C93_4X4_2COLOR: case C93_4X4_4COLOR: case C93_4X4_4COLOR_GRP: for (j = 0; j < 8; j += 4) { for (i = 0; i < 8; i += 4) { if (block_type == C93_4X4_2COLOR) { bytestream2_get_buffer(&gb, cols, 2); draw_n_color(out + i + j*stride, stride, 4, 4, 1, cols, NULL, bytestream2_get_le16(&gb)); } else if (block_type == C93_4X4_4COLOR) { bytestream2_get_buffer(&gb, cols, 4); draw_n_color(out + i + j*stride, stride, 4, 4, 2, cols, NULL, bytestream2_get_le32(&gb)); } else { bytestream2_get_buffer(&gb, grps, 4); draw_n_color(out + i + j*stride, stride, 4, 4, 1, cols, grps, bytestream2_get_le16(&gb)); } } } break; case C93_NOOP: break; case C93_8X8_INTRA: for (j = 0; j < 8; j++) bytestream2_get_buffer(&gb, out + j*stride, 8); break; default: av_log(avctx, AV_LOG_ERROR, "unexpected type %x at %dx%d\n", block_type, x, y); return -1; } bt >>= 4; out += 8; } } if (b & C93_HAS_PALETTE) { uint32_t *palette = (uint32_t *) newpic->data[1]; for (i = 0; i < 256; i++) { palette[i] = 0xFFU << 24 | bytestream2_get_be24(&gb); } } else { if (oldpic->data[1]) memcpy(newpic->data[1], oldpic->data[1], 256 * 4); } *picture = *newpic; *data_size = sizeof(AVFrame); return buf_size; }
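/*
 * draw_n_color(), used by both c93 decode_frame() variants in this section,
 * is not reproduced here.  The simplified sketch below covers only the
 * grps == NULL cases (C93_8X8_2COLOR, C93_4X4_2COLOR, C93_4X4_4COLOR): the
 * packed value supplies bpp-bit colour indices, one per pixel in raster
 * order, taken from the low bits first in this sketch.  The bit order and
 * the grps[] handling of C93_4X4_4COLOR_GRP are assumptions to verify
 * against the real helper.
 */
#include <stdint.h>

static void draw_n_color_sketch(uint8_t *out, int stride, int width, int height,
                                int bpp, const uint8_t cols[4], uint32_t bits)
{
    int x, y;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            out[y * stride + x] = cols[bits & ((1 << bpp) - 1)];
            bits >>= bpp;
        }
    }
}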
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; C93DecoderContext * const c93 = avctx->priv_data; AVFrame * const newpic = c93->pictures[c93->currentpic]; AVFrame * const oldpic = c93->pictures[c93->currentpic^1]; GetByteContext gb; uint8_t *out; int stride, ret, i, x, y, b, bt = 0; if ((ret = ff_set_dimensions(avctx, WIDTH, HEIGHT)) < 0) return ret; c93->currentpic ^= 1; if ((ret = ff_reget_buffer(avctx, newpic)) < 0) return ret; stride = newpic->linesize[0]; bytestream2_init(&gb, buf, buf_size); b = bytestream2_get_byte(&gb); if (b & C93_FIRST_FRAME) { newpic->pict_type = AV_PICTURE_TYPE_I; newpic->key_frame = 1; } else { newpic->pict_type = AV_PICTURE_TYPE_P; newpic->key_frame = 0; } for (y = 0; y < HEIGHT; y += 8) { out = newpic->data[0] + y * stride; for (x = 0; x < WIDTH; x += 8) { uint8_t *copy_from = oldpic->data[0]; unsigned int offset, j; uint8_t cols[4], grps[4]; C93BlockType block_type; if (!bt) bt = bytestream2_get_byte(&gb); block_type= bt & 0x0F; switch (block_type) { case C93_8X8_FROM_PREV: offset = bytestream2_get_le16(&gb); if ((ret = copy_block(avctx, out, copy_from, offset, 8, stride)) < 0) return ret; break; case C93_4X4_FROM_CURR: copy_from = newpic->data[0]; case C93_4X4_FROM_PREV: for (j = 0; j < 8; j += 4) { for (i = 0; i < 8; i += 4) { int offset = bytestream2_get_le16(&gb); int from_x = offset % WIDTH; int from_y = offset / WIDTH; if (block_type == C93_4X4_FROM_CURR && from_y == y+j && (FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) { avpriv_request_sample(avctx, "block overlap %d %d %d %d\n", from_x, x+i, from_y, y+j); return AVERROR_INVALIDDATA; } if ((ret = copy_block(avctx, &out[j*stride+i], copy_from, offset, 4, stride)) < 0) return ret; } } break; case C93_8X8_2COLOR: bytestream2_get_buffer(&gb, cols, 2); for (i = 0; i < 8; i++) { draw_n_color(out + i*stride, stride, 8, 1, 1, cols, NULL, bytestream2_get_byte(&gb)); } break; case C93_4X4_2COLOR: case C93_4X4_4COLOR: case C93_4X4_4COLOR_GRP: for (j = 0; j < 8; j += 4) { for (i = 0; i < 8; i += 4) { if (block_type == C93_4X4_2COLOR) { bytestream2_get_buffer(&gb, cols, 2); draw_n_color(out + i + j*stride, stride, 4, 4, 1, cols, NULL, bytestream2_get_le16(&gb)); } else if (block_type == C93_4X4_4COLOR) { bytestream2_get_buffer(&gb, cols, 4); draw_n_color(out + i + j*stride, stride, 4, 4, 2, cols, NULL, bytestream2_get_le32(&gb)); } else { bytestream2_get_buffer(&gb, grps, 4); draw_n_color(out + i + j*stride, stride, 4, 4, 1, cols, grps, bytestream2_get_le16(&gb)); } } } break; case C93_NOOP: break; case C93_8X8_INTRA: for (j = 0; j < 8; j++) bytestream2_get_buffer(&gb, out + j*stride, 8); break; default: av_log(avctx, AV_LOG_ERROR, "unexpected type %x at %dx%d\n", block_type, x, y); return AVERROR_INVALIDDATA; } bt >>= 4; out += 8; } } if (b & C93_HAS_PALETTE) { uint32_t *palette = (uint32_t *) newpic->data[1]; for (i = 0; i < 256; i++) { palette[i] = 0xFFU << 24 | bytestream2_get_be24(&gb); } newpic->palette_has_changed = 1; } else { if (oldpic->data[1]) memcpy(newpic->data[1], oldpic->data[1], 256 * 4); } if ((ret = av_frame_ref(data, newpic)) < 0) return ret; *got_frame = 1; return buf_size; }
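/*
 * copy_block(), referenced by both c93 decode_frame() variants above, is also
 * not part of this section.  A plausible sketch is given below, assuming the
 * 16-bit offset encodes a source position as from_y * WIDTH + from_x (which
 * is what the overlap check in the newer decode_frame() suggests) and that a
 * NULL source plane in the first frame is silently ignored.  Unlike the real
 * helper, which may wrap blocks that cross the right edge, this sketch simply
 * rejects them.
 */
static int copy_block_sketch(AVCodecContext *avctx, uint8_t *to,
                             const uint8_t *from, int offset, int size, int stride)
{
    int i;
    int from_x = offset % WIDTH;
    int from_y = offset / WIDTH;

    if (!from)  /* predictive block in the first frame: nothing to copy */
        return 0;
    if (from_x + size > WIDTH || from_y + size > HEIGHT) {
        av_log(avctx, AV_LOG_ERROR, "invalid offset %d\n", offset);
        return AVERROR_INVALIDDATA;
    }
    for (i = 0; i < size; i++)
        memcpy(to + i * stride, from + (from_y + i) * stride + from_x, size);
    return 0;
}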
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { AnmContext *s = avctx->priv_data; const int buf_size = avpkt->size; uint8_t *dst, *dst_end; int count; if(avctx->reget_buffer(avctx, &s->frame) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } dst = s->frame.data[0]; dst_end = s->frame.data[0] + s->frame.linesize[0]*avctx->height; bytestream2_init(&s->gb, avpkt->data, buf_size); if (bytestream2_get_byte(&s->gb) != 0x42) { av_log_ask_for_sample(avctx, "unknown record type\n"); return buf_size; } if (bytestream2_get_byte(&s->gb)) { av_log_ask_for_sample(avctx, "padding bytes not supported\n"); return buf_size; } bytestream2_skip(&s->gb, 2); s->x = 0; do { /* if statements are ordered by probability */ #define OP(gb, pixel, count) \ op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame.linesize[0]) int type = bytestream2_get_byte(&s->gb); count = type & 0x7F; type >>= 7; if (count) { if (OP(type ? NULL : &s->gb, -1, count)) break; } else if (!type) { int pixel; count = bytestream2_get_byte(&s->gb); /* count==0 gives nop */ pixel = bytestream2_get_byte(&s->gb); if (OP(NULL, pixel, count)) break; } else { int pixel; type = bytestream2_get_le16(&s->gb); count = type & 0x3FFF; type >>= 14; if (!count) { if (type == 0) break; // stop if (type == 2) { av_log_ask_for_sample(avctx, "unknown opcode"); return AVERROR_INVALIDDATA; } continue; } pixel = type == 3 ? bytestream2_get_byte(&s->gb) : -1; if (type == 1) count += 0x4000; if (OP(type == 2 ? &s->gb : NULL, pixel, count)) break; } } while (bytestream2_get_bytes_left(&s->gb) > 0); memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; }
unsigned ff_tget_short(GetByteContext *gb, int le) { unsigned v = le ? bytestream2_get_le16(gb) : bytestream2_get_be16(gb); return v; }
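/*
 * For context, a companion 32-bit reader in the same style would look like
 * the sketch below; tiff_common.c provides such a helper (ff_tget_long), but
 * treat this definition as a reconstruction rather than a verbatim copy.
 */
unsigned ff_tget_long(GetByteContext *gb, int le)
{
    unsigned v = le ? bytestream2_get_le32(gb) : bytestream2_get_be32(gb);
    return v;
}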
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { PicContext *s = avctx->priv_data; uint32_t *palette; int bits_per_plane, bpp, etype, esize, npal, pos_after_pal; int i, x, y, plane, tmp; bytestream2_init(&s->g, avpkt->data, avpkt->size); if (bytestream2_get_bytes_left(&s->g) < 11) return AVERROR_INVALIDDATA; if (bytestream2_get_le16u(&s->g) != 0x1234) return AVERROR_INVALIDDATA; s->width = bytestream2_get_le16u(&s->g); s->height = bytestream2_get_le16u(&s->g); bytestream2_skip(&s->g, 4); tmp = bytestream2_get_byteu(&s->g); bits_per_plane = tmp & 0xF; s->nb_planes = (tmp >> 4) + 1; bpp = bits_per_plane * s->nb_planes; if (bits_per_plane > 8 || bpp < 1 || bpp > 32) { av_log_ask_for_sample(avctx, "unsupported bit depth\n"); return AVERROR_INVALIDDATA; } if (bytestream2_peek_byte(&s->g) == 0xFF) { bytestream2_skip(&s->g, 2); etype = bytestream2_get_le16(&s->g); esize = bytestream2_get_le16(&s->g); if (bytestream2_get_bytes_left(&s->g) < esize) return AVERROR_INVALIDDATA; } else { etype = -1; esize = 0; } avctx->pix_fmt = PIX_FMT_PAL8; if (s->width != avctx->width && s->height != avctx->height) { if (av_image_check_size(s->width, s->height, 0, avctx) < 0) return -1; avcodec_set_dimensions(avctx, s->width, s->height); if (s->frame.data[0]) avctx->release_buffer(avctx, &s->frame); } if (avctx->get_buffer(avctx, &s->frame) < 0){ av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); return -1; } memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]); s->frame.pict_type = AV_PICTURE_TYPE_I; s->frame.palette_has_changed = 1; pos_after_pal = bytestream2_tell(&s->g) + esize; palette = (uint32_t*)s->frame.data[1]; if (etype == 1 && esize > 1 && bytestream2_peek_byte(&s->g) < 6) { int idx = bytestream2_get_byte(&s->g); npal = 4; for (i = 0; i < npal; i++) palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ]; } else if (etype == 2) { npal = FFMIN(esize, 16); for (i = 0; i < npal; i++) { int pal_idx = bytestream2_get_byte(&s->g); palette[i] = ff_cga_palette[FFMIN(pal_idx, 15)]; } } else if (etype == 3) { npal = FFMIN(esize, 16); for (i = 0; i < npal; i++) { int pal_idx = bytestream2_get_byte(&s->g); palette[i] = ff_ega_palette[FFMIN(pal_idx, 63)]; } } else if (etype == 4 || etype == 5) { npal = FFMIN(esize / 3, 256); for (i = 0; i < npal; i++) palette[i] = bytestream2_get_be24(&s->g) << 2; } else { if (bpp == 1) { npal = 2; palette[0] = 0x000000; palette[1] = 0xFFFFFF; } else if (bpp == 2) { npal = 4; for (i = 0; i < npal; i++) palette[i] = ff_cga_palette[ cga_mode45_index[0][i] ]; } else { npal = 16; memcpy(palette, ff_cga_palette, npal * 4); } } /* fill remaining palette entries */ memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4); /* skip remaining palette bytes */ bytestream2_seek(&s->g, pos_after_pal, SEEK_SET); x = 0; y = s->height - 1; plane = 0; if (bytestream2_get_le16(&s->g)) { while (bytestream2_get_bytes_left(&s->g) >= 6) { int stop_size, marker, t1, t2; t1 = bytestream2_get_bytes_left(&s->g); t2 = bytestream2_get_le16(&s->g); stop_size = t1 - FFMIN(t1, t2); /* ignore uncompressed block size */ bytestream2_skip(&s->g, 2); marker = bytestream2_get_byte(&s->g); while (plane < s->nb_planes && bytestream2_get_bytes_left(&s->g) > stop_size) { int run = 1; int val = bytestream2_get_byte(&s->g); if (val == marker) { run = bytestream2_get_byte(&s->g); if (run == 0) run = bytestream2_get_le16(&s->g); val = bytestream2_get_byte(&s->g); } if (!bytestream2_get_bytes_left(&s->g)) break; if (bits_per_plane == 8) { picmemset_8bpp(s, val, run, &x, &y); if (y < 0)
break; } else { picmemset(s, val, run, &x, &y, &plane, bits_per_plane); } } } } else { av_log_ask_for_sample(avctx, "uncompressed image\n"); return avpkt->size; } *data_size = sizeof(AVFrame); *(AVFrame*)data = s->frame; return avpkt->size; }
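/*
 * picmemset_8bpp(), called by the PIC decode_frame() above, is not included
 * in this section.  The sketch below shows the assumed behaviour: fill `run`
 * pixels with `value`, advancing x left to right and decrementing y whenever
 * a row fills (the caller starts at y = height - 1 and stops once y goes
 * negative).  Treat the exact bookkeeping as an assumption.
 */
static void picmemset_8bpp_sketch(PicContext *s, int value, int run, int *x, int *y)
{
    while (run > 0 && *y >= 0) {
        uint8_t *d = s->frame.data[0] + *y * s->frame.linesize[0];
        int n = FFMIN(run, s->width - *x);
        memset(d + *x, value, n);
        run -= n;
        *x  += n;
        if (*x >= s->width) {  /* row full: restart at x = 0 on the previous row index */
            *x = 0;
            (*y)--;
        }
    }
}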
static int dxv_decompress_dxt5(AVCodecContext *avctx) { DXVContext *ctx = avctx->priv_data; GetByteContext *gbc = &ctx->gbc; uint32_t value, op; int idx, prev, state = 0; int pos = 4; int run = 0; int probe, check; /* Copy the first four elements */ AV_WL32(ctx->tex_data + 0, bytestream2_get_le32(gbc)); AV_WL32(ctx->tex_data + 4, bytestream2_get_le32(gbc)); AV_WL32(ctx->tex_data + 8, bytestream2_get_le32(gbc)); AV_WL32(ctx->tex_data + 12, bytestream2_get_le32(gbc)); /* Process input until the whole texture has been filled */ while (pos < ctx->tex_size / 4) { if (run) { run--; prev = AV_RL32(ctx->tex_data + 4 * (pos - 4)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; prev = AV_RL32(ctx->tex_data + 4 * (pos - 4)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; } else { if (state == 0) { value = bytestream2_get_le32(gbc); state = 16; } op = value & 0x3; value >>= 2; state--; switch (op) { case 0: /* Long copy */ check = bytestream2_get_byte(gbc) + 1; if (check == 256) { do { probe = bytestream2_get_le16(gbc); check += probe; } while (probe == 0xFFFF); } while (check && pos < ctx->tex_size / 4) { prev = AV_RL32(ctx->tex_data + 4 * (pos - 4)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; prev = AV_RL32(ctx->tex_data + 4 * (pos - 4)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; prev = AV_RL32(ctx->tex_data + 4 * (pos - 4)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; prev = AV_RL32(ctx->tex_data + 4 * (pos - 4)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; check--; } /* Restart (or exit) the loop */ continue; break; case 1: /* Load new run value */ run = bytestream2_get_byte(gbc); if (run == 255) { do { probe = bytestream2_get_le16(gbc); run += probe; } while (probe == 0xFFFF); } /* Copy two dwords from previous data */ prev = AV_RL32(ctx->tex_data + 4 * (pos - 4)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; prev = AV_RL32(ctx->tex_data + 4 * (pos - 4)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; break; case 2: /* Copy two dwords from a previous index */ idx = 8 + bytestream2_get_le16(gbc); prev = AV_RL32(ctx->tex_data + 4 * (pos - idx)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; prev = AV_RL32(ctx->tex_data + 4 * (pos - idx)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; break; case 3: /* Copy two dwords from input */ prev = bytestream2_get_le32(gbc); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; prev = bytestream2_get_le32(gbc); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; break; } } CHECKPOINT(4); /* Copy two elements from a previous offset or from the input buffer */ if (op) { prev = AV_RL32(ctx->tex_data + 4 * (pos - idx)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; prev = AV_RL32(ctx->tex_data + 4 * (pos - idx)); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; } else { CHECKPOINT(4); if (op) prev = AV_RL32(ctx->tex_data + 4 * (pos - idx)); else prev = bytestream2_get_le32(gbc); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; CHECKPOINT(4); if (op) prev = AV_RL32(ctx->tex_data + 4 * (pos - idx)); else prev = bytestream2_get_le32(gbc); AV_WL32(ctx->tex_data + 4 * pos, prev); pos++; } } return 0; }
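/*
 * CHECKPOINT(), used by dxv_decompress_dxt5() above, is defined elsewhere in
 * dxv.c.  As far as this sketch goes, it pulls the next 2-bit opcode from the
 * shared value/state bit reservoir and, for the back-reference opcodes,
 * derives the element distance `idx` in units of `x` dwords; the constants
 * below are recollections and should be verified against the real macro.
 */
#define CHECKPOINT_SKETCH(x)                                              \
    do {                                                                  \
        if (state == 0) {                                                 \
            value = bytestream2_get_le32(gbc);                            \
            state = 16;                                                   \
        }                                                                 \
        op     = value & 0x3;                                             \
        value >>= 2;                                                      \
        state--;                                                          \
        switch (op) {                                                     \
        case 1: /* short back-reference  */                               \
            idx = (x);                                                    \
            break;                                                        \
        case 2: /* medium back-reference */                               \
            idx = (bytestream2_get_byte(gbc) + 2) * (x);                  \
            break;                                                        \
        case 3: /* long back-reference   */                               \
            idx = (bytestream2_get_le16(gbc) + 0x102) * (x);              \
            break;                                                        \
        }                                                                 \
    } while (0)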
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { AnmContext *s = avctx->priv_data; const int buf_size = avpkt->size; uint8_t *dst, *dst_end; int count, ret; if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; dst = s->frame->data[0]; dst_end = s->frame->data[0] + s->frame->linesize[0]*avctx->height; bytestream2_init(&s->gb, avpkt->data, buf_size); if (bytestream2_get_byte(&s->gb) != 0x42) { avpriv_request_sample(avctx, "Unknown record type"); return AVERROR_INVALIDDATA; } if (bytestream2_get_byte(&s->gb)) { avpriv_request_sample(avctx, "Padding bytes"); return AVERROR_PATCHWELCOME; } bytestream2_skip(&s->gb, 2); s->x = 0; do { /* if statements are ordered by probability */ #define OP(gb, pixel, count) \ op(&dst, dst_end, (gb), (pixel), (count), &s->x, avctx->width, s->frame->linesize[0]) int type = bytestream2_get_byte(&s->gb); count = type & 0x7F; type >>= 7; if (count) { if (OP(type ? NULL : &s->gb, -1, count)) break; } else if (!type) { int pixel; count = bytestream2_get_byte(&s->gb); /* count==0 gives nop */ pixel = bytestream2_get_byte(&s->gb); if (OP(NULL, pixel, count)) break; } else { int pixel; type = bytestream2_get_le16(&s->gb); count = type & 0x3FFF; type >>= 14; if (!count) { if (type == 0) break; // stop if (type == 2) { avpriv_request_sample(avctx, "Unknown opcode"); return AVERROR_PATCHWELCOME; } continue; } pixel = type == 3 ? bytestream2_get_byte(&s->gb) : -1; if (type == 1) count += 0x4000; if (OP(type == 2 ? &s->gb : NULL, pixel, count)) break; } } while (bytestream2_get_bytes_left(&s->gb) > 0); memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE); *got_frame = 1; if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; return buf_size; }
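/*
 * The OP() macro in both ANM decode_frame() variants above wraps a helper
 * op() that is not reproduced in this section.  The sketch below captures its
 * assumed contract: copy `count` bytes from `gb` when gb is non-NULL, fill
 * with `pixel` when gb is NULL and pixel >= 0, otherwise just skip; in every
 * case the write position wraps at the picture width and a non-zero return
 * signals that the destination (or input) is exhausted.  Treat it as an
 * approximation of anm.c, not a verbatim copy.
 */
static inline int op_sketch(uint8_t **dst, const uint8_t *dst_end,
                            GetByteContext *gb, int pixel, int count,
                            int *x, int width, int linesize)
{
    int remaining = width - *x;

    while (count > 0) {
        int striplen = FFMIN(count, remaining);
        if (gb) {
            if (bytestream2_get_bytes_left(gb) < striplen)
                goto exhausted;
            bytestream2_get_buffer(gb, *dst, striplen);
        } else if (pixel >= 0) {
            memset(*dst, pixel, striplen);
        }
        *dst      += striplen;
        remaining -= striplen;
        count     -= striplen;
        if (remaining <= 0) {           /* wrap to the next picture line */
            *dst     += linesize - width;
            remaining = width;
        }
        if (*dst >= dst_end)
            goto exhausted;
    }
    *x = width - remaining;
    return 0;

exhausted:
    *x = width - remaining;
    return 1;
}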
static int rscc_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { RsccContext *ctx = avctx->priv_data; GetByteContext *gbc = &ctx->gbc; GetByteContext tiles_gbc; AVFrame *frame = data; const uint8_t *pixels, *raw; uint8_t *inflated_tiles = NULL; int tiles_nb, packed_size, pixel_size = 0; int i, ret = 0; bytestream2_init(gbc, avpkt->data, avpkt->size); /* Size check */ if (bytestream2_get_bytes_left(gbc) < 12) { av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size); return AVERROR_INVALIDDATA; } /* Read number of tiles, and allocate the array */ tiles_nb = bytestream2_get_le16(gbc); av_fast_malloc(&ctx->tiles, &ctx->tiles_size, tiles_nb * sizeof(*ctx->tiles)); if (!ctx->tiles) { ret = AVERROR(ENOMEM); goto end; } av_log(avctx, AV_LOG_DEBUG, "Frame with %d tiles.\n", tiles_nb); /* When there are more than 5 tiles, they are packed together with * a size header. When that size does not match the number of tiles * times the tile size, it means it needs to be inflated as well */ if (tiles_nb > 5) { uLongf packed_tiles_size; if (tiles_nb < 32) packed_tiles_size = bytestream2_get_byte(gbc); else packed_tiles_size = bytestream2_get_le16(gbc); ff_dlog(avctx, "packed tiles of size %lu.\n", packed_tiles_size); /* If necessary, uncompress tiles, and hijack the bytestream reader */ if (packed_tiles_size != tiles_nb * TILE_SIZE) { uLongf length = tiles_nb * TILE_SIZE; inflated_tiles = av_malloc(length); if (!inflated_tiles) { ret = AVERROR(ENOMEM); goto end; } ret = uncompress(inflated_tiles, &length, gbc->buffer, packed_tiles_size); if (ret) { av_log(avctx, AV_LOG_ERROR, "Tile deflate error %d.\n", ret); ret = AVERROR_UNKNOWN; goto end; } /* Skip the compressed tile section in the main byte reader, * and point it to read the newly uncompressed data */ bytestream2_skip(gbc, packed_tiles_size); bytestream2_init(&tiles_gbc, inflated_tiles, length); gbc = &tiles_gbc; } } /* Fill in array of tiles, keeping track of how many pixels are updated */ for (i = 0; i < tiles_nb; i++) { ctx->tiles[i].x = bytestream2_get_le16(gbc); ctx->tiles[i].w = bytestream2_get_le16(gbc); ctx->tiles[i].y = bytestream2_get_le16(gbc); ctx->tiles[i].h = bytestream2_get_le16(gbc); pixel_size += ctx->tiles[i].w * ctx->tiles[i].h * ctx->component_size; ff_dlog(avctx, "tile %d orig(%d,%d) %dx%d.\n", i, ctx->tiles[i].x, ctx->tiles[i].y, ctx->tiles[i].w, ctx->tiles[i].h); if (ctx->tiles[i].w == 0 || ctx->tiles[i].h == 0) { av_log(avctx, AV_LOG_ERROR, "invalid tile %d at (%d.%d) with size %dx%d.\n", i, ctx->tiles[i].x, ctx->tiles[i].y, ctx->tiles[i].w, ctx->tiles[i].h); ret = AVERROR_INVALIDDATA; goto end; } else if (ctx->tiles[i].x + ctx->tiles[i].w > avctx->width || ctx->tiles[i].y + ctx->tiles[i].h > avctx->height) { av_log(avctx, AV_LOG_ERROR, "out of bounds tile %d at (%d.%d) with size %dx%d.\n", i, ctx->tiles[i].x, ctx->tiles[i].y, ctx->tiles[i].w, ctx->tiles[i].h); ret = AVERROR_INVALIDDATA; goto end; } } /* Reset the reader in case it had been modified before */ gbc = &ctx->gbc; /* Extract how much pixel data the tiles contain */ if (pixel_size < 0x100) packed_size = bytestream2_get_byte(gbc); else if (pixel_size < 0x10000) packed_size = bytestream2_get_le16(gbc); else if (pixel_size < 0x1000000) packed_size = bytestream2_get_le24(gbc); else packed_size = bytestream2_get_le32(gbc); ff_dlog(avctx, "pixel_size %d packed_size %d.\n", pixel_size, packed_size); if (packed_size < 0) { av_log(avctx, AV_LOG_ERROR, "Invalid tile size %d\n", packed_size); ret = AVERROR_INVALIDDATA; goto end; } /* 
Get pixels buffer, it may be deflated or just raw */ if (pixel_size == packed_size) { if (bytestream2_get_bytes_left(gbc) < pixel_size) { av_log(avctx, AV_LOG_ERROR, "Insufficient input for %d\n", pixel_size); ret = AVERROR_INVALIDDATA; goto end; } pixels = gbc->buffer; } else { uLongf len = ctx->inflated_size; if (bytestream2_get_bytes_left(gbc) < packed_size) { av_log(avctx, AV_LOG_ERROR, "Insufficient input for %d\n", packed_size); ret = AVERROR_INVALIDDATA; goto end; } ret = uncompress(ctx->inflated_buf, &len, gbc->buffer, packed_size); if (ret) { av_log(avctx, AV_LOG_ERROR, "Pixel deflate error %d.\n", ret); ret = AVERROR_UNKNOWN; goto end; } pixels = ctx->inflated_buf; } /* Allocate when needed */ ret = ff_reget_buffer(avctx, ctx->reference); if (ret < 0) goto end; /* Pointer to actual pixels, will be updated when data is consumed */ raw = pixels; for (i = 0; i < tiles_nb; i++) { uint8_t *dst = ctx->reference->data[0] + ctx->reference->linesize[0] * (avctx->height - ctx->tiles[i].y - 1) + ctx->tiles[i].x * ctx->component_size; av_image_copy_plane(dst, -1 * ctx->reference->linesize[0], raw, ctx->tiles[i].w * ctx->component_size, ctx->tiles[i].w * ctx->component_size, ctx->tiles[i].h); raw += ctx->tiles[i].w * ctx->component_size * ctx->tiles[i].h; } /* Frame is ready to be output */ ret = av_frame_ref(frame, ctx->reference); if (ret < 0) goto end; /* Keyframe when the number of pixels updated matches the whole surface */ if (pixel_size == ctx->inflated_size) { frame->pict_type = AV_PICTURE_TYPE_I; frame->key_frame = 1; } else { frame->pict_type = AV_PICTURE_TYPE_P; } *got_frame = 1; end: av_free(inflated_tiles); return ret; }
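/*
 * Illustrative sketch (not part of rsccdec.c): each tile header consumed by
 * rscc_decode_frame() above is four little-endian 16-bit values in the order
 * x, w, y, h, i.e. 8 bytes per tile, which is presumably what the TILE_SIZE
 * constant in the packed-tiles check stands for.  The standalone parser below
 * mirrors that layout; the struct name is hypothetical.
 */
#include <stdint.h>

typedef struct RsccTile { int x, y, w, h; } RsccTile;

static void parse_rscc_tile(const uint8_t hdr[8], RsccTile *t)
{
    t->x = hdr[0] | hdr[1] << 8;
    t->w = hdr[2] | hdr[3] << 8;
    t->y = hdr[4] | hdr[5] << 8;
    t->h = hdr[6] | hdr[7] << 8;
}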
static int flic_decode_frame_24BPP(AVCodecContext *avctx, void *data, int *got_frame, const uint8_t *buf, int buf_size) { FlicDecodeContext *s = avctx->priv_data; GetByteContext g2; int pixel_ptr; unsigned char palette_idx1; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j, ret; int lines; int compressed_lines; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; int pixel; unsigned int pixel_limit; bytestream2_init(&g2, buf, buf_size); if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; pixels = s->frame->data[0]; pixel_limit = s->avctx->height * s->frame->linesize[0]; frame_size = bytestream2_get_le32(&g2); bytestream2_skip(&g2, 2); /* skip the magic number */ num_chunks = bytestream2_get_le16(&g2); bytestream2_skip(&g2, 8); /* skip padding */ if (frame_size > buf_size) frame_size = buf_size; if (frame_size < 16) return AVERROR_INVALIDDATA; frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0) && bytestream2_get_bytes_left(&g2) >= 4) { int stream_ptr_after_chunk; chunk_size = bytestream2_get_le32(&g2); if (chunk_size > frame_size) { av_log(avctx, AV_LOG_WARNING, "Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size); chunk_size = frame_size; } stream_ptr_after_chunk = bytestream2_tell(&g2) - 4 + chunk_size; chunk_type = bytestream2_get_le16(&g2); switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: /* For some reason, it seems that non-palettized flics do * include one of these chunks in their first frame. * Why I do not know, it seems rather extraneous. */ ff_dlog(avctx, "Unexpected Palette chunk %d in non-palettized FLC\n", chunk_type); bytestream2_skip(&g2, chunk_size - 6); break; case FLI_DELTA: case FLI_DTA_LC: y_ptr = 0; compressed_lines = bytestream2_get_le16(&g2); while (compressed_lines > 0) { if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk) break; if (y_ptr > pixel_limit) return AVERROR_INVALIDDATA; line_packets = bytestream2_get_le16(&g2); if (line_packets < 0) { line_packets = -line_packets; if (line_packets > s->avctx->height) return AVERROR_INVALIDDATA; y_ptr += line_packets * s->frame->linesize[0]; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk) break; pixel_skip = bytestream2_get_byte(&g2); pixel_ptr += (pixel_skip*3); /* Pixel is 3 bytes wide */ pixel_countdown -= pixel_skip; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run < 0) { byte_run = -byte_run; pixel = bytestream2_get_le24(&g2); CHECK_PIXEL_PTR(3 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown -= 1) { AV_WL24(&pixels[pixel_ptr], pixel); pixel_ptr += 3; } } else { if (bytestream2_tell(&g2) + 2*byte_run > stream_ptr_after_chunk) break; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { pixel = bytestream2_get_le24(&g2); AV_WL24(&pixels[pixel_ptr], pixel); pixel_ptr += 3; } } } y_ptr += s->frame->linesize[0]; } } break; case FLI_LC: av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-palettized FLC\n"); bytestream2_skip(&g2, chunk_size - 6); break; case FLI_BLACK: /* set the whole frame to 0x00 which is black for 24 bit mode. 
*/ memset(pixels, 0x00, s->frame->linesize[0] * s->avctx->height); break; case FLI_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = (s->avctx->width * 3); while (pixel_countdown > 0) { if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk) break; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run > 0) { palette_idx1 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) (linea%d)\n", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk) break; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { palette_idx1 = bytestream2_get_byte(&g2); pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } } y_ptr += s->frame->linesize[0]; } break; case FLI_DTA_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */ while (pixel_countdown > 0) { if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk) break; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run > 0) { pixel = bytestream2_get_le24(&g2); CHECK_PIXEL_PTR(3 * byte_run); for (j = 0; j < byte_run; j++) { AV_WL24(pixels + pixel_ptr, pixel); pixel_ptr += 3; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n", pixel_countdown); } } else { /* copy pixels if byte_run < 0 */ byte_run = -byte_run; if (bytestream2_tell(&g2) + 3 * byte_run > stream_ptr_after_chunk) break; CHECK_PIXEL_PTR(3 * byte_run); for (j = 0; j < byte_run; j++) { pixel = bytestream2_get_le24(&g2); AV_WL24(pixels + pixel_ptr, pixel); pixel_ptr += 3; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n", pixel_countdown); } } } y_ptr += s->frame->linesize[0]; } break; case FLI_COPY: case FLI_DTA_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > (unsigned int)(FFALIGN(s->avctx->width, 2) * s->avctx->height)*3) { av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \ "bigger than image, skipping chunk\n", chunk_size - 6); bytestream2_skip(&g2, chunk_size - 6); } else { for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height; y_ptr += s->frame->linesize[0]) { pixel_countdown = s->avctx->width; pixel_ptr = 0; while (pixel_countdown > 0) { pixel = bytestream2_get_le24(&g2); AV_WL24(&pixels[y_ptr + pixel_ptr], pixel); pixel_ptr += 3; pixel_countdown--; } if (s->avctx->width & 1) bytestream2_skip(&g2, 3); } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... 
*/ bytestream2_skip(&g2, chunk_size - 6); break; default: av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1)) av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \ "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2)); if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; *got_frame = 1; return buf_size; }
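/*
 * CHECK_PIXEL_PTR(), used throughout the FLIC decode functions in this
 * section, is defined near the top of flicvideo.c and is assumed to expand to
 * a bounds check of roughly the following shape (the exact log text may
 * differ): refuse any write of n bytes that would run past pixel_limit.
 */
#define CHECK_PIXEL_PTR_SKETCH(n)                                         \
    if (pixel_ptr + (n) > pixel_limit) {                                  \
        av_log(s->avctx, AV_LOG_ERROR,                                    \
               "Invalid pixel_ptr = %d > pixel_limit = %d\n",             \
               pixel_ptr + (n), pixel_limit);                             \
        return AVERROR_INVALIDDATA;                                       \
    }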
static int gif_read_image(GifState *s, AVFrame *frame) { int left, top, width, height, bits_per_pixel, code_size, flags; int is_interleaved, has_local_palette, y, pass, y1, linesize, n, i; uint8_t *ptr, *spal, *palette, *ptr1; left = bytestream2_get_le16(&s->gb); top = bytestream2_get_le16(&s->gb); width = bytestream2_get_le16(&s->gb); height = bytestream2_get_le16(&s->gb); flags = bytestream2_get_byte(&s->gb); is_interleaved = flags & 0x40; has_local_palette = flags & 0x80; bits_per_pixel = (flags & 0x07) + 1; av_dlog(s->avctx, "gif: image x=%d y=%d w=%d h=%d\n", left, top, width, height); if (has_local_palette) { bytestream2_get_buffer(&s->gb, s->local_palette, 3 * (1 << bits_per_pixel)); palette = s->local_palette; } else { palette = s->global_palette; bits_per_pixel = s->bits_per_pixel; } /* verify that all the image is inside the screen dimensions */ if (left + width > s->screen_width || top + height > s->screen_height || !width || !height) { av_log(s->avctx, AV_LOG_ERROR, "Invalid image dimensions.\n"); return AVERROR_INVALIDDATA; } /* build the palette */ n = (1 << bits_per_pixel); spal = palette; for(i = 0; i < n; i++) { s->image_palette[i] = (0xffu << 24) | AV_RB24(spal); spal += 3; } for(; i < 256; i++) s->image_palette[i] = (0xffu << 24); /* handle transparency */ if (s->transparent_color_index >= 0) s->image_palette[s->transparent_color_index] = 0; /* now get the image data */ code_size = bytestream2_get_byte(&s->gb); ff_lzw_decode_init(s->lzw, code_size, s->gb.buffer, bytestream2_get_bytes_left(&s->gb), FF_LZW_GIF); /* read all the image */ linesize = frame->linesize[0]; ptr1 = frame->data[0] + top * linesize + left; ptr = ptr1; pass = 0; y1 = 0; for (y = 0; y < height; y++) { ff_lzw_decode(s->lzw, ptr, width); if (is_interleaved) { switch(pass) { default: case 0: case 1: y1 += 8; ptr += linesize * 8; if (y1 >= height) { y1 = pass ? 2 : 4; ptr = ptr1 + linesize * y1; pass++; } break; case 2: y1 += 4; ptr += linesize * 4; if (y1 >= height) { y1 = 1; ptr = ptr1 + linesize; pass++; } break; case 3: y1 += 2; ptr += linesize * 2; break; } } else { ptr += linesize; } } /* read the garbage data until end marker is found */ ff_lzw_decode_tail(s->lzw); bytestream2_skip(&s->gb, ff_lzw_size_read(s->lzw)); return 0; }
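/*
 * Illustrative sketch (not part of gifdec.c): the interleave handling in
 * gif_read_image() above implements the standard GIF four-pass row order
 * (rows 0,8,16,... then 4,12,..., then 2,6,..., then 1,3,...).  The
 * standalone helper below expresses the same schedule by mapping the n-th
 * decoded row to the destination row it lands on.
 */
static int gif_deinterlace_row(int row, int height)
{
    static const int pass_start[4] = { 0, 4, 2, 1 };
    static const int pass_step[4]  = { 8, 8, 4, 2 };
    int pass;

    for (pass = 0; pass < 4; pass++) {
        int rows_in_pass = (height - pass_start[pass] + pass_step[pass] - 1)
                           / pass_step[pass];
        if (row < rows_in_pass)
            return pass_start[pass] + row * pass_step[pass];
        row -= rows_in_pass;
    }
    return -1;  /* row index out of range for this height */
}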
static int flic_decode_frame_8BPP(AVCodecContext *avctx, void *data, int *got_frame, const uint8_t *buf, int buf_size) { FlicDecodeContext *s = avctx->priv_data; GetByteContext g2; int stream_ptr_after_color_chunk; int pixel_ptr; int palette_ptr; unsigned char palette_idx1; unsigned char palette_idx2; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j, ret; int color_packets; int color_changes; int color_shift; unsigned char r, g, b; int lines; int compressed_lines; int starting_line; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; unsigned int pixel_limit; bytestream2_init(&g2, buf, buf_size); if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return ret; } pixels = s->frame->data[0]; pixel_limit = s->avctx->height * s->frame->linesize[0]; frame_size = bytestream2_get_le32(&g2); bytestream2_skip(&g2, 2); /* skip the magic number */ num_chunks = bytestream2_get_le16(&g2); bytestream2_skip(&g2, 8); /* skip padding */ frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0)) { chunk_size = bytestream2_get_le32(&g2); chunk_type = bytestream2_get_le16(&g2); switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: stream_ptr_after_color_chunk = bytestream2_tell(&g2) + chunk_size - 6; /* check special case: If this file is from the Magic Carpet * game and uses 6-bit colors even though it reports 256-color * chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during * initialization) */ if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE)) color_shift = 0; else color_shift = 2; /* set up the palette */ color_packets = bytestream2_get_le16(&g2); palette_ptr = 0; for (i = 0; i < color_packets; i++) { /* first byte is how many colors to skip */ palette_ptr += bytestream2_get_byte(&g2); /* next byte indicates how many entries to change */ color_changes = bytestream2_get_byte(&g2); /* if there are 0 color changes, there are actually 256 */ if (color_changes == 0) color_changes = 256; for (j = 0; j < color_changes; j++) { unsigned int entry; /* wrap around, for good measure */ if ((unsigned)palette_ptr >= 256) palette_ptr = 0; r = bytestream2_get_byte(&g2) << color_shift; g = bytestream2_get_byte(&g2) << color_shift; b = bytestream2_get_byte(&g2) << color_shift; entry = (r << 16) | (g << 8) | b; if (s->palette[palette_ptr] != entry) s->new_palette = 1; s->palette[palette_ptr++] = entry; } } /* color chunks sometimes have weird 16-bit alignment issues; * therefore, take the hardline approach and skip * to the value calculated w.r.t. 
the size specified by the color * chunk header */ if (stream_ptr_after_color_chunk - bytestream2_tell(&g2) > 0) bytestream2_skip(&g2, stream_ptr_after_color_chunk - bytestream2_tell(&g2)); break; case FLI_DELTA: y_ptr = 0; compressed_lines = bytestream2_get_le16(&g2); while (compressed_lines > 0) { line_packets = bytestream2_get_le16(&g2); if ((line_packets & 0xC000) == 0xC000) { // line skip opcode line_packets = -line_packets; y_ptr += line_packets * s->frame->linesize[0]; } else if ((line_packets & 0xC000) == 0x4000) { av_log(avctx, AV_LOG_ERROR, "Undefined opcode (%x) in DELTA_FLI\n", line_packets); } else if ((line_packets & 0xC000) == 0x8000) { // "last byte" opcode pixel_ptr= y_ptr + s->frame->linesize[0] - 1; CHECK_PIXEL_PTR(0); pixels[pixel_ptr] = line_packets & 0xff; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = bytestream2_get_byte(&g2); pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = bytestream2_get_byte(&g2); palette_idx2 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run * 2); for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { pixels[pixel_ptr++] = palette_idx1; pixels[pixel_ptr++] = palette_idx2; } } else { CHECK_PIXEL_PTR(byte_run * 2); for (j = 0; j < byte_run * 2; j++, pixel_countdown--) { pixels[pixel_ptr++] = bytestream2_get_byte(&g2); } } } y_ptr += s->frame->linesize[0]; } } break; case FLI_LC: /* line compressed */ starting_line = bytestream2_get_le16(&g2); y_ptr = 0; y_ptr += starting_line * s->frame->linesize[0]; compressed_lines = bytestream2_get_le16(&g2); while (compressed_lines > 0) { pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; line_packets = bytestream2_get_byte(&g2); if (line_packets > 0) { for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = bytestream2_get_byte(&g2); pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = sign_extend(bytestream2_get_byte(&g2),8); if (byte_run > 0) { CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { pixels[pixel_ptr++] = bytestream2_get_byte(&g2); } } else if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { pixels[pixel_ptr++] = palette_idx1; } } } } y_ptr += s->frame->linesize[0]; compressed_lines--; } break; case FLI_BLACK: /* set the whole frame to color 0 (which is usually black) */ memset(pixels, 0, s->frame->linesize[0] * s->avctx->height); break; case FLI_BRUN: /* Byte run compression: This chunk type only occurs in the first * FLI frame and it will update the entire frame. 
*/ y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (!byte_run) { av_log(avctx, AV_LOG_ERROR, "Invalid byte run value.\n"); return AVERROR_INVALIDDATA; } if (byte_run > 0) { palette_idx1 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = bytestream2_get_byte(&g2); pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } } y_ptr += s->frame->linesize[0]; } break; case FLI_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > s->avctx->width * s->avctx->height) { av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \ "bigger than image, skipping chunk\n", chunk_size - 6); bytestream2_skip(&g2, chunk_size - 6); } else { for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height; y_ptr += s->frame->linesize[0]) { bytestream2_get_buffer(&g2, &pixels[y_ptr], s->avctx->width); } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... */ bytestream2_skip(&g2, chunk_size - 6); break; default: av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1)) av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \ "and final chunk ptr = %d\n", buf_size, buf_size - bytestream2_get_bytes_left(&g2)); /* make the palette available on the way out */ memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE); if (s->new_palette) { s->frame->palette_has_changed = 1; s->new_palette = 0; } if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; *got_frame = 1; return buf_size; }
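/*
 * Illustrative note on the FLI_DELTA handlers in this section (not part of
 * flicvideo.c): the 16-bit line_packets word is an opcode dispatched on its
 * top two bits.  The enum and helper below just name the four cases;
 * identifiers are hypothetical.
 */
typedef enum {
    FLI_DELTA_SKIP_LINES,   /* 0xC000: negated value gives lines to skip      */
    FLI_DELTA_LAST_BYTE,    /* 0x8000: low byte sets the last pixel of a line */
    FLI_DELTA_UNDEFINED,    /* 0x4000: undefined opcode, logged and ignored   */
    FLI_DELTA_PACKET_COUNT  /* 0x0000: value is the number of packets on line */
} FliDeltaOpcode;

static FliDeltaOpcode classify_delta_word(unsigned short word)
{
    switch (word & 0xC000) {
    case 0xC000: return FLI_DELTA_SKIP_LINES;
    case 0x8000: return FLI_DELTA_LAST_BYTE;
    case 0x4000: return FLI_DELTA_UNDEFINED;
    default:     return FLI_DELTA_PACKET_COUNT;
    }
}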
static int flic_decode_frame_15_16BPP(AVCodecContext *avctx, void *data, int *got_frame, const uint8_t *buf, int buf_size) { /* Note, the only difference between the 15Bpp and 16Bpp */ /* Format is the pixel format, the packets are processed the same. */ FlicDecodeContext *s = avctx->priv_data; GetByteContext g2; int pixel_ptr; unsigned char palette_idx1; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j, ret; int lines; int compressed_lines; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; int pixel; unsigned int pixel_limit; bytestream2_init(&g2, buf, buf_size); if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return ret; } pixels = s->frame->data[0]; pixel_limit = s->avctx->height * s->frame->linesize[0]; frame_size = bytestream2_get_le32(&g2); bytestream2_skip(&g2, 2); /* skip the magic number */ num_chunks = bytestream2_get_le16(&g2); bytestream2_skip(&g2, 8); /* skip padding */ frame_size -= 16; /* iterate through the chunks */ while ((frame_size > 0) && (num_chunks > 0)) { chunk_size = bytestream2_get_le32(&g2); chunk_type = bytestream2_get_le16(&g2); switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: /* For some reason, it seems that non-palettized flics do * include one of these chunks in their first frame. * Why I do not know, it seems rather extraneous. */ ff_dlog(avctx, "Unexpected Palette chunk %d in non-palettized FLC\n", chunk_type); bytestream2_skip(&g2, chunk_size - 6); break; case FLI_DELTA: case FLI_DTA_LC: y_ptr = 0; compressed_lines = bytestream2_get_le16(&g2); while (compressed_lines > 0) { line_packets = bytestream2_get_le16(&g2); if (line_packets < 0) { line_packets = -line_packets; y_ptr += line_packets * s->frame->linesize[0]; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ pixel_skip = bytestream2_get_byte(&g2); pixel_ptr += (pixel_skip*2); /* Pixel is 2 bytes wide */ pixel_countdown -= pixel_skip; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run < 0) { byte_run = -byte_run; pixel = bytestream2_get_le16(&g2); CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { *((signed short*)(&pixels[pixel_ptr])) = pixel; pixel_ptr += 2; } } else { CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2); pixel_ptr += 2; } } } y_ptr += s->frame->linesize[0]; } } break; case FLI_LC: av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-palettized FLC\n"); bytestream2_skip(&g2, chunk_size - 6); break; case FLI_BLACK: /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. 
*/ memset(pixels, 0x0000, s->frame->linesize[0] * s->avctx->height); break; case FLI_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = (s->avctx->width * 2); while (pixel_countdown > 0) { byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run > 0) { palette_idx1 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) (linea%d)\n", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { palette_idx1 = bytestream2_get_byte(&g2); pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } } /* Now FLX is strange, in that it is "byte" as opposed to "pixel" run length compressed. * This does not give us any good opportunity to perform word endian conversion * during decompression. So if it is required (i.e., this is not a LE target, we do * a second pass over the line here, swapping the bytes. */ #if HAVE_BIGENDIAN pixel_ptr = y_ptr; pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&buf[pixel_ptr]); pixel_ptr += 2; } #endif y_ptr += s->frame->linesize[0]; } break; case FLI_DTA_BRUN: y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */ while (pixel_countdown > 0) { byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run > 0) { pixel = bytestream2_get_le16(&g2); CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++) { *((signed short*)(&pixels[pixel_ptr])) = pixel; pixel_ptr += 2; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n", pixel_countdown); } } else { /* copy pixels if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(2 * byte_run); for (j = 0; j < byte_run; j++) { *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2); pixel_ptr += 2; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n", pixel_countdown); } } } y_ptr += s->frame->linesize[0]; } break; case FLI_COPY: case FLI_DTA_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 > (unsigned int)(s->avctx->width * s->avctx->height)*2) { av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \ "bigger than image, skipping chunk\n", chunk_size - 6); bytestream2_skip(&g2, chunk_size - 6); } else { for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height; y_ptr += s->frame->linesize[0]) { pixel_countdown = s->avctx->width; pixel_ptr = 0; while (pixel_countdown > 0) { *((signed short*)(&pixels[y_ptr + pixel_ptr])) = bytestream2_get_le16(&g2); pixel_ptr += 2; pixel_countdown--; } } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... 
*/ bytestream2_skip(&g2, chunk_size - 6); break; default: av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type); break; } frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1, possibly); if it doesn't, issue a warning */ if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1)) av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \ "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2)); if ((ret = av_frame_ref(data, s->frame)) < 0) return ret; *got_frame = 1; return buf_size; }
static int flic_decode_frame_8BPP(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { FlicDecodeContext *s = avctx->priv_data; GetByteContext g2; int pixel_ptr; int palette_ptr; unsigned char palette_idx1; unsigned char palette_idx2; unsigned int frame_size; int num_chunks; unsigned int chunk_size; int chunk_type; int i, j; int color_packets; int color_changes; int color_shift; unsigned char r, g, b; int lines; int compressed_lines; int starting_line; signed short line_packets; int y_ptr; int byte_run; int pixel_skip; int pixel_countdown; unsigned char *pixels; unsigned int pixel_limit; bytestream2_init(&g2, buf, buf_size); s->frame.reference = 3; s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE; if (avctx->reget_buffer(avctx, &s->frame) < 0) { av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n"); return -1; } pixels = s->frame.data[0]; pixel_limit = s->avctx->height * s->frame.linesize[0]; if (buf_size < 16 || buf_size > INT_MAX - (3 * 256 + FF_INPUT_BUFFER_PADDING_SIZE)) return AVERROR_INVALIDDATA; frame_size = bytestream2_get_le32(&g2); if (frame_size > buf_size) frame_size = buf_size; bytestream2_skip(&g2, 2); /* skip the magic number */ num_chunks = bytestream2_get_le16(&g2); bytestream2_skip(&g2, 8); /* skip padding */ frame_size -= 16; /* iterate through the chunks */ while ((frame_size >= 6) && (num_chunks > 0)) { int stream_ptr_after_chunk; chunk_size = bytestream2_get_le32(&g2); if (chunk_size > frame_size) { av_log(avctx, AV_LOG_WARNING, "Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size); chunk_size = frame_size; } stream_ptr_after_chunk = bytestream2_tell(&g2) - 4 + chunk_size; chunk_type = bytestream2_get_le16(&g2); switch (chunk_type) { case FLI_256_COLOR: case FLI_COLOR: /* check special case: If this file is from the Magic Carpet * game and uses 6-bit colors even though it reports 256-color * chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during * initialization) */ if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE)) color_shift = 0; else color_shift = 2; /* set up the palette */ color_packets = bytestream2_get_le16(&g2); palette_ptr = 0; for (i = 0; i < color_packets; i++) { /* first byte is how many colors to skip */ palette_ptr += bytestream2_get_byte(&g2); /* next byte indicates how many entries to change */ color_changes = bytestream2_get_byte(&g2); /* if there are 0 color changes, there are actually 256 */ if (color_changes == 0) color_changes = 256; if (bytestream2_tell(&g2) + color_changes * 3 > stream_ptr_after_chunk) break; for (j = 0; j < color_changes; j++) { unsigned int entry; /* wrap around, for good measure */ if ((unsigned)palette_ptr >= 256) palette_ptr = 0; r = bytestream2_get_byte(&g2) << color_shift; g = bytestream2_get_byte(&g2) << color_shift; b = bytestream2_get_byte(&g2) << color_shift; entry = 0xFF << 24 | r << 16 | g << 8 | b; if (color_shift == 2) entry |= entry >> 6 & 0x30303; if (s->palette[palette_ptr] != entry) s->new_palette = 1; s->palette[palette_ptr++] = entry; } } break; case FLI_DELTA: y_ptr = 0; compressed_lines = bytestream2_get_le16(&g2); while (compressed_lines > 0) { if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk) break; line_packets = bytestream2_get_le16(&g2); if ((line_packets & 0xC000) == 0xC000) { // line skip opcode line_packets = -line_packets; y_ptr += line_packets * s->frame.linesize[0]; } else if ((line_packets & 0xC000) == 0x4000) { av_log(avctx, 
AV_LOG_ERROR, "Undefined opcode (%x) in DELTA_FLI\n", line_packets); } else if ((line_packets & 0xC000) == 0x8000) { // "last byte" opcode pixel_ptr= y_ptr + s->frame.linesize[0] - 1; CHECK_PIXEL_PTR(0); pixels[pixel_ptr] = line_packets & 0xff; } else { compressed_lines--; pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; for (i = 0; i < line_packets; i++) { if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk) break; /* account for the skip bytes */ pixel_skip = bytestream2_get_byte(&g2); pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = bytestream2_get_byte(&g2); palette_idx2 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run * 2); for (j = 0; j < byte_run; j++, pixel_countdown -= 2) { pixels[pixel_ptr++] = palette_idx1; pixels[pixel_ptr++] = palette_idx2; } } else { CHECK_PIXEL_PTR(byte_run * 2); if (bytestream2_tell(&g2) + byte_run * 2 > stream_ptr_after_chunk) break; for (j = 0; j < byte_run * 2; j++, pixel_countdown--) { pixels[pixel_ptr++] = bytestream2_get_byte(&g2); } } } y_ptr += s->frame.linesize[0]; } } break; case FLI_LC: /* line compressed */ starting_line = bytestream2_get_le16(&g2); y_ptr = 0; y_ptr += starting_line * s->frame.linesize[0]; compressed_lines = bytestream2_get_le16(&g2); while (compressed_lines > 0) { pixel_ptr = y_ptr; CHECK_PIXEL_PTR(0); pixel_countdown = s->avctx->width; if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk) break; line_packets = bytestream2_get_byte(&g2); if (line_packets > 0) { for (i = 0; i < line_packets; i++) { /* account for the skip bytes */ if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk) break; pixel_skip = bytestream2_get_byte(&g2); pixel_ptr += pixel_skip; pixel_countdown -= pixel_skip; byte_run = sign_extend(bytestream2_get_byte(&g2),8); if (byte_run > 0) { CHECK_PIXEL_PTR(byte_run); if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk) break; for (j = 0; j < byte_run; j++, pixel_countdown--) { pixels[pixel_ptr++] = bytestream2_get_byte(&g2); } } else if (byte_run < 0) { byte_run = -byte_run; palette_idx1 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++, pixel_countdown--) { pixels[pixel_ptr++] = palette_idx1; } } } } y_ptr += s->frame.linesize[0]; compressed_lines--; } break; case FLI_BLACK: /* set the whole frame to color 0 (which is usually black) */ memset(pixels, 0, s->frame.linesize[0] * s->avctx->height); break; case FLI_BRUN: /* Byte run compression: This chunk type only occurs in the first * FLI frame and it will update the entire frame. 
*/ y_ptr = 0; for (lines = 0; lines < s->avctx->height; lines++) { pixel_ptr = y_ptr; /* disregard the line packets; instead, iterate through all * pixels on a row */ bytestream2_skip(&g2, 1); pixel_countdown = s->avctx->width; while (pixel_countdown > 0) { if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk) break; byte_run = sign_extend(bytestream2_get_byte(&g2), 8); if (byte_run > 0) { palette_idx1 = bytestream2_get_byte(&g2); CHECK_PIXEL_PTR(byte_run); for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = palette_idx1; pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } else { /* copy bytes if byte_run < 0 */ byte_run = -byte_run; CHECK_PIXEL_PTR(byte_run); if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk) break; for (j = 0; j < byte_run; j++) { pixels[pixel_ptr++] = bytestream2_get_byte(&g2); pixel_countdown--; if (pixel_countdown < 0) av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n", pixel_countdown, lines); } } } y_ptr += s->frame.linesize[0]; } break; case FLI_COPY: /* copy the chunk (uncompressed frame) */ if (chunk_size - 6 != s->avctx->width * s->avctx->height) { av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \ "has incorrect size, skipping chunk\n", chunk_size - 6); bytestream2_skip(&g2, chunk_size - 6); } else { for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height; y_ptr += s->frame.linesize[0]) { bytestream2_get_buffer(&g2, &pixels[y_ptr], s->avctx->width); } } break; case FLI_MINI: /* some sort of a thumbnail? disregard this chunk... */ break; default: av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type); break; } if (stream_ptr_after_chunk - bytestream2_tell(&g2) > 0) bytestream2_skip(&g2, stream_ptr_after_chunk - bytestream2_tell(&g2)); frame_size -= chunk_size; num_chunks--; } /* by the end of the chunk, the stream ptr should equal the frame * size (minus 1 or 2, possibly); if it doesn't, issue a warning */ if (bytestream2_get_bytes_left(&g2) > 2) av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \ "and final chunk ptr = %d\n", buf_size, buf_size - bytestream2_get_bytes_left(&g2)); /* make the palette available on the way out */ memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE); if (s->new_palette) { s->frame.palette_has_changed = 1; s->new_palette = 0; } *data_size=sizeof(AVFrame); *(AVFrame*)data = s->frame; return buf_size; }