/**
 * Expand an RLE row into a channel.
 * @param in_buf      input buffer
 * @param in_end      end of input buffer
 * @param out_buf     Points to one line after the output buffer.
 * @param out_end     end of line in output buffer
 * @param pixelstride pixel stride of input buffer
 * @return size of output in bytes, -1 if buffer overflows
 */
static int expand_rle_row(const uint8_t *in_buf, const uint8_t *in_end,
                          unsigned char *out_buf, uint8_t *out_end,
                          int pixelstride)
{
    unsigned char pixel, count;
    unsigned char *orig = out_buf;

    while (1) {
        /* Need at least the opcode byte. */
        if (in_buf + 1 > in_end)
            return -1;
        pixel = bytestream_get_byte(&in_buf);
        if (!(count = (pixel & 0x7f))) {
            /* count == 0 terminates the row. */
            return (out_buf - orig) / pixelstride;
        }

        /* Check for buffer overflow. */
        if (out_buf + pixelstride * count >= out_end)
            return -1;

        if (pixel & 0x80) {
            /* Literal run: 'count' bytes follow in the input.
             * Fix: the old code only guaranteed one input byte and could
             * read past in_end here on truncated data. */
            if (in_end - in_buf < count)
                return -1;
            while (count--) {
                *out_buf = bytestream_get_byte(&in_buf);
                out_buf += pixelstride;
            }
        } else {
            /* Repeat run: one value byte follows in the input.
             * Fix: validate it is present before reading. */
            if (in_buf >= in_end)
                return -1;
            pixel = bytestream_get_byte(&in_buf);
            while (count--) {
                *out_buf = pixel;
                out_buf += pixelstride;
            }
        }
    }
}
/**
 * Parse the palette segment packet.
 *
 * The palette segment contains details of the palette,
 * a maximum of 256 colors can be defined.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 */
static void parse_palette_segment(AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    const uint8_t *buf_end = buf + buf_size;
    const uint8_t *cm      = ff_cropTbl + MAX_NEG_CROP;
    int color_id;
    int y, cb, cr, alpha;
    int r, g, b, r_add, g_add, b_add;

    /* Fix: ensure the two header bytes we skip are actually present. */
    if (buf_size < 2)
        return;

    /* Skip two null bytes */
    buf += 2;

    /* Each palette entry is 5 bytes (id, Y, Cr, Cb, alpha).
     * Fix: the old "buf < buf_end" condition let a truncated trailing
     * entry read up to 4 bytes past the end of the packet. */
    while (buf_end - buf >= 5) {
        color_id = bytestream_get_byte(&buf);
        y        = bytestream_get_byte(&buf);
        cr       = bytestream_get_byte(&buf);
        cb       = bytestream_get_byte(&buf);
        alpha    = bytestream_get_byte(&buf);

        YUV_TO_RGB1(cb, cr);
        YUV_TO_RGB2(r, g, b, y);

        av_dlog(avctx, "Color %d := (%d,%d,%d,%d)\n", color_id, r, g, b, alpha);

        /* Store color in palette */
        ctx->clut[color_id] = RGBA(r,g,b,alpha);
    }
}
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Implement cropping
 * @todo TODO: Implement forcing of subtitles
 */
static void parse_presentation_segment(AVCodecContext *avctx,
                                       const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    int x, y;

    /* Video dimensions lead the segment; adopt them as the codec
     * dimensions when they pass validation. */
    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);

    av_dlog(avctx, "Video Dimensions %dx%d\n", w, h);
    if (av_image_check_size(w, h, 0, avctx) >= 0)
        avcodec_set_dimensions(avctx, w, h);

    /* Skip 1 bytes of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /*
     * Skip 3 bytes of unknown:
     *     state
     *     palette_update_flag (0x80),
     *     palette_id_to_use,
     */
    buf += 3;

    ctx->presentation.object_number = bytestream_get_byte(&buf);
    ctx->presentation.composition_flag = 0;
    /* No object reference means there is nothing to place on screen. */
    if (!ctx->presentation.object_number)
        return;

    /*
     * Skip 3 bytes of unknown:
     *     object_id_ref (2 bytes),
     *     window_id_ref,
     */
    buf += 3;
    ctx->presentation.composition_flag = bytestream_get_byte(&buf);

    x = bytestream_get_be16(&buf);
    y = bytestream_get_be16(&buf);

    /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/

    av_dlog(avctx, "Subtitle Placement x=%d, y=%d\n", x, y);

    /* Out-of-bounds placement is reported and reset to the origin. */
    if (x > avctx->width || y > avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
               x, y, avctx->width, avctx->height);
        x = 0; y = 0;
    }

    /* Fill in dimensions */
    ctx->presentation.x = x;
    ctx->presentation.y = y;

    /* NOTE(review): buf_size is never checked against the bytes consumed
     * above — a truncated segment would read past the packet. Confirm the
     * caller guarantees segment_length covers this fixed layout. */
}
/*
 * Read and validate the GIF header: 6-byte signature plus the 7-byte
 * logical screen descriptor (13 bytes total, checked up front).
 * Returns 0 on success, AVERROR_INVALIDDATA on malformed/truncated input,
 * AVERROR(ENOMEM) on allocation failure.
 */
static int gif_read_header1(GifState *s)
{
    uint8_t sig[6];
    int v, n;
    int background_color_index;

    /* 6 signature bytes + 7 screen-descriptor bytes read below. */
    if (s->bytestream_end < s->bytestream + 13)
        return AVERROR_INVALIDDATA;

    /* read gif signature */
    bytestream_get_buffer(&s->bytestream, sig, 6);
    if (memcmp(sig, gif87a_sig, 6) != 0 &&
        memcmp(sig, gif89a_sig, 6) != 0)
        return AVERROR_INVALIDDATA;

    /* read screen header */
    s->transparent_color_index = -1;
    s->screen_width = bytestream_get_le16(&s->bytestream);
    s->screen_height = bytestream_get_le16(&s->bytestream);
    if( (unsigned)s->screen_width > 32767 || (unsigned)s->screen_height > 32767){
        av_log(s->avctx, AV_LOG_ERROR, "picture size too large\n");
        return AVERROR_INVALIDDATA;
    }

    /* Scratch line of palette indices, one byte per screen column. */
    av_fast_malloc(&s->idx_line, &s->idx_line_size, s->screen_width);
    if (!s->idx_line)
        return AVERROR(ENOMEM);

    /* Packed fields byte: bit 7 = global palette flag, bits 6-4 = color
     * resolution - 1, bits 2-0 = palette size exponent - 1. */
    v = bytestream_get_byte(&s->bytestream);
    s->color_resolution = ((v & 0x70) >> 4) + 1;
    s->has_global_palette = (v & 0x80);
    s->bits_per_pixel = (v & 0x07) + 1;
    background_color_index = bytestream_get_byte(&s->bytestream);
    n = bytestream_get_byte(&s->bytestream);
    if (n) {
        /* Non-zero aspect byte n encodes a pixel aspect of (n+15)/64. */
        s->avctx->sample_aspect_ratio.num = n + 15;
        s->avctx->sample_aspect_ratio.den = 64;
    }

    av_dlog(s->avctx, "screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
            s->screen_width, s->screen_height, s->bits_per_pixel,
            s->has_global_palette);

    if (s->has_global_palette) {
        /* The background color index is only meaningful with a palette. */
        s->background_color_index = background_color_index;
        n = 1 << s->bits_per_pixel;
        if (s->bytestream_end < s->bytestream + n * 3)
            return AVERROR_INVALIDDATA;
        gif_read_palette(&s->bytestream, s->global_palette, n);
        s->bg_color = s->global_palette[s->background_color_index];
    } else
        s->background_color_index = -1;
    return 0;
}
/**
 * Decode the RLE data.
 *
 * The subtitle is stored as a Run Length Encoded image.
 *
 * @param avctx contains the current codec context
 * @param sub pointer to the processed subtitle data
 * @param buf pointer to the RLE data to process
 * @param buf_size size of the RLE data to process
 * @return 0 on success, -1 on allocation failure or truncated data
 */
static int decode_rle(AVCodecContext *avctx, AVSubtitle *sub,
                      const uint8_t *buf, unsigned int buf_size)
{
    const uint8_t *rle_bitmap_end;
    int pixel_count, line_count;

    rle_bitmap_end = buf + buf_size;

    sub->rects[0]->pict.data[0] = av_malloc(sub->rects[0]->w * sub->rects[0]->h);

    if (!sub->rects[0]->pict.data[0])
        return -1;

    pixel_count = 0;
    line_count  = 0;

    while (buf < rle_bitmap_end && line_count < sub->rects[0]->h) {
        uint8_t flags, color;
        int run;

        color = bytestream_get_byte(&buf);
        run   = 1;

        if (color == 0x00) {
            /* Escape: up to three additional bytes follow.
             * Fix: the old code read them unconditionally and could run
             * past rle_bitmap_end on truncated data — check before each. */
            if (buf >= rle_bitmap_end)
                return -1;
            flags = bytestream_get_byte(&buf);
            run   = flags & 0x3f;
            if (flags & 0x40) {
                if (buf >= rle_bitmap_end)
                    return -1;
                run = (run << 8) + bytestream_get_byte(&buf);
            }
            if (flags & 0x80) {
                if (buf >= rle_bitmap_end)
                    return -1;
                color = bytestream_get_byte(&buf);
            } else {
                color = 0;
            }
        }

        if (run > 0 && pixel_count + run <= sub->rects[0]->w * sub->rects[0]->h) {
            /* Paint the run into the linear bitmap. */
            memset(sub->rects[0]->pict.data[0] + pixel_count, color, run);
            pixel_count += run;
        } else if (!run) {
            /*
             * New Line. Check if correct pixels decoded, if not display warning
             * and adjust bitmap pointer to correct new line position.
             */
            if (pixel_count % sub->rects[0]->w > 0)
                av_log(avctx, AV_LOG_ERROR, "Decoded %d pixels, when line should be %d pixels\n",
                       pixel_count % sub->rects[0]->w, sub->rects[0]->w);
            line_count++;
        }
    }

    if (pixel_count < sub->rects[0]->w * sub->rects[0]->h) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient RLE data for subtitle\n");
        return -1;
    }

    av_dlog(avctx, "Pixel Count = %d, Area = %d\n", pixel_count, sub->rects[0]->w * sub->rects[0]->h);

    return 0;
}
/*
 * Decode one macroblock. The first byte is the coding mode and also the
 * number of payload bytes the macroblock consumes from *bs.
 */
static void tgq_decode_mb(TgqContext *s, int mb_y, int mb_x, const uint8_t **bs, const uint8_t *buf_end){
    int mode;
    int i;
    int8_t dc[6];

    mode = bytestream_get_byte(bs);
    /* 'mode' doubles as the payload byte count; reject truncated input. */
    if (mode>buf_end-*bs) {
        av_log(s->avctx, AV_LOG_ERROR, "truncated macroblock\n");
        return;
    }

    if (mode>12) {
        /* Fully coded macroblock: bit-parse all six 8x8 blocks. */
        GetBitContext gb;
        init_get_bits(&gb, *bs, mode*8);
        for(i=0; i<6; i++)
            tgq_decode_block(s, s->block[i], &gb);
        tgq_idct_put_mb(s, s->block, mb_x, mb_y);
    }else{
        /* DC-only modes: 3, 6 or 12 payload bytes yield six DC values. */
        if (mode==3) {
            /* One shared DC for the four luma blocks, then two chroma DCs. */
            memset(dc, (*bs)[0], 4);
            dc[4] = (*bs)[1];
            dc[5] = (*bs)[2];
        }else if (mode==6) {
            memcpy(dc, *bs, 6);
        }else if (mode==12) {
            /* Six DCs stored at every other byte. */
            for(i=0; i<6; i++)
                dc[i] = (*bs)[i*2];
        }else{
            av_log(s->avctx, AV_LOG_ERROR, "unsupported mb mode %i\n", mode);
        }
        /* NOTE(review): for an unsupported mode (e.g. 0..2, 4, 5, 7..11)
         * dc[] is used uninitialized below — confirm whether callers can
         * feed such modes or whether this needs an early return. */
        tgq_idct_put_mb_dconly(s, mb_x, mb_y, dc);
    }
    /* Advance past this macroblock's payload. */
    *bs += mode;
}
/*
 * Scan block labels until an image is decoded (','), the stream ends
 * (';' trailer or unknown label), or an extension ('!') is consumed.
 * Returns 0 once an image was read, -1 on end/error.
 */
static int gif_parse_next_image(GifState *s)
{
    for (;;) {
        int block_label = bytestream_get_byte(&s->bytestream);
#ifdef DEBUG
        dprintf("gif: code=%02x '%c'\n", block_label, block_label);
#endif
        if (block_label == ',') {
            /* Image descriptor: decode it and stop. */
            if (gif_read_image(s) < 0)
                return -1;
            return 0;
        } else if (block_label == '!') {
            /* Extension block: consume and keep scanning. */
            if (gif_read_extension(s) < 0)
                return -1;
        } else {
            /* ';' trailer, or an erroneous/unknown label. */
            return -1;
        }
    }
}
/*
 * Walk GIF block labels until an image is decoded, the trailer is hit,
 * or the bytestream is exhausted. *got_picture is non-zero unless the
 * trailer was reached without a picture.
 */
static int gif_parse_next_image(GifState *s, int *got_picture)
{
    /* Assume a picture will be produced until proven otherwise. */
    *got_picture = sizeof(AVPicture);

    while (s->bytestream < s->bytestream_end) {
        int block_label = bytestream_get_byte(&s->bytestream);
        int ret;

        av_dlog(s->avctx, "code=%02x '%c'\n", block_label, block_label);

        if (block_label == GIF_IMAGE_SEPARATOR)
            return gif_read_image(s);

        if (block_label == GIF_TRAILER) {
            /* end of image */
            *got_picture = 0;
            return 0;
        }

        if (block_label != GIF_EXTENSION_INTRODUCER)
            return AVERROR_INVALIDDATA; /* erroneous block label */

        ret = gif_read_extension(s);
        if (ret < 0)
            return ret;
    }
    return AVERROR_EOF;
}
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Implement cropping
 * @todo TODO: Implement forcing of subtitles
 * @todo TODO: Blanking of subtitle
 */
static void parse_presentation_segment(AVCodecContext *avctx,
                                       const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    int x, y;
    uint8_t block;

    /* Video dimensions lead the segment. */
    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);

    dprintf(avctx, "Video Dimensions %dx%d\n", w, h);
    if (av_image_check_size(w, h, 0, avctx) >= 0)
        avcodec_set_dimensions(avctx, w, h);

    /* Skip 1 bytes of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /* Next byte is the state. */
    block = bytestream_get_byte(&buf);; /* (stray second ';' is a harmless empty statement) */
    if (block == 0x80) {
        /*
         * Skip 7 bytes of unknown:
         *     palette_update_flag (0x80),
         *     palette_id_to_use,
         *     Object Number (if > 0 determines if more data to process),
         *     object_id_ref (2 bytes),
         *     window_id_ref,
         *     composition_flag (0x80 - object cropped, 0x40 - object forced)
         */
        buf += 7;

        x = bytestream_get_be16(&buf);
        y = bytestream_get_be16(&buf);

        /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/

        dprintf(avctx, "Subtitle Placement x=%d, y=%d\n", x, y);

        /* Out-of-bounds placement is reported and reset to the origin. */
        if (x > avctx->width || y > avctx->height) {
            av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
                   x, y, avctx->width, avctx->height);
            x = 0; y = 0;
        }

        /* Fill in dimensions */
        ctx->presentation.x = x;
        ctx->presentation.y = y;
    } else if (block == 0x00) {
        /* TODO: Blank context as subtitle should not be displayed.
         *       If the subtitle is blanked now the subtitle is not
         *       on screen long enough to read, due to a delay in
         *       initial display timing.
         */
    }
}
/*
 * Read and validate the GIF header: 6-byte signature plus the 7-byte
 * logical screen descriptor (13 bytes total, checked up front).
 * Returns 0 on success, -1 on malformed or truncated input.
 */
static int gif_read_header1(GifState *s)
{
    uint8_t sig[6];
    int v, n;
    int has_global_palette;

    /* 6 signature bytes + 7 screen-descriptor bytes read below. */
    if (s->bytestream_end < s->bytestream + 13)
        return -1;

    /* read gif signature */
    bytestream_get_buffer(&s->bytestream, sig, 6);
    if (memcmp(sig, gif87a_sig, 6) != 0 &&
        memcmp(sig, gif89a_sig, 6) != 0)
        return -1;

    /* read screen header */
    s->transparent_color_index = -1;
    s->screen_width = bytestream_get_le16(&s->bytestream);
    s->screen_height = bytestream_get_le16(&s->bytestream);
    if( (unsigned)s->screen_width > 32767 || (unsigned)s->screen_height > 32767){
        av_log(NULL, AV_LOG_ERROR, "picture size too large\n");
        return -1;
    }

    /* Packed fields byte: bit 7 = global palette flag, bits 6-4 = color
     * resolution - 1, bits 2-0 = palette size exponent - 1. */
    v = bytestream_get_byte(&s->bytestream);
    s->color_resolution = ((v & 0x70) >> 4) + 1;
    has_global_palette = (v & 0x80);
    s->bits_per_pixel = (v & 0x07) + 1;
    s->background_color_index = bytestream_get_byte(&s->bytestream);
    bytestream_get_byte(&s->bytestream); /* ignored (pixel aspect ratio byte) */
#ifdef DEBUG
    dprintf(s->avctx, "gif: screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
           s->screen_width, s->screen_height, s->bits_per_pixel,
           has_global_palette);
#endif
    if (has_global_palette) {
        /* Global palette: 3 bytes per entry, validated against the end. */
        n = 1 << s->bits_per_pixel;
        if (s->bytestream_end < s->bytestream + n * 3)
            return -1;
        bytestream_get_buffer(&s->bytestream, s->global_palette, n * 3);
    }
    return 0;
}
/*
 * Split a 4-byte FLAC metadata block header into its fields.
 * Any of last/type/size may be NULL when the caller does not need it;
 * the 24-bit size is only read when requested.
 */
void ff_flac_parse_block_header(const uint8_t *block_header,
                                int *last, int *type, int *size)
{
    const int header_byte = bytestream_get_byte(&block_header);
    const int is_last     = header_byte & 0x80; /* top bit: last-block flag */
    const int block_type  = header_byte & 0x7F; /* low 7 bits: block type  */

    if (last)
        *last = is_last;
    if (type)
        *type = block_type;
    if (size)
        *size = bytestream_get_be24(&block_header);
}
/**
 * Parses the picture segment packet.
 *
 * The picture segment contains details on the sequence id,
 * width, height and Run Length Encoded (RLE) bitmap data.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Enable support for RLE data over multiple packets
 */
static int parse_picture_segment(AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    uint8_t sequence_desc;
    unsigned int rle_bitmap_len, width, height;

    /* Fix: the fixed header below is 11 bytes (3 skipped + 1 sequence
     * descriptor + 3 length + 2 width + 2 height). Without this guard a
     * tiny packet made "buf_size - 7" wrap around in the unsigned
     * comparison below, bypassing the length check entirely. */
    if (buf_size < 11)
        return -1;

    /* skip 3 unknown bytes: Object ID (2 bytes), Version Number */
    buf += 3;

    /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
    sequence_desc = bytestream_get_byte(&buf);

    if (!(sequence_desc & 0x80)) {
        av_log(avctx, AV_LOG_ERROR, "Decoder does not support object data over multiple packets.\n");
        return -1;
    }

    /* Decode rle bitmap length */
    rle_bitmap_len = bytestream_get_be24(&buf);

    /* Check to ensure we have enough data for rle_bitmap_length if just a single packet */
    if (rle_bitmap_len > buf_size - 7) {
        av_log(avctx, AV_LOG_ERROR, "Not enough RLE data for specified length of %d.\n", rle_bitmap_len);
        return -1;
    }
    /* NOTE(review): if the stored length counts the 4 width/height bytes
     * (as later stream layouts suggest), the memcpy below can still read
     * up to 4 bytes past the segment — confirm the length semantics. */

    ctx->picture.rle_data_len = rle_bitmap_len;

    /* Get bitmap dimensions from data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);

    /* Make sure the bitmap is not too large */
    if (ctx->presentation.video_w < width || ctx->presentation.video_h < height) {
        av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger then video.\n");
        return -1;
    }

    ctx->picture.w = width;
    ctx->picture.h = height;

    av_fast_malloc(&ctx->picture.rle, &ctx->picture.rle_buffer_size, rle_bitmap_len);

    if (!ctx->picture.rle)
        return -1;

    memcpy(ctx->picture.rle, buf, rle_bitmap_len);

    return 0;
}
/*
 * Read one GIF extension block (Graphic Control Extension handled,
 * everything else skipped sub-block by sub-block).
 * Returns 0 on success, -1 on truncated input.
 */
static int gif_read_extension(GifState *s)
{
    int ext_code, ext_len, i, gce_flags, gce_transparent_index;

    /* extension */
    /* Fix: need at least 2 bytes (label + first block size); the old code
     * read them with no bounds check against bytestream_end. */
    if (s->bytestream_end < s->bytestream + 2)
        return -1;
    ext_code = bytestream_get_byte(&s->bytestream);
    ext_len = bytestream_get_byte(&s->bytestream);
#ifdef DEBUG
    dprintf(s->avctx, "gif: ext_code=0x%x len=%d\n", ext_code, ext_len);
#endif
    switch(ext_code) {
    case 0xf9:
        if (ext_len != 4)
            goto discard_ext;
        /* Fix: 4 bytes of GCE body plus 1 byte of next block size. */
        if (s->bytestream_end < s->bytestream + 5)
            return -1;
        s->transparent_color_index = -1;
        gce_flags = bytestream_get_byte(&s->bytestream);
        s->gce_delay = bytestream_get_le16(&s->bytestream);
        gce_transparent_index = bytestream_get_byte(&s->bytestream);
        if (gce_flags & 0x01)
            s->transparent_color_index = gce_transparent_index;
        else
            s->transparent_color_index = -1;
        s->gce_disposal = (gce_flags >> 2) & 0x7;
#ifdef DEBUG
        dprintf(s->avctx, "gif: gce_flags=%x delay=%d tcolor=%d disposal=%d\n",
               gce_flags, s->gce_delay,
               s->transparent_color_index, s->gce_disposal);
#endif
        ext_len = bytestream_get_byte(&s->bytestream);
        break;
    }

    /* NOTE: many extension blocks can come after */
 discard_ext:
    while (ext_len != 0) {
        /* Fix: sub-block payload plus the next block size byte must fit. */
        if (s->bytestream_end < s->bytestream + ext_len + 1)
            return -1;
        for (i = 0; i < ext_len; i++)
            bytestream_get_byte(&s->bytestream);
        ext_len = bytestream_get_byte(&s->bytestream);
#ifdef DEBUG
        dprintf(s->avctx, "gif: ext_len1=%d\n", ext_len);
#endif
    }
    return 0;
}
static int mimic_decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { MimicContext *ctx = avctx->priv_data; int is_pframe; int width, height; int quality, num_coeffs; int swap_buf_size = buf_size - MIMIC_HEADER_SIZE; if(buf_size < MIMIC_HEADER_SIZE) { av_log(avctx, AV_LOG_ERROR, "insufficient data\n"); return -1; } buf += 2; /* some constant (always 256) */ quality = bytestream_get_le16(&buf); width = bytestream_get_le16(&buf); height = bytestream_get_le16(&buf); buf += 4; /* some constant */ is_pframe = bytestream_get_le32(&buf); num_coeffs = bytestream_get_byte(&buf); buf += 3; /* some constant */ if(!ctx->avctx) { int i; if(!(width == 160 && height == 120) && !(width == 320 && height == 240)) { av_log(avctx, AV_LOG_ERROR, "invalid width/height!\n"); return -1; } ctx->avctx = avctx; avctx->width = width; avctx->height = height; avctx->pix_fmt = PIX_FMT_YUV420P; for(i = 0; i < 3; i++) { ctx->num_vblocks[i] = -((-height) >> (3 + !!i)); ctx->num_hblocks[i] = width >> (3 + !!i) ; } } else if(width != ctx->avctx->width || height != ctx->avctx->height) {
/*
 * Scan block labels until an image descriptor is found and decoded.
 * Extensions are consumed; the trailer, unknown labels, or running out
 * of data all yield -1.
 */
static int gif_parse_next_image(GifState *s)
{
    while (s->bytestream < s->bytestream_end) {
        int block_label = bytestream_get_byte(&s->bytestream);
#ifdef DEBUG
        dprintf(s->avctx, "gif: code=%02x '%c'\n", block_label, block_label);
#endif
        if (block_label == ',')
            return gif_read_image(s);       /* image descriptor */

        if (block_label != '!')
            return -1;                      /* ';' trailer or erroneous EOF */

        if (gif_read_extension(s) < 0)      /* '!' extension introducer */
            return -1;
    }
    return -1;
}
/*
 * Scan block labels until an image descriptor is found and decoded into
 * 'frame'. Extensions are consumed; the trailer, unknown labels, or
 * running out of data yield AVERROR_INVALIDDATA.
 */
static int gif_parse_next_image(GifState *s, AVFrame *frame)
{
    while (s->bytestream < s->bytestream_end) {
        int block_label = bytestream_get_byte(&s->bytestream);

        av_dlog(s->avctx, "gif: code=%02x '%c'\n", block_label, block_label);

        if (block_label == ',')
            return gif_read_image(s, frame); /* image descriptor */

        if (block_label == '!') {            /* extension introducer */
            int ret = gif_read_extension(s);
            if (ret < 0)
                return ret;
            continue;
        }

        /* ';' trailer, or error / erroneous EOF */
        return AVERROR_INVALIDDATA;
    }
    return AVERROR_INVALIDDATA;
}
/*
 * Read one GIF extension block with full bounds checking.
 * The Graphic Control Extension is parsed; all other extensions are
 * skipped sub-block by sub-block.
 * Returns 0 on success, AVERROR_INVALIDDATA on truncated input.
 */
static int gif_read_extension(GifState *s)
{
    int ext_code, ext_len, i, gce_flags, gce_transparent_index;

    /* There must be at least 2 bytes:
     * 1 for extension label and 1 for extension length. */
    if (s->bytestream_end < s->bytestream + 2)
        return AVERROR_INVALIDDATA;

    ext_code = bytestream_get_byte(&s->bytestream);
    ext_len = bytestream_get_byte(&s->bytestream);

    av_dlog(s->avctx, "ext_code=0x%x len=%d\n", ext_code, ext_len);

    switch(ext_code) {
    case GIF_GCE_EXT_LABEL:
        if (ext_len != 4)
            goto discard_ext;

        /* We need at least 5 bytes more: 4 is for extension body
         * and 1 for next block size. */
        if (s->bytestream_end < s->bytestream + 5)
            return AVERROR_INVALIDDATA;

        s->transparent_color_index = -1;
        gce_flags = bytestream_get_byte(&s->bytestream);
        bytestream_get_le16(&s->bytestream);    // delay during which the frame is shown
        gce_transparent_index = bytestream_get_byte(&s->bytestream);
        if (gce_flags & 0x01)
            s->transparent_color_index = gce_transparent_index;
        else
            s->transparent_color_index = -1;
        s->gce_disposal = (gce_flags >> 2) & 0x7;

        av_dlog(s->avctx, "gce_flags=%x tcolor=%d disposal=%d\n",
               gce_flags,
               s->transparent_color_index, s->gce_disposal);

        if (s->gce_disposal > 3) {
            /* Fix: the old code passed ext_len to this log line, so the
             * message reported "4" instead of the offending disposal
             * value; log the real value before resetting it. */
            av_dlog(s->avctx, "invalid value in gce_disposal (%d). Using default value of 0.\n", s->gce_disposal);
            s->gce_disposal = GCE_DISPOSAL_NONE;
        }

        ext_len = bytestream_get_byte(&s->bytestream);
        break;
    }

    /* NOTE: many extension blocks can come after */
 discard_ext:
    while (ext_len != 0) {
        /* There must be at least ext_len bytes and 1 for next block size byte. */
        if (s->bytestream_end < s->bytestream + ext_len + 1)
            return AVERROR_INVALIDDATA;

        for (i = 0; i < ext_len; i++)
            bytestream_get_byte(&s->bytestream);
        ext_len = bytestream_get_byte(&s->bytestream);

        av_dlog(s->avctx, "ext_len1=%d\n", ext_len);
    }
    return 0;
}
/*
 * Top-level PGS packet decoder: walks the packet, dispatching each
 * (type, length, payload) segment to its parser. A DISPLAY_SEGMENT
 * produces the finished AVSubtitle via display_end_segment().
 * Returns the number of bytes consumed (buf_size) or -1 on a packet
 * too short to hold a segment header.
 */
static int decode(AVCodecContext *avctx, void *data, int *data_size,
                  AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;

    const uint8_t *buf_end;
    uint8_t       segment_type;
    int           segment_length;

#ifdef DEBUG_PACKET_CONTENTS
    int i;

    av_log(avctx, AV_LOG_INFO, "PGS sub packet:\n");

    for (i = 0; i < buf_size; i++) {
        av_log(avctx, AV_LOG_INFO, "%02x ", buf[i]);
        if (i % 16 == 15)
            av_log(avctx, AV_LOG_INFO, "\n");
    }

    if (i & 15)
        av_log(avctx, AV_LOG_INFO, "\n");
#endif

    *data_size = 0;

    /* Ensure that we have received at a least a segment code and segment length */
    if (buf_size < 3)
        return -1;

    buf_end = buf + buf_size;

    /* Step through buffer to identify segments */
    while (buf < buf_end) {
        segment_type   = bytestream_get_byte(&buf);
        segment_length = bytestream_get_be16(&buf);

        av_dlog(avctx, "Segment Length %d, Segment Type %x\n", segment_length, segment_type);

        /* Stop on a segment whose declared length overruns the packet
         * (display segments are exempt: their length is not used). */
        if (segment_type != DISPLAY_SEGMENT && segment_length > buf_end - buf)
            break;

        switch (segment_type) {
        case PALETTE_SEGMENT:
            parse_palette_segment(avctx, buf, segment_length);
            break;
        case PICTURE_SEGMENT:
            parse_picture_segment(avctx, buf, segment_length);
            break;
        case PRESENTATION_SEGMENT:
            parse_presentation_segment(avctx, buf, segment_length);
            break;
        case WINDOW_SEGMENT:
            /*
             * Window Segment Structure (No new information provided):
             *     2 bytes: Unknown,
             *     2 bytes: X position of subtitle,
             *     2 bytes: Y position of subtitle,
             *     2 bytes: Width of subtitle,
             *     2 bytes: Height of subtitle.
             */
            break;
        case DISPLAY_SEGMENT:
            /* Assembles everything parsed so far into the AVSubtitle. */
            *data_size = display_end_segment(avctx, data, buf, segment_length);
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown subtitle segment type 0x%x, length %d\n",
                   segment_type, segment_length);
            break;
        }

        buf += segment_length;
    }

    return buf_size;
}
/**
 * Parse the picture segment packet.
 *
 * The picture segment contains details on the sequence id,
 * width, height and Run Length Encoded (RLE) bitmap data.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Enable support for RLE data over multiple packets
 */
static int parse_picture_segment(AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    uint8_t sequence_desc;
    unsigned int rle_bitmap_len, width, height;

    if (buf_size <= 4)
        return -1;
    buf_size -= 4;

    /* skip 3 unknown bytes: Object ID (2 bytes), Version Number */
    buf += 3;

    /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
    sequence_desc = bytestream_get_byte(&buf);

    if (!(sequence_desc & 0x80)) {
        /* Additional RLE data, appended to the continuation buffer. */
        if (buf_size > ctx->picture.rle_remaining_len)
            return -1;

        memcpy(ctx->picture.rle + ctx->picture.rle_data_len, buf, buf_size);
        ctx->picture.rle_data_len += buf_size;
        ctx->picture.rle_remaining_len -= buf_size;

        return 0;
    }

    if (buf_size <= 7)
        return -1;
    buf_size -= 7;

    /* Decode rle bitmap length, stored size includes width/height data */
    rle_bitmap_len = bytestream_get_be24(&buf);
    /* Fix: the stored length must at least cover the 4 width/height bytes
     * that follow; otherwise the subtraction below wraps around and
     * av_fast_malloc is asked for an enormous buffer. */
    if (rle_bitmap_len < 2*2)
        return -1;
    rle_bitmap_len -= 2*2;

    /* Fix: this first fragment may not exceed the declared bitmap size,
     * or the memcpy below overflows the rle allocation. */
    if (buf_size > rle_bitmap_len)
        return -1;

    /* Get bitmap dimensions from data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);

    /* Make sure the bitmap is not too large */
    if (avctx->width < width || avctx->height < height) {
        av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger then video.\n");
        return -1;
    }

    ctx->picture.w = width;
    ctx->picture.h = height;

    av_fast_malloc(&ctx->picture.rle, &ctx->picture.rle_buffer_size, rle_bitmap_len);

    if (!ctx->picture.rle)
        return -1;

    memcpy(ctx->picture.rle, buf, buf_size);
    ctx->picture.rle_data_len = buf_size;
    ctx->picture.rle_remaining_len = rle_bitmap_len - buf_size;

    return 0;
}
/*
 * Read the FLAC file header and all metadata blocks, filling in the
 * stream's codec parameters (STREAMINFO into extradata), chapters
 * (CUESHEET), attached pictures and VorbisComment tags.
 * Returns 0 on success or a negative AVERROR code; RETURN_ERROR jumps
 * to 'fail' which frees the metadata buffer.
 */
static int flac_read_header(AVFormatContext *s)
{
    int ret, metadata_last=0, metadata_type, metadata_size, found_streaminfo=0;
    uint8_t header[4];
    uint8_t *buffer=NULL,*tmp=NULL;
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_FLAC;
    st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
    /* the parameters will be extracted from the compressed bitstream */

    /* if fLaC marker is not found, assume there is no header */
    if (avio_rl32(s->pb) != MKTAG('f','L','a','C')) {
        avio_seek(s->pb, -4, SEEK_CUR);
        return 0;
    }

    /* process metadata blocks */
    while (!url_feof(s->pb) && !metadata_last) {
        /* 4-byte block header: last-flag, type, 24-bit size. */
        avio_read(s->pb, header, 4);
        avpriv_flac_parse_block_header(header, &metadata_last, &metadata_type, &metadata_size);
        switch (metadata_type) {
        /* allocate and read metadata block for supported types */
        case FLAC_METADATA_TYPE_STREAMINFO:
        case FLAC_METADATA_TYPE_CUESHEET:
        case FLAC_METADATA_TYPE_PICTURE:
        case FLAC_METADATA_TYPE_VORBIS_COMMENT:
            buffer = av_mallocz(metadata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!buffer) {
                return AVERROR(ENOMEM);
            }
            if (avio_read(s->pb, buffer, metadata_size) != metadata_size) {
                RETURN_ERROR(AVERROR(EIO));
            }
            break;
        /* skip metadata block for unsupported types */
        default:
            ret = avio_skip(s->pb, metadata_size);
            if (ret < 0)
                return ret;
        }

        if (metadata_type == FLAC_METADATA_TYPE_STREAMINFO) {
            FLACStreaminfo si;
            /* STREAMINFO can only occur once */
            if (found_streaminfo) {
                RETURN_ERROR(AVERROR_INVALIDDATA);
            }
            if (metadata_size != FLAC_STREAMINFO_SIZE) {
                RETURN_ERROR(AVERROR_INVALIDDATA);
            }
            found_streaminfo = 1;
            /* extradata layout: "fLaC" + 4-byte block header + STREAMINFO. */
            st->codec->extradata = av_malloc(metadata_size + 8 + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!st->codec->extradata) {
                RETURN_ERROR(AVERROR(ENOMEM));
            }
            st->codec->extradata_size = metadata_size + 8;
            AV_WL32(st->codec->extradata, MKTAG('f','L','a','C'));
            memcpy(st->codec->extradata + 4, header, 4);
            memcpy(st->codec->extradata + 8, buffer, metadata_size);
            av_freep(&buffer);

            /* get codec params from STREAMINFO header */
            avpriv_flac_parse_streaminfo(st->codec, &si, st->codec->extradata + 8);

            /* set time base and duration */
            if (si.samplerate > 0) {
                avpriv_set_pts_info(st, 64, 1, si.samplerate);
                if (si.samples > 0)
                    st->duration = si.samples;
            }
        } else if (metadata_type == FLAC_METADATA_TYPE_CUESHEET) {
            uint8_t isrc[13];
            uint64_t start;
            const uint8_t *offset;
            int i, chapters, track, ti;
            /* 431 = minimum cuesheet size up to and including the track
             * count byte at offset 395 plus one lead-out track entry. */
            if (metadata_size < 431)
                RETURN_ERROR(AVERROR_INVALIDDATA);
            offset = buffer + 395;
            /* Last track is the lead-out, not a chapter. */
            chapters = bytestream_get_byte(&offset) - 1;
            if (chapters <= 0)
                RETURN_ERROR(AVERROR_INVALIDDATA);
            for (i = 0; i < chapters; i++) {
                /* Each track entry is at least 36 bytes; bounds-check
                 * before reading it. */
                if (offset + 36 - buffer > metadata_size)
                    RETURN_ERROR(AVERROR_INVALIDDATA);
                start = bytestream_get_be64(&offset);
                track = bytestream_get_byte(&offset);
                bytestream_get_buffer(&offset, isrc, 12);
                isrc[12] = 0;
                offset += 14; /* skip flags/reserved up to the index count */
                ti = bytestream_get_byte(&offset);
                if (ti <= 0)
                    RETURN_ERROR(AVERROR_INVALIDDATA);
                offset += ti * 12; /* skip the 12-byte track index points */
                avpriv_new_chapter(s, track, st->time_base, start, AV_NOPTS_VALUE, isrc);
            }
            av_freep(&buffer);
        } else if (metadata_type == FLAC_METADATA_TYPE_PICTURE) {
            ret = parse_picture(s, buffer, metadata_size);
            av_freep(&buffer);
            if (ret < 0) {
                av_log(s, AV_LOG_ERROR, "Error parsing attached picture.\n");
                return ret;
            }
        } else {
            /* STREAMINFO must be the first block */
            if (!found_streaminfo) {
                RETURN_ERROR(AVERROR_INVALIDDATA);
            }
            /* process supported blocks other than STREAMINFO */
            if (metadata_type == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
                /* append VorbisComment to extradata */
                tmp = av_realloc(st->codec->extradata, st->codec->extradata_size + 4 + metadata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!tmp) {
                    RETURN_ERROR(AVERROR(ENOMEM));
                }
                st->codec->extradata = tmp;
                tmp += st->codec->extradata_size;
                memcpy(tmp, header, 4);
                memcpy(tmp + 4, buffer, metadata_size);
                st->codec->extradata_size = st->codec->extradata_size + 4 + metadata_size;
                if (ff_vorbis_comment(s, &s->metadata, buffer, metadata_size)) {
                    av_log(s, AV_LOG_WARNING, "error parsing VorbisComment metadata\n");
                }
            }
            av_freep(&buffer);
        }
    }

    return 0;

fail:
    av_free(buffer);
    return ret;
}
/*
 * Decode one GIF image block: image descriptor, optional local palette,
 * then the LZW-compressed pixel data, written (possibly interlaced)
 * into s->picture.
 * Returns 0 on success, AVERROR(EINVAL) if the image rectangle falls
 * outside the logical screen.
 */
static int gif_read_image(GifState *s)
{
    int left, top, width, height, bits_per_pixel, code_size, flags;
    int is_interleaved, has_local_palette, y, pass, y1, linesize, n, i;
    uint8_t *ptr, *spal, *palette, *ptr1;

    /* NOTE(review): the 9 descriptor bytes and the local palette are read
     * with no check against bytestream_end — confirm the caller
     * guarantees enough data. */
    left = bytestream_get_le16(&s->bytestream);
    top = bytestream_get_le16(&s->bytestream);
    width = bytestream_get_le16(&s->bytestream);
    height = bytestream_get_le16(&s->bytestream);
    flags = bytestream_get_byte(&s->bytestream);
    is_interleaved = flags & 0x40;
    has_local_palette = flags & 0x80;
    bits_per_pixel = (flags & 0x07) + 1;
#ifdef DEBUG
    dprintf(s->avctx, "gif: image x=%d y=%d w=%d h=%d\n", left, top, width, height);
#endif
    if (has_local_palette) {
        bytestream_get_buffer(&s->bytestream, s->local_palette, 3 * (1 << bits_per_pixel));
        palette = s->local_palette;
    } else {
        /* Fall back to the global palette and its pixel depth. */
        palette = s->global_palette;
        bits_per_pixel = s->bits_per_pixel;
    }

    /* verify that all the image is inside the screen dimensions */
    if (left + width > s->screen_width ||
        top + height > s->screen_height)
        return AVERROR(EINVAL);

    /* build the palette (opaque ARGB entries from 3-byte RGB triplets) */
    n = (1 << bits_per_pixel);
    spal = palette;
    for(i = 0; i < n; i++) {
        s->image_palette[i] = (0xff << 24) | AV_RB24(spal);
        spal += 3;
    }
    /* Remaining entries: opaque black. */
    for(; i < 256; i++)
        s->image_palette[i] = (0xff << 24);
    /* handle transparency */
    if (s->transparent_color_index >= 0)
        s->image_palette[s->transparent_color_index] = 0;

    /* now get the image data */
    code_size = bytestream_get_byte(&s->bytestream);
    ff_lzw_decode_init(s->lzw, code_size, s->bytestream,
                       s->bytestream_end - s->bytestream, FF_LZW_GIF);

    /* read all the image */
    linesize = s->picture.linesize[0];
    ptr1 = s->picture.data[0] + top * linesize + left;
    ptr = ptr1;
    pass = 0;
    y1 = 0;
    for (y = 0; y < height; y++) {
        ff_lzw_decode(s->lzw, ptr, width);
        if (is_interleaved) {
            /* GIF interlacing: pass 0/1 every 8th row (from 0 then 4),
             * pass 2 every 4th row from 2, pass 3 every 2nd row from 1. */
            switch(pass) {
            default:
            case 0:
            case 1:
                y1 += 8;
                ptr += linesize * 8;
                if (y1 >= height) {
                    y1 = pass ? 2 : 4;
                    ptr = ptr1 + linesize * y1;
                    pass++;
                }
                break;
            case 2:
                y1 += 4;
                ptr += linesize * 4;
                if (y1 >= height) {
                    y1 = 1;
                    ptr = ptr1 + linesize;
                    pass++;
                }
                break;
            case 3:
                y1 += 2;
                ptr += linesize * 2;
                break;
            }
        } else {
            ptr += linesize;
        }
    }
    /* read the garbage data until end marker is found */
    ff_lzw_decode_tail(s->lzw);
    /* Resynchronize our cursor with where the LZW decoder stopped. */
    s->bytestream = ff_lzw_cur_ptr(s->lzw);
    return 0;
}
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Implement cropping
 */
static void parse_presentation_segment(AVCodecContext *avctx,
                                       const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    /* Video dimensions lead the segment. */
    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);

    uint16_t object_index;

    av_dlog(avctx, "Video Dimensions %dx%d\n", w, h);
    if (av_image_check_size(w, h, 0, avctx) >= 0)
        avcodec_set_dimensions(avctx, w, h);

    /* Skip 1 bytes of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /*
     * Skip 3 bytes of unknown:
     *     state
     *     palette_update_flag (0x80),
     *     palette_id_to_use,
     */
    buf += 3;

    ctx->presentation.object_count = bytestream_get_byte(&buf);
    if (!ctx->presentation.object_count)
        return;

    /* Verify that enough bytes are remaining for all of the objects.
     * 11 bytes of fixed header consumed so far; each object reference
     * below consumes 8 bytes (2 + 1 + 1 + 2 + 2). */
    buf_size -= 11;
    if (buf_size < ctx->presentation.object_count * 8) {
        ctx->presentation.object_count = 0;
        return;
    }

    /* Replace any object list from a previous presentation segment. */
    av_freep(&ctx->presentation.objects);
    ctx->presentation.objects = av_malloc(sizeof(PGSSubPictureReference) * ctx->presentation.object_count);
    if (!ctx->presentation.objects) {
        ctx->presentation.object_count = 0;
        return;
    }

    for (object_index = 0; object_index < ctx->presentation.object_count; ++object_index) {
        PGSSubPictureReference *reference = &ctx->presentation.objects[object_index];
        reference->picture_id = bytestream_get_be16(&buf);

        /*
         * Skip window_id_ref, then read
         * composition_flag (0x80 - object cropped, 0x40 - object forced)
         */
        buf++;
        reference->composition = bytestream_get_byte(&buf);

        reference->x = bytestream_get_be16(&buf);
        reference->y = bytestream_get_be16(&buf);

        /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/
        av_dlog(avctx, "Subtitle Placement ID=%d, x=%d, y=%d\n", reference->picture_id, reference->x, reference->y);

        /* Out-of-bounds placement is reported and reset to the origin. */
        if (reference->x > avctx->width || reference->y > avctx->height) {
            av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
                   reference->x, reference->y, avctx->width, avctx->height);
            reference->x = 0;
            reference->y = 0;
        }
    }
}
// decodes the frame static int utah_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { UTAHContext *context = avctx->priv_data; const uint8_t *buffer = avpkt->data; uint8_t* pic_buffer; int buffer_size = avpkt->size; int height = 0; int width = 0; int ret; int hsize = 14; int line = 0; int n_bytes = 0; AVFrame *picture = data; AVFrame *pic = &context->picture; avctx->pix_fmt = AV_PIX_FMT_RGB24; // ensure the image has the correct header size if(buffer_size < hsize) { av_log(avctx, AV_LOG_ERROR, "Image is not a .utah image(invalid hsize size)\n"); return AVERROR_INVALIDDATA; } // ensure the image is a utah image if(bytestream_get_byte(&buffer) != 'U' || bytestream_get_byte(&buffer)!='T') { av_log(avctx, AV_LOG_ERROR, "Invalid .utah image\n"); return AVERROR_INVALIDDATA; } height = bytestream_get_le32(&buffer);// Get the height from the packet buffer width = bytestream_get_le32(&buffer);// get the width from the packet buffer avctx->height = height; line = bytestream_get_le32(&buffer); avctx->width =width; // get the number of bytes n_bytes = height*line + hsize; if(n_bytes != buffer_size) { av_log(avctx, AV_LOG_ERROR, "Invalid image size"); return AVERROR_INVALIDDATA; } if (pic->data[0]) { avctx->release_buffer(avctx, pic); } pic->reference = 0; if ((ret = ff_get_buffer(avctx, pic)) < 0) { return ret; } memset(pic->data[0], 0, height*pic->linesize[0]); pic_buffer = pic->data[0]; for(int row = 0; row<fheight;row++) { memcpy(pic_buffer, buffer, line); pic_buffer += pic->linesize[0]; buffer += line; } *picture = context->picture; *got_frame = 1; return buffer_size; }
/**
 * Decode one ANM (Deluxe Paint Animation) frame.
 *
 * The packet starts with a record type byte (only 0x42 is supported) and
 * a padding byte, followed by RLE-style opcodes handled via the OP macro.
 *
 * @param avctx     codec context
 * @param data      output AVFrame
 * @param data_size set to sizeof(AVFrame) on success
 * @param avpkt     input packet
 * @return number of bytes consumed, or a negative error code
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    AnmContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    const int buf_size = avpkt->size;
    const uint8_t *buf_end = buf + buf_size;
    uint8_t *dst, *dst_end;
    int count;

    /* Fix: the record-type/padding bytes and the 4-byte header skip below
     * require at least 4 bytes; reject smaller packets to avoid overread. */
    if (buf_size < 4)
        return AVERROR_INVALIDDATA;

    if(avctx->reget_buffer(avctx, &s->frame) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    dst = s->frame.data[0];
    dst_end = s->frame.data[0] + s->frame.linesize[0]*avctx->height;

    if (buf[0] != 0x42) {
        av_log_ask_for_sample(avctx, "unknown record type\n");
        return buf_size;
    }
    if (buf[1]) {
        av_log_ask_for_sample(avctx, "padding bytes not supported\n");
        return buf_size;
    }
    buf += 4;

    s->x = 0;
    do {
        /* if statements are ordered by probability */
#define OP(buf, pixel, count) \
    op(&dst, dst_end, (buf), buf_end, (pixel), (count), &s->x, avctx->width, s->frame.linesize[0])

        int type = bytestream_get_byte(&buf);
        count = type & 0x7F;
        type >>= 7;
        if (count) {
            /* short op: copy (type==0) or skip (type==1) `count` pixels */
            if (OP(type ? NULL : &buf, -1, count)) break;
        } else if (!type) {
            /* short fill: next byte is count, following byte the pixel */
            int pixel;
            count = bytestream_get_byte(&buf);  /* count==0 gives nop */
            pixel = bytestream_get_byte(&buf);
            if (OP(NULL, pixel, count)) break;
        } else {
            /* long op: 16-bit LE word, low 14 bits count, top 2 bits type */
            int pixel;
            type = bytestream_get_le16(&buf);
            count = type & 0x3FFF;
            type >>= 14;
            if (!count) {
                if (type == 0)
                    break; // stop
                if (type == 2) {
                    av_log_ask_for_sample(avctx, "unknown opcode");
                    return AVERROR_INVALIDDATA;
                }
                continue;
            }
            pixel = type == 3 ? bytestream_get_byte(&buf) : -1;
            if (type == 1) count += 0x4000; /* long skip extends the range */
            if (OP(type == 2 ? &buf : NULL, pixel, count)) break;
        }
    } while (buf + 1 < buf_end);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
/**
 * Decoder for the mpff format. Copies pixel data from the passed-in
 * AVPacket into the output frame.
 *
 * Packet layout: 4-byte magic "MPFF", 32-bit LE width, 32-bit LE height,
 * then width*height bytes of RGB8 pixel data (one byte per pixel, no
 * per-row padding in the input).
 *
 * @param avctx     codec context (dimensions are taken from the packet)
 * @param data      output AVFrame
 * @param got_frame set to 1 when a frame was produced
 * @param avpkt     input packet
 * @return number of bytes consumed, or a negative error code
 */
static int mpff_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    AVFrame *p = data;
    int width, height, linesize, ret;
    int i;
    uint8_t *ptr;

    /* Fix: validate that the fixed 12-byte header is present before
     * reading it (magic + width + height). */
    if (buf_size < 12) {
        av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
        return AVERROR_INVALIDDATA;
    }

    /* Check that the header declares the mpff format. */
    if (bytestream_get_byte(&buf) != 'M' ||
        bytestream_get_byte(&buf) != 'P' ||
        bytestream_get_byte(&buf) != 'F' ||
        bytestream_get_byte(&buf) != 'F') {
        av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
        return AVERROR_INVALIDDATA;
    }

    width  = bytestream_get_le32(&buf);
    height = bytestream_get_le32(&buf);

    /* Fix: reject dimensions whose RGB8 payload (1 byte/pixel) does not
     * fit in the packet; the 64-bit product also guards against
     * width*height signed overflow. Previously the copy loop below could
     * read past the end of the packet. */
    if (width <= 0 || height <= 0 ||
        (int64_t)width * height > (int64_t)buf_size - 12) {
        av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->width  = width;
    avctx->height = height;
    /* RGB8 is the only pixel fmt supported (see structure) */
    avctx->pix_fmt = AV_PIX_FMT_RGB8;

    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    ptr      = p->data[0];
    linesize = p->linesize[0];

    /* Copy one row per iteration: the input packs rows tightly
     * (width bytes each) while the frame uses its own stride. */
    for (i = 0; i < height; i++) {
        memcpy(ptr, buf, width);
        buf += width;
        ptr += linesize;
    }

    *got_frame = 1;
    return buf_size;
}
/**
 * Decode one GIF image block into s->picture.
 *
 * Parses the Image Descriptor (position, size, flags), selects the local
 * or global palette, applies the previous frame's disposal method, then
 * LZW-decodes the pixel data line by line (handling the 4-pass interlace
 * order when the interlace flag is set).
 *
 * @param s decoder state; reads from s->bytestream, writes s->picture
 * @return 0 on success, negative AVERROR code on malformed input
 */
static int gif_read_image(GifState *s)
{
    int left, top, width, height, bits_per_pixel, code_size, flags;
    int is_interleaved, has_local_palette, y, pass, y1, linesize, pal_size;
    uint32_t *ptr, *pal, *px, *pr, *ptr1;
    int ret;
    uint8_t *idx;

    /* At least 9 bytes of Image Descriptor. */
    if (s->bytestream_end < s->bytestream + 9)
        return AVERROR_INVALIDDATA;

    /* Image Descriptor: position, size and the packed flags byte. */
    left   = bytestream_get_le16(&s->bytestream);
    top    = bytestream_get_le16(&s->bytestream);
    width  = bytestream_get_le16(&s->bytestream);
    height = bytestream_get_le16(&s->bytestream);
    flags  = bytestream_get_byte(&s->bytestream);
    is_interleaved    = flags & 0x40; /* interlace flag */
    has_local_palette = flags & 0x80; /* local color table flag */
    bits_per_pixel    = (flags & 0x07) + 1;

    av_dlog(s->avctx, "image x=%d y=%d w=%d h=%d\n", left, top, width, height);

    if (has_local_palette) {
        /* local palette: 3 bytes (RGB) per entry follow the descriptor */
        pal_size = 1 << bits_per_pixel;
        if (s->bytestream_end < s->bytestream + pal_size * 3)
            return AVERROR_INVALIDDATA;
        gif_read_palette(&s->bytestream, s->local_palette, pal_size);
        pal = s->local_palette;
    } else {
        if (!s->has_global_palette) {
            av_log(s->avctx, AV_LOG_FATAL, "picture doesn't have either global or local palette.\n");
            return AVERROR_INVALIDDATA;
        }
        pal = s->global_palette;
    }

    if (s->keyframe) {
        if (s->transparent_color_index == -1 && s->has_global_palette) {
            /* transparency wasn't set before the first frame, fill with background color */
            gif_fill(&s->picture, s->bg_color);
        } else {
            /* otherwise fill with transparent color.
             * this is necessary since by default picture filled with 0x80808080. */
            gif_fill(&s->picture, s->trans_color);
        }
    }

    /* verify that all the image is inside the screen dimensions */
    if (left + width > s->screen_width || top + height > s->screen_height)
        return AVERROR(EINVAL);

    /* process disposal method of the PREVIOUS frame before drawing */
    if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
        /* erase the previous frame's rectangle with the stored bg color */
        gif_fill_rect(&s->picture, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    } else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) {
        /* restore the pixels saved before the previous frame was drawn */
        gif_copy_img_rect(s->stored_img, (uint32_t *)s->picture.data[0],
                          s->picture.linesize[0] / sizeof(uint32_t),
                          s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    }

    s->gce_prev_disposal = s->gce_disposal;

    if (s->gce_disposal != GCE_DISPOSAL_NONE) {
        /* remember this frame's rectangle for the next disposal step */
        s->gce_l = left;  s->gce_t = top;
        s->gce_w = width; s->gce_h = height;

        if (s->gce_disposal == GCE_DISPOSAL_BACKGROUND) {
            if (s->background_color_index == s->transparent_color_index)
                s->stored_bg_color = s->trans_color;
            else
                s->stored_bg_color = s->bg_color;
        } else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) {
            /* save the area about to be overwritten so it can be restored */
            av_fast_malloc(&s->stored_img, &s->stored_img_size,
                           s->picture.linesize[0] * s->picture.height);
            if (!s->stored_img)
                return AVERROR(ENOMEM);
            gif_copy_img_rect((uint32_t *)s->picture.data[0], s->stored_img,
                              s->picture.linesize[0] / sizeof(uint32_t),
                              left, top, width, height);
        }
    }

    /* Expect at least 2 bytes: 1 for lzw code size and 1 for block size. */
    if (s->bytestream_end < s->bytestream + 2)
        return AVERROR_INVALIDDATA;

    /* now get the image data */
    code_size = bytestream_get_byte(&s->bytestream);
    if ((ret = ff_lzw_decode_init(s->lzw, code_size, s->bytestream,
                                  s->bytestream_end - s->bytestream, FF_LZW_GIF)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "LZW init failed\n");
        return ret;
    }

    /* read all the image */
    linesize = s->picture.linesize[0] / sizeof(uint32_t);
    ptr1 = (uint32_t *)s->picture.data[0] + top * linesize + left;
    ptr = ptr1;
    pass = 0;
    y1 = 0;
    for (y = 0; y < height; y++) {
        /* decode one row of palette indices into s->idx_line */
        if (ff_lzw_decode(s->lzw, s->idx_line, width) == 0)
            goto decode_tail;
        /* expand indices to 32-bit colors, skipping transparent pixels */
        pr = ptr + width;
        for (px = ptr, idx = s->idx_line; px < pr; px++, idx++) {
            if (*idx != s->transparent_color_index)
                *px = pal[*idx];
        }
        if (is_interleaved) {
            /* GIF interlace: rows arrive in 4 passes with strides
             * 8, 8 (offset 4), 4 (offset 2) and 2 (offset 1) */
            switch(pass) {
            default:
            case 0:
            case 1:
                y1 += 8;
                ptr += linesize * 8;
                if (y1 >= height) {
                    y1 = pass ? 2 : 4;
                    ptr = ptr1 + linesize * y1;
                    pass++;
                }
                break;
            case 2:
                y1 += 4;
                ptr += linesize * 4;
                if (y1 >= height) {
                    y1 = 1;
                    ptr = ptr1 + linesize;
                    pass++;
                }
                break;
            case 3:
                y1 += 2;
                ptr += linesize * 2;
                break;
            }
        } else {
            ptr += linesize;
        }
    }

 decode_tail:
    /* read the garbage data until end marker is found */
    ff_lzw_decode_tail(s->lzw);
    s->bytestream = ff_lzw_cur_ptr(s->lzw);

    /* Graphic Control Extension's scope is single frame.
     * Remove its influence. */
    s->transparent_color_index = -1;
    s->gce_disposal = GCE_DISPOSAL_NONE;

    return 0;
}
static int dpcm_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; const uint8_t *buf_end = buf + buf_size; DPCMContext *s = avctx->priv_data; int out = 0; int predictor[2]; int ch = 0; int stereo = s->channels - 1; int16_t *output_samples = data; /* calculate output size */ switch(avctx->codec->id) { case CODEC_ID_ROQ_DPCM: out = buf_size - 8; break; case CODEC_ID_INTERPLAY_DPCM: out = buf_size - 6 - s->channels; break; case CODEC_ID_XAN_DPCM: out = buf_size - 2 * s->channels; break; case CODEC_ID_SOL_DPCM: if (avctx->codec_tag != 3) out = buf_size * 2; else out = buf_size; break; } out *= av_get_bytes_per_sample(avctx->sample_fmt); if (out <= 0) { av_log(avctx, AV_LOG_ERROR, "packet is too small\n"); return AVERROR(EINVAL); } if (*data_size < out) { av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n"); return AVERROR(EINVAL); } switch(avctx->codec->id) { case CODEC_ID_ROQ_DPCM: buf += 6; if (stereo) { predictor[1] = (int16_t)(bytestream_get_byte(&buf) << 8); predictor[0] = (int16_t)(bytestream_get_byte(&buf) << 8); } else { predictor[0] = (int16_t)bytestream_get_le16(&buf); } /* decode the samples */ while (buf < buf_end) { predictor[ch] += s->roq_square_array[*buf++]; predictor[ch] = av_clip_int16(predictor[ch]); *output_samples++ = predictor[ch]; /* toggle channel */ ch ^= stereo; } break; case CODEC_ID_INTERPLAY_DPCM: buf += 6; /* skip over the stream mask and stream length */ for (ch = 0; ch < s->channels; ch++) { predictor[ch] = (int16_t)bytestream_get_le16(&buf); *output_samples++ = predictor[ch]; } ch = 0; while (buf < buf_end) { predictor[ch] += interplay_delta_table[*buf++]; predictor[ch] = av_clip_int16(predictor[ch]); *output_samples++ = predictor[ch]; /* toggle channel */ ch ^= stereo; } break; case CODEC_ID_XAN_DPCM: { int shift[2] = { 4, 4 }; for (ch = 0; ch < s->channels; ch++) predictor[ch] = (int16_t)bytestream_get_le16(&buf); ch = 0; while (buf < 
buf_end) { uint8_t n = *buf++; int16_t diff = (n & 0xFC) << 8; if ((n & 0x03) == 3) shift[ch]++; else shift[ch] -= (2 * (n & 3)); /* saturate the shifter to a lower limit of 0 */ if (shift[ch] < 0) shift[ch] = 0; diff >>= shift[ch]; predictor[ch] += diff; predictor[ch] = av_clip_int16(predictor[ch]); *output_samples++ = predictor[ch]; /* toggle channel */ ch ^= stereo; } break; } case CODEC_ID_SOL_DPCM: if (avctx->codec_tag != 3) { uint8_t *output_samples_u8 = data; while (buf < buf_end) { uint8_t n = *buf++; s->sample[0] += s->sol_table[n >> 4]; s->sample[0] = av_clip_uint8(s->sample[0]); *output_samples_u8++ = s->sample[0]; s->sample[stereo] += s->sol_table[n & 0x0F]; s->sample[stereo] = av_clip_uint8(s->sample[stereo]); *output_samples_u8++ = s->sample[stereo]; } } else { while (buf < buf_end) {
/**
 * Read a FLAC file header and all leading metadata blocks.
 *
 * Creates the audio stream, then walks the metadata blocks after the
 * "fLaC" marker: STREAMINFO supplies codec parameters and becomes the
 * stream extradata, CUESHEET becomes chapters, PICTURE becomes an
 * attached picture, and VORBIS_COMMENT is parsed into s->metadata.
 * Unsupported block types are skipped.
 *
 * Ownership note: `buffer` holds the current block; it is either handed
 * off (extradata) or freed in each branch. The `fail` label frees it on
 * error paths reached via RETURN_ERROR.
 *
 * @param s format context to populate
 * @return 0 on success (also when no "fLaC" marker is found, in which
 *         case the raw parser takes over), negative AVERROR on failure
 */
static int flac_read_header(AVFormatContext *s)
{
    int ret, metadata_last=0, metadata_type, metadata_size, found_streaminfo=0;
    uint8_t header[4];
    uint8_t *buffer=NULL;
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id = AV_CODEC_ID_FLAC;
    st->need_parsing = AVSTREAM_PARSE_FULL_RAW;
    /* the parameters will be extracted from the compressed bitstream */

    /* if fLaC marker is not found, assume there is no header */
    if (avio_rl32(s->pb) != MKTAG('f','L','a','C')) {
        avio_seek(s->pb, -4, SEEK_CUR);
        return 0;
    }

    /* process metadata blocks */
    while (!avio_feof(s->pb) && !metadata_last) {
        avio_read(s->pb, header, 4);
        flac_parse_block_header(header, &metadata_last, &metadata_type, &metadata_size);
        switch (metadata_type) {
        /* allocate and read metadata block for supported types */
        case FLAC_METADATA_TYPE_STREAMINFO:
        case FLAC_METADATA_TYPE_CUESHEET:
        case FLAC_METADATA_TYPE_PICTURE:
        case FLAC_METADATA_TYPE_VORBIS_COMMENT:
            buffer = av_mallocz(metadata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!buffer) {
                return AVERROR(ENOMEM);
            }
            if (avio_read(s->pb, buffer, metadata_size) != metadata_size) {
                RETURN_ERROR(AVERROR(EIO));
            }
            break;
        /* skip metadata block for unsupported types */
        default:
            ret = avio_skip(s->pb, metadata_size);
            if (ret < 0)
                return ret;
        }

        if (metadata_type == FLAC_METADATA_TYPE_STREAMINFO) {
            FLACStreaminfo si;
            /* STREAMINFO can only occur once */
            if (found_streaminfo) {
                RETURN_ERROR(AVERROR_INVALIDDATA);
            }
            if (metadata_size != FLAC_STREAMINFO_SIZE) {
                RETURN_ERROR(AVERROR_INVALIDDATA);
            }
            found_streaminfo = 1;
            /* ownership of buffer moves to extradata; NULL it so the
             * fail path doesn't double-free */
            st->codec->extradata      = buffer;
            st->codec->extradata_size = metadata_size;
            buffer = NULL;

            /* get codec params from STREAMINFO header */
            avpriv_flac_parse_streaminfo(st->codec, &si, st->codec->extradata);

            /* set time base and duration */
            if (si.samplerate > 0) {
                avpriv_set_pts_info(st, 64, 1, si.samplerate);
                if (si.samples > 0)
                    st->duration = si.samples;
            }
        } else if (metadata_type == FLAC_METADATA_TYPE_CUESHEET) {
            uint8_t isrc[13];
            uint64_t start;
            const uint8_t *offset;
            int i, chapters, track, ti;
            /* 431 = fixed cuesheet prelude (395) + track count byte +
             * at least one 36-byte lead-out track entry */
            if (metadata_size < 431)
                RETURN_ERROR(AVERROR_INVALIDDATA);
            offset = buffer + 395;
            chapters = bytestream_get_byte(&offset) - 1; /* minus lead-out */
            if (chapters <= 0)
                RETURN_ERROR(AVERROR_INVALIDDATA);
            for (i = 0; i < chapters; i++) {
                /* bounds-check the 36-byte track entry before parsing */
                if (offset + 36 - buffer > metadata_size)
                    RETURN_ERROR(AVERROR_INVALIDDATA);
                start = bytestream_get_be64(&offset);
                track = bytestream_get_byte(&offset);
                bytestream_get_buffer(&offset, isrc, 12);
                isrc[12] = 0;
                offset += 14; /* skip flags + reserved bytes */
                ti = bytestream_get_byte(&offset); /* index point count */
                if (ti <= 0)
                    RETURN_ERROR(AVERROR_INVALIDDATA);
                offset += ti * 12; /* skip the index points */
                avpriv_new_chapter(s, track, st->time_base, start, AV_NOPTS_VALUE, isrc);
            }
            av_freep(&buffer);
        } else if (metadata_type == FLAC_METADATA_TYPE_PICTURE) {
            ret = ff_flac_parse_picture(s, buffer, metadata_size);
            av_freep(&buffer);
            if (ret < 0) {
                av_log(s, AV_LOG_ERROR, "Error parsing attached picture.\n");
                return ret;
            }
        } else {
            /* STREAMINFO must be the first block */
            if (!found_streaminfo) {
                RETURN_ERROR(AVERROR_INVALIDDATA);
            }
            /* process supported blocks other than STREAMINFO */
            if (metadata_type == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
                AVDictionaryEntry *chmask;
                ret = ff_vorbis_comment(s, &s->metadata, buffer, metadata_size, 1);
                if (ret < 0) {
                    av_log(s, AV_LOG_WARNING, "error parsing VorbisComment metadata\n");
                } else if (ret > 0) {
                    s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
                }

                /* parse the channels mask if present */
                chmask = av_dict_get(s->metadata, "WAVEFORMATEXTENSIBLE_CHANNEL_MASK", NULL, 0);
                if (chmask) {
                    /* NOTE(review): strtol may truncate masks above LONG_MAX
                     * on 32-bit targets — confirm whether strtoull is wanted */
                    uint64_t mask = strtol(chmask->value, NULL, 0);
                    if (!mask || mask & ULLN(~0x3ffff)) {
                        av_log(s, AV_LOG_WARNING, "Invalid value of WAVEFORMATEXTENSIBLE_CHANNEL_MASK\n");
                    } else {
                        st->codec->channel_layout = mask;
                        av_dict_set(&s->metadata, "WAVEFORMATEXTENSIBLE_CHANNEL_MASK", NULL, 0);
                    }
                }
            }
            av_freep(&buffer);
        }
    }

    ret = ff_replaygain_export(st, s->metadata);
    if (ret < 0)
        return ret;

    return 0;

fail:
    /* reached via RETURN_ERROR: release the pending metadata block */
    av_free(buffer);
    return ret;
}
/**
 * Decode one SGI image.
 *
 * Parses the 512-byte SGI header (magic, compression flag, bytes per
 * channel, dimensionality, width/height/depth), allocates the output
 * frame, then dispatches to the RLE or uncompressed reader.
 *
 * @param avctx     codec context
 * @param data      output AVFrame
 * @param data_size set to sizeof(AVPicture) on success
 * @param in_buf    input buffer (whole SGI file/packet)
 * @param buf_size  size of in_buf in bytes
 * @return buf_size on success, -1 on any error
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *in_buf, int buf_size)
{
    SgiState *s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame *p = &s->picture;
    const uint8_t *in_end = in_buf + buf_size;
    unsigned int dimension, bytes_per_channel, rle;
    int ret = 0;
    uint8_t *out_buf, *out_end;

    if (buf_size < SGI_HEADER_SIZE){
        av_log(avctx, AV_LOG_ERROR, "buf_size too small (%d)\n", buf_size);
        return -1;
    }

    /* Test for SGI magic. */
    if (bytestream_get_be16(&in_buf) != SGI_MAGIC) {
        av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
        return -1;
    }

    /* Header fields, in file order (12 bytes consumed so far incl. magic). */
    rle               = bytestream_get_byte(&in_buf); /* nonzero = RLE data */
    bytes_per_channel = bytestream_get_byte(&in_buf);
    dimension         = bytestream_get_be16(&in_buf);
    s->width  = bytestream_get_be16(&in_buf);
    s->height = bytestream_get_be16(&in_buf);
    s->depth  = bytestream_get_be16(&in_buf); /* number of channels */

    /* only 8-bit samples are supported here */
    if (bytes_per_channel != 1) {
        av_log(avctx, AV_LOG_ERROR, "wrong channel number\n");
        return -1;
    }

    /* Check for supported image dimensions. */
    if (dimension != 2 && dimension != 3) {
        av_log(avctx, AV_LOG_ERROR, "wrong dimension number\n");
        return -1;
    }

    /* map channel count to the output pixel format */
    if (s->depth == SGI_GRAYSCALE) {
        avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (s->depth == SGI_RGB) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else if (s->depth == SGI_RGBA) {
        avctx->pix_fmt = PIX_FMT_RGBA;
    } else {
        av_log(avctx, AV_LOG_ERROR, "wrong picture format\n");
        return -1;
    }

    if (avcodec_check_dimensions(avctx, s->width, s->height))
        return -1;
    avcodec_set_dimensions(avctx, s->width, s->height);

    /* release any buffer held from a previous frame */
    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed.\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;
    out_buf = p->data[0];
    out_end = out_buf + p->linesize[0] * s->height;

    s->linesize = p->linesize[0];

    /* Skip header: 12 bytes were already consumed by the field reads
     * above, so advance by the remainder of the fixed-size header. */
    in_buf += SGI_HEADER_SIZE - 12;
    if (rle) {
        ret = read_rle_sgi(out_end, in_buf, in_end, s);
    } else {
        ret = read_uncompressed_sgi(out_buf, out_end, in_buf, in_end, s);
    }

    if (ret == 0) {
        *picture = s->picture;
        *data_size = sizeof(AVPicture);
        return buf_size;
    } else {
        return -1;
    }
}