static void ff_amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *data_end) { unsigned int size; char buf[1024]; if (data >= data_end) return; switch (*data++) { case AMF_DATA_TYPE_NUMBER: av_log(ctx, AV_LOG_DEBUG, " number %g\n", av_int2dbl(AV_RB64(data))); return; case AMF_DATA_TYPE_BOOL: av_log(ctx, AV_LOG_DEBUG, " bool %d\n", *data); return; case AMF_DATA_TYPE_STRING: case AMF_DATA_TYPE_LONG_STRING: if (data[-1] == AMF_DATA_TYPE_STRING) { size = bytestream_get_be16(&data); } else { size = bytestream_get_be32(&data); } size = FFMIN(size, sizeof(buf) - 1); memcpy(buf, data, size); buf[size] = 0; av_log(ctx, AV_LOG_DEBUG, " string '%s'\n", buf); return; case AMF_DATA_TYPE_NULL: av_log(ctx, AV_LOG_DEBUG, " NULL\n"); return; case AMF_DATA_TYPE_ARRAY: data += 4; case AMF_DATA_TYPE_OBJECT: av_log(ctx, AV_LOG_DEBUG, " {\n"); for (;;) { int t; size = bytestream_get_be16(&data); av_strlcpy(buf, data, FFMIN(sizeof(buf), size + 1)); if (!size) { av_log(ctx, AV_LOG_DEBUG, " }\n"); data++; break; } if (size >= data_end - data) return; data += size; av_log(ctx, AV_LOG_DEBUG, " %s: ", buf); ff_amf_tag_contents(ctx, data, data_end); t = ff_amf_tag_size(data, data_end); if (t < 0 || t >= data_end - data) return; data += t; } return; case AMF_DATA_TYPE_OBJECT_END: av_log(ctx, AV_LOG_DEBUG, " }\n"); return; default: return; } }
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Implement cropping
 * @todo TODO: Implement forcing of subtitles
 * @todo TODO: Blanking of subtitle
 */
static void parse_presentation_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;
    int x, y;
    uint8_t block;

    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);

    /* NOTE(review): buf_size is never checked against the bytes read below
     * (up to 18) — presumably the caller validated the segment length;
     * TODO confirm. */
    dprintf(avctx, "Video Dimensions %dx%d\n", w, h);
    if (av_image_check_size(w, h, 0, avctx) >= 0)
        avcodec_set_dimensions(avctx, w, h);

    /* Skip 1 bytes of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /* Next byte is the state. */
    block = bytestream_get_byte(&buf); /* fixed: removed stray ';' */
    if (block == 0x80) {
        /*
         * Skip 7 bytes of unknown:
         *     palette_update_flag (0x80),
         *     palette_id_to_use,
         *     Object Number (if > 0 determines if more data to process),
         *     object_id_ref (2 bytes),
         *     window_id_ref,
         *     composition_flag (0x80 - object cropped, 0x40 - object forced)
         */
        buf += 7;

        x = bytestream_get_be16(&buf);
        y = bytestream_get_be16(&buf);

        /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/

        dprintf(avctx, "Subtitle Placement x=%d, y=%d\n", x, y);

        if (x > avctx->width || y > avctx->height) {
            av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
                   x, y, avctx->width, avctx->height);
            x = 0;
            y = 0;
        }

        /* Fill in dimensions */
        ctx->presentation.x = x;
        ctx->presentation.y = y;
    } else if (block == 0x00) {
        /* TODO: Blank context as subtitle should not be displayed.
         *       If the subtitle is blanked now the subtitle is not
         *       on screen long enough to read, due to a delay in
         *       initial display timing.
         */
    }
}
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Implement cropping
 * @todo TODO: Implement forcing of subtitles
 */
static void parse_presentation_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;
    int sub_x, sub_y;
    int video_w, video_h;

    video_w = bytestream_get_be16(&buf);
    video_h = bytestream_get_be16(&buf);

    av_dlog(avctx, "Video Dimensions %dx%d\n", video_w, video_h);
    if (av_image_check_size(video_w, video_h, 0, avctx) >= 0)
        avcodec_set_dimensions(avctx, video_w, video_h);

    /* Skip 1 bytes of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /*
     * Skip 3 bytes of unknown:
     *     state
     *     palette_update_flag (0x80),
     *     palette_id_to_use,
     */
    buf += 3;

    ctx->presentation.object_number = bytestream_get_byte(&buf);
    if (!ctx->presentation.object_number)
        return;

    /*
     * Skip 4 bytes of unknown:
     *     object_id_ref (2 bytes),
     *     window_id_ref,
     *     composition_flag (0x80 - object cropped, 0x40 - object forced)
     */
    buf += 4;

    sub_x = bytestream_get_be16(&buf);
    sub_y = bytestream_get_be16(&buf);

    /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/

    av_dlog(avctx, "Subtitle Placement x=%d, y=%d\n", sub_x, sub_y);

    if (sub_x > avctx->width || sub_y > avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
               sub_x, sub_y, avctx->width, avctx->height);
        sub_x = 0;
        sub_y = 0;
    }

    /* Fill in dimensions */
    ctx->presentation.x = sub_x;
    ctx->presentation.y = sub_y;
}
/**
 * Parses the picture segment packet.
 *
 * The picture segment contains details on the sequence id,
 * width, height and Run Length Encoded (RLE) bitmap data.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Enable support for RLE data over multiple packets
 */
static int parse_picture_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    uint8_t sequence_desc;
    unsigned int rle_bitmap_len, width, height;

    /* The fixed header below is 11 bytes.  Reject shorter packets up front;
     * without this, "buf_size - 7" underflows in the unsigned comparison
     * below and lets a short packet through. */
    if (buf_size < 11)
        return -1;

    /* skip 3 unknown bytes: Object ID (2 bytes), Version Number */
    buf += 3;

    /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
    sequence_desc = bytestream_get_byte(&buf);

    if (!(sequence_desc & 0x80)) {
        av_log(avctx, AV_LOG_ERROR, "Decoder does not support object data over multiple packets.\n");
        return -1;
    }

    /* Decode rle bitmap length; the stored value also counts the
     * width/height fields that follow it (2 bytes each). */
    rle_bitmap_len = bytestream_get_be24(&buf);
    if (rle_bitmap_len < 2 * 2)
        return -1;
    rle_bitmap_len -= 2 * 2;

    /* Check to ensure we have enough data for rle_bitmap_length if just a single packet */
    if (rle_bitmap_len > buf_size - 11) {
        av_log(avctx, AV_LOG_ERROR, "Not enough RLE data for specified length of %d.\n", rle_bitmap_len);
        return -1;
    }

    ctx->picture.rle_data_len = rle_bitmap_len;

    /* Get bitmap dimensions from data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);

    /* Make sure the bitmap is not too large */
    if (ctx->presentation.video_w < width || ctx->presentation.video_h < height) {
        av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger then video.\n");
        return -1;
    }

    ctx->picture.w = width;
    ctx->picture.h = height;

    av_fast_malloc(&ctx->picture.rle, &ctx->picture.rle_buffer_size, rle_bitmap_len);

    if (!ctx->picture.rle)
        return -1;

    memcpy(ctx->picture.rle, buf, rle_bitmap_len);

    return 0;
}
int ff_amf_get_field_value(const uint8_t *data, const uint8_t *data_end, const uint8_t *name, uint8_t *dst, int dst_size) { int namelen = strlen((char *)name); int len; while (*data != AMF_DATA_TYPE_OBJECT && data < data_end) { len = ff_amf_tag_size(data, data_end); if (len < 0) len = data_end - data; data += len; } if (data_end - data < 3) return -1; data++; for (;;) { int size = bytestream_get_be16(&data); if (!size) break; if (data + size >= data_end || data + size < data) return -1; data += size; if (size == namelen && !memcmp(data-size, name, namelen)) { switch (*data++) { case AMF_DATA_TYPE_NUMBER: snprintf((char *)dst, dst_size, "%g", av_int2dbl(AV_RB64(data))); break; case AMF_DATA_TYPE_BOOL: snprintf((char *)dst, dst_size, "%s", *data ? "true" : "false"); break; case AMF_DATA_TYPE_STRING: len = bytestream_get_be16(&data); av_strlcpy((char *)dst, (const char*)data, FFMIN(len+1, dst_size)); break; default: return -1; } return 0; } len = ff_amf_tag_size(data, data_end); if (len < 0 || data + len >= data_end || data + len < data) return -1; data += len; } return -1; }
/**
 * Compute the encoded size, in bytes, of the AMF tag starting at data.
 *
 * Objects and arrays are walked recursively.
 *
 * @param data     start of the AMF tag
 * @param data_end first byte past the end of the buffer
 * @return tag size in bytes, or -1 on invalid/truncated data
 *
 * NOTE(review): the STRING/LONG_STRING cases read 2/4 length bytes via
 * AV_RB16/AV_RB32 without first checking that those bytes are within
 * data_end — possible overread on truncated input; confirm callers
 * guarantee a minimum tag size.
 */
int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end)
{
    const uint8_t *base = data;

    if (data >= data_end)
        return -1;
    switch (*data++) {
    case AMF_DATA_TYPE_NUMBER:
        return 9;                     /* marker + 8-byte double */
    case AMF_DATA_TYPE_BOOL:
        return 2;                     /* marker + 1-byte flag */
    case AMF_DATA_TYPE_STRING:
        return 3 + AV_RB16(data);     /* marker + 2-byte length + payload */
    case AMF_DATA_TYPE_LONG_STRING:
        return 5 + AV_RB32(data);     /* marker + 4-byte length + payload */
    case AMF_DATA_TYPE_NULL:
        return 1;
    case AMF_DATA_TYPE_ARRAY:
        data += 4;                    /* skip element count, parse as object */
        /* fallthrough */
    case AMF_DATA_TYPE_OBJECT:
        for (;;) {
            /* each iteration consumes one key/value pair */
            int size = bytestream_get_be16(&data);
            int t;
            if (!size) {
                /* zero-length key: next byte is the object-end marker */
                data++;
                break;
            }
            if (size < 0 || size >= data_end - data)
                return -1;
            data += size;             /* skip the key bytes */
            t = ff_amf_tag_size(data, data_end);
            if (t < 0 || t >= data_end - data)
                return -1;
            data += t;                /* skip the value */
        }
        return data - base;
    case AMF_DATA_TYPE_OBJECT_END:
        return 1;
    default:
        return -1;
    }
}
/**
 * Parse the ALAC magic cookie (codec extradata) into decoder settings.
 *
 * The cookie layout read below is 36 bytes: 12 bytes of atom header
 * (size, 'alac', version/flags), then the decoder parameters.
 *
 * @param alac decoder context whose avctx->extradata is parsed
 * @return 0 on success, -1 on invalid extradata
 */
static int alac_set_info(ALACContext *alac)
{
    const unsigned char *ptr = alac->avctx->extradata;

    /* All reads below total 36 bytes; reject shorter extradata so the
     * parser cannot run past the buffer. */
    if (alac->avctx->extradata_size < 36) {
        av_log(alac->avctx, AV_LOG_ERROR, "alac: extradata is too small\n");
        return -1;
    }

    ptr += 4; /* size */
    ptr += 4; /* alac */
    ptr += 4; /* 0 ? */

    if(AV_RB32(ptr) >= UINT_MAX/4){
        av_log(alac->avctx, AV_LOG_ERROR, "setinfo_max_samples_per_frame too large\n");
        return -1;
    }

    /* buffer size / 2 ? */
    alac->setinfo_max_samples_per_frame = bytestream_get_be32(&ptr);
    alac->setinfo_7a                    = *ptr++;
    alac->setinfo_sample_size           = *ptr++;
    alac->setinfo_rice_historymult      = *ptr++;
    alac->setinfo_rice_initialhistory   = *ptr++;
    alac->setinfo_rice_kmodifier        = *ptr++;
    /* channels? */
    alac->setinfo_7f                    = *ptr++;
    alac->setinfo_80                    = bytestream_get_be16(&ptr);
    /* max coded frame size */
    alac->setinfo_82                    = bytestream_get_be32(&ptr);
    /* bitrate ? */
    alac->setinfo_86                    = bytestream_get_be32(&ptr);
    /* samplerate */
    alac->setinfo_8a_rate               = bytestream_get_be32(&ptr);

    allocate_buffers(alac);

    return 0;
}
/**
 * Check whether the AMF value at data is a (long) string equal to str.
 *
 * @param data start of the AMF-encoded value
 * @param size number of bytes available at data
 * @param str  NUL-terminated string to compare against
 * @return 1 if the value is a string matching str, 0 otherwise
 */
int ff_amf_match_string(const uint8_t *data, int size, const char *str)
{
    int expect    = strlen(str);
    int remaining = size;
    int tag, payload;

    if (remaining < 1)
        return 0;

    tag = *data++;
    switch (tag) {
    case AMF_DATA_TYPE_LONG_STRING:
        /* 1 marker byte + 4 length bytes */
        remaining -= 4 + 1;
        if (remaining < 0)
            return 0;
        payload = bytestream_get_be32(&data);
        break;
    case AMF_DATA_TYPE_STRING:
        /* 1 marker byte + 2 length bytes */
        remaining -= 2 + 1;
        if (remaining < 0)
            return 0;
        payload = bytestream_get_be16(&data);
        break;
    default:
        return 0;
    }

    if (payload > remaining || payload != expect)
        return 0;

    return memcmp(data, str, expect) == 0;
}
/**
 * Initialize a range decoder reading from buf.
 *
 * Reuses the encoder initializer for the shared state setup, then
 * preloads the first two input bytes into c->low.
 */
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
{
    /* cast to avoid compiler warning */
    ff_init_range_encoder(c, (uint8_t *)buf, buf_size);
    /* also advances c->bytestream past the two bytes consumed */
    c->low = bytestream_get_be16((const uint8_t **)&c->bytestream);
}
/**
 * Probe for a raw DTS (DCA) bitstream.
 *
 * Counts sync markers for the 16-bit BE/LE and 14-bit BE/LE framings and
 * accumulates an inter-sample difference metric to reject PCM-like data.
 *
 * @param p probe data (buffer + size)
 * @return AVPROBE_SCORE_EXTENSION + 1 when the heuristics match, else 0
 */
static int dts_probe(AVProbeData *p)
{
    const uint8_t *buf, *bufp;
    uint32_t state = -1;
    int markers[3] = {0};
    int sum, max;
    int64_t diff = 0;

    buf = p->buf;

    for(; buf < (p->buf+p->buf_size)-2; buf+=2) {
        bufp = buf;
        state = (state << 16) | bytestream_get_be16(&bufp);

        /* regular bitstream */
        if (state == DCA_MARKER_RAW_BE || state == DCA_MARKER_RAW_LE)
            markers[0]++;

        /* 14 bits big-endian bitstream */
        if (state == DCA_MARKER_14B_BE)
            if ((bytestream_get_be16(&bufp) & 0xFFF0) == 0x07F0)
                markers[1]++;

        /* 14 bits little-endian bitstream */
        if (state == DCA_MARKER_14B_LE)
            if ((bytestream_get_be16(&bufp) & 0xF0FF) == 0xF007)
                markers[2]++;

        /* Treat the 16-bit words as SIGNED samples: the unsigned
         * subtraction previously used here misestimates the difference
         * across sign changes and skews the PCM-rejection heuristic. */
        if (buf - p->buf >= 4)
            diff += FFABS(((int16_t)AV_RL16(buf)) - (int16_t)AV_RL16(buf-4));
    }
    sum = markers[0] + markers[1] + markers[2];
    max = markers[1] > markers[0];
    max = markers[2] > markers[max] ? 2 : max;
    if (markers[max] > 3 && p->buf_size / markers[max] < 32*1024 &&
        markers[max] * 4 > sum * 3 &&
        diff / p->buf_size > 200)
        return AVPROBE_SCORE_EXTENSION + 1;

    return 0;
}
/**
 * Compute the encoded size, in bytes, of the AMF tag starting at data.
 *
 * Handles strict arrays (no keys, fixed count), ECMA/mixed arrays
 * (keyed, count is advisory) and plain objects (keyed, terminated by an
 * empty key + end marker), recursing for nested values.
 *
 * @param data     start of the AMF tag
 * @param data_end first byte past the end of the buffer
 * @return tag size in bytes, or -1 on invalid/truncated data
 */
int ff_amf_tag_size(const uint8_t *data, const uint8_t *data_end)
{
    const uint8_t *base = data;
    AMFDataType type;
    /* -1 wraps to UINT_MAX so the nb-- condition below is effectively
     * "always true" for keyed types, which terminate via the empty key */
    unsigned nb = -1;
    int parse_key = 1;

    if (data >= data_end)
        return -1;
    switch ((type = *data++)) {
    case AMF_DATA_TYPE_NUMBER:
        return 9;                     /* marker + 8-byte double */
    case AMF_DATA_TYPE_BOOL:
        return 2;                     /* marker + 1-byte flag */
    case AMF_DATA_TYPE_STRING:
        return 3 + AV_RB16(data);     /* marker + 2-byte length + payload */
    case AMF_DATA_TYPE_LONG_STRING:
        return 5 + AV_RB32(data);     /* marker + 4-byte length + payload */
    case AMF_DATA_TYPE_NULL:
        return 1;
    case AMF_DATA_TYPE_ARRAY:
        parse_key = 0;                /* strict array: values only, no keys */
        /* fallthrough */
    case AMF_DATA_TYPE_MIXEDARRAY:
        nb = bytestream_get_be32(&data); /* element count */
        /* fallthrough */
    case AMF_DATA_TYPE_OBJECT:
        /* strict arrays stop after nb elements; keyed types loop until
         * the empty-key terminator */
        while (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY) {
            int t;
            if (parse_key) {
                int size = bytestream_get_be16(&data);
                if (!size) {
                    /* empty key: next byte is the object-end marker */
                    data++;
                    break;
                }
                if (size < 0 || size >= data_end - data)
                    return -1;
                data += size;
            }
            t = ff_amf_tag_size(data, data_end);
            if (t < 0 || t >= data_end - data)
                return -1;
            data += t;
        }
        return data - base;
    case AMF_DATA_TYPE_OBJECT_END:
        return 1;
    default:
        return -1;
    }
}
/**
 * Initialize the libvorbis decoder from codec extradata.
 *
 * Supports two extradata layouts: three headers each prefixed by a
 * 16-bit big-endian size (first bytes 0x00 0x1E), or Xiph lacing
 * (first byte 2, header sizes as sums of 0xFF-terminated bytes).
 * The three headers are fed to vorbis_synthesis_headerin() and the
 * stream parameters copied back into the codec context.
 *
 * @param avccontext codec context; extradata must hold the headers
 * @return 0 on success, -1 on missing/damaged headers
 */
static int oggvorbis_decode_init(AVCodecContext *avccontext)
{
    OggVorbisDecContext *context = avccontext->priv_data;
    uint8_t *p = avccontext->extradata;
    int i, hsizes[3];
    unsigned char *headers[3], *extradata = avccontext->extradata;

    vorbis_info_init(&context->vi);
    vorbis_comment_init(&context->vc);

    if(!avccontext->extradata_size || !p) {
        av_log(avccontext, AV_LOG_ERROR, "vorbis extradata absent\n");
        return -1;
    }

    if(p[0] == 0 && p[1] == 30) {
        /* NOTE(review): this path does not validate hsizes[] against
         * extradata_size — headers could point past the buffer on
         * damaged extradata; TODO confirm and add bounds checks. */
        for(i = 0; i < 3; i++){
            hsizes[i] = bytestream_get_be16((const uint8_t **)&p);
            headers[i] = p;
            p += hsizes[i];
        }
    } else if(*p == 2) {
        unsigned int offset = 1;
        p++;
        /* Xiph lacing: each size is a run of 0xFF bytes plus a final byte */
        for(i=0; i<2; i++) {
            hsizes[i] = 0;
            while((*p == 0xFF) && (offset < avccontext->extradata_size)) {
                hsizes[i] += 0xFF;
                offset++;
                p++;
            }
            if(offset >= avccontext->extradata_size - 1) {
                av_log(avccontext, AV_LOG_ERROR, "vorbis header sizes damaged\n");
                return -1;
            }
            hsizes[i] += *p;
            offset++;
            p++;
        }
        /* third header occupies whatever remains */
        hsizes[2] = avccontext->extradata_size - hsizes[0]-hsizes[1]-offset;
        headers[0] = extradata + offset;
        headers[1] = extradata + offset + hsizes[0];
        headers[2] = extradata + offset + hsizes[0] + hsizes[1];
    } else {
        av_log(avccontext, AV_LOG_ERROR,
               "vorbis initial header len is wrong: %d\n", *p);
        return -1;
    }

    for(i=0; i<3; i++){
        context->op.b_o_s  = i==0; /* only the first packet is beginning-of-stream */
        context->op.bytes  = hsizes[i];
        context->op.packet = headers[i];
        if(vorbis_synthesis_headerin(&context->vi, &context->vc, &context->op)<0){
            av_log(avccontext, AV_LOG_ERROR, "%d. vorbis header damaged\n", i+1);
            return -1;
        }
    }

    avccontext->channels    = context->vi.channels;
    avccontext->sample_rate = context->vi.rate;
    avccontext->sample_fmt  = AV_SAMPLE_FMT_S16;
    avccontext->time_base   = (AVRational){1, avccontext->sample_rate};

    vorbis_synthesis_init(&context->vd, &context->vi);
    vorbis_block_init(&context->vd, &context->vb);

    return 0;
}
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Implement cropping
 */
static void parse_presentation_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);

    uint16_t object_index;

    av_dlog(avctx, "Video Dimensions %dx%d\n", w, h);
    if (av_image_check_size(w, h, 0, avctx) >= 0)
        avcodec_set_dimensions(avctx, w, h);

    /* Skip 1 bytes of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /*
     * Skip 3 bytes of unknown:
     *     state
     *     palette_update_flag (0x80),
     *     palette_id_to_use,
     */
    buf += 3;

    ctx->presentation.object_count = bytestream_get_byte(&buf);
    if (!ctx->presentation.object_count)
        return;

    /* Verify that enough bytes are remaining for all of the objects:
     * 11 bytes of header consumed above, 8 bytes per object below. */
    buf_size -= 11;
    if (buf_size < ctx->presentation.object_count * 8) {
        ctx->presentation.object_count = 0;
        return;
    }

    av_freep(&ctx->presentation.objects);
    ctx->presentation.objects = av_malloc(sizeof(PGSSubPictureReference) * ctx->presentation.object_count);
    if (!ctx->presentation.objects) {
        ctx->presentation.object_count = 0;
        return;
    }

    for (object_index = 0; object_index < ctx->presentation.object_count; ++object_index) {
        PGSSubPictureReference *reference = &ctx->presentation.objects[object_index];
        reference->picture_id = bytestream_get_be16(&buf);

        /* Skip 1 byte of unknown: window_id_ref */
        buf++;
        /* composition_flag (0x80 - object cropped, 0x40 - object forced) */
        reference->composition = bytestream_get_byte(&buf);

        reference->x = bytestream_get_be16(&buf);
        reference->y = bytestream_get_be16(&buf);

        /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/
        av_dlog(avctx, "Subtitle Placement ID=%d, x=%d, y=%d\n", reference->picture_id, reference->x, reference->y);

        if (reference->x > avctx->width || reference->y > avctx->height) {
            av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
                   reference->x, reference->y, avctx->width, avctx->height);
            reference->x = 0;
            reference->y = 0;
        }
    }
}
/**
 * Decode one PGS subtitle packet.
 *
 * Walks the packet's segments (1-byte type + 2-byte big-endian length)
 * and dispatches each to its parser; a display segment produces the
 * output subtitle via display_end_segment().
 *
 * @param avctx     codec context
 * @param data      output AVSubtitle
 * @param data_size set to the size written by display_end_segment(), else 0
 * @param avpkt     input packet
 * @return consumed byte count (buf_size), or -1 on a too-short packet
 */
static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;

    const uint8_t *buf_end;
    uint8_t       segment_type;
    int           segment_length;

#ifdef DEBUG_PACKET_CONTENTS
    int i;

    av_log(avctx, AV_LOG_INFO, "PGS sub packet:\n");

    for (i = 0; i < buf_size; i++) {
        av_log(avctx, AV_LOG_INFO, "%02x ", buf[i]);
        if (i % 16 == 15)
            av_log(avctx, AV_LOG_INFO, "\n");
    }

    if (i & 15)
        av_log(avctx, AV_LOG_INFO, "\n");
#endif

    *data_size = 0;

    /* Ensure that we have received at a least a segment code and segment length */
    if (buf_size < 3)
        return -1;

    buf_end = buf + buf_size;

    /* Step through buffer to identify segments */
    while (buf < buf_end) {
        segment_type   = bytestream_get_byte(&buf);
        segment_length = bytestream_get_be16(&buf);

        av_dlog(avctx, "Segment Length %d, Segment Type %x\n", segment_length, segment_type);

        /* display segments are dispatched even with an oversized length;
         * anything else with a length past the packet ends parsing */
        if (segment_type != DISPLAY_SEGMENT && segment_length > buf_end - buf)
            break;

        switch (segment_type) {
        case PALETTE_SEGMENT:
            parse_palette_segment(avctx, buf, segment_length);
            break;
        case PICTURE_SEGMENT:
            parse_picture_segment(avctx, buf, segment_length);
            break;
        case PRESENTATION_SEGMENT:
            parse_presentation_segment(avctx, buf, segment_length);
            break;
        case WINDOW_SEGMENT:
            /*
             * Window Segment Structure (No new information provided):
             *     2 bytes: Unkown,
             *     2 bytes: X position of subtitle,
             *     2 bytes: Y position of subtitle,
             *     2 bytes: Width of subtitle,
             *     2 bytes: Height of subtitle.
             */
            break;
        case DISPLAY_SEGMENT:
            *data_size = display_end_segment(avctx, data, buf, segment_length);
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown subtitle segment type 0x%x, length %d\n",
                   segment_type, segment_length);
            break;
        }

        buf += segment_length;
    }

    return buf_size;
}
/**
 * Initialize the COOK decoder: parse extradata, validate stream
 * parameters, and set up tables, buffers, and the transform.
 *
 * @param avctx codec context; extradata carries the RM codec data
 * @return 0 on success, -1 on missing/invalid parameters
 *
 * NOTE(review): q->samples_per_frame / q->nb_channels divides by the
 * container channel count without a zero check — presumably the
 * demuxer guarantees channels > 0; TODO confirm.
 */
static int cook_decode_init(AVCodecContext *avctx)
{
    COOKContext *q = avctx->priv_data;
    const uint8_t *edata_ptr = avctx->extradata;

    /* Take care of the codec specific extradata. */
    if (avctx->extradata_size <= 0) {
        av_log(avctx,AV_LOG_ERROR,"Necessary extradata missing!\n");
        return -1;
    } else {
        /* 8 for mono, 16 for stereo, ? for multichannel
           Swap to right endianness so we don't need to care later on. */
        av_log(avctx,AV_LOG_DEBUG,"codecdata_length=%d\n",avctx->extradata_size);
        if (avctx->extradata_size >= 8){
            q->cookversion = bytestream_get_be32(&edata_ptr);
            q->samples_per_frame = bytestream_get_be16(&edata_ptr);
            q->subbands = bytestream_get_be16(&edata_ptr);
        }
        if (avctx->extradata_size >= 16){
            bytestream_get_be32(&edata_ptr);    //Unknown unused
            q->js_subband_start = bytestream_get_be16(&edata_ptr);
            q->js_vlc_bits = bytestream_get_be16(&edata_ptr);
        }
    }

    /* Take data from the AVCodecContext (RM container). */
    q->sample_rate = avctx->sample_rate;
    q->nb_channels = avctx->channels;
    q->bit_rate = avctx->bit_rate;

    /* Initialize RNG. */
    av_init_random(1, &q->random_state);

    /* Initialize extradata related variables. */
    q->samples_per_channel = q->samples_per_frame / q->nb_channels;
    q->bits_per_subpacket = avctx->block_align * 8;

    /* Initialize default data states. */
    q->log2_numvector_size = 5;
    q->total_subbands = q->subbands;

    /* Initialize version-dependent variables */
    av_log(NULL,AV_LOG_DEBUG,"q->cookversion=%x\n",q->cookversion);
    q->joint_stereo = 0;
    switch (q->cookversion) {
        case MONO:
            if (q->nb_channels != 1) {
                av_log(avctx,AV_LOG_ERROR,"Container channels != 1, report sample!\n");
                return -1;
            }
            av_log(avctx,AV_LOG_DEBUG,"MONO\n");
            break;
        case STEREO:
            /* a STEREO stream carried with >1 container channels splits
             * the subpacket bits between the channels */
            if (q->nb_channels != 1) {
                q->bits_per_subpacket = q->bits_per_subpacket/2;
            }
            av_log(avctx,AV_LOG_DEBUG,"STEREO\n");
            break;
        case JOINT_STEREO:
            if (q->nb_channels != 2) {
                av_log(avctx,AV_LOG_ERROR,"Container channels != 2, report sample!\n");
                return -1;
            }
            av_log(avctx,AV_LOG_DEBUG,"JOINT_STEREO\n");
            if (avctx->extradata_size >= 16){
                q->total_subbands = q->subbands + q->js_subband_start;
                q->joint_stereo = 1;
            }
            /* larger frames use bigger vector groupings */
            if (q->samples_per_channel > 256) {
                q->log2_numvector_size  = 6;
            }
            if (q->samples_per_channel > 512) {
                q->log2_numvector_size  = 7;
            }
            break;
        case MC_COOK:
            av_log(avctx,AV_LOG_ERROR,"MC_COOK not supported!\n");
            return -1;
            break;
        default:
            av_log(avctx,AV_LOG_ERROR,"Unknown Cook version, report sample!\n");
            return -1;
            break;
    }

    /* Initialize variable relations */
    q->numvector_size = (1 << q->log2_numvector_size);

    /* Generate tables */
    init_pow2table();
    init_gain_table(q);
    init_cplscales_table(q);

    if (init_cook_vlc_tables(q) != 0)
        return -1;

    /* guard against overflow in the padded-buffer size computations below */
    if(avctx->block_align >= UINT_MAX/2)
        return -1;

    /* Pad the databuffer with:
       DECODE_BYTES_PAD1 or DECODE_BYTES_PAD2 for decode_bytes(),
       FF_INPUT_BUFFER_PADDING_SIZE, for the bitstreamreader. */
    if (q->nb_channels==2 && q->joint_stereo==0) {
        q->decoded_bytes_buffer =
          av_mallocz(avctx->block_align/2
                     + DECODE_BYTES_PAD2(avctx->block_align/2)
                     + FF_INPUT_BUFFER_PADDING_SIZE);
    } else {
        q->decoded_bytes_buffer =
          av_mallocz(avctx->block_align
                     + DECODE_BYTES_PAD1(avctx->block_align)
                     + FF_INPUT_BUFFER_PADDING_SIZE);
    }
    if (q->decoded_bytes_buffer == NULL)
        return -1;

    /* gain buffers are double-buffered across frames */
    q->gains1.now      = q->gain_1;
    q->gains1.previous = q->gain_2;
    q->gains2.now      = q->gain_3;
    q->gains2.previous = q->gain_4;

    /* Initialize transform. */
    if ( init_cook_mlt(q) != 0 )
        return -1;

    /* Initialize COOK signal arithmetic handling */
    if (1) {
        q->scalar_dequant  = scalar_dequant_float;
        q->decouple        = decouple_float;
        q->imlt_window     = imlt_window_float;
        q->interpolate     = interpolate_float;
        q->saturate_output = saturate_output_float;
    }

    /* Try to catch some obviously faulty streams, othervise it might be exploitable */
    if (q->total_subbands > 53) {
        av_log(avctx,AV_LOG_ERROR,"total_subbands > 53, report sample!\n");
        return -1;
    }
    if (q->subbands > 50) {
        av_log(avctx,AV_LOG_ERROR,"subbands > 50, report sample!\n");
        return -1;
    }
    if ((q->samples_per_channel == 256) || (q->samples_per_channel == 512) || (q->samples_per_channel == 1024)) {
    } else {
        av_log(avctx,AV_LOG_ERROR,"unknown amount of samples_per_channel = %d, report sample!\n",q->samples_per_channel);
        return -1;
    }
    if ((q->js_vlc_bits > 6) || (q->js_vlc_bits < 0)) {
        av_log(avctx,AV_LOG_ERROR,"q->js_vlc_bits = %d, only >= 0 and <= 6 allowed!\n",q->js_vlc_bits);
        return -1;
    }

    avctx->sample_fmt = SAMPLE_FMT_S16;

#ifdef COOKDEBUG
    dump_cook_context(q);
#endif
    return 0;
}
/**
 * Probe for a raw DTS (DCA) core bitstream.
 *
 * Scans for core syncwords in all four framings (16/14-bit, BE/LE),
 * validates candidate frame headers bit-by-bit (frame length, sample
 * rate code, LFE flag, ...), and bins hits per framing and sample-rate
 * code.  A signed inter-sample difference metric rejects PCM-like data.
 *
 * @param p probe data (buffer + size)
 * @return AVPROBE_SCORE_EXTENSION + 1 when the heuristics match, else 0
 */
static int dts_probe(AVProbeData *p)
{
    const uint8_t *buf, *bufp;
    uint32_t state = -1;
    /* 4 framings x 16 sample-rate codes */
    int markers[4*16] = {0};
    int sum, max, i;
    int64_t diff = 0;
    uint8_t hdr[12 + FF_INPUT_BUFFER_PADDING_SIZE] = { 0 };

    /* skip the start of the buffer (often container headers/ID3) */
    buf = p->buf + FFMIN(4096, p->buf_size);

    for(; buf < (p->buf+p->buf_size)-2; buf+=2) {
        int marker, sample_blocks, sample_rate, sr_code, framesize;
        int lfe;
        GetBitContext gb;

        bufp = buf;
        state = (state << 16) | bytestream_get_be16(&bufp);

        /* signed 16-bit difference metric to reject PCM-like data */
        if (buf - p->buf >= 4)
            diff += FFABS(((int16_t)AV_RL16(buf)) - (int16_t)AV_RL16(buf-4));

        /* regular bitstream */
        if (state == DCA_SYNCWORD_CORE_BE)
            marker = 0;
        else if (state == DCA_SYNCWORD_CORE_LE)
            marker = 1;

        /* 14 bits big-endian bitstream */
        else if (state == DCA_SYNCWORD_CORE_14B_BE &&
                 (bytestream_get_be16(&bufp) & 0xFFF0) == 0x07F0)
            marker = 2;

        /* 14 bits little-endian bitstream */
        else if (state == DCA_SYNCWORD_CORE_14B_LE &&
                 (bytestream_get_be16(&bufp) & 0xF0FF) == 0xF007)
            marker = 3;
        else
            continue;

        /* normalize the candidate header to 16-bit framing for parsing */
        if (avpriv_dca_convert_bitstream(buf-2, 12, hdr, 12) < 0)
            continue;

        init_get_bits(&gb, hdr, 96);
        skip_bits_long(&gb, 39);

        sample_blocks = get_bits(&gb, 7) + 1;
        if (sample_blocks < 8)
            continue;

        framesize = get_bits(&gb, 14) + 1;
        if (framesize < 95)
            continue;

        skip_bits(&gb, 6);
        sr_code = get_bits(&gb, 4);
        sample_rate = avpriv_dca_sample_rates[sr_code];
        if (sample_rate == 0)
            continue;

        get_bits(&gb, 5);
        if (get_bits(&gb, 1))
            continue;

        skip_bits_long(&gb, 9);
        lfe = get_bits(&gb, 2);
        if (lfe > 2)
            continue;

        /* bin by framing + sample-rate code */
        marker += 4* sr_code;
        markers[marker] ++;
    }
    sum = max = 0;
    for (i=0; i<FF_ARRAY_ELEMS(markers); i++) {
        sum += markers[i];
        if (markers[max] < markers[i])
            max = i;
    }
    if (markers[max] > 3 && p->buf_size / markers[max] < 32*1024 &&
        markers[max] * 4 > sum * 3 &&
        diff / p->buf_size > 200)
        return AVPROBE_SCORE_EXTENSION + 1;

    return 0;
}
static void amf_tag_contents(void *ctx, const uint8_t *data, const uint8_t *data_end) { unsigned int size, nb = -1; char buf[1024]; AMFDataType type; int parse_key = 1; if (data >= data_end) return; switch ((type = *data++)) { case AMF_DATA_TYPE_NUMBER: av_log(ctx, AV_LOG_DEBUG, " number %g\n", av_int2double(AV_RB64(data))); return; case AMF_DATA_TYPE_BOOL: av_log(ctx, AV_LOG_DEBUG, " bool %d\n", *data); return; case AMF_DATA_TYPE_STRING: case AMF_DATA_TYPE_LONG_STRING: if (type == AMF_DATA_TYPE_STRING) { size = bytestream_get_be16(&data); } else { size = bytestream_get_be32(&data); } size = FFMIN(size, sizeof(buf) - 1); memcpy(buf, data, size); buf[size] = 0; av_log(ctx, AV_LOG_DEBUG, " string '%s'\n", buf); return; case AMF_DATA_TYPE_NULL: av_log(ctx, AV_LOG_DEBUG, " NULL\n"); return; case AMF_DATA_TYPE_ARRAY: parse_key = 0; case AMF_DATA_TYPE_MIXEDARRAY: nb = bytestream_get_be32(&data); case AMF_DATA_TYPE_OBJECT: av_log(ctx, AV_LOG_DEBUG, " {\n"); while (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY) { int t; if (parse_key) { size = bytestream_get_be16(&data); size = FFMIN(size, sizeof(buf) - 1); if (!size) { av_log(ctx, AV_LOG_DEBUG, " }\n"); data++; break; } memcpy(buf, data, size); buf[size] = 0; if (size >= data_end - data) return; data += size; av_log(ctx, AV_LOG_DEBUG, " %s: ", buf); } amf_tag_contents(ctx, data, data_end); t = ff_amf_tag_size(data, data_end); if (t < 0 || t >= data_end - data) return; data += t; } return; case AMF_DATA_TYPE_OBJECT_END: av_log(ctx, AV_LOG_DEBUG, " }\n"); return; default: return; } }
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @param pts presentation timestamp recorded for this segment
 * @return 0 on success, a negative error code if the dimensions are invalid
 * @todo TODO: Implement cropping
 * @todo TODO: Implement forcing of subtitles
 */
static int parse_presentation_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size, int64_t pts)
{
    PGSSubContext *ctx = avctx->priv_data;
    int err;
    int pos_x, pos_y;
    int video_w, video_h;

    video_w = bytestream_get_be16(&buf);
    video_h = bytestream_get_be16(&buf);

    ctx->presentation.pts = pts;

    av_dlog(avctx, "Video Dimensions %dx%d\n", video_w, video_h);
    err = ff_set_dimensions(avctx, video_w, video_h);
    if (err < 0)
        return err;

    /* Skip 1 bytes of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /*
     * Skip 3 bytes of unknown:
     *     state
     *     palette_update_flag (0x80),
     *     palette_id_to_use,
     */
    buf += 3;

    ctx->presentation.object_number   = bytestream_get_byte(&buf);
    ctx->presentation.composition_flag = 0;
    if (!ctx->presentation.object_number)
        return 0;

    /*
     * Skip 3 bytes of unknown:
     *     object_id_ref (2 bytes),
     *     window_id_ref,
     */
    buf += 3;
    ctx->presentation.composition_flag = bytestream_get_byte(&buf);

    pos_x = bytestream_get_be16(&buf);
    pos_y = bytestream_get_be16(&buf);

    /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/

    av_dlog(avctx, "Subtitle Placement x=%d, y=%d\n", pos_x, pos_y);

    if (pos_x > avctx->width || pos_y > avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
               pos_x, pos_y, avctx->width, avctx->height);
        pos_x = 0;
        pos_y = 0;
    }

    /* Fill in dimensions */
    ctx->presentation.x = pos_x;
    ctx->presentation.y = pos_y;

    return 0;
}
/**
 * Decode one SGI image.
 *
 * Parses the 512-byte SGI header (magic, compression flag, dimensions,
 * depth), selects the output pixel format, allocates the frame, and
 * hands the payload to the RLE or uncompressed reader.
 *
 * @param avctx     codec context
 * @param data      output AVFrame
 * @param data_size set to sizeof(AVPicture) on success
 * @param in_buf    input buffer
 * @param buf_size  input buffer size
 * @return buf_size on success, -1 on error
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *in_buf, int buf_size)
{
    SgiState *s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame *p = &s->picture;
    const uint8_t *in_end = in_buf + buf_size;
    unsigned int dimension, bytes_per_channel, rle;
    int ret = 0;
    uint8_t *out_buf, *out_end;

    if (buf_size < SGI_HEADER_SIZE){
        av_log(avctx, AV_LOG_ERROR, "buf_size too small (%d)\n", buf_size);
        return -1;
    }

    /* Test for SGI magic. */
    if (bytestream_get_be16(&in_buf) != SGI_MAGIC) {
        av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
        return -1;
    }

    rle = bytestream_get_byte(&in_buf);
    bytes_per_channel = bytestream_get_byte(&in_buf);
    dimension = bytestream_get_be16(&in_buf);
    s->width = bytestream_get_be16(&in_buf);
    s->height = bytestream_get_be16(&in_buf);
    s->depth = bytestream_get_be16(&in_buf);

    /* only 8-bit-per-channel images are supported here */
    if (bytes_per_channel != 1) {
        av_log(avctx, AV_LOG_ERROR, "wrong channel number\n");
        return -1;
    }

    /* Check for supported image dimensions. */
    if (dimension != 2 && dimension != 3) {
        av_log(avctx, AV_LOG_ERROR, "wrong dimension number\n");
        return -1;
    }

    /* depth (number of channels) selects the output pixel format */
    if (s->depth == SGI_GRAYSCALE) {
        avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (s->depth == SGI_RGB) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else if (s->depth == SGI_RGBA) {
        avctx->pix_fmt = PIX_FMT_RGBA;
    } else {
        av_log(avctx, AV_LOG_ERROR, "wrong picture format\n");
        return -1;
    }

    if (avcodec_check_dimensions(avctx, s->width, s->height))
        return -1;
    avcodec_set_dimensions(avctx, s->width, s->height);

    /* release any frame left over from the previous call */
    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed.\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;
    p->key_frame = 1;
    out_buf = p->data[0];

    out_end = out_buf + p->linesize[0] * s->height;

    s->linesize = p->linesize[0];

    /* Skip the remainder of the header (12 bytes were consumed above). */
    in_buf += SGI_HEADER_SIZE - 12;
    if (rle) {
        /* RLE rows are written bottom-up, hence the end pointer */
        ret = read_rle_sgi(out_end, in_buf, in_end, s);
    } else {
        ret = read_uncompressed_sgi(out_buf, out_end, in_buf, in_end, s);
    }

    if (ret == 0) {
        *picture = s->picture;
        *data_size = sizeof(AVPicture);
        return buf_size;
    } else {
        return -1;
    }
}
/**
 * Parse the picture segment packet.
 *
 * The picture segment contains details on the sequence id,
 * width, height and Run Length Encoded (RLE) bitmap data.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @return 0 on success, -1 on malformed data
 */
static int parse_picture_segment(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    uint8_t sequence_desc;
    unsigned int rle_bitmap_len, width, height;

    if (buf_size <= 4)
        return -1;
    buf_size -= 4;

    /* skip 3 unknown bytes: Object ID (2 bytes), Version Number */
    buf += 3;

    /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
    sequence_desc = bytestream_get_byte(&buf);

    if (!(sequence_desc & 0x80)) {
        /* Additional RLE data */
        if (buf_size > ctx->picture.rle_remaining_len)
            return -1;

        memcpy(ctx->picture.rle + ctx->picture.rle_data_len, buf, buf_size);
        ctx->picture.rle_data_len += buf_size;
        ctx->picture.rle_remaining_len -= buf_size;

        return 0;
    }

    if (buf_size <= 7)
        return -1;
    buf_size -= 7;

    /* Decode rle bitmap length, stored size includes width/height data;
     * guard against unsigned underflow when the stored size is < 4 */
    rle_bitmap_len = bytestream_get_be24(&buf);
    if (rle_bitmap_len < 2*2)
        return -1;
    rle_bitmap_len -= 2*2;

    /* Get bitmap dimensions from data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);

    /* Make sure the bitmap is not too large */
    if (avctx->width < width || avctx->height < height) {
        av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger then video.\n");
        return -1;
    }

    /* The buffer allocated below holds rle_bitmap_len bytes; copying more
     * than that from this packet would overflow it. */
    if (buf_size > rle_bitmap_len) {
        av_log(avctx, AV_LOG_ERROR, "too much RLE data\n");
        return -1;
    }

    ctx->picture.w = width;
    ctx->picture.h = height;

    av_fast_malloc(&ctx->picture.rle, &ctx->picture.rle_buffer_size, rle_bitmap_len);

    if (!ctx->picture.rle)
        return -1;

    memcpy(ctx->picture.rle, buf, buf_size);
    ctx->picture.rle_data_len = buf_size;
    ctx->picture.rle_remaining_len = rle_bitmap_len - buf_size;

    return 0;
}
/**
 * Decode one Blu-ray LPCM access unit into interleaved native-order PCM.
 *
 * The 4-byte stream header is parsed by pcm_bluray_parse_header(); the
 * remaining payload is demuxed per channel layout.  Source frames always
 * carry an even number of channels (odd layouts are padded), so several
 * cases below skip or reorder channels while copying.
 *
 * @param avctx     codec context (channels, sample_fmt, channel_layout set
 *                  by the header parser)
 * @param data      output buffer receiving int16/int32 samples
 * @param data_size in: output buffer capacity; out: bytes written
 * @param avpkt     input packet
 * @return number of input bytes consumed, or -1 on error
 */
static int pcm_bluray_decode_frame(AVCodecContext *avctx, void *data,
                                   int *data_size, AVPacket *avpkt)
{
    const uint8_t *src = avpkt->data;
    int buf_size = avpkt->size;
    int num_source_channels, channel, retval;
    int sample_size, samples, output_size;
    /* Two views of the same output buffer; which one advances depends on
     * the sample format chosen by the header parser. */
    int16_t *dst16 = data;
    int32_t *dst32 = data;

    if (buf_size < 4) {
        av_log(avctx, AV_LOG_ERROR, "PCM packet too small\n");
        return -1;
    }

    if (pcm_bluray_parse_header(avctx, src))
        return -1;
    src += 4;
    buf_size -= 4;

    /* There's always an even number of channels in the source */
    num_source_channels = FFALIGN(avctx->channels, 2);
    sample_size = (num_source_channels * avctx->bits_per_coded_sample) >> 3;
    samples = buf_size / sample_size;

    /* 24-bit input is widened to 32-bit output, hence the 4-vs-2 factor. */
    output_size = samples * avctx->channels *
                  (avctx->sample_fmt == SAMPLE_FMT_S32 ? 4 : 2);
    if (output_size > *data_size) {
        av_log(avctx, AV_LOG_ERROR,
               "Insufficient output buffer space (%d bytes, needed %d bytes)\n",
               *data_size, output_size);
        return -1;
    }
    *data_size = output_size;

    if (samples) {
        switch (avctx->channel_layout) {
            /* cases with same number of source and coded channels */
        case CH_LAYOUT_STEREO:
        case CH_LAYOUT_4POINT0:
        case CH_LAYOUT_2_2:
            samples *= num_source_channels;
            if (SAMPLE_FMT_S16 == avctx->sample_fmt) {
#if HAVE_BIGENDIAN
                /* Source is big-endian, so a straight copy suffices here. */
                memcpy(dst16, src, output_size);
#else
                do {
                    *dst16++ = bytestream_get_be16(&src);
                } while (--samples);
#endif
            } else {
                /* 24-bit big-endian samples, shifted into the top of s32. */
                do {
                    *dst32++ = bytestream_get_be24(&src) << 8;
                } while (--samples);
            }
            break;
        /* cases where number of source channels = coded channels + 1 */
        case CH_LAYOUT_MONO:
        case CH_LAYOUT_SURROUND:
        case CH_LAYOUT_2_1:
        case CH_LAYOUT_5POINT0:
            if (SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
#if HAVE_BIGENDIAN
                    memcpy(dst16, src, avctx->channels * 2);
                    dst16 += avctx->channels;
                    src += sample_size;
#else
                    channel = avctx->channels;
                    do {
                        *dst16++ = bytestream_get_be16(&src);
                    } while (--channel);
                    src += 2;    /* drop the padding channel */
#endif
                } while (--samples);
            } else {
                do {
                    channel = avctx->channels;
                    do {
                        *dst32++ = bytestream_get_be24(&src) << 8;
                    } while (--channel);
                    src += 3;    /* drop the padding channel */
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LBack, RBack, LF */
        case CH_LAYOUT_5POINT1:
            if (SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream_get_be16(&src);
                    dst16[1] = bytestream_get_be16(&src);
                    dst16[2] = bytestream_get_be16(&src);
                    dst16[4] = bytestream_get_be16(&src);
                    dst16[5] = bytestream_get_be16(&src);
                    dst16[3] = bytestream_get_be16(&src);
                    dst16 += 6;
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream_get_be24(&src) << 8;
                    dst32[1] = bytestream_get_be24(&src) << 8;
                    dst32[2] = bytestream_get_be24(&src) << 8;
                    dst32[4] = bytestream_get_be24(&src) << 8;
                    dst32[5] = bytestream_get_be24(&src) << 8;
                    dst32[3] = bytestream_get_be24(&src) << 8;
                    dst32 += 6;
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LSide, LBack, RBack, RSide, <unused> */
        case CH_LAYOUT_7POINT0:
            if (SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream_get_be16(&src);
                    dst16[1] = bytestream_get_be16(&src);
                    dst16[2] = bytestream_get_be16(&src);
                    dst16[5] = bytestream_get_be16(&src);
                    dst16[3] = bytestream_get_be16(&src);
                    dst16[4] = bytestream_get_be16(&src);
                    dst16[6] = bytestream_get_be16(&src);
                    dst16 += 7;
                    src += 2;    /* skip the unused 8th source channel */
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream_get_be24(&src) << 8;
                    dst32[1] = bytestream_get_be24(&src) << 8;
                    dst32[2] = bytestream_get_be24(&src) << 8;
                    dst32[5] = bytestream_get_be24(&src) << 8;
                    dst32[3] = bytestream_get_be24(&src) << 8;
                    dst32[4] = bytestream_get_be24(&src) << 8;
                    dst32[6] = bytestream_get_be24(&src) << 8;
                    dst32 += 7;
                    src += 3;    /* skip the unused 8th source channel */
                } while (--samples);
            }
            break;
            /* remapping: L, R, C, LSide, LBack, RBack, RSide, LF */
        case CH_LAYOUT_7POINT1:
            if (SAMPLE_FMT_S16 == avctx->sample_fmt) {
                do {
                    dst16[0] = bytestream_get_be16(&src);
                    dst16[1] = bytestream_get_be16(&src);
                    dst16[2] = bytestream_get_be16(&src);
                    dst16[6] = bytestream_get_be16(&src);
                    dst16[4] = bytestream_get_be16(&src);
                    dst16[5] = bytestream_get_be16(&src);
                    dst16[7] = bytestream_get_be16(&src);
                    dst16[3] = bytestream_get_be16(&src);
                    dst16 += 8;
                } while (--samples);
            } else {
                do {
                    dst32[0] = bytestream_get_be24(&src) << 8;
                    dst32[1] = bytestream_get_be24(&src) << 8;
                    dst32[2] = bytestream_get_be24(&src) << 8;
                    dst32[6] = bytestream_get_be24(&src) << 8;
                    dst32[4] = bytestream_get_be24(&src) << 8;
                    dst32[5] = bytestream_get_be24(&src) << 8;
                    dst32[7] = bytestream_get_be24(&src) << 8;
                    dst32[3] = bytestream_get_be24(&src) << 8;
                    dst32 += 8;
                } while (--samples);
            }
            break;
        }
    }

    /* Bytes consumed, including the 4-byte header. */
    retval = src - avpkt->data;
    if (avctx->debug & FF_DEBUG_BITSTREAM)
        dprintf(avctx, "pcm_bluray_decode_frame: decoded %d -> %d bytes\n",
                retval, *data_size);
    return retval;
}
/**
 * Cook initialization
 *
 * Parses the container extradata, which holds one 8- or 16-byte record
 * per subpacket, and sets up per-subpacket decoding state according to
 * the detected Cook version.
 *
 * NOTE(review): this chunk is truncated — the tail of the function
 * (post-switch per-subpacket setup and the loop/function close) is not
 * visible here.
 *
 * @param avctx pointer to the AVCodecContext
 */
static av_cold int cook_decode_init(AVCodecContext *avctx)
{
    COOKContext *q = avctx->priv_data;
    const uint8_t *edata_ptr = avctx->extradata;
    const uint8_t *edata_ptr_end = edata_ptr + avctx->extradata_size;
    int extradata_size = avctx->extradata_size;
    int s = 0;                       /* current subpacket index */
    unsigned int channel_mask = 0;   /* union of all subpacket masks */
    int samples_per_frame = 0;
    int ret;
    q->avctx = avctx;

    /* Take care of the codec specific extradata. */
    if (extradata_size <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Necessary extradata missing!\n");
        return AVERROR_INVALIDDATA;
    }
    av_log(avctx, AV_LOG_DEBUG, "codecdata_length=%d\n", avctx->extradata_size);

    /* Take data from the AVCodecContext (RM container). */
    if (!avctx->channels) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
        return AVERROR_INVALIDDATA;
    }

    /* Initialize RNG. */
    av_lfg_init(&q->random_state, 0);

    ff_dsputil_init(&q->dsp, avctx);

    while (edata_ptr < edata_ptr_end) {
        /* 8 for mono, 16 for stereo, ? for multichannel
           Swap to right endianness so we don't need to care later on. */
        if (extradata_size >= 8) {
            q->subpacket[s].cookversion = bytestream_get_be32(&edata_ptr);
            samples_per_frame           = bytestream_get_be16(&edata_ptr);
            q->subpacket[s].subbands    = bytestream_get_be16(&edata_ptr);
            extradata_size -= 8;
        }
        if (extradata_size >= 8) {
            bytestream_get_be32(&edata_ptr);    // Unknown unused
            q->subpacket[s].js_subband_start = bytestream_get_be16(&edata_ptr);
            q->subpacket[s].js_vlc_bits      = bytestream_get_be16(&edata_ptr);
            extradata_size -= 8;
        }

        /* Initialize extradata related variables. */
        q->subpacket[s].samples_per_channel = samples_per_frame / avctx->channels;
        q->subpacket[s].bits_per_subpacket  = avctx->block_align * 8;

        /* Initialize default data states. */
        q->subpacket[s].log2_numvector_size = 5;
        q->subpacket[s].total_subbands      = q->subpacket[s].subbands;
        q->subpacket[s].num_channels        = 1;

        /* Initialize version-dependent variables */
        av_log(avctx, AV_LOG_DEBUG, "subpacket[%i].cookversion=%x\n",
               s, q->subpacket[s].cookversion);
        q->subpacket[s].joint_stereo = 0;
        switch (q->subpacket[s].cookversion) {
        case MONO:
            if (avctx->channels != 1) {
                av_log_ask_for_sample(avctx, "Container channels != 1.\n");
                return AVERROR_PATCHWELCOME;
            }
            av_log(avctx, AV_LOG_DEBUG, "MONO\n");
            break;
        case STEREO:
            if (avctx->channels != 1) {
                q->subpacket[s].bits_per_subpdiv = 1;
                q->subpacket[s].num_channels     = 2;
            }
            av_log(avctx, AV_LOG_DEBUG, "STEREO\n");
            break;
        case JOINT_STEREO:
            if (avctx->channels != 2) {
                av_log_ask_for_sample(avctx, "Container channels != 2.\n");
                return AVERROR_PATCHWELCOME;
            }
            av_log(avctx, AV_LOG_DEBUG, "JOINT_STEREO\n");
            /* joint-stereo adds the subband offset to the subband count */
            if (avctx->extradata_size >= 16) {
                q->subpacket[s].total_subbands = q->subpacket[s].subbands +
                                                 q->subpacket[s].js_subband_start;
                q->subpacket[s].joint_stereo = 1;
                q->subpacket[s].num_channels = 2;
            }
            /* bigger frames need a larger vector size */
            if (q->subpacket[s].samples_per_channel > 256) {
                q->subpacket[s].log2_numvector_size = 6;
            }
            if (q->subpacket[s].samples_per_channel > 512) {
                q->subpacket[s].log2_numvector_size = 7;
            }
            break;
        case MC_COOK:
            av_log(avctx, AV_LOG_DEBUG, "MULTI_CHANNEL\n");
            if (extradata_size >= 4)
                channel_mask |= q->subpacket[s].channel_mask =
                    bytestream_get_be32(&edata_ptr);

            /* subpackets covering >1 channel are decoded as joint stereo */
            if (av_get_channel_layout_nb_channels(q->subpacket[s].channel_mask) > 1) {
                q->subpacket[s].total_subbands = q->subpacket[s].subbands +
                                                 q->subpacket[s].js_subband_start;
                q->subpacket[s].joint_stereo = 1;
                q->subpacket[s].num_channels = 2;
                q->subpacket[s].samples_per_channel = samples_per_frame >> 1;

                if (q->subpacket[s].samples_per_channel > 256) {
                    q->subpacket[s].log2_numvector_size = 6;
                }
                if (q->subpacket[s].samples_per_channel > 512) {
                    q->subpacket[s].log2_numvector_size = 7;
                }
            } else
                q->subpacket[s].samples_per_channel = samples_per_frame;

            break;
        default:
            av_log_ask_for_sample(avctx, "Unknown Cook version.\n");
            return AVERROR_PATCHWELCOME;
        }
/**
 * Decode a raw PCM packet: convert from the codec's on-disk sample layout
 * to native interleaved samples in the output buffer.
 *
 * Most codec ids are handled by the DECODE() macro (type, endian swap,
 * optional shift and unsigned-to-signed offset); the remaining cases do
 * hand-rolled conversion (D-Cinema audio, planar s16, Zork, (a|mu)-law via
 * lookup table, and 16/20/24-bit DVD block interleaving).
 *
 * @param avctx     codec context
 * @param data      output buffer for decoded samples
 * @param data_size in: output buffer capacity; out: bytes written
 * @param avpkt     input packet
 * @return number of input bytes consumed, or -1 on error
 */
static int pcm_decode_frame(AVCodecContext *avctx, void *data,
                            int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    PCMDecode *s = avctx->priv_data;
    int sample_size, c, n;
    short *samples;
    const uint8_t *src, *src8, *src2[MAX_CHANNELS];
    uint8_t *dstu8;
    /* Typed views of the output buffer used by the DECODE() macro. */
    int16_t *dst_int16_t;
    int32_t *dst_int32_t;
    int64_t *dst_int64_t;
    uint16_t *dst_uint16_t;
    uint32_t *dst_uint32_t;

    samples = data;
    src = buf;

    if (avctx->sample_fmt!=avctx->codec->sample_fmts[0]) {
        av_log(avctx, AV_LOG_ERROR, "invalid sample_fmt\n");
        return -1;
    }

    if(avctx->channels <= 0 || avctx->channels > MAX_CHANNELS){
        av_log(avctx, AV_LOG_ERROR, "PCM channels out of bounds\n");
        return -1;
    }

    sample_size = av_get_bits_per_sample(avctx->codec_id)/8;

    /* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */
    if (CODEC_ID_PCM_DVD == avctx->codec_id)
        /* 2 samples are interleaved per block in PCM_DVD */
        sample_size = avctx->bits_per_coded_sample * 2 / 8;

    n = avctx->channels * sample_size;

    /* Trim a trailing partial frame; reject packets smaller than one frame. */
    if(n && buf_size % n){
        if (buf_size < n) {
            av_log(avctx, AV_LOG_ERROR, "invalid PCM packet\n");
            return -1;
        }else
            buf_size -= buf_size % n;
    }

    /* Cap input so the decoded output fits the caller's buffer
     * (worst case every input sample becomes 2 output bytes). */
    buf_size= FFMIN(buf_size, *data_size/2);
    *data_size=0;

    n = buf_size/sample_size;

    switch(avctx->codec->id) {
    case CODEC_ID_PCM_U32LE:
        DECODE(uint32_t, le32, src, samples, n, 0, 0x80000000)
        break;
    case CODEC_ID_PCM_U32BE:
        DECODE(uint32_t, be32, src, samples, n, 0, 0x80000000)
        break;
    case CODEC_ID_PCM_S24LE:
        DECODE(int32_t, le24, src, samples, n, 8, 0)
        break;
    case CODEC_ID_PCM_S24BE:
        DECODE(int32_t, be24, src, samples, n, 8, 0)
        break;
    case CODEC_ID_PCM_U24LE:
        DECODE(uint32_t, le24, src, samples, n, 8, 0x800000)
        break;
    case CODEC_ID_PCM_U24BE:
        DECODE(uint32_t, be24, src, samples, n, 8, 0x800000)
        break;
    case CODEC_ID_PCM_S24DAUD:
        /* D-Cinema audio: 24-bit words with 4 sync-flag bits, bit-reversed. */
        for(;n>0;n--) {
            uint32_t v = bytestream_get_be24(&src);
            v >>= 4; // sync flags are here
            *samples++ = av_reverse[(v >> 8) & 0xff] +
                         (av_reverse[v & 0xff] << 8);
        }
        break;
    case CODEC_ID_PCM_S16LE_PLANAR:
        /* One contiguous plane per channel; interleave on output. */
        n /= avctx->channels;
        for(c=0;c<avctx->channels;c++)
            src2[c] = &src[c*n*2];
        for(;n>0;n--)
            for(c=0;c<avctx->channels;c++)
                *samples++ = bytestream_get_le16(&src2[c]);
        src = src2[avctx->channels-1];
        break;
    case CODEC_ID_PCM_U16LE:
        DECODE(uint16_t, le16, src, samples, n, 0, 0x8000)
        break;
    case CODEC_ID_PCM_U16BE:
        DECODE(uint16_t, be16, src, samples, n, 0, 0x8000)
        break;
    case CODEC_ID_PCM_S8:
        dstu8= (uint8_t*)samples;
        for(;n>0;n--) {
            *dstu8++ = *src++ + 128;
        }
        samples= (short*)dstu8;
        break;
#if HAVE_BIGENDIAN
    case CODEC_ID_PCM_F64LE:
        DECODE(int64_t, le64, src, samples, n, 0, 0)
        break;
    case CODEC_ID_PCM_S32LE:
    case CODEC_ID_PCM_F32LE:
        DECODE(int32_t, le32, src, samples, n, 0, 0)
        break;
    case CODEC_ID_PCM_S16LE:
        DECODE(int16_t, le16, src, samples, n, 0, 0)
        break;
    /* native-endian formats fall through to the plain memcpy below */
    case CODEC_ID_PCM_F64BE:
    case CODEC_ID_PCM_F32BE:
    case CODEC_ID_PCM_S32BE:
    case CODEC_ID_PCM_S16BE:
#else
    case CODEC_ID_PCM_F64BE:
        DECODE(int64_t, be64, src, samples, n, 0, 0)
        break;
    case CODEC_ID_PCM_F32BE:
    case CODEC_ID_PCM_S32BE:
        DECODE(int32_t, be32, src, samples, n, 0, 0)
        break;
    case CODEC_ID_PCM_S16BE:
        DECODE(int16_t, be16, src, samples, n, 0, 0)
        break;
    /* native-endian formats fall through to the plain memcpy below */
    case CODEC_ID_PCM_F64LE:
    case CODEC_ID_PCM_F32LE:
    case CODEC_ID_PCM_S32LE:
    case CODEC_ID_PCM_S16LE:
#endif /* HAVE_BIGENDIAN */
    case CODEC_ID_PCM_U8:
        memcpy(samples, src, n*sample_size);
        src += n*sample_size;
        samples = (short*)((uint8_t*)data + n*sample_size);
        break;
    case CODEC_ID_PCM_ZORK:
        /* sign-magnitude 8-bit: high bit set means positive */
        for(;n>0;n--) {
            int x= *src++;
            if(x&128)
                x-= 128;
            else
                x = -x;
            *samples++ = x << 8;
        }
        break;
    case CODEC_ID_PCM_ALAW:
    case CODEC_ID_PCM_MULAW:
        /* table built at init time expands companded bytes to 16 bits */
        for(;n>0;n--) {
            *samples++ = s->table[*src++];
        }
        break;
    case CODEC_ID_PCM_DVD:
        dst_int32_t = data;
        n /= avctx->channels;
        switch (avctx->bits_per_coded_sample) {
        case 16:
            while (n--) {
                c = avctx->channels;
                while (c--)
                    *dst_int32_t++ = bytestream_get_be16(&src) << 16;
            }
            break;
        case 20:
            /* Two samples per channel per block: 16-bit MSBs first, then a
             * shared byte holding the two 4-bit LSB nibbles (src8). */
            while (n--) {
                c = avctx->channels;
                src8 = src + 4*c;
                while (c--) {
                    *dst_int32_t++ = (bytestream_get_be16(&src) << 16) +
                                     ((*src8   &0xf0) << 8);
                    *dst_int32_t++ = (bytestream_get_be16(&src) << 16) +
                                     ((*src8++ &0x0f) << 12);
                }
                src = src8;
            }
            break;
        case 24:
            /* Two samples per channel per block: 16-bit MSBs first, then
             * one LSB byte per sample (src8). */
            while (n--) {
                c = avctx->channels;
                src8 = src + 4*c;
                while (c--) {
                    *dst_int32_t++ = (bytestream_get_be16(&src) << 16) +
                                     ((*src8++) << 8);
                    *dst_int32_t++ = (bytestream_get_be16(&src) << 16) +
                                     ((*src8++) << 8);
                }
                src = src8;
            }
            break;
        default:
            av_log(avctx, AV_LOG_ERROR,
                   "PCM DVD unsupported sample depth %i\n",
                   avctx->bits_per_coded_sample);
            return -1;
            break;
        }
        samples = (short *) dst_int32_t;
        break;
    default:
        return -1;
    }
    *data_size = (uint8_t *)samples - (uint8_t *)data;
    return src - buf;
}