Code Example #1
File: flacdec.c  Project: ElfSundae/FFmpegAudioTest
void ff_flac_parse_streaminfo(AVCodecContext *avctx, struct FLACStreaminfo *s,
                              const uint8_t *buffer)
{
    GetBitContext gb;
    init_get_bits(&gb, buffer, FLAC_STREAMINFO_SIZE*8);

    skip_bits(&gb, 16); /* skip min blocksize */
    s->max_blocksize = get_bits(&gb, 16);
    if (s->max_blocksize < FLAC_MIN_BLOCKSIZE) {
        av_log(avctx, AV_LOG_WARNING, "invalid max blocksize: %d\n",
               s->max_blocksize);
        s->max_blocksize = 16;
    }

    skip_bits(&gb, 24); /* skip min frame size */
    s->max_framesize = get_bits_long(&gb, 24);

    s->samplerate = get_bits_long(&gb, 20);
    s->channels = get_bits(&gb, 3) + 1;
    s->bps = get_bits(&gb, 5) + 1;

    avctx->channels = s->channels;
    avctx->sample_rate = s->samplerate;
    avctx->bits_per_raw_sample = s->bps;

    s->samples  = get_bits_long(&gb, 32) << 4;
    s->samples |= get_bits(&gb, 4);

    skip_bits_long(&gb, 64); /* md5 sum */
    skip_bits_long(&gb, 64); /* md5 sum */

    dump_headers(avctx, s);
}
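The STREAMINFO parser above is a compact illustration of the GetBitContext idiom shared by every example in this list: initialize the reader over a byte buffer, pull fixed-width fields with get_bits()/get_bits_long(), and discard uninteresting fields with skip_bits()/skip_bits_long(). A minimal sketch of that pattern follows; it assumes it is compiled inside the FFmpeg tree (libavcodec/get_bits.h is an internal header), and the 8-byte header layout it parses is made up purely for illustration.

#include "libavcodec/get_bits.h"

/* Hypothetical fixed-layout header: 16-bit magic, 4-bit version,
 * 12 reserved bits, 32-bit payload size. The layout is illustrative only. */
static int parse_toy_header(const uint8_t *buf, int buf_size,
                            unsigned *version, unsigned *payload_size)
{
    GetBitContext gb;
    int ret;

    if (buf_size < 8)
        return AVERROR_INVALIDDATA;

    if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
        return ret;

    skip_bits(&gb, 16);                      /* magic, not validated here */
    *version      = get_bits(&gb, 4);
    skip_bits(&gb, 12);                      /* reserved */
    *payload_size = get_bits_long(&gb, 32);  /* fields wider than 25 bits need the _long variants */

    return get_bits_count(&gb) / 8;          /* bytes consumed */
}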
Code Example #2
static int
flac_header (AVFormatContext *s, int idx)
{
    struct ogg *ogg = s->priv_data;
    struct ogg_stream *os = ogg->streams + idx;
    AVStream *st = s->streams[idx];
    GetBitContext gb;
    FLACStreaminfo si;
    int mdt;

    if (os->buf[os->pstart] == 0xff)
        return 0;

    init_get_bits(&gb, os->buf + os->pstart, os->psize * 8);
    skip_bits1(&gb); /* metadata_last */
    mdt = get_bits(&gb, 7);

    if (mdt == OGG_FLAC_METADATA_TYPE_STREAMINFO)
    {
        uint8_t *streaminfo_start = os->buf + os->pstart + 5 + 4 + 4 + 4;
        skip_bits_long(&gb, 4 * 8); /* "FLAC" */
        if(get_bits(&gb, 8) != 1) /* unsupported major version */
            return -1;
        skip_bits_long(&gb, 8 + 16); /* minor version + header count */
        skip_bits_long(&gb, 4 * 8); /* "fLaC" */

        /* METADATA_BLOCK_HEADER */
        if (get_bits_long(&gb, 32) != FLAC_STREAMINFO_SIZE)
            return -1;

        ff_flac_parse_streaminfo(st->codec, &si, streaminfo_start);

        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_FLAC;

        st->codec->extradata =
            av_malloc(FLAC_STREAMINFO_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
        memcpy(st->codec->extradata, streaminfo_start, FLAC_STREAMINFO_SIZE);
        st->codec->extradata_size = FLAC_STREAMINFO_SIZE;

        av_set_pts_info(st, 64, 1, st->codec->sample_rate);
    }
    else if (mdt == FLAC_METADATA_TYPE_VORBIS_COMMENT)
    {
        ff_vorbis_comment (s, &st->metadata, os->buf + os->pstart + 4, os->psize - 4);
    }

    return 1;
}
Code Example #3
File: hevc_sei.c  Project: Emerica/FFmpeg
static int decode_nal_sei_prefix(GetBitContext *gb, void *logctx, HEVCSEI *s,
                                 const HEVCParamSets *ps, int type, int size)
{
    switch (type) {
    case 256:  // Mismatched value from HM 8.1
        return decode_nal_sei_decoded_picture_hash(&s->picture_hash, gb);
    case HEVC_SEI_TYPE_FRAME_PACKING:
        return decode_nal_sei_frame_packing_arrangement(&s->frame_packing, gb);
    case HEVC_SEI_TYPE_DISPLAY_ORIENTATION:
        return decode_nal_sei_display_orientation(&s->display_orientation, gb);
    case HEVC_SEI_TYPE_PICTURE_TIMING:
        return decode_nal_sei_pic_timing(s, gb, ps, logctx, size);
    case HEVC_SEI_TYPE_MASTERING_DISPLAY_INFO:
        return decode_nal_sei_mastering_display_info(&s->mastering_display, gb);
    case HEVC_SEI_TYPE_CONTENT_LIGHT_LEVEL_INFO:
        return decode_nal_sei_content_light_info(&s->content_light, gb);
    case HEVC_SEI_TYPE_ACTIVE_PARAMETER_SETS:
        return decode_nal_sei_active_parameter_sets(s, gb, logctx);
    case HEVC_SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35:
        return decode_nal_sei_user_data_registered_itu_t_t35(s, gb, size);
    case HEVC_SEI_TYPE_ALTERNATIVE_TRANSFER_CHARACTERISTICS:
        return decode_nal_sei_alternative_transfer(&s->alternative_transfer, gb);
    default:
        av_log(logctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
        skip_bits_long(gb, 8 * size);
        return 0;
    }
}
Code Example #4
File: hevc_sei.c  Project: Emerica/FFmpeg
static int decode_nal_sei_pic_timing(HEVCSEI *s, GetBitContext *gb, const HEVCParamSets *ps,
                                     void *logctx, int size)
{
    HEVCSEIPictureTiming *h = &s->picture_timing;
    HEVCSPS *sps;

    if (!ps->sps_list[s->active_seq_parameter_set_id])
        return(AVERROR(ENOMEM));
    sps = (HEVCSPS*)ps->sps_list[s->active_seq_parameter_set_id]->data;

    if (sps->vui.frame_field_info_present_flag) {
        int pic_struct = get_bits(gb, 4);
        h->picture_struct = AV_PICTURE_STRUCTURE_UNKNOWN;
        if (pic_struct == 2 || pic_struct == 10 || pic_struct == 12) {
            av_log(logctx, AV_LOG_DEBUG, "BOTTOM Field\n");
            h->picture_struct = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
        } else if (pic_struct == 1 || pic_struct == 9 || pic_struct == 11) {
            av_log(logctx, AV_LOG_DEBUG, "TOP Field\n");
            h->picture_struct = AV_PICTURE_STRUCTURE_TOP_FIELD;
        }
        get_bits(gb, 2);                   // source_scan_type
        get_bits(gb, 1);                   // duplicate_flag
        skip_bits1(gb);
        size--;
    }
    skip_bits_long(gb, 8 * size);

    return 0;
}
Code Example #5
void ff_dirac_init_arith_decoder(DiracArith *c, GetBitContext *gb, int length)
{
    int i;
    align_get_bits(gb);

    length = FFMIN(length, get_bits_left(gb)/8);

    c->bytestream     = gb->buffer + get_bits_count(gb)/8;
    c->bytestream_end = c->bytestream + length;
    skip_bits_long(gb, length*8);

    c->low = 0;
    for (i = 0; i < 4; i++) {
        c->low <<= 8;
        if (c->bytestream < c->bytestream_end)
            c->low |= *c->bytestream++;
        else
            c->low |= 0xff;
    }

    c->counter = -16;
    c->range   = 0xffff;

    for (i = 0; i < 256; i++) {
        ff_dirac_prob_branchless[i][0] =  ff_dirac_prob[255-i];
        ff_dirac_prob_branchless[i][1] = -ff_dirac_prob[i];
    }

    for (i = 0; i < DIRAC_CTX_COUNT; i++)
        c->contexts[i] = 0x8000;
}
Code Example #6
File: hevc_sei.c  Project: artclarke/humble-video
static int decode_pic_timing(HEVCContext *s, int size)
{
    GetBitContext *gb = &s->HEVClc->gb;
    HEVCSPS *sps;

    if (!s->ps.sps_list[s->active_seq_parameter_set_id])
        return(AVERROR(ENOMEM));
    sps = (HEVCSPS*)s->ps.sps_list[s->active_seq_parameter_set_id]->data;

    if (sps->vui.frame_field_info_present_flag) {
        int pic_struct = get_bits(gb, 4);
        s->picture_struct = AV_PICTURE_STRUCTURE_UNKNOWN;
        if (pic_struct == 2) {
            av_log(s->avctx, AV_LOG_DEBUG, "BOTTOM Field\n");
            s->picture_struct = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
        } else if (pic_struct == 1) {
            av_log(s->avctx, AV_LOG_DEBUG, "TOP Field\n");
            s->picture_struct = AV_PICTURE_STRUCTURE_TOP_FIELD;
        }
        get_bits(gb, 2);                   // source_scan_type
        get_bits(gb, 1);                   // duplicate_flag
        skip_bits1(gb);
        size--;
    }
    skip_bits_long(gb, 8 * size);

    return 0;
}
Code Example #7
File: hevc_sei.c  Project: Crawping/chromium_extract
static int decode_nal_sei_prefix(HEVCContext *s, int type, int size)
{
    GetBitContext *gb = &s->HEVClc->gb;

    switch (type) {
    case 256:  // Mismatched value from HM 8.1
        return decode_nal_sei_decoded_picture_hash(s);
    case SEI_TYPE_FRAME_PACKING:
        return decode_nal_sei_frame_packing_arrangement(s);
    case SEI_TYPE_DISPLAY_ORIENTATION:
        return decode_nal_sei_display_orientation(s);
    case SEI_TYPE_PICTURE_TIMING:
        {
            int ret = decode_pic_timing(s);
            av_log(s->avctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
            skip_bits(gb, 8 * size);
            return ret;
        }
    case SEI_TYPE_ACTIVE_PARAMETER_SETS:
        active_parameter_sets(s);
        av_log(s->avctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
        return 0;
    case SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35:
        return decode_nal_sei_user_data_registered_itu_t_t35(s, size);
    default:
        av_log(s->avctx, AV_LOG_DEBUG, "Skipped PREFIX SEI %d\n", type);
        skip_bits_long(gb, 8 * size);
        return 0;
    }
}
Code Example #8
File: hevc_sei.c  Project: Crawping/chromium_extract
static int decode_nal_sei_user_data_registered_itu_t_t35(HEVCContext *s, int size)
{
    uint32_t country_code;
    uint32_t user_identifier;

    GetBitContext *gb = &s->HEVClc->gb;

    if (size < 7)
        return AVERROR(EINVAL);
    size -= 7;

    country_code = get_bits(gb, 8);
    if (country_code == 0xFF) {
        skip_bits(gb, 8);
        size--;
    }

    skip_bits(gb, 8);
    skip_bits(gb, 8);

    user_identifier = get_bits_long(gb, 32);

    switch (user_identifier) {
        case MKBETAG('G', 'A', '9', '4'):
            return decode_registered_user_data_closed_caption(s, size);
        default:
            skip_bits_long(gb, size * 8);
            break;
    }
    return 0;
}
Code Example #9
static int dca_parse_params(const uint8_t *buf, int buf_size, int *duration,
                            int *sample_rate, int *framesize)
{
    GetBitContext gb;
    uint8_t hdr[12 + AV_INPUT_BUFFER_PADDING_SIZE] = { 0 };
    int ret, sample_blocks, sr_code;

    if (buf_size < 12)
        return AVERROR_INVALIDDATA;

    if ((ret = avpriv_dca_convert_bitstream(buf, 12, hdr, 12)) < 0)
        return ret;

    init_get_bits(&gb, hdr, 96);

    skip_bits_long(&gb, 39);
    sample_blocks = get_bits(&gb, 7) + 1;
    if (sample_blocks < 8)
        return AVERROR_INVALIDDATA;
    *duration = 256 * (sample_blocks / 8);

    *framesize = get_bits(&gb, 14) + 1;
    if (*framesize < 95)
        return AVERROR_INVALIDDATA;

    skip_bits(&gb, 6);
    sr_code      = get_bits(&gb, 4);
    *sample_rate = avpriv_dca_sample_rates[sr_code];
    if (*sample_rate == 0)
        return AVERROR_INVALIDDATA;

    return 0;
}
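A hedged usage sketch for the helper above: dca_parse_params() is a static function in FFmpeg's DTS parser, so a caller like this would have to live in the same file; the probe buffer and the reporting function are placeholders.

/* Illustrative caller only; the buffer being probed is hypothetical. */
static void report_dts_core(const uint8_t *buf, int buf_size)
{
    int duration, sample_rate, framesize;

    if (dca_parse_params(buf, buf_size, &duration, &sample_rate, &framesize) < 0) {
        av_log(NULL, AV_LOG_DEBUG, "no valid DTS core header at this offset\n");
        return;
    }
    av_log(NULL, AV_LOG_DEBUG, "DTS core: %d Hz, %d-byte frame, %d samples\n",
           sample_rate, framesize, duration);
}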
Code Example #10
File: aacdec.c  Project: hookbrother/FFmpeg
static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
        GetBitContext *gb, int asclen)
{
    AACContext *ac        = &latmctx->aac_ctx;
    AVCodecContext *avctx = ac->avctx;
    MPEG4AudioConfig m4ac = { 0 };
    int config_start_bit  = get_bits_count(gb);
    int sync_extension    = 0;
    int bits_consumed, esize;

    if (asclen) {
        sync_extension = 1;
        asclen         = FFMIN(asclen, get_bits_left(gb));
    } else
        asclen         = get_bits_left(gb);

    if (config_start_bit % 8) {
        avpriv_request_sample(latmctx->aac_ctx.avctx,
                              "Non-byte-aligned audio-specific config");
        return AVERROR_PATCHWELCOME;
    }
    if (asclen <= 0)
        return AVERROR_INVALIDDATA;
    bits_consumed = decode_audio_specific_config(NULL, avctx, &m4ac,
                    gb->buffer + (config_start_bit / 8),
                    asclen, sync_extension);

    if (bits_consumed < 0)
        return AVERROR_INVALIDDATA;

    if (!latmctx->initialized ||
            ac->oc[1].m4ac.sample_rate != m4ac.sample_rate ||
            ac->oc[1].m4ac.chan_config != m4ac.chan_config) {

        if(latmctx->initialized) {
            av_log(avctx, AV_LOG_INFO, "audio config changed\n");
        } else {
            av_log(avctx, AV_LOG_DEBUG, "initializing latmctx\n");
        }
        latmctx->initialized = 0;

        esize = (bits_consumed+7) / 8;

        if (avctx->extradata_size < esize) {
            av_free(avctx->extradata);
            avctx->extradata = av_malloc(esize + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata)
                return AVERROR(ENOMEM);
        }

        avctx->extradata_size = esize;
        memcpy(avctx->extradata, gb->buffer + (config_start_bit/8), esize);
        memset(avctx->extradata+esize, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    }
    skip_bits_long(gb, bits_consumed);

    return bits_consumed;
}
Code Example #11
File: dca_exss.c  Project: BalintBanyasz/FFmpeg
/**
 * Skip mixing coefficients of a single mix out configuration (HD)
 */
static void dca_exss_skip_mix_coeffs(GetBitContext *gb, int channels, int out_ch)
{
    int i;

    for (i = 0; i < channels; i++) {
        int mix_map_mask = get_bits(gb, out_ch);
        int num_coeffs = av_popcount(mix_map_mask);
        skip_bits_long(gb, num_coeffs * 6);
    }
}
Code Example #12
File: nellymoserdec.c  Project: baoping/FFmpeg
static void nelly_decode_block(NellyMoserDecodeContext *s,
                               const unsigned char block[NELLY_BLOCK_LEN],
                               float audio[NELLY_SAMPLES])
{
    int i,j;
    float buf[NELLY_FILL_LEN], pows[NELLY_FILL_LEN];
    float *aptr, *bptr, *pptr, val, pval;
    int bits[NELLY_BUF_LEN];
    unsigned char v;

    init_get_bits(&s->gb, block, NELLY_BLOCK_LEN * 8);

    bptr = buf;
    pptr = pows;
    val = ff_nelly_init_table[get_bits(&s->gb, 6)];
    for (i=0 ; i<NELLY_BANDS ; i++) {
        if (i > 0)
            val += ff_nelly_delta_table[get_bits(&s->gb, 5)];
        pval = -pow(2, val/2048) * s->scale_bias;
        for (j = 0; j < ff_nelly_band_sizes_table[i]; j++) {
            *bptr++ = val;
            *pptr++ = pval;
        }

    }

    ff_nelly_get_sample_bits(buf, bits);

    for (i = 0; i < 2; i++) {
        aptr = audio + i * NELLY_BUF_LEN;

        init_get_bits(&s->gb, block, NELLY_BLOCK_LEN * 8);
        skip_bits_long(&s->gb, NELLY_HEADER_BITS + i*NELLY_DETAIL_BITS);

        for (j = 0; j < NELLY_FILL_LEN; j++) {
            if (bits[j] <= 0) {
                aptr[j] = M_SQRT1_2*pows[j];
                if (av_lfg_get(&s->random_state) & 1)
                    aptr[j] *= -1.0;
            } else {
                v = get_bits(&s->gb, bits[j]);
                aptr[j] = ff_nelly_dequantization_table[(1<<bits[j])-1+v]*pows[j];
            }
        }
        memset(&aptr[NELLY_FILL_LEN], 0,
               (NELLY_BUF_LEN - NELLY_FILL_LEN) * sizeof(float));

        s->imdct_ctx.imdct_calc(&s->imdct_ctx, s->imdct_out, aptr);
        /* XXX: overlapping and windowing should be part of a more
           generic imdct function */
        s->dsp.vector_fmul_reverse(s->state, s->state, ff_sine_128, NELLY_BUF_LEN);
        s->dsp.vector_fmul_add(aptr, s->imdct_out, ff_sine_128, s->state, NELLY_BUF_LEN);
        memcpy(s->state, s->imdct_out + NELLY_BUF_LEN, sizeof(float)*NELLY_BUF_LEN);
    }
}
Code Example #13
File: hevc_sei.c  Project: Emerica/FFmpeg
static int decode_nal_sei_suffix(GetBitContext *gb, void *logctx, HEVCSEI *s,
                                 int type, int size)
{
    switch (type) {
    case HEVC_SEI_TYPE_DECODED_PICTURE_HASH:
        return decode_nal_sei_decoded_picture_hash(&s->picture_hash, gb);
    default:
        av_log(logctx, AV_LOG_DEBUG, "Skipped SUFFIX SEI %d\n", type);
        skip_bits_long(gb, 8 * size);
        return 0;
    }
}
Code Example #14
File: hevc_sei.c  Project: Crawping/chromium_extract
static int decode_nal_sei_suffix(HEVCContext *s, int type, int size)
{
    GetBitContext *gb = &s->HEVClc->gb;

    switch (type) {
    case SEI_TYPE_DECODED_PICTURE_HASH:
        return decode_nal_sei_decoded_picture_hash(s);
    default:
        av_log(s->avctx, AV_LOG_DEBUG, "Skipped SUFFIX SEI %d\n", type);
        skip_bits_long(gb, 8 * size);
        return 0;
    }
}
Code Example #15
File: mpeg4audio.c  Project: ericma2014/IvyVideo
int ff_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    int specific_config_bitindex;

    init_get_bits(&gb, buf, buf_size*8);
    c->object_type = get_object_type(&gb);
    c->sample_rate = get_sample_rate(&gb, &c->sampling_index);
    c->chan_config = get_bits(&gb, 4);
    if (c->chan_config < FF_ARRAY_ELEMS(ff_mpeg4audio_channels))
        c->channels = ff_mpeg4audio_channels[c->chan_config];
    c->sbr = -1;
    if (c->object_type == AOT_SBR) {
        c->ext_object_type = c->object_type;
        c->sbr = 1;
        c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index);
        c->object_type = get_object_type(&gb);
        if (c->object_type == AOT_ER_BSAC)
            c->ext_chan_config = get_bits(&gb, 4);
    } else {
        c->ext_object_type = AOT_NULL;
        c->ext_sample_rate = 0;
    }
    specific_config_bitindex = get_bits_count(&gb);

    if (c->object_type == AOT_ALS) {
        skip_bits(&gb, 5);
        if (show_bits_long(&gb, 24) != MKBETAG('\0','A','L','S'))
            skip_bits_long(&gb, 24);

        specific_config_bitindex = get_bits_count(&gb);

        if (parse_config_ALS(&gb, c))
            return -1;
    }

    if (c->ext_object_type != AOT_SBR) {
        int bits_left = buf_size*8 - get_bits_count(&gb);
        for (; bits_left > 15; bits_left--) {
            if (show_bits(&gb, 11) == 0x2b7) { // sync extension
                get_bits(&gb, 11);
                c->ext_object_type = get_object_type(&gb);
                if (c->ext_object_type == AOT_SBR && (c->sbr = get_bits1(&gb)) == 1)
                    c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index);
                break;
            } else
                get_bits1(&gb); // skip 1 bit
        }
    }
    return specific_config_bitindex;
}
Code Example #16
File: binkaudio.c  Project: OS2World/LIB-libav
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame_ptr, AVPacket *avpkt)
{
    BinkAudioContext *s = avctx->priv_data;
    AVFrame *frame      = data;
    GetBitContext *gb = &s->gb;
    int ret, consumed = 0;

    if (!get_bits_left(gb)) {
        uint8_t *buf;
        /* handle end-of-stream */
        if (!avpkt->size) {
            *got_frame_ptr = 0;
            return 0;
        }
        if (avpkt->size < 4) {
            av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
            return AVERROR_INVALIDDATA;
        }
        buf = av_realloc(s->packet_buffer, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!buf)
            return AVERROR(ENOMEM);
        s->packet_buffer = buf;
        memcpy(s->packet_buffer, avpkt->data, avpkt->size);
        init_get_bits(gb, s->packet_buffer, avpkt->size * 8);
        consumed = avpkt->size;

        /* skip reported size */
        skip_bits_long(gb, 32);
    }

    /* get output buffer */
    frame->nb_samples = s->frame_len;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    if (decode_block(s, (float **)frame->extended_data,
                     avctx->codec->id == AV_CODEC_ID_BINKAUDIO_DCT)) {
        av_log(avctx, AV_LOG_ERROR, "Incomplete packet\n");
        return AVERROR_INVALIDDATA;
    }
    get_bits_align32(gb);

    frame->nb_samples = s->block_size / avctx->channels;
    *got_frame_ptr    = 1;

    return consumed;
}
Code Example #17
File: adtsenc.c  Project: VFR-maniac/libav
static int adts_decode_extradata(AVFormatContext *s, ADTSContext *adts, uint8_t *buf, int size)
{
    GetBitContext gb;
    PutBitContext pb;
    MPEG4AudioConfig m4ac;
    int off;

    init_get_bits(&gb, buf, size * 8);
    off = avpriv_mpeg4audio_get_config(&m4ac, buf, size * 8, 1);
    if (off < 0)
        return off;
    skip_bits_long(&gb, off);
    adts->objecttype        = m4ac.object_type - 1;
    adts->sample_rate_index = m4ac.sampling_index;
    adts->channel_conf      = m4ac.chan_config;

    if (adts->objecttype > 3U) {
        av_log(s, AV_LOG_ERROR, "MPEG-4 AOT %d is not allowed in ADTS\n", adts->objecttype+1);
        return AVERROR_INVALIDDATA;
    }
    if (adts->sample_rate_index == 15) {
        av_log(s, AV_LOG_ERROR, "Escape sample rate index illegal in ADTS\n");
        return AVERROR_INVALIDDATA;
    }
    if (get_bits(&gb, 1)) {
        av_log(s, AV_LOG_ERROR, "960/120 MDCT window is not allowed in ADTS\n");
        return AVERROR_INVALIDDATA;
    }
    if (get_bits(&gb, 1)) {
        av_log(s, AV_LOG_ERROR, "Scalable configurations are not allowed in ADTS\n");
        return AVERROR_INVALIDDATA;
    }
    if (get_bits(&gb, 1)) {
        av_log(s, AV_LOG_ERROR, "Extension flag is not allowed in ADTS\n");
        return AVERROR_INVALIDDATA;
    }
    if (!adts->channel_conf) {
        init_put_bits(&pb, adts->pce_data, MAX_PCE_SIZE);

        put_bits(&pb, 3, 5); //ID_PCE
        adts->pce_size = (avpriv_copy_pce_data(&pb, &gb) + 3) / 8;
        flush_put_bits(&pb);
    }

    adts->write_adts = 1;

    return 0;
}
Code Example #18
File: mpeg4audio.c  Project: 248668342/ffmpeg-windows
/**
 * Parse MPEG-4 audio configuration for ALS object type.
 * @param[in] gb       bit reader context
 * @param[in] c        MPEG4AudioConfig structure to fill
 * @return on success 0 is returned, otherwise a value < 0
 */
static int parse_config_ALS(GetBitContext *gb, MPEG4AudioConfig *c)
{
    if (get_bits_left(gb) < 112)
        return -1;

    if (get_bits_long(gb, 32) != MKBETAG('A','L','S','\0'))
        return -1;

    // override AudioSpecificConfig channel configuration and sample rate
    // which are buggy in old ALS conformance files
    c->sample_rate = get_bits_long(gb, 32);

    // skip number of samples
    skip_bits_long(gb, 32);

    // read number of channels
    c->chan_config = 0;
    c->channels    = get_bits(gb, 16) + 1;

    return 0;
}
Code Example #19
static int latm_decode_extradata(LATMContext *ctx, uint8_t *buf, int size)
{
    GetBitContext gb;
    MPEG4AudioConfig m4ac;

    init_get_bits(&gb, buf, size * 8);
    ctx->off = avpriv_mpeg4audio_get_config(&m4ac, buf, size);
    if (ctx->off < 0)
        return ctx->off;
    skip_bits_long(&gb, ctx->off);

    /* FIXME: are any formats not allowed in LATM? */

    if (m4ac.object_type > AOT_SBR && m4ac.object_type != AOT_ALS) {
        av_log(ctx, AV_LOG_ERROR, "Muxing MPEG-4 AOT %d in LATM is not supported\n", m4ac.object_type);
        return AVERROR_INVALIDDATA;
    }
    ctx->channel_conf = m4ac.chan_config;
    ctx->object_type  = m4ac.object_type;

    return 0;
}
Code Example #20
File: h264_sei.c  Project: GeniusBo/FFmpeg-1
static int decode_frame_packing(H264Context *h, int size) {
    int bits = get_bits_left(&h->gb);

    h->sei_fpa.frame_packing_arrangement_id          = get_ue_golomb(&h->gb);
    h->sei_fpa.frame_packing_arrangement_cancel_flag = get_bits(&h->gb, 1);
    if (!h->sei_fpa.frame_packing_arrangement_cancel_flag) {
        h->sei_fpa.frame_packing_arrangement_type  = get_bits(&h->gb, 7);
        h->sei_fpa.quincunx_sampling_flag          = get_bits(&h->gb, 1);
        h->sei_fpa.content_interpretation_type     = get_bits(&h->gb, 6);
        skip_bits(&h->gb, 1); /* spatial_flipping_flag */
        skip_bits(&h->gb, 1); /* frame0_flipped_flag */
        skip_bits(&h->gb, 1); /* field_views_flag */
        skip_bits(&h->gb, 1); /* current_frame_is_frame0_flag */
        skip_bits(&h->gb, 1); /* frame0_self_contained_flag */
        skip_bits(&h->gb, 1); /* frame1_self_contained_flag */
        if (!h->sei_fpa.quincunx_sampling_flag && h->sei_fpa.frame_packing_arrangement_type != 5) {
            skip_bits(&h->gb, 4); /* frame0_grid_position_x */
            skip_bits(&h->gb, 4); /* frame0_grid_position_y */
            skip_bits(&h->gb, 4); /* frame1_grid_position_x */
            skip_bits(&h->gb, 4); /* frame1_grid_position_y */
        }
        skip_bits(&h->gb, 8); /* frame_packing_arrangement_reserved_byte */
        h->sei_fpa.frame_packing_arrangement_repetition_period = get_ue_golomb(&h->gb) /* frame_packing_arrangement_repetition_period */;
    }
    skip_bits(&h->gb, 1); /* frame_packing_arrangement_extension_flag */

    if (h->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(h->avctx, AV_LOG_DEBUG, "SEI FPA %d %d %d %d %d %d\n",
                                       h->sei_fpa.frame_packing_arrangement_id,
                                       h->sei_fpa.frame_packing_arrangement_cancel_flag,
                                       h->sei_fpa.frame_packing_arrangement_type,
                                       h->sei_fpa.quincunx_sampling_flag,
                                       h->sei_fpa.content_interpretation_type,
                                       h->sei_fpa.frame_packing_arrangement_repetition_period);
    skip_bits_long(&h->gb, 8 * size - (bits - get_bits_left(&h->gb)));
    return 0;
}
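decode_frame_packing() above ends with the bookkeeping used by most of the sized-payload (SEI) parsers in this list: note how many bits were available when the payload started, decode the fields of interest, then discard whatever is left of the declared payload with skip_bits_long() so the caller stays byte-accurate. A minimal sketch of that pattern, assuming an FFmpeg-internal GetBitContext and a hypothetical two-field payload:

/* Consume exactly "size" bytes of a payload, regardless of how many of
 * its fields are actually understood. Field names are illustrative only. */
static int parse_sized_payload(GetBitContext *gb, int size)
{
    int bits_at_start = get_bits_left(gb);
    int present, value = 0;

    if (size <= 0 || 8 * size > bits_at_start)
        return AVERROR_INVALIDDATA;

    present = get_bits1(gb);          /* hypothetical presence flag */
    if (present)
        value = get_bits(gb, 16);     /* hypothetical 16-bit field */

    /* skip the remainder of the payload: declared size minus bits consumed */
    skip_bits_long(gb, 8 * size - (bits_at_start - get_bits_left(gb)));

    return value;
}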
Code Example #21
File: h263dec.c  Project: chance1254/libav-android
static int decode_slice(MpegEncContext *s){
    const int part_mask= s->partitioned_frame ? (ER_AC_END|ER_AC_ERROR) : 0x7F;
    const int mb_size= 16>>s->avctx->lowres;
    s->last_resync_gb= s->gb;
    s->first_slice_line= 1;

    s->resync_mb_x= s->mb_x;
    s->resync_mb_y= s->mb_y;

    ff_set_qscale(s, s->qscale);

    if (s->avctx->hwaccel) {
        const uint8_t *start= s->gb.buffer + get_bits_count(&s->gb)/8;
        const uint8_t *end  = ff_h263_find_resync_marker(start + 1, s->gb.buffer_end);
        skip_bits_long(&s->gb, 8*(end - start));
        return s->avctx->hwaccel->decode_slice(s->avctx, start, end - start);
    }

    if(s->partitioned_frame){
        const int qscale= s->qscale;

        if(CONFIG_MPEG4_DECODER && s->codec_id==CODEC_ID_MPEG4){
            if(ff_mpeg4_decode_partitions(s) < 0)
                return -1;
        }

        /* restore variables which were modified */
        s->first_slice_line=1;
        s->mb_x= s->resync_mb_x;
        s->mb_y= s->resync_mb_y;
        ff_set_qscale(s, qscale);
    }

    for(; s->mb_y < s->mb_height; s->mb_y++) {
        /* per-row end of slice checks */
        if(s->msmpeg4_version){
            if(s->resync_mb_y + s->slice_height == s->mb_y){
                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END);

                return 0;
            }
        }

        if(s->msmpeg4_version==1){
            s->last_dc[0]=
            s->last_dc[1]=
            s->last_dc[2]= 128;
        }

        ff_init_block_index(s);
        for(; s->mb_x < s->mb_width; s->mb_x++) {
            int ret;

            ff_update_block_index(s);

            if(s->resync_mb_x == s->mb_x && s->resync_mb_y+1 == s->mb_y){
                s->first_slice_line=0;
            }

            /* DCT & quantize */

            s->mv_dir = MV_DIR_FORWARD;
            s->mv_type = MV_TYPE_16X16;
//            s->mb_skipped = 0;
//printf("%d %d %06X\n", ret, get_bits_count(&s->gb), show_bits(&s->gb, 24));
            ret= s->decode_mb(s, s->block);

            if (s->pict_type!=AV_PICTURE_TYPE_B)
                ff_h263_update_motion_val(s);

            if(ret<0){
                const int xy= s->mb_x + s->mb_y*s->mb_stride;
                if(ret==SLICE_END){
                    MPV_decode_mb(s, s->block);
                    if(s->loop_filter)
                        ff_h263_loop_filter(s);

//printf("%d %d %d %06X\n", s->mb_x, s->mb_y, s->gb.size*8 - get_bits_count(&s->gb), show_bits(&s->gb, 24));
                    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_END&part_mask);

                    s->padding_bug_score--;

                    if(++s->mb_x >= s->mb_width){
                        s->mb_x=0;
                        ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
                        MPV_report_decode_progress(s);
                        s->mb_y++;
                    }
                    return 0;
                }else if(ret==SLICE_NOEND){
                    av_log(s->avctx, AV_LOG_ERROR, "Slice mismatch at MB: %d\n", xy);
                    ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x+1, s->mb_y, ER_MB_END&part_mask);
                    return -1;
                }
                av_log(s->avctx, AV_LOG_ERROR, "Error at MB: %d\n", xy);
                ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR&part_mask);

                return -1;
            }

            MPV_decode_mb(s, s->block);
            if(s->loop_filter)
                ff_h263_loop_filter(s);
        }

        ff_draw_horiz_band(s, s->mb_y*mb_size, mb_size);
        MPV_report_decode_progress(s);

        s->mb_x= 0;
    }

    assert(s->mb_x==0 && s->mb_y==s->mb_height);

    if(s->codec_id==CODEC_ID_MPEG4
       && (s->workaround_bugs&FF_BUG_AUTODETECT)
       && get_bits_left(&s->gb) >= 48
       && show_bits(&s->gb, 24)==0x4010
       && !s->data_partitioning)
        s->padding_bug_score+=32;

    /* try to detect the padding bug */
    if(      s->codec_id==CODEC_ID_MPEG4
       &&   (s->workaround_bugs&FF_BUG_AUTODETECT)
       &&    get_bits_left(&s->gb) >=0
       &&    get_bits_left(&s->gb) < 48
//       &&   !s->resync_marker
       &&   !s->data_partitioning){

        const int bits_count= get_bits_count(&s->gb);
        const int bits_left = s->gb.size_in_bits - bits_count;

        if(bits_left==0){
            s->padding_bug_score+=16;
        } else if(bits_left != 1){
            int v= show_bits(&s->gb, 8);
            v|= 0x7F >> (7-(bits_count&7));

            if(v==0x7F && bits_left<=8)
                s->padding_bug_score--;
            else if(v==0x7F && ((get_bits_count(&s->gb)+8)&8) && bits_left<=16)
                s->padding_bug_score+= 4;
            else
                s->padding_bug_score++;
        }
    }
Code Example #22
File: aacdec.c  Project: hookbrother/FFmpeg
static int read_stream_mux_config(struct LATMContext *latmctx,
                                  GetBitContext *gb)
{
    int ret, audio_mux_version = get_bits(gb, 1);

    latmctx->audio_mux_version_A = 0;
    if (audio_mux_version)
        latmctx->audio_mux_version_A = get_bits(gb, 1);

    if (!latmctx->audio_mux_version_A) {

        if (audio_mux_version)
            latm_get_value(gb);                 // taraFullness

        skip_bits(gb, 1);                       // allStreamSameTimeFraming
        skip_bits(gb, 6);                       // numSubFrames
        // numPrograms
        if (get_bits(gb, 4)) {                  // numPrograms
            avpriv_request_sample(latmctx->aac_ctx.avctx, "Multiple programs");
            return AVERROR_PATCHWELCOME;
        }

        // for each program (which there is only one in DVB)

        // for each layer (which there is only one in DVB)
        if (get_bits(gb, 3)) {                   // numLayer
            avpriv_request_sample(latmctx->aac_ctx.avctx, "Multiple layers");
            return AVERROR_PATCHWELCOME;
        }

        // for all but first stream: use_same_config = get_bits(gb, 1);
        if (!audio_mux_version) {
            if ((ret = latm_decode_audio_specific_config(latmctx, gb, 0)) < 0)
                return ret;
        } else {
            int ascLen = latm_get_value(gb);
            if ((ret = latm_decode_audio_specific_config(latmctx, gb, ascLen)) < 0)
                return ret;
            ascLen -= ret;
            skip_bits_long(gb, ascLen);
        }

        latmctx->frame_length_type = get_bits(gb, 3);
        switch (latmctx->frame_length_type) {
        case 0:
            skip_bits(gb, 8);       // latmBufferFullness
            break;
        case 1:
            latmctx->frame_length = get_bits(gb, 9);
            break;
        case 3:
        case 4:
        case 5:
            skip_bits(gb, 6);       // CELP frame length table index
            break;
        case 6:
        case 7:
            skip_bits(gb, 1);       // HVXC frame length table index
            break;
        }

        if (get_bits(gb, 1)) {                  // other data
            if (audio_mux_version) {
                latm_get_value(gb);             // other_data_bits
            } else {
                int esc;
                do {
                    esc = get_bits(gb, 1);
                    skip_bits(gb, 8);
                } while (esc);
            }
        }

        if (get_bits(gb, 1))                     // crc present
            skip_bits(gb, 8);                    // config_crc
    }

    return 0;
}
Code Example #23
File: aacdec.c  Project: DeHackEd/FFmpeg
static int latm_decode_audio_specific_config(struct LATMContext *latmctx,
                                             GetBitContext *gb, int asclen)
{
    AACContext *ac        = &latmctx->aac_ctx;
    AVCodecContext *avctx = ac->avctx;
    MPEG4AudioConfig m4ac = { 0 };
    GetBitContext gbc;
    int config_start_bit  = get_bits_count(gb);
    int sync_extension    = 0;
    int bits_consumed, esize, i;

    if (asclen > 0) {
        sync_extension = 1;
        asclen         = FFMIN(asclen, get_bits_left(gb));
        init_get_bits(&gbc, gb->buffer, config_start_bit + asclen);
        skip_bits_long(&gbc, config_start_bit);
    } else if (asclen == 0) {
        gbc = *gb;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if (get_bits_left(gb) <= 0)
        return AVERROR_INVALIDDATA;

    bits_consumed = decode_audio_specific_config_gb(NULL, avctx, &m4ac,
                                                    &gbc, config_start_bit,
                                                    sync_extension);

    if (bits_consumed < config_start_bit)
        return AVERROR_INVALIDDATA;
    bits_consumed -= config_start_bit;

    if (asclen == 0)
      asclen = bits_consumed;

    if (!latmctx->initialized ||
        ac->oc[1].m4ac.sample_rate != m4ac.sample_rate ||
        ac->oc[1].m4ac.chan_config != m4ac.chan_config) {

        if (latmctx->initialized) {
            av_log(avctx, AV_LOG_INFO, "audio config changed (sample_rate=%d, chan_config=%d)\n", m4ac.sample_rate, m4ac.chan_config);
        } else {
            av_log(avctx, AV_LOG_DEBUG, "initializing latmctx\n");
        }
        latmctx->initialized = 0;

        esize = (asclen + 7) / 8;

        if (avctx->extradata_size < esize) {
            av_free(avctx->extradata);
            avctx->extradata = av_malloc(esize + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata)
                return AVERROR(ENOMEM);
        }

        avctx->extradata_size = esize;
        gbc = *gb;
        for (i = 0; i < esize; i++) {
          avctx->extradata[i] = get_bits(&gbc, 8);
        }
        memset(avctx->extradata+esize, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    }
    skip_bits_long(gb, asclen);

    return 0;
}
Code Example #24
File: dts.cpp  Project: bestustc/LAVFilters
int parse_dts_header(DTSParserContext *pContext, DTSHeader *pHeader, uint8_t *pBuffer, unsigned uSize)
{
  if(!pContext) return -1;
  if(!pHeader) return -1;

  unsigned ExtDescriptor = 0, ExtCoding = 0;

  uint8_t dts_buffer[32 + FF_INPUT_BUFFER_PADDING_SIZE] = {0};
  int ret = ff_dca_convert_bitstream(pBuffer, uSize, dts_buffer, 32);

  bool is16be = (AV_RB32(pBuffer) == DCA_MARKER_RAW_BE);

  /* Parse Core Header */
  if (ret >= 0) {
    pHeader->HasCore = 1;

    GetBitContext *gb = pContext->gb;
    init_get_bits(gb, dts_buffer, 32 << 3);

    skip_bits_long(gb, 32);                             /* Sync code */
    skip_bits1(gb);                                     /* Frame type */
    pHeader->SamplesPerBlock  = get_bits(gb, 5) + 1;    /* Samples deficit */
    pHeader->CRCPresent       = get_bits1(gb);          /* CRC present */
    pHeader->Blocks           = get_bits(gb, 7) + 1;    /* Number of Blocks */
    pHeader->FrameSize        = get_bits(gb, 14) + 1;   /* Primary (core) Frame Size */
    pHeader->ChannelLayout    = get_bits(gb, 6);        /* Channel configuration */
    unsigned sample_index     = get_bits(gb, 4);        /* Sample frequency index */
    pHeader->SampleRate       = avpriv_dca_sample_rates[sample_index];
    unsigned bitrate_index    = get_bits(gb, 5);        /* Bitrate index */
    pHeader->Bitrate          = dca_bit_rates[bitrate_index];
    skip_bits1(gb);                                     /* Down mix */
    skip_bits1(gb);                                     /* Dynamic range */
    skip_bits1(gb);                                     /* Time stamp */
    skip_bits1(gb);                                     /* Auxiliary data */
    skip_bits1(gb);                                     /* HDCD */
    ExtDescriptor             = get_bits(gb, 3);        /* External descriptor  */
    ExtCoding                 = get_bits1(gb);          /* Extended coding */
    skip_bits1(gb);                                     /* ASPF */
    pHeader->LFE              = get_bits(gb, 2);        /* LFE */
    skip_bits1(gb);                                     /* Predictor History */
    if(pHeader->CRCPresent)
      skip_bits(gb, 16);                                /* CRC */
    skip_bits1(gb);                                     /* Multirate Interpolator */
    skip_bits(gb, 4);                                   /* Encoder Software Revision */
    skip_bits(gb, 2);                                   /* Copy history */
    pHeader->ES = get_bits1(gb);                        /* ES */
    skip_bits(gb, 2);                                   /* PCMR (source PCM resolution) */
    skip_bits1(gb);                                     /* SUMF (Front Sum/Difference Flag) */
    skip_bits1(gb);                                     /* SUMS (Surround Sum/Difference Flag) */
    skip_bits(gb, 4);                                   /* Dialog Normalization Parameter or Unspecified (dependent on encoder version) */

    // Check some basic validity
    if (uSize < pHeader->FrameSize)
      return -1;
  } else {
    pHeader->HasCore = 0;
  }

  if (pHeader->HasCore && !is16be)
    return 0;

  // DTS-HD parsing
  const uint8_t *pHD = nullptr;
  if (pHeader->HasCore) { // If we have a core, only search after the normal buffer
    if (uSize > (pHeader->FrameSize + 4)) { // at least 4 bytes extra, could probably insert a minimal size of a HD header, but so what
      pHD = find_marker32_position(pBuffer + pHeader->FrameSize, uSize - pHeader->FrameSize, DCA_HD_MARKER);
    }
  } else {
    pHD = find_marker32_position(pBuffer, uSize, DCA_HD_MARKER);
  }
  if (pHD) {
    pHeader->IsHD = 1;
    size_t remaining = uSize - (pHD - pBuffer);
    parse_dts_hd_header(pContext, pHeader, pHD, (unsigned)remaining);

    const uint8_t *pXChHD = find_marker32_position(pHD, remaining, DCA_XCH_MARKER);
    if (pXChHD) {
      size_t remaining = uSize - (pXChHD - pBuffer);
      parse_dts_xch_hd_header(pContext, pHeader, pXChHD, (unsigned)remaining);
    }

    const uint8_t *pXXChHD = find_marker32_position(pHD, remaining, DCA_XXCH_MARKER);
    if (pXXChHD) {
      size_t remaining = uSize - (pXXChHD - pBuffer);
      parse_dts_xxch_hd_header(pContext, pHeader, pXXChHD, (unsigned)remaining);
    }
  }

  // Handle DTS extensions
  if (ExtCoding) {
    size_t coreSize = pHD ? (pHD - pBuffer) : uSize;
    if (ExtDescriptor == 0 || ExtDescriptor == 3) {
      const uint8_t *pXCh = find_marker32_position(pBuffer, coreSize, DCA_XCH_MARKER);
      if (pXCh) {
        size_t remaining = coreSize - (pXCh - pBuffer);
        parse_dts_xch_header(pContext, pHeader, pXCh, (unsigned)remaining);
      }
    }
    if (ExtDescriptor == 6) {
      const uint8_t *pXXCh = find_marker32_position(pBuffer, coreSize, DCA_XXCH_MARKER);
      if (pXXCh) {
        size_t remaining = coreSize - (pXXCh - pBuffer);
        parse_dts_xxch_header(pContext, pHeader, pXXCh, (unsigned)remaining);
      }
    }
  }

  return 0;
}
Code Example #25
File: h264_sei.c  Project: CoerSuer/FFmpeg
int ff_h264_decode_sei(H264Context *h)
{
    while (get_bits_left(&h->gb) > 16) {
        int type = 0;
        unsigned size = 0;
        unsigned next;
        int ret  = 0;

        do {
            if (get_bits_left(&h->gb) < 8)
                return AVERROR_INVALIDDATA;
            type += show_bits(&h->gb, 8);
        } while (get_bits(&h->gb, 8) == 255);

        do {
            if (get_bits_left(&h->gb) < 8)
                return AVERROR_INVALIDDATA;
            size += show_bits(&h->gb, 8);
        } while (get_bits(&h->gb, 8) == 255);

        if (h->avctx->debug&FF_DEBUG_STARTCODE)
            av_log(h->avctx, AV_LOG_DEBUG, "SEI %d len:%d\n", type, size);

        if (size > get_bits_left(&h->gb) / 8) {
            av_log(h->avctx, AV_LOG_ERROR, "SEI type %d size %d truncated at %d\n",
                   type, 8*size, get_bits_left(&h->gb));
            return AVERROR_INVALIDDATA;
        }
        next = get_bits_count(&h->gb) + 8 * size;

        switch (type) {
        case SEI_TYPE_PIC_TIMING: // Picture timing SEI
            ret = decode_picture_timing(h);
            if (ret < 0)
                return ret;
            break;
        case SEI_TYPE_USER_DATA_ITU_T_T35:
            if (decode_user_data_itu_t_t35(h, size) < 0)
                return -1;
            break;
        case SEI_TYPE_USER_DATA_UNREGISTERED:
            ret = decode_unregistered_user_data(h, size);
            if (ret < 0)
                return ret;
            break;
        case SEI_TYPE_RECOVERY_POINT:
            ret = decode_recovery_point(h);
            if (ret < 0)
                return ret;
            break;
        case SEI_BUFFERING_PERIOD:
            ret = decode_buffering_period(h);
            if (ret < 0)
                return ret;
            break;
        case SEI_TYPE_FRAME_PACKING:
            ret = decode_frame_packing_arrangement(h);
            if (ret < 0)
                return ret;
            break;
        default:
            av_log(h->avctx, AV_LOG_DEBUG, "unknown SEI type %d\n", type);
        }
        skip_bits_long(&h->gb, next - get_bits_count(&h->gb));

        // FIXME check bits here
        align_get_bits(&h->gb);
    }

    return 0;
}
Code Example #26
File: escape130.c  Project: CoderMarco/FFmpeg4Android
static int escape130_decode_frame(AVCodecContext *avctx, void *data,
                                  int *got_frame, AVPacket *avpkt)
{
    int buf_size        = avpkt->size;
    Escape130Context *s = avctx->priv_data;
    AVFrame *pic        = data;
    GetBitContext gb;
    int ret;

    uint8_t *old_y, *old_cb, *old_cr,
            *new_y, *new_cb, *new_cr;
    uint8_t *dstY, *dstU, *dstV;
    unsigned old_y_stride, old_cb_stride, old_cr_stride,
             new_y_stride, new_cb_stride, new_cr_stride;
    unsigned total_blocks = avctx->width * avctx->height / 4,
             block_index, block_x = 0;
    unsigned y[4] = { 0 }, cb = 0x10, cr = 0x10;
    int skip = -1, y_avg = 0, i, j;
    uint8_t *ya = s->old_y_avg;

    // first 16 bytes are header; no useful information in here
    if (buf_size <= 16) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient frame data\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    if ((ret = init_get_bits8(&gb, avpkt->data, avpkt->size)) < 0)
        return ret;
    skip_bits_long(&gb, 16 * 8);

    new_y  = s->new_y;
    new_cb = s->new_u;
    new_cr = s->new_v;
    new_y_stride  = s->linesize[0];
    new_cb_stride = s->linesize[1];
    new_cr_stride = s->linesize[2];
    old_y  = s->old_y;
    old_cb = s->old_u;
    old_cr = s->old_v;
    old_y_stride  = s->linesize[0];
    old_cb_stride = s->linesize[1];
    old_cr_stride = s->linesize[2];

    for (block_index = 0; block_index < total_blocks; block_index++) {
        // Note that this call will make us skip the rest of the blocks
        // if the frame ends prematurely.
        if (skip == -1)
            skip = decode_skip_count(&gb);
        if (skip == -1) {
            av_log(avctx, AV_LOG_ERROR, "Error decoding skip value\n");
            return AVERROR_INVALIDDATA;
        }

        if (skip) {
            y[0] = old_y[0];
            y[1] = old_y[1];
            y[2] = old_y[old_y_stride];
            y[3] = old_y[old_y_stride + 1];
            y_avg = ya[0];
            cb = old_cb[0];
            cr = old_cr[0];
        } else {
            if (get_bits1(&gb)) {
                unsigned sign_selector       = get_bits(&gb, 6);
                unsigned difference_selector = get_bits(&gb, 2);
                y_avg = 2 * get_bits(&gb, 5);
                for (i = 0; i < 4; i++) {
                    y[i] = av_clip(y_avg + offset_table[difference_selector] *
                                   sign_table[sign_selector][i], 0, 63);
                }
            } else if (get_bits1(&gb)) {
                if (get_bits1(&gb)) {
                    y_avg = get_bits(&gb, 6);
                } else {
                    unsigned adjust_index = get_bits(&gb, 3);
                    y_avg = (y_avg + luma_adjust[adjust_index]) & 63;
                }
                for (i = 0; i < 4; i++)
                    y[i] = y_avg;
            }

            if (get_bits1(&gb)) {
                if (get_bits1(&gb)) {
                    cb = get_bits(&gb, 5);
                    cr = get_bits(&gb, 5);
                } else {
                    unsigned adjust_index = get_bits(&gb, 3);
                    cb = (cb + chroma_adjust[0][adjust_index]) & 31;
                    cr = (cr + chroma_adjust[1][adjust_index]) & 31;
                }
            }
        }
        *ya++ = y_avg;

        new_y[0]                = y[0];
        new_y[1]                = y[1];
        new_y[new_y_stride]     = y[2];
        new_y[new_y_stride + 1] = y[3];
        *new_cb = cb;
        *new_cr = cr;

        old_y += 2;
        old_cb++;
        old_cr++;
        new_y += 2;
        new_cb++;
        new_cr++;
        block_x++;
        if (block_x * 2 == avctx->width) {
            block_x = 0;
            old_y  += old_y_stride * 2  - avctx->width;
            old_cb += old_cb_stride     - avctx->width / 2;
            old_cr += old_cr_stride     - avctx->width / 2;
            new_y  += new_y_stride * 2  - avctx->width;
            new_cb += new_cb_stride     - avctx->width / 2;
            new_cr += new_cr_stride     - avctx->width / 2;
        }

        skip--;
    }

    new_y  = s->new_y;
    new_cb = s->new_u;
    new_cr = s->new_v;
    dstY   = pic->data[0];
    dstU   = pic->data[1];
    dstV   = pic->data[2];
    for (j = 0; j < avctx->height; j++) {
        for (i = 0; i < avctx->width; i++)
            dstY[i] = new_y[i] << 2;
        dstY  += pic->linesize[0];
        new_y += new_y_stride;
    }
    for (j = 0; j < avctx->height / 2; j++) {
        for (i = 0; i < avctx->width / 2; i++) {
            dstU[i] = chroma_vals[new_cb[i]];
            dstV[i] = chroma_vals[new_cr[i]];
        }
        dstU   += pic->linesize[1];
        dstV   += pic->linesize[2];
        new_cb += new_cb_stride;
        new_cr += new_cr_stride;
    }

    av_dlog(avctx, "Frame data: provided %d bytes, used %d bytes\n",
            buf_size, get_bits_count(&gb) >> 3);

    FFSWAP(uint8_t*, s->old_y, s->new_y);
    FFSWAP(uint8_t*, s->old_u, s->new_u);
    FFSWAP(uint8_t*, s->old_v, s->new_v);

    *got_frame = 1;

    return buf_size;
}
Code Example #27
static int flashsv_decode_frame(AVCodecContext *avctx,
                                void *data, int *data_size,
                                AVPacket *avpkt)
{
	const uint8_t *buf = avpkt->data;
	int buf_size = avpkt->size;
	FlashSVContext *s = avctx->priv_data;
	int h_blocks, v_blocks, h_part, v_part, i, j;
	GetBitContext gb;

	/* no supplementary picture */
	if (buf_size == 0)
		return 0;
	if (buf_size < 4)
		return -1;

	init_get_bits(&gb, buf, buf_size * 8);

	/* start to parse the bitstream */
	s->block_width = 16* (get_bits(&gb, 4)+1);
	s->image_width =     get_bits(&gb,12);
	s->block_height= 16* (get_bits(&gb, 4)+1);
	s->image_height=     get_bits(&gb,12);

	/* calculate amount of blocks and the size of the border blocks */
	h_blocks = s->image_width / s->block_width;
	h_part = s->image_width % s->block_width;
	v_blocks = s->image_height / s->block_height;
	v_part = s->image_height % s->block_height;

	/* the block size could change between frames, make sure the buffer
	 * is large enough, if not, get a larger one */
	if(s->block_size < s->block_width*s->block_height)
	{
		av_free(s->tmpblock);
		if ((s->tmpblock = av_malloc(3*s->block_width*s->block_height)) == NULL)
		{
			av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
			return -1;
		}
	}
	s->block_size = s->block_width*s->block_height;

	/* init the image size once */
	if((avctx->width==0) && (avctx->height==0))
	{
		avctx->width = s->image_width;
		avctx->height = s->image_height;
	}

	/* check for changes of image width and image height */
	if ((avctx->width != s->image_width) || (avctx->height != s->image_height))
	{
		av_log(avctx, AV_LOG_ERROR, "Frame width or height differs from first frames!\n");
		av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d  vs  ch = %d, cv = %d\n",avctx->height,
		       avctx->width,s->image_height,s->image_width);
		return -1;
	}

	av_log(avctx, AV_LOG_DEBUG, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n",
	       s->image_width, s->image_height, s->block_width, s->block_height,
	       h_blocks, v_blocks, h_part, v_part);

	s->frame.reference = 1;
	s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
	if(avctx->reget_buffer(avctx, &s->frame) < 0)
	{
		av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
		return -1;
	}

	/* loop over all block columns */
	for (j = 0; j < v_blocks + (v_part?1:0); j++)
	{

		int hp = j*s->block_height; // horiz position in frame
		int hs = (j<v_blocks)?s->block_height:v_part; // size of block


		/* loop over all block rows */
		for (i = 0; i < h_blocks + (h_part?1:0); i++)
		{
			int wp = i*s->block_width; // vert position in frame
			int ws = (i<h_blocks)?s->block_width:h_part; // size of block

			/* get the size of the compressed zlib chunk */
			int size = get_bits(&gb, 16);
			if (8 * size > get_bits_left(&gb))
			{
				avctx->release_buffer(avctx, &s->frame);
				s->frame.data[0] = NULL;
				return -1;
			}

			if (size == 0)
			{
				/* no change, don't do anything */
			}
			else
			{
				/* decompress block */
				int ret = inflateReset(&(s->zstream));
				if (ret != Z_OK)
				{
					av_log(avctx, AV_LOG_ERROR, "error in decompression (reset) of block %dx%d\n", i, j);
					/* return -1; */
				}
				s->zstream.next_in = buf+(get_bits_count(&gb)/8);
				s->zstream.avail_in = size;
				s->zstream.next_out = s->tmpblock;
				s->zstream.avail_out = s->block_size*3;
				ret = inflate(&(s->zstream), Z_FINISH);
				if (ret == Z_DATA_ERROR)
				{
					av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n");
					inflateSync(&(s->zstream));
					ret = inflate(&(s->zstream), Z_FINISH);
				}

				if ((ret != Z_OK) && (ret != Z_STREAM_END))
				{
					av_log(avctx, AV_LOG_ERROR, "error in decompression of block %dx%d: %d\n", i, j, ret);
					/* return -1; */
				}
				copy_region(s->tmpblock, s->frame.data[0], s->image_height-(hp+hs+1), wp, hs, ws, s->frame.linesize[0]);
				skip_bits_long(&gb, 8*size);   /* skip the consumed bits */
			}
		}
	}

	*data_size = sizeof(AVFrame);
	*(AVFrame*)data = s->frame;

	if ((get_bits_count(&gb)/8) != buf_size)
		av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
		       buf_size, (get_bits_count(&gb)/8));

	/* report that the buffer was completely consumed */
	return buf_size;
}
Code Example #28
File: aacps.c  Project: Arcen/FFmpeg
int ff_ps_read_data(AVCodecContext *avctx, GetBitContext *gb_host, PSContext *ps, int bits_left)
{
    int e;
    int bit_count_start = get_bits_count(gb_host);
    int header;
    int bits_consumed;
    GetBitContext gbc = *gb_host, *gb = &gbc;

    header = get_bits1(gb);
    if (header) {     //enable_ps_header
        ps->enable_iid = get_bits1(gb);
        if (ps->enable_iid) {
            int iid_mode = get_bits(gb, 3);
            if (iid_mode > 5) {
                av_log(avctx, AV_LOG_ERROR, "iid_mode %d is reserved.\n",
                       iid_mode);
                goto err;
            }
            ps->nr_iid_par    = nr_iidicc_par_tab[iid_mode];
            ps->iid_quant     = iid_mode > 2;
            ps->nr_ipdopd_par = nr_iidopd_par_tab[iid_mode];
        }
        ps->enable_icc = get_bits1(gb);
        if (ps->enable_icc) {
            ps->icc_mode = get_bits(gb, 3);
            if (ps->icc_mode > 5) {
                av_log(avctx, AV_LOG_ERROR, "icc_mode %d is reserved.\n",
                       ps->icc_mode);
                goto err;
            }
            ps->nr_icc_par = nr_iidicc_par_tab[ps->icc_mode];
        }
        ps->enable_ext = get_bits1(gb);
    }

    ps->frame_class = get_bits1(gb);
    ps->num_env_old = ps->num_env;
    ps->num_env     = num_env_tab[ps->frame_class][get_bits(gb, 2)];

    ps->border_position[0] = -1;
    if (ps->frame_class) {
        for (e = 1; e <= ps->num_env; e++)
            ps->border_position[e] = get_bits(gb, 5);
    } else
        for (e = 1; e <= ps->num_env; e++)
            ps->border_position[e] = (e * numQMFSlots >> ff_log2_tab[ps->num_env]) - 1;

    if (ps->enable_iid) {
        for (e = 0; e < ps->num_env; e++) {
            int dt = get_bits1(gb);
            if (read_iid_data(avctx, gb, ps, ps->iid_par, huff_iid[2*dt+ps->iid_quant], e, dt))
                goto err;
        }
    } else
        memset(ps->iid_par, 0, sizeof(ps->iid_par));

    if (ps->enable_icc)
        for (e = 0; e < ps->num_env; e++) {
            int dt = get_bits1(gb);
            if (read_icc_data(avctx, gb, ps, ps->icc_par, dt ? huff_icc_dt : huff_icc_df, e, dt))
                goto err;
        }
    else
        memset(ps->icc_par, 0, sizeof(ps->icc_par));

    if (ps->enable_ext) {
        int cnt = get_bits(gb, 4);
        if (cnt == 15) {
            cnt += get_bits(gb, 8);
        }
        cnt *= 8;
        while (cnt > 7) {
            int ps_extension_id = get_bits(gb, 2);
            cnt -= 2 + ps_read_extension_data(gb, ps, ps_extension_id);
        }
        if (cnt < 0) {
            av_log(avctx, AV_LOG_ERROR, "ps extension overflow %d", cnt);
            goto err;
        }
        skip_bits(gb, cnt);
    }

    ps->enable_ipdopd &= !PS_BASELINE;

    //Fix up envelopes
    if (!ps->num_env || ps->border_position[ps->num_env] < numQMFSlots - 1) {
        //Create a fake envelope
        int source = ps->num_env ? ps->num_env - 1 : ps->num_env_old - 1;
        if (source >= 0 && source != ps->num_env) {
            if (ps->enable_iid) {
                memcpy(ps->iid_par+ps->num_env, ps->iid_par+source, sizeof(ps->iid_par[0]));
            }
            if (ps->enable_icc) {
                memcpy(ps->icc_par+ps->num_env, ps->icc_par+source, sizeof(ps->icc_par[0]));
            }
            if (ps->enable_ipdopd) {
                memcpy(ps->ipd_par+ps->num_env, ps->ipd_par+source, sizeof(ps->ipd_par[0]));
                memcpy(ps->opd_par+ps->num_env, ps->opd_par+source, sizeof(ps->opd_par[0]));
            }
        }
        ps->num_env++;
        ps->border_position[ps->num_env] = numQMFSlots - 1;
    }


    ps->is34bands_old = ps->is34bands;
    if (!PS_BASELINE && (ps->enable_iid || ps->enable_icc))
        ps->is34bands = (ps->enable_iid && ps->nr_iid_par == 34) ||
                        (ps->enable_icc && ps->nr_icc_par == 34);

    //Baseline
    if (!ps->enable_ipdopd) {
        memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
        memset(ps->opd_par, 0, sizeof(ps->opd_par));
    }

    if (header)
        ps->start = 1;

    bits_consumed = get_bits_count(gb) - bit_count_start;
    if (bits_consumed <= bits_left) {
        skip_bits_long(gb_host, bits_consumed);
        return bits_consumed;
    }
    av_log(avctx, AV_LOG_ERROR, "Expected to read %d PS bits actually read %d.\n", bits_left, bits_consumed);
err:
    ps->start = 0;
    skip_bits_long(gb_host, bits_left);
    return bits_left;
}
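ff_ps_read_data() above also demonstrates the second common role of skip_bits_long(): committing a speculative parse. The fields are read from a private copy of the caller's GetBitContext, and the caller's reader is only advanced once the number of bits actually consumed is known (or, on error, the whole payload is swallowed). A hedged sketch of that commit-or-discard pattern, with a hypothetical parse_fields() standing in for the real field decoder:

/* Parse speculatively into a copy, then advance the caller's reader.
 * parse_fields() is hypothetical; bits_left is the payload size in bits. */
static int read_payload_committed(GetBitContext *gb_host, int bits_left)
{
    GetBitContext gbc = *gb_host;               /* private copy; host untouched */
    int start = get_bits_count(&gbc);
    int consumed;

    if (parse_fields(&gbc) < 0) {
        skip_bits_long(gb_host, bits_left);     /* error: discard the whole payload */
        return bits_left;
    }

    consumed = get_bits_count(&gbc) - start;
    if (consumed > bits_left)
        consumed = bits_left;
    skip_bits_long(gb_host, consumed);          /* commit only what was actually read */
    return consumed;
}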
Code Example #29
File: dtsdec.c  Project: 63n/FFmpeg
static int dts_probe(AVProbeData *p)
{
    const uint8_t *buf, *bufp;
    uint32_t state = -1;
    int markers[4*16] = {0};
    int sum, max, i;
    int64_t diff = 0;
    uint8_t hdr[12 + FF_INPUT_BUFFER_PADDING_SIZE] = { 0 };

    buf = p->buf + FFMIN(4096, p->buf_size);

    for(; buf < (p->buf+p->buf_size)-2; buf+=2) {
        int marker, sample_blocks, sample_rate, sr_code, framesize;
        int lfe;
        GetBitContext gb;

        bufp = buf;
        state = (state << 16) | bytestream_get_be16(&bufp);

        if (buf - p->buf >= 4)
            diff += FFABS(((int16_t)AV_RL16(buf)) - (int16_t)AV_RL16(buf-4));

        /* regular bitstream */
        if (state == DCA_SYNCWORD_CORE_BE)
            marker = 0;
        else if (state == DCA_SYNCWORD_CORE_LE)
            marker = 1;

        /* 14 bits big-endian bitstream */
        else if (state == DCA_SYNCWORD_CORE_14B_BE &&
                 (bytestream_get_be16(&bufp) & 0xFFF0) == 0x07F0)
            marker = 2;

        /* 14 bits little-endian bitstream */
        else if (state == DCA_SYNCWORD_CORE_14B_LE &&
                 (bytestream_get_be16(&bufp) & 0xF0FF) == 0xF007)
            marker = 3;
        else
            continue;

        if (avpriv_dca_convert_bitstream(buf-2, 12, hdr, 12) < 0)
            continue;

        init_get_bits(&gb, hdr, 96);
        skip_bits_long(&gb, 39);

        sample_blocks = get_bits(&gb, 7) + 1;
        if (sample_blocks < 8)
            continue;

        framesize = get_bits(&gb, 14) + 1;
        if (framesize < 95)
            continue;

        skip_bits(&gb, 6);
        sr_code = get_bits(&gb, 4);
        sample_rate = avpriv_dca_sample_rates[sr_code];
        if (sample_rate == 0)
            continue;

        get_bits(&gb, 5);
        if (get_bits(&gb, 1))
            continue;

        skip_bits_long(&gb, 9);
        lfe = get_bits(&gb, 2);
        if (lfe > 2)
            continue;

        marker += 4* sr_code;

        markers[marker] ++;
    }

    sum = max = 0;
    for (i=0; i<FF_ARRAY_ELEMS(markers); i++) {
        sum += markers[i];
        if (markers[max] < markers[i])
            max = i;
    }

    if (markers[max] > 3 && p->buf_size / markers[max] < 32*1024 &&
        markers[max] * 4 > sum * 3 &&
        diff / p->buf_size > 200)
        return AVPROBE_SCORE_EXTENSION + 1;

    return 0;
}
Code Example #30
File: vima.c  Project: Bjelijah/EcamTurnH265
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame_ptr, AVPacket *pkt)
{
    GetBitContext  gb;
    AVFrame        *frame = data;
    int16_t        pcm_data[2];
    uint32_t       samples;
    int8_t         channel_hint[2];
    int            ret, chan, channels = 1;

    if (pkt->size < 13)
        return AVERROR_INVALIDDATA;

    if ((ret = init_get_bits8(&gb, pkt->data, pkt->size)) < 0)
        return ret;

    samples = get_bits_long(&gb, 32);
    if (samples == 0xffffffff) {
        skip_bits_long(&gb, 32);
        samples = get_bits_long(&gb, 32);
    }

    if (samples > pkt->size * 2)
        return AVERROR_INVALIDDATA;

    channel_hint[0] = get_sbits(&gb, 8);
    if (channel_hint[0] & 0x80) {
        channel_hint[0] = ~channel_hint[0];
        channels = 2;
    }
    avctx->channels = channels;
    avctx->channel_layout = (channels == 2) ? AV_CH_LAYOUT_STEREO :
                                              AV_CH_LAYOUT_MONO;
    pcm_data[0] = get_sbits(&gb, 16);
    if (channels > 1) {
        channel_hint[1] = get_sbits(&gb, 8);
        pcm_data[1] = get_sbits(&gb, 16);
    }

    frame->nb_samples = samples;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    for (chan = 0; chan < channels; chan++) {
        uint16_t *dest = (uint16_t*)frame->data[0] + chan;
        int step_index = channel_hint[chan];
        int output = pcm_data[chan];
        int sample;

        for (sample = 0; sample < samples; sample++) {
            int lookup_size, lookup, highbit, lowbits;

            step_index  = av_clip(step_index, 0, 88);
            lookup_size = size_table[step_index];
            lookup      = get_bits(&gb, lookup_size);
            highbit     = 1 << (lookup_size - 1);
            lowbits     = highbit - 1;

            if (lookup & highbit)
                lookup ^= highbit;
            else
                highbit = 0;

            if (lookup == lowbits) {
                output = get_sbits(&gb, 16);
            } else {
                int predict_index, diff;

                predict_index = (lookup << (7 - lookup_size)) | (step_index << 6);
                predict_index = av_clip(predict_index, 0, 5785);
                diff          = predict_table[predict_index];
                if (lookup)
                    diff += ff_adpcm_step_table[step_index] >> (lookup_size - 1);
                if (highbit)
                    diff  = -diff;

                output  = av_clip_int16(output + diff);
            }

            *dest = output;
            dest += channels;

            step_index += step_index_tables[lookup_size - 2][lookup];
        }
    }

    *got_frame_ptr   = 1;

    return pkt->size;
}