Example #1
static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceContext *sl)
{
    const int mb_x    = sl->mb_x;
    const int mb_y    = sl->mb_y;
    const int mb_xy   = sl->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    uint8_t *dest[3];
    int linesize;
    int i, j, p;
    const int *block_offset = &h->block_offset[0];
    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass);
    const int plane_count      = (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) ? 3 : 1;

    for (p = 0; p < plane_count; p++) {
        dest[p] = h->cur_pic.f.data[p] +
                  ((mb_x << PIXEL_SHIFT) + mb_y * sl->linesize) * 16;
        h->vdsp.prefetch(dest[p] + (sl->mb_x & 3) * 4 * sl->linesize + (64 << PIXEL_SHIFT),
                         sl->linesize, 4);
    }

    h->list_counts[mb_xy] = sl->list_count;

    if (!SIMPLE && MB_FIELD(sl)) {
        linesize     = sl->mb_linesize = sl->mb_uvlinesize = sl->linesize * 2;
        block_offset = &h->block_offset[48];
        if (mb_y & 1) // FIXME move out of this function?
            for (p = 0; p < 3; p++)
                dest[p] -= sl->linesize * 15;
        if (FRAME_MBAFF(h)) {
            int list;
            for (list = 0; list < sl->list_count; list++) {
                if (!USES_LIST(mb_type, list))
                    continue;
                if (IS_16X16(mb_type)) {
                    int8_t *ref = &sl->ref_cache[list][scan8[0]];
                    fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (sl->mb_y & 1), 1);
                } else {
                    for (i = 0; i < 16; i += 4) {
                        int ref = sl->ref_cache[list][scan8[i]];
                        if (ref >= 0)
                            fill_rectangle(&sl->ref_cache[list][scan8[i]], 2, 2,
                                           8, (16 + ref) ^ (sl->mb_y & 1), 1);
                    }
                }
            }
        }
    } else {
        linesize = sl->mb_linesize = sl->mb_uvlinesize = sl->linesize;
    }

    if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
        if (PIXEL_SHIFT) {
            const int bit_depth = h->sps.bit_depth_luma;
            GetBitContext gb;
            init_get_bits(&gb, sl->intra_pcm_ptr, 768 * bit_depth);

            for (p = 0; p < plane_count; p++)
                for (i = 0; i < 16; i++) {
                    uint16_t *tmp = (uint16_t *)(dest[p] + i * linesize);
                    for (j = 0; j < 16; j++)
                        tmp[j] = get_bits(&gb, bit_depth);
                }
        } else {
            for (p = 0; p < plane_count; p++)
                for (i = 0; i < 16; i++)
                    memcpy(dest[p] + i * linesize,
                           sl->intra_pcm_ptr + p * 256 + i * 16, 16);
        }
    } else {
        if (IS_INTRA(mb_type)) {
            if (sl->deblocking_filter)
                xchg_mb_border(h, sl, dest[0], dest[1], dest[2], linesize,
                               linesize, 1, 1, SIMPLE, PIXEL_SHIFT);

            for (p = 0; p < plane_count; p++)
                hl_decode_mb_predict_luma(h, sl, mb_type, 1, SIMPLE,
                                          transform_bypass, PIXEL_SHIFT,
                                          block_offset, linesize, dest[p], p);

            if (sl->deblocking_filter)
                xchg_mb_border(h, sl, dest[0], dest[1], dest[2], linesize,
                               linesize, 0, 1, SIMPLE, PIXEL_SHIFT);
        } else {
            FUNC(hl_motion_444)(h, sl, dest[0], dest[1], dest[2],
                                h->qpel_put, h->h264chroma.put_h264_chroma_pixels_tab,
                                h->qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab,
                                h->h264dsp.weight_h264_pixels_tab,
                                h->h264dsp.biweight_h264_pixels_tab);
        }

        for (p = 0; p < plane_count; p++)
            hl_decode_mb_idct_luma(h, sl, mb_type, 1, SIMPLE, transform_bypass,
                                   PIXEL_SHIFT, block_offset, linesize,
                                   dest[p], p);
    }
}
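
Every example in this collection follows the same get_bits.h pattern: point a GetBitContext at a byte buffer with init_get_bits() (the size is given in bits), pull fixed-width fields, and track consumption with get_bits_count(). Below is a minimal stand-alone sketch of that pattern; parse_tag_sketch() is a hypothetical helper, not taken from any decoder above.

#include "get_bits.h"   /* libavcodec bit reader used throughout these examples */

/* Hypothetical helper: skip a 32-bit reserved field, read a 32-bit tag,
 * and report how many bytes of the buffer were consumed. */
static int parse_tag_sketch(const uint8_t *buf, int buf_size, uint32_t *tag)
{
    GetBitContext gb;

    init_get_bits(&gb, buf, buf_size * 8);  /* buffer size is passed in bits   */

    skip_bits(&gb, 32);                     /* e.g. reserved zeros             */
    *tag = get_bits_long(&gb, 32);          /* wide reads use get_bits_long()  */

    return (get_bits_count(&gb) + 7) / 8;   /* bytes consumed, rounded up      */
}
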
Example #2
static int mjpegb_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MJpegDecodeContext *s = avctx->priv_data;
    const uint8_t *buf_end, *buf_ptr;
    AVFrame *picture = data;
    GetBitContext hgb; /* for the header */
    uint32_t dqt_offs, dht_offs, sof_offs, sos_offs, second_field_offs;
    uint32_t field_size, sod_offs;

    buf_ptr = buf;
    buf_end = buf + buf_size;

read_header:
    /* reset on every SOI */
    s->restart_interval = 0;
    s->restart_count = 0;
    s->mjpb_skiptosod = 0;

    init_get_bits(&hgb, buf_ptr, /*buf_size*/(buf_end - buf_ptr)*8);

    skip_bits(&hgb, 32); /* reserved zeros */

    if (get_bits_long(&hgb, 32) != MKBETAG('m','j','p','g'))
    {
        av_log(avctx, AV_LOG_WARNING, "not mjpeg-b (bad fourcc)\n");
        return 0;
    }

    field_size = get_bits_long(&hgb, 32); /* field size */
    av_log(avctx, AV_LOG_DEBUG, "field size: 0x%x\n", field_size);
    skip_bits(&hgb, 32); /* padded field size */
    second_field_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "second_field_offs is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "second field offs: 0x%x\n", second_field_offs);

    dqt_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "dqt is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "dqt offs: 0x%x\n", dqt_offs);
    if (dqt_offs)
    {
        init_get_bits(&s->gb, buf_ptr+dqt_offs, (buf_end - (buf_ptr+dqt_offs))*8);
        s->start_code = DQT;
        ff_mjpeg_decode_dqt(s);
    }

    dht_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "dht is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "dht offs: 0x%x\n", dht_offs);
    if (dht_offs)
    {
        init_get_bits(&s->gb, buf_ptr+dht_offs, (buf_end - (buf_ptr+dht_offs))*8);
        s->start_code = DHT;
        ff_mjpeg_decode_dht(s);
    }

    sof_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "sof is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "sof offs: 0x%x\n", sof_offs);
    if (sof_offs)
    {
        init_get_bits(&s->gb, buf_ptr+sof_offs, (buf_end - (buf_ptr+sof_offs))*8);
        s->start_code = SOF0;
        if (ff_mjpeg_decode_sof(s) < 0)
            return -1;
    }

    sos_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "sos is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "sos offs: 0x%x\n", sos_offs);
    sod_offs = read_offs(avctx, &hgb, buf_end - buf_ptr, "sod is %d and size is %d\n");
    av_log(avctx, AV_LOG_DEBUG, "sod offs: 0x%x\n", sod_offs);
    if (sos_offs)
    {
//        init_get_bits(&s->gb, buf+sos_offs, (buf_end - (buf+sos_offs))*8);
        init_get_bits(&s->gb, buf_ptr+sos_offs, field_size*8);
        s->mjpb_skiptosod = (sod_offs - sos_offs - show_bits(&s->gb, 16));
        s->start_code = SOS;
        ff_mjpeg_decode_sos(s);
    }

    if (s->interlaced) {
        s->bottom_field ^= 1;
        /* if not bottom field, do not output image yet */
        if (s->bottom_field != s->interlace_polarity && second_field_offs)
        {
            buf_ptr = buf + second_field_offs;
            second_field_offs = 0;
            goto read_header;
        }
    }

    //XXX FIXME factorize, this looks very similar to the EOI code

    *picture= s->picture;
    *data_size = sizeof(AVFrame);

    if(!s->lossless){
        picture->quality= FFMAX3(s->qscale[0], s->qscale[1], s->qscale[2]);
        picture->qstride= 0;
        picture->qscale_table= s->qscale_table;
        memset(picture->qscale_table, picture->quality, (s->width+15)/16);
        if(avctx->debug & FF_DEBUG_QP)
            av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", picture->quality);
        picture->quality*= FF_QP2LAMBDA;
    }

    return buf_ptr - buf;
}
Example #3
static int
avs_decode_frame(AVCodecContext * avctx,
                 void *data, int *data_size, const uint8_t * buf, int buf_size)
{
    AvsContext *const avs = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame *const p = (AVFrame *) & avs->picture;
    const uint8_t *table, *vect;
    uint8_t *out;
    int i, j, x, y, stride, vect_w = 3, vect_h = 3;
    AvsVideoSubType sub_type;
    AvsBlockType type;
    GetBitContext change_map;

    if (avctx->reget_buffer(avctx, p)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }
    p->reference = 1;
    p->pict_type = FF_P_TYPE;
    p->key_frame = 0;

    out = avs->picture.data[0];
    stride = avs->picture.linesize[0];

    sub_type = buf[0];
    type = buf[1];
    buf += 4;

    if (type == AVS_PALETTE) {
        int first, last;
        uint32_t *pal = (uint32_t *) avs->picture.data[1];

        first = AV_RL16(buf);
        last = first + AV_RL16(buf + 2);
        buf += 4;
        for (i=first; i<last; i++, buf+=3)
            pal[i] = (buf[0] << 18) | (buf[1] << 10) | (buf[2] << 2);

        sub_type = buf[0];
        type = buf[1];
        buf += 4;
    }

    if (type != AVS_VIDEO)
        return -1;

    switch (sub_type) {
    case AVS_I_FRAME:
        p->pict_type = FF_I_TYPE;
        p->key_frame = 1;
    case AVS_P_FRAME_3X3:
        vect_w = 3;
        vect_h = 3;
        break;

    case AVS_P_FRAME_2X2:
        vect_w = 2;
        vect_h = 2;
        break;

    case AVS_P_FRAME_2X3:
        vect_w = 2;
        vect_h = 3;
        break;

    default:
      return -1;
    }

    table = buf + (256 * vect_w * vect_h);
    if (sub_type != AVS_I_FRAME) {
        int map_size = ((318 / vect_w + 7) / 8) * (198 / vect_h);
        init_get_bits(&change_map, table, map_size);
        table += map_size;
    }

    for (y=0; y<198; y+=vect_h) {
        for (x=0; x<318; x+=vect_w) {
            if (sub_type == AVS_I_FRAME || get_bits1(&change_map)) {
                vect = &buf[*table++ * (vect_w * vect_h)];
                for (j=0; j<vect_w; j++) {
                    out[(y + 0) * stride + x + j] = vect[(0 * vect_w) + j];
                    out[(y + 1) * stride + x + j] = vect[(1 * vect_w) + j];
                    if (vect_h == 3)
                        out[(y + 2) * stride + x + j] =
                            vect[(2 * vect_w) + j];
                }
            }
        }
        if (sub_type != AVS_I_FRAME)
            align_get_bits(&change_map);
    }

    *picture = *(AVFrame *) & avs->picture;
    *data_size = sizeof(AVPicture);

    return buf_size;
}
Example #4
static int mpc8_decode_frame(AVCodecContext * avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
	const uint8_t *buf = avpkt->data;
	int buf_size = avpkt->size;
	MPCContext *c = avctx->priv_data;
	GetBitContext gb2, *gb = &gb2;
	int i, j, k, ch, cnt, res, t;
	Band *bands = c->bands;
	int off;
	int maxband, keyframe;
	int last[2];

	keyframe = c->cur_frame == 0;

	if(keyframe)
	{
		memset(c->Q, 0, sizeof(c->Q));
		c->last_bits_used = 0;
	}
	init_get_bits(gb, buf, buf_size * 8);
	skip_bits(gb, c->last_bits_used & 7);

	if(keyframe)
		maxband = mpc8_get_mod_golomb(gb, c->maxbands + 1);
	else
	{
		maxband = c->last_max_band + get_vlc2(gb, band_vlc.table, MPC8_BANDS_BITS, 2);
		if(maxband > 32) maxband -= 33;
	}
	c->last_max_band = maxband;

	/* read subband indexes */
	if(maxband)
	{
		last[0] = last[1] = 0;
		for(i = maxband - 1; i >= 0; i--)
		{
			for(ch = 0; ch < 2; ch++)
			{
				last[ch] = get_vlc2(gb, res_vlc[last[ch] > 2].table, MPC8_RES_BITS, 2) + last[ch];
				if(last[ch] > 15) last[ch] -= 17;
				bands[i].res[ch] = last[ch];
			}
		}
		if(c->MSS)
		{
			int mask;

			cnt = 0;
			for(i = 0; i < maxband; i++)
				if(bands[i].res[0] || bands[i].res[1])
					cnt++;
			t = mpc8_get_mod_golomb(gb, cnt);
			mask = mpc8_get_mask(gb, cnt, t);
			for(i = maxband - 1; i >= 0; i--)
				if(bands[i].res[0] || bands[i].res[1])
				{
					bands[i].msf = mask & 1;
					mask >>= 1;
				}
		}
Example #5
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    SmackVContext * const smk = avctx->priv_data;
    uint8_t *out;
    uint32_t *pal;
    GetByteContext gb2;
    GetBitContext gb;
    int blocks, blk, bw, bh;
    int i, ret;
    int stride;
    int flags;

    if (avpkt->size <= 769)
        return AVERROR_INVALIDDATA;

    smk->pic.reference = 3;
    smk->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if((ret = avctx->reget_buffer(avctx, &smk->pic)) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* make the palette available on the way out */
    pal = (uint32_t*)smk->pic.data[1];
    bytestream2_init(&gb2, avpkt->data, avpkt->size);
    flags = bytestream2_get_byteu(&gb2);
    smk->pic.palette_has_changed = flags & 1;
    smk->pic.key_frame = !!(flags & 2);
    if(smk->pic.key_frame)
        smk->pic.pict_type = AV_PICTURE_TYPE_I;
    else
        smk->pic.pict_type = AV_PICTURE_TYPE_P;

    for(i = 0; i < 256; i++)
        *pal++ = 0xFFU << 24 | bytestream2_get_be24u(&gb2);

    last_reset(smk->mmap_tbl, smk->mmap_last);
    last_reset(smk->mclr_tbl, smk->mclr_last);
    last_reset(smk->full_tbl, smk->full_last);
    last_reset(smk->type_tbl, smk->type_last);
    init_get_bits(&gb, avpkt->data + 769, (avpkt->size - 769) * 8);

    blk = 0;
    bw = avctx->width >> 2;
    bh = avctx->height >> 2;
    blocks = bw * bh;
    out = smk->pic.data[0];
    stride = smk->pic.linesize[0];
    while(blk < blocks) {
        int type, run, mode;
        uint16_t pix;

        type = smk_get_code(&gb, smk->type_tbl, smk->type_last);
        run = block_runs[(type >> 2) & 0x3F];
        switch(type & 3){
        case SMK_BLK_MONO:
            while(run-- && blk < blocks){
                int clr, map;
                int hi, lo;
                clr = smk_get_code(&gb, smk->mclr_tbl, smk->mclr_last);
                map = smk_get_code(&gb, smk->mmap_tbl, smk->mmap_last);
                out = smk->pic.data[0] + (blk / bw) * (stride * 4) + (blk % bw) * 4;
                hi = clr >> 8;
                lo = clr & 0xFF;
                for(i = 0; i < 4; i++) {
                    if(map & 1) out[0] = hi; else out[0] = lo;
                    if(map & 2) out[1] = hi; else out[1] = lo;
                    if(map & 4) out[2] = hi; else out[2] = lo;
                    if(map & 8) out[3] = hi; else out[3] = lo;
                    map >>= 4;
                    out += stride;
                }
                blk++;
            }
            break;
        case SMK_BLK_FULL:
            mode = 0;
            if(avctx->codec_tag == MKTAG('S', 'M', 'K', '4')) { // In case of Smacker v4 we have three modes
                if(get_bits1(&gb)) mode = 1;
                else if(get_bits1(&gb)) mode = 2;
            }
            while(run-- && blk < blocks){
                out = smk->pic.data[0] + (blk / bw) * (stride * 4) + (blk % bw) * 4;
                switch(mode){
                case 0:
                    for(i = 0; i < 4; i++) {
                        pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        AV_WL16(out+2,pix);
                        pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        AV_WL16(out,pix);
                        out += stride;
                    }
                    break;
                case 1:
                    pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    pix = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    out[0] = out[1] = pix & 0xFF;
                    out[2] = out[3] = pix >> 8;
                    out += stride;
                    break;
                case 2:
                    for(i = 0; i < 2; i++) {
                        uint16_t pix1, pix2;
                        pix2 = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        pix1 = smk_get_code(&gb, smk->full_tbl, smk->full_last);
                        AV_WL16(out,pix1);
                        AV_WL16(out+2,pix2);
                        out += stride;
                        AV_WL16(out,pix1);
                        AV_WL16(out+2,pix2);
                        out += stride;
                    }
                    break;
                }
                blk++;
            }
            break;
        case SMK_BLK_SKIP:
            while(run-- && blk < blocks)
                blk++;
            break;
        case SMK_BLK_FILL:
            mode = type >> 8;
            while(run-- && blk < blocks){
                uint32_t col;
                out = smk->pic.data[0] + (blk / bw) * (stride * 4) + (blk % bw) * 4;
                col = mode * 0x01010101;
                for(i = 0; i < 4; i++) {
                    *((uint32_t*)out) = col;
                    out += stride;
                }
                blk++;
            }
            break;
        }

    }

    *got_frame = 1;
    *(AVFrame*)data = smk->pic;

    /* always report that the buffer was completely consumed */
    return avpkt->size;
}
Example #6
void ff_rtp_send_h263_rfc2190(AVFormatContext *s1, const uint8_t *buf, int size,
                              const uint8_t *mb_info, int mb_info_size)
{
    RTPMuxContext *s = s1->priv_data;
    int len, sbits = 0, ebits = 0;
    GetBitContext gb;
    struct H263Info info = { 0 };
    struct H263State state = { 0 };
    int mb_info_pos = 0, mb_info_count = mb_info_size / 12;
    const uint8_t *buf_base = buf;

    s->timestamp = s->cur_timestamp;

    init_get_bits(&gb, buf, size*8);
    if (get_bits(&gb, 22) == 0x20) { /* Picture Start Code */
        info.tr  = get_bits(&gb, 8);
        skip_bits(&gb, 2); /* PTYPE start, H.261 disambiguation */
        skip_bits(&gb, 3); /* Split screen, document camera, freeze picture release */
        info.src = get_bits(&gb, 3);
        info.i   = get_bits(&gb, 1);
        info.u   = get_bits(&gb, 1);
        info.s   = get_bits(&gb, 1);
        info.a   = get_bits(&gb, 1);
        info.pb  = get_bits(&gb, 1);
    }

    while (size > 0) {
        struct H263State packet_start_state = state;
        len = FFMIN(s->max_payload_size - 8, size);

        /* Look for a better place to split the frame into packets. */
        if (len < size) {
            const uint8_t *end = ff_h263_find_resync_marker_reverse(buf,
                                                                    buf + len);
            len = end - buf;
            if (len == s->max_payload_size - 8) {
                /* Skip mb info prior to the start of the current ptr */
                while (mb_info_pos < mb_info_count) {
                    uint32_t pos = AV_RL32(&mb_info[12*mb_info_pos])/8;
                    if (pos >= buf - buf_base)
                        break;
                    mb_info_pos++;
                }
                /* Find the first mb info past the end pointer */
                while (mb_info_pos + 1 < mb_info_count) {
                    uint32_t pos = AV_RL32(&mb_info[12*(mb_info_pos + 1)])/8;
                    if (pos >= end - buf_base)
                        break;
                    mb_info_pos++;
                }
                if (mb_info_pos < mb_info_count) {
                    const uint8_t *ptr = &mb_info[12*mb_info_pos];
                    /* get position in bits in the input packet at which the next info block should be used */
                    uint32_t bit_pos = AV_RL32(ptr);
                    /* get position in bytes */
                    uint32_t pos_next_mb_info = (bit_pos + 7)/8;
                    /* check if data from the next MB info block should be used */
                    if (pos_next_mb_info <= end - buf_base) {
                        state.quant = ptr[4];
                        state.gobn  = ptr[5];
                        state.mba   = AV_RL16(&ptr[6]);
                        state.hmv1  = (int8_t) ptr[8];
                        state.vmv1  = (int8_t) ptr[9];
                        state.hmv2  = (int8_t) ptr[10];
                        state.vmv2  = (int8_t) ptr[11];
                        ebits = 8 * pos_next_mb_info - bit_pos;
                        len   = pos_next_mb_info - (buf - buf_base);
                        mb_info_pos++;
                    }
                } else {
                    av_log(s1, AV_LOG_ERROR, "Unable to split H.263 packet, "
                           "use -mb_info %d or -ps 1.\n",
                           s->max_payload_size - 8);
                }
            }
        }

        if (size > 2 && !buf[0] && !buf[1])
            send_mode_a(s1, &info, buf, len, ebits, len == size);
        else
            send_mode_b(s1, &info, &packet_start_state, buf, len, sbits,
                        ebits, len == size);

        if (ebits) {
            sbits = 8 - ebits;
            len--;
        } else {
            sbits = 0;
        }
        buf  += len;
        size -= len;
        ebits = 0;
    }
}
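
The sbits/ebits bookkeeping at the end of the loop above handles split points that fall inside a byte: the shared byte is transmitted at the end of one packet (with ebits trailing bits to ignore) and again at the start of the next (with sbits = 8 - ebits leading bits to ignore). A small sketch of just that step, using hypothetical names:

/* Hypothetical sketch of the shared-byte handling above: advance the input
 * pointer after sending `len` bytes, keeping the partially used last byte
 * for retransmission when the split is not byte-aligned. */
static void advance_after_packet(const uint8_t **buf, int *size,
                                 int len, int *sbits, int ebits)
{
    if (ebits) {
        *sbits = 8 - ebits;  /* bits of the shared byte already sent      */
        len--;               /* resend that byte at the next packet start */
    } else {
        *sbits = 0;
    }
    *buf  += len;
    *size -= len;
}
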
Example #7
static int flashsv_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    const uint8_t *buf, int buf_size)
{
    FlashSVContext *s = avctx->priv_data;
    int h_blocks, v_blocks, h_part, v_part, i, j;
    GetBitContext gb;

    /* no supplementary picture */
    if (buf_size == 0)
        return 0;

    if(s->frame.data[0])
            avctx->release_buffer(avctx, &s->frame);

    init_get_bits(&gb, buf, buf_size * 8);

    /* start to parse the bitstream */
    s->block_width = 16* (get_bits(&gb, 4)+1);
    s->image_width =     get_bits(&gb,12);
    s->block_height= 16* (get_bits(&gb, 4)+1);
    s->image_height=     get_bits(&gb,12);

    /* calculate amount of blocks and the size of the border blocks */
    h_blocks = s->image_width / s->block_width;
    h_part = s->image_width % s->block_width;
    v_blocks = s->image_height / s->block_height;
    v_part = s->image_height % s->block_height;

    /* the block size could change between frames, make sure the buffer
     * is large enough, if not, get a larger one */
    if(s->block_size < s->block_width*s->block_height) {
        if (s->tmpblock != NULL)
            av_free(s->tmpblock);
        if ((s->tmpblock = av_malloc(3*s->block_width*s->block_height)) == NULL) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return -1;
        }
    }
    s->block_size = s->block_width*s->block_height;

    /* init the image size once */
    if((avctx->width==0) && (avctx->height==0)){
        avctx->width = s->image_width;
        avctx->height = s->image_height;
    }

    /* check for changes of image width and image height */
    if ((avctx->width != s->image_width) || (avctx->height != s->image_height)) {
        av_log(avctx, AV_LOG_ERROR, "Frame width or height differs from first frames!\n");
        av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d  vs  ch = %d, cv = %d\n",avctx->height,
        avctx->width,s->image_height,s->image_width);
        return -1;
    }

    av_log(avctx, AV_LOG_DEBUG, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n",
        s->image_width, s->image_height, s->block_width, s->block_height,
        h_blocks, v_blocks, h_part, v_part);

    s->frame.reference = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
    if (avctx->get_buffer(avctx, &s->frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    /* loop over all block rows */
    for (j = 0; j < v_blocks + (v_part?1:0); j++)
    {

        int hp = j*s->block_height; // vertical position in frame
        int hs = (j<v_blocks)?s->block_height:v_part; // height of this block row


        /* loop over all block columns */
        for (i = 0; i < h_blocks + (h_part?1:0); i++)
        {
            int wp = i*s->block_width; // horizontal position in frame
            int ws = (i<h_blocks)?s->block_width:h_part; // width of this block

            /* get the size of the compressed zlib chunk */
            int size = get_bits(&gb, 16);

            if (size == 0) {
                /* no change, don't do anything */
            } else {
                /* decompress block */
                int ret = inflateReset(&(s->zstream));
                if (ret != Z_OK)
                {
                    av_log(avctx, AV_LOG_ERROR, "error in decompression (reset) of block %dx%d\n", i, j);
                    /* return -1; */
                }
                s->zstream.next_in = buf+(get_bits_count(&gb)/8);
                s->zstream.avail_in = size;
                s->zstream.next_out = s->tmpblock;
                s->zstream.avail_out = s->block_size*3;
                ret = inflate(&(s->zstream), Z_FINISH);
                if (ret == Z_DATA_ERROR)
                {
                    av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n");
                    inflateSync(&(s->zstream));
                    ret = inflate(&(s->zstream), Z_FINISH);
                }

                if ((ret != Z_OK) && (ret != Z_STREAM_END))
                {
                    av_log(avctx, AV_LOG_ERROR, "error in decompression of block %dx%d: %d\n", i, j, ret);
                    /* return -1; */
                }
                copy_region(s->tmpblock, s->frame.data[0], s->image_height-(hp+hs+1), wp, hs, ws, s->frame.linesize[0]);
                skip_bits_long(&gb, 8*size);   /* skip the consumed bits */
            }
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    if ((get_bits_count(&gb)/8) != buf_size)
        av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
            buf_size, (get_bits_count(&gb)/8));

    /* report that the buffer was completely consumed */
    return buf_size;
}
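
Example #7 treats every non-empty block as an independent zlib stream: inflateReset() between blocks, then a single inflate(..., Z_FINISH) call since the whole chunk is already in memory. A self-contained sketch of that cycle follows; the z_stream is assumed to have been set up once with inflateInit(), and the helper name is hypothetical.

#include <zlib.h>
#include <stdint.h>

/* Hypothetical helper: decompress one self-contained zlib chunk into dst.
 * Returns the number of bytes produced, or a negative zlib error code. */
static int inflate_block_sketch(z_stream *zs, uint8_t *dst, unsigned dst_size,
                                const uint8_t *src, unsigned src_size)
{
    int ret = inflateReset(zs);          /* each block starts a fresh stream */
    if (ret != Z_OK)
        return ret;

    zs->next_in   = (Bytef *)src;        /* cast for zlib's non-const API    */
    zs->avail_in  = src_size;
    zs->next_out  = dst;
    zs->avail_out = dst_size;

    ret = inflate(zs, Z_FINISH);         /* all input is available at once   */
    if (ret != Z_OK && ret != Z_STREAM_END)
        return ret;

    return (int)(dst_size - zs->avail_out);  /* bytes actually produced      */
}
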
Example #8
static int dvbsub_read_2bit_string(uint8_t *destbuf, int dbuf_len,
                                   const uint8_t **srcbuf, int buf_size,
                                   int non_mod, uint8_t *map_table)
{
    GetBitContext gb;

    int bits;
    int run_length;
    int pixels_read = 0;

    init_get_bits(&gb, *srcbuf, buf_size << 3);

    while (get_bits_count(&gb) < buf_size << 3 && pixels_read < dbuf_len) {
        bits = get_bits(&gb, 2);

        if (bits) {
            if (non_mod != 1 || bits != 1) {
                if (map_table)
                    *destbuf++ = map_table[bits];
                else
                    *destbuf++ = bits;
            }
            pixels_read++;
        } else {
            bits = get_bits1(&gb);
            if (bits == 1) {
                run_length = get_bits(&gb, 3) + 3;
                bits = get_bits(&gb, 2);

                if (non_mod == 1 && bits == 1)
                    pixels_read += run_length;
                else {
                    if (map_table)
                        bits = map_table[bits];
                    while (run_length-- > 0 && pixels_read < dbuf_len) {
                        *destbuf++ = bits;
                        pixels_read++;
                    }
                }
            } else {
                bits = get_bits1(&gb);
                if (bits == 0) {
                    bits = get_bits(&gb, 2);
                    if (bits == 2) {
                        run_length = get_bits(&gb, 4) + 12;
                        bits = get_bits(&gb, 2);

                        if (non_mod == 1 && bits == 1)
                            pixels_read += run_length;
                        else {
                            if (map_table)
                                bits = map_table[bits];
                            while (run_length-- > 0 && pixels_read < dbuf_len) {
                                *destbuf++ = bits;
                                pixels_read++;
                            }
                        }
                    } else if (bits == 3) {
                        run_length = get_bits(&gb, 8) + 29;
                        bits = get_bits(&gb, 2);

                        if (non_mod == 1 && bits == 1)
                            pixels_read += run_length;
                        else {
                            if (map_table)
                                bits = map_table[bits];
                            while (run_length-- > 0 && pixels_read < dbuf_len) {
                                *destbuf++ = bits;
                                pixels_read++;
                            }
                        }
                    } else if (bits == 1) {
                        pixels_read += 2;
                        if (map_table)
                            bits = map_table[0];
                        else
                            bits = 0;
                        if (pixels_read <= dbuf_len) {
                            *destbuf++ = bits;
                            *destbuf++ = bits;
                        }
                    } else {
                        (*srcbuf) += (get_bits_count(&gb) + 7) >> 3;
                        return pixels_read;
                    }
                } else {
                    if (map_table)
                        bits = map_table[0];
                    else
                        bits = 0;
                    *destbuf++ = bits;
                    pixels_read++;
                }
            }
        }
Example #9
static int h261_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    H261Context *h     = avctx->priv_data;
    MpegEncContext *s  = &h->s;
    int ret;
    AVFrame *pict = data;

    av_dlog(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
    av_dlog(avctx, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
    s->flags  = avctx->flags;
    s->flags2 = avctx->flags2;

    h->gob_start_code_skipped = 0;

retry:
    init_get_bits(&s->gb, buf, buf_size * 8);

    if (!s->context_initialized)
        // we need the IDCT permutation for reading a custom matrix
        if (ff_MPV_common_init(s) < 0)
            return -1;

    /* We need to set current_picture_ptr before reading the header,
     * otherwise we cannot store anything in there. */
    if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
        int i = ff_find_unused_picture(s, 0);
        if (i < 0)
            return i;
        s->current_picture_ptr = &s->picture[i];
    }

    ret = h261_decode_picture_header(h);

    /* skip if the header was thrashed */
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
        return -1;
    }

    if (s->width != avctx->coded_width || s->height != avctx->coded_height) {
        ParseContext pc = s->parse_context; // FIXME move this demuxing hack to libavformat
        s->parse_context.buffer = 0;
        ff_MPV_common_end(s);
        s->parse_context = pc;
    }
    if (!s->context_initialized) {
        ret = ff_set_dimensions(avctx, s->width, s->height);
        if (ret < 0)
            return ret;

        goto retry;
    }

    // for skipping the frame
    s->current_picture.f.pict_type = s->pict_type;
    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
        (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
         avctx->skip_frame >= AVDISCARD_ALL)
        return get_consumed_bytes(s, buf_size);

    if (ff_MPV_frame_start(s, avctx) < 0)
        return -1;

    ff_mpeg_er_frame_start(s);

    /* decode each macroblock */
    s->mb_x = 0;
    s->mb_y = 0;

    while (h->gob_number < (s->mb_height == 18 ? 12 : 5)) {
        if (h261_resync(h) < 0)
            break;
        h261_decode_gob(h);
    }
    ff_MPV_frame_end(s);

    assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
    assert(s->current_picture.f.pict_type == s->pict_type);

    if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
        return ret;
    ff_print_debug_info(s, s->current_picture_ptr);

    *got_frame = 1;

    return get_consumed_bytes(s, buf_size);
}
Example #10
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    ASV1Context * const a = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= &a->picture;
    int mb_x, mb_y;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= AV_PICTURE_TYPE_I;
    p->key_frame= 1;

    av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size,
                          buf_size);
    if (!a->bitstream_buffer)
        return AVERROR(ENOMEM);

    if(avctx->codec_id == AV_CODEC_ID_ASV1)
        a->dsp.bswap_buf((uint32_t*)a->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
    else{
        int i;
        for(i=0; i<buf_size; i++)
            a->bitstream_buffer[i]= av_reverse[ buf[i] ];
    }

    init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8);

    for(mb_y=0; mb_y<a->mb_height2; mb_y++){
        for(mb_x=0; mb_x<a->mb_width2; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_width2 != a->mb_width){
        mb_x= a->mb_width2;
        for(mb_y=0; mb_y<a->mb_height2; mb_y++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    if(a->mb_height2 != a->mb_height){
        mb_y= a->mb_height2;
        for(mb_x=0; mb_x<a->mb_width; mb_x++){
            if( decode_mb(a, a->block) <0)
                return -1;

            idct_put(a, mb_x, mb_y);
        }
    }

    *picture   = a->picture;
    *data_size = sizeof(AVPicture);

    emms_c();

    return (get_bits_count(&a->gb)+31)/32*4;
}
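
For AV_CODEC_ID_ASV2 the example above reverses the bit order of every input byte (via the av_reverse table) so that the LSB-first stream can be fed to the MSB-first bit reader. A table-free sketch of the same per-byte reversal, as a hypothetical stand-alone helper:

#include <stdint.h>
#include <stddef.h>

/* Reverse the bit order within each byte, e.g. 0b10110000 -> 0b00001101,
 * so an LSB-first stream can be read with an MSB-first bit reader. */
static void reverse_bits_sketch(uint8_t *dst, const uint8_t *src, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        uint8_t b = src[i];
        b = (uint8_t)((b >> 4) | (b << 4));                    /* swap nibbles   */
        b = (uint8_t)(((b & 0xCC) >> 2) | ((b & 0x33) << 2));  /* swap bit pairs */
        b = (uint8_t)(((b & 0xAA) >> 1) | ((b & 0x55) << 1));  /* swap bits      */
        dst[i] = b;
    }
}
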
Example #11
static int mp_decode_frame(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MotionPixelsContext *mp = avctx->priv_data;
    GetBitContext gb;
    int i, count1, count2, sz;

    mp->frame.reference = 1;
    mp->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &mp->frame)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    /* le32 bitstream msb first */
    av_fast_malloc(&mp->bswapbuf, &mp->bswapbuf_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!mp->bswapbuf)
        return AVERROR(ENOMEM);
    mp->dsp.bswap_buf((uint32_t *)mp->bswapbuf, (const uint32_t *)buf, buf_size / 4);
    if (buf_size & 3)
        memcpy(mp->bswapbuf + (buf_size & ~3), buf + (buf_size & ~3), buf_size & 3);
    memset(mp->bswapbuf + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    init_get_bits(&gb, mp->bswapbuf, buf_size * 8);

    memset(mp->changes_map, 0, avctx->width * avctx->height);
    for (i = !(avctx->extradata[1] & 2); i < 2; ++i) {
        count1 = get_bits(&gb, 12);
        count2 = get_bits(&gb, 12);
        mp_read_changes_map(mp, &gb, count1, 8, i);
        mp_read_changes_map(mp, &gb, count2, 4, i);
    }

    mp->codes_count = get_bits(&gb, 4);
    if (mp->codes_count == 0)
        goto end;

    if (mp->changes_map[0] == 0) {
        *(uint16_t *)mp->frame.data[0] = get_bits(&gb, 15);
        mp->changes_map[0] = 1;
    }
    mp_read_codes_table(mp, &gb);

    sz = get_bits(&gb, 18);
    if (avctx->extradata[0] != 5)
        sz += get_bits(&gb, 18);
    if (sz == 0)
        goto end;

    if (mp->max_codes_bits <= 0)
        goto end;
    if (init_vlc(&mp->vlc, mp->max_codes_bits, mp->codes_count, &mp->codes[0].size, sizeof(HuffCode), 1, &mp->codes[0].code, sizeof(HuffCode), 4, 0))
        goto end;
    mp_decode_frame_helper(mp, &gb);
    ff_free_vlc(&mp->vlc);

end:
    *data_size = sizeof(AVFrame);
    *(AVFrame *)data = mp->frame;
    return buf_size;
}
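
Example #11 converts a stream of little-endian 32-bit words into MSB-first byte order before bit reading: bswap_buf() swaps whole words and a memcpy carries over the unaligned tail. A plain-C sketch of the equivalent step, with a hypothetical helper name:

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: byte-swap each little-endian 32-bit word of src into
 * dst and copy any 1-3 trailing bytes unchanged, as done before bit reading
 * in the example above. dst must have room for `size` bytes. */
static void bswap_le32_stream_sketch(uint8_t *dst, const uint8_t *src, int size)
{
    int i, words = size / 4;

    for (i = 0; i < words; i++) {
        dst[4 * i + 0] = src[4 * i + 3];
        dst[4 * i + 1] = src[4 * i + 2];
        dst[4 * i + 2] = src[4 * i + 1];
        dst[4 * i + 3] = src[4 * i + 0];
    }
    memcpy(dst + 4 * words, src + 4 * words, size & 3);  /* unaligned tail */
}
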
Example #12
static int ff_h264_find_frame_end(H264Context *h, const uint8_t *buf, int buf_size)
{
    int i, j;
    uint32_t state;
    ParseContext *pc = &(h->s.parse_context);
    int next_avc= h->is_avc ? 0 : buf_size;

//    mb_addr= pc->mb_addr - 1;
    state= pc->state;
    if(state>13)
        state= 7;

    if(h->is_avc && !h->nal_length_size)
        av_log(h->s.avctx, AV_LOG_ERROR, "AVC-parser: nal length size invalid\n");

    for(i=0; i<buf_size; i++){
        if(i >= next_avc) {
            int nalsize = 0;
            i = next_avc;
            for(j = 0; j < h->nal_length_size; j++)
                nalsize = (nalsize << 8) | buf[i++];
            if(nalsize <= 0 || nalsize > buf_size - i){
                av_log(h->s.avctx, AV_LOG_ERROR, "AVC-parser: nal size %d remaining %d\n", nalsize, buf_size - i);
                return buf_size;
            }
            next_avc= i + nalsize;
            state= 5;
        }

        if(state==7){
#if HAVE_FAST_UNALIGNED
        /* we check i<buf_size instead of i+3/7 because it's simpler
         * and there should be FF_INPUT_BUFFER_PADDING_SIZE bytes at the end
         */
#    if HAVE_FAST_64BIT
            while(i<next_avc && !((~*(const uint64_t*)(buf+i) & (*(const uint64_t*)(buf+i) - 0x0101010101010101ULL)) & 0x8080808080808080ULL))
                i+=8;
#    else
            while(i<next_avc && !((~*(const uint32_t*)(buf+i) & (*(const uint32_t*)(buf+i) - 0x01010101U)) & 0x80808080U))
                i+=4;
#    endif
#endif
            for(; i<next_avc; i++){
                if(!buf[i]){
                    state=2;
                    break;
                }
            }
        }else if(state<=2){
            if(buf[i]==1)   state^= 5; //2->7, 1->4, 0->5
            else if(buf[i]) state = 7;
            else            state>>=1; //2->1, 1->0, 0->0
        }else if(state<=5){
            int v= buf[i] & 0x1F;
            if(v==6 || v==7 || v==8 || v==9){
                if(pc->frame_start_found){
                    i++;
                    goto found;
                }
            }else if(v==1 || v==2 || v==5){
                state+=8;
                continue;
            }
            state= 7;
        }else{
            h->parse_history[h->parse_history_count++]= buf[i];
            if(h->parse_history_count>3){
                unsigned int mb, last_mb= h->parse_last_mb;
                GetBitContext gb;

                init_get_bits(&gb, h->parse_history, 8*h->parse_history_count);
                h->parse_history_count=0;
                mb= get_ue_golomb_long(&gb);
                last_mb= h->parse_last_mb;
                h->parse_last_mb= mb;
                if(pc->frame_start_found){
                    if(mb <= last_mb)
                        goto found;
                }else
                    pc->frame_start_found = 1;
                state= 7;
            }
        }
    }
Example #13
static int spdif_get_offset_and_codec(AVFormatContext *s,
                                      enum IEC61937DataType data_type,
                                      const char *buf, int *offset,
                                      enum CodecID *codec)
{
    AACADTSHeaderInfo aac_hdr;
    GetBitContext gbc;

    switch (data_type & 0xff) {
    case IEC61937_AC3:
        *offset = AC3_FRAME_SIZE << 2;
        *codec = CODEC_ID_AC3;
        break;
    case IEC61937_MPEG1_LAYER1:
        *offset = spdif_mpeg_pkt_offset[1][0];
        *codec = CODEC_ID_MP1;
        break;
    case IEC61937_MPEG1_LAYER23:
        *offset = spdif_mpeg_pkt_offset[1][0];
        *codec = CODEC_ID_MP3;
        break;
    case IEC61937_MPEG2_EXT:
        *offset = 4608;
        *codec = CODEC_ID_MP3;
        break;
    case IEC61937_MPEG2_AAC:
        init_get_bits(&gbc, buf, AAC_ADTS_HEADER_SIZE * 8);
        if (avpriv_aac_parse_header(&gbc, &aac_hdr)) {
            if (s) /* be silent during a probe */
                av_log(s, AV_LOG_ERROR, "Invalid AAC packet in IEC 61937\n");
            return AVERROR_INVALIDDATA;
        }
        *offset = aac_hdr.samples << 2;
        *codec = CODEC_ID_AAC;
        break;
    case IEC61937_MPEG2_LAYER1_LSF:
        *offset = spdif_mpeg_pkt_offset[0][0];
        *codec = CODEC_ID_MP1;
        break;
    case IEC61937_MPEG2_LAYER2_LSF:
        *offset = spdif_mpeg_pkt_offset[0][1];
        *codec = CODEC_ID_MP2;
        break;
    case IEC61937_MPEG2_LAYER3_LSF:
        *offset = spdif_mpeg_pkt_offset[0][2];
        *codec = CODEC_ID_MP3;
        break;
    case IEC61937_DTS1:
        *offset = 2048;
        *codec = CODEC_ID_DTS;
        break;
    case IEC61937_DTS2:
        *offset = 4096;
        *codec = CODEC_ID_DTS;
        break;
    case IEC61937_DTS3:
        *offset = 8192;
        *codec = CODEC_ID_DTS;
        break;
    default:
        if (s) { /* be silent during a probe */
            av_log(s, AV_LOG_WARNING, "Data type 0x%04x", data_type);
            av_log_missing_feature(s, " in IEC 61937 is", 1);
        }
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}
Example #14
static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContext *sl)
{
    const int mb_x    = sl->mb_x;
    const int mb_y    = sl->mb_y;
    const int mb_xy   = sl->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize /*dct_offset*/;
    int i, j;
    const int *block_offset = &h->block_offset[0];
    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass);
    /* is_h264 should always be true if SVQ3 is disabled. */
    const int is_h264 = !CONFIG_SVQ3_DECODER || SIMPLE || h->avctx->codec_id == AV_CODEC_ID_H264;
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    const int block_h   = 16 >> h->chroma_y_shift;
    const int chroma422 = CHROMA422(h);

    dest_y  = h->cur_pic.f.data[0] + ((mb_x << PIXEL_SHIFT)     + mb_y * sl->linesize)  * 16;
    dest_cb = h->cur_pic.f.data[1] +  (mb_x << PIXEL_SHIFT) * 8 + mb_y * sl->uvlinesize * block_h;
    dest_cr = h->cur_pic.f.data[2] +  (mb_x << PIXEL_SHIFT) * 8 + mb_y * sl->uvlinesize * block_h;

    h->vdsp.prefetch(dest_y  + (sl->mb_x & 3) * 4 * sl->linesize   + (64 << PIXEL_SHIFT), sl->linesize,       4);
    h->vdsp.prefetch(dest_cb + (sl->mb_x & 7)     * sl->uvlinesize + (64 << PIXEL_SHIFT), dest_cr - dest_cb, 2);

    h->list_counts[mb_xy] = sl->list_count;

    if (!SIMPLE && MB_FIELD(sl)) {
        linesize     = sl->mb_linesize = sl->linesize * 2;
        uvlinesize   = sl->mb_uvlinesize = sl->uvlinesize * 2;
        block_offset = &h->block_offset[48];
        if (mb_y & 1) { // FIXME move out of this function?
            dest_y  -= sl->linesize * 15;
            dest_cb -= sl->uvlinesize * (block_h - 1);
            dest_cr -= sl->uvlinesize * (block_h - 1);
        }
        if (FRAME_MBAFF(h)) {
            int list;
            for (list = 0; list < sl->list_count; list++) {
                if (!USES_LIST(mb_type, list))
                    continue;
                if (IS_16X16(mb_type)) {
                    int8_t *ref = &sl->ref_cache[list][scan8[0]];
                    fill_rectangle(ref, 4, 4, 8, (16 + *ref) ^ (sl->mb_y & 1), 1);
                } else {
                    for (i = 0; i < 16; i += 4) {
                        int ref = sl->ref_cache[list][scan8[i]];
                        if (ref >= 0)
                            fill_rectangle(&sl->ref_cache[list][scan8[i]], 2, 2,
                                           8, (16 + ref) ^ (sl->mb_y & 1), 1);
                    }
                }
            }
        }
    } else {
        linesize   = sl->mb_linesize   = sl->linesize;
        uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
        // dct_offset = s->linesize * 16;
    }

    if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
        const int bit_depth = h->sps.bit_depth_luma;
        if (PIXEL_SHIFT) {
            int j;
            GetBitContext gb;
            init_get_bits(&gb, sl->intra_pcm_ptr,
                          ff_h264_mb_sizes[h->sps.chroma_format_idc] * bit_depth);

            for (i = 0; i < 16; i++) {
                uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize);
                for (j = 0; j < 16; j++)
                    tmp_y[j] = get_bits(&gb, bit_depth);
            }
            if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
                if (!h->sps.chroma_format_idc) {
                    for (i = 0; i < block_h; i++) {
                        uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
                        uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
                        for (j = 0; j < 8; j++) {
                            tmp_cb[j] = tmp_cr[j] = 1 << (bit_depth - 1);
                        }
                    }
                } else {
                    for (i = 0; i < block_h; i++) {
                        uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
                        for (j = 0; j < 8; j++)
                            tmp_cb[j] = get_bits(&gb, bit_depth);
                    }
                    for (i = 0; i < block_h; i++) {
                        uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
                        for (j = 0; j < 8; j++)
                            tmp_cr[j] = get_bits(&gb, bit_depth);
                    }
                }
            }
        } else {
            for (i = 0; i < 16; i++)
                memcpy(dest_y + i * linesize, sl->intra_pcm_ptr + i * 16, 16);
            if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
                if (!h->sps.chroma_format_idc) {
                    for (i = 0; i < 8; i++) {
                        memset(dest_cb + i * uvlinesize, 1 << (bit_depth - 1), 8);
                        memset(dest_cr + i * uvlinesize, 1 << (bit_depth - 1), 8);
                    }
                } else {
                    const uint8_t *src_cb = sl->intra_pcm_ptr + 256;
                    const uint8_t *src_cr = sl->intra_pcm_ptr + 256 + block_h * 8;
                    for (i = 0; i < block_h; i++) {
                        memcpy(dest_cb + i * uvlinesize, src_cb + i * 8, 8);
                        memcpy(dest_cr + i * uvlinesize, src_cr + i * 8, 8);
                    }
                }
            }
        }
    } else {
        if (IS_INTRA(mb_type)) {
            if (sl->deblocking_filter)
                xchg_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
                               uvlinesize, 1, 0, SIMPLE, PIXEL_SHIFT);

            if (SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
                h->hpc.pred8x8[sl->chroma_pred_mode](dest_cb, uvlinesize);
                h->hpc.pred8x8[sl->chroma_pred_mode](dest_cr, uvlinesize);
            }

            hl_decode_mb_predict_luma(h, sl, mb_type, is_h264, SIMPLE,
                                      transform_bypass, PIXEL_SHIFT,
                                      block_offset, linesize, dest_y, 0);

            if (sl->deblocking_filter)
                xchg_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
                               uvlinesize, 0, 0, SIMPLE, PIXEL_SHIFT);
        } else if (is_h264) {
            if (chroma422) {
                FUNC(hl_motion_422)(h, sl, dest_y, dest_cb, dest_cr,
                                    h->qpel_put, h->h264chroma.put_h264_chroma_pixels_tab,
                                    h->qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab,
                                    h->h264dsp.weight_h264_pixels_tab,
                                    h->h264dsp.biweight_h264_pixels_tab);
            } else {
                FUNC(hl_motion_420)(h, sl, dest_y, dest_cb, dest_cr,
                                    h->qpel_put, h->h264chroma.put_h264_chroma_pixels_tab,
                                    h->qpel_avg, h->h264chroma.avg_h264_chroma_pixels_tab,
                                    h->h264dsp.weight_h264_pixels_tab,
                                    h->h264dsp.biweight_h264_pixels_tab);
            }
        }

        hl_decode_mb_idct_luma(h, sl, mb_type, is_h264, SIMPLE, transform_bypass,
                               PIXEL_SHIFT, block_offset, linesize, dest_y, 0);

        if ((SIMPLE || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) &&
                (sl->cbp & 0x30)) {
            uint8_t *dest[2] = { dest_cb, dest_cr };
            if (transform_bypass) {
                if (IS_INTRA(mb_type) && h->sps.profile_idc == 244 &&
                        (sl->chroma_pred_mode == VERT_PRED8x8 ||
                         sl->chroma_pred_mode == HOR_PRED8x8)) {
                    h->hpc.pred8x8_add[sl->chroma_pred_mode](dest[0],
                            block_offset + 16,
                            sl->mb + (16 * 16 * 1 << PIXEL_SHIFT),
                            uvlinesize);
                    h->hpc.pred8x8_add[sl->chroma_pred_mode](dest[1],
                            block_offset + 32,
                            sl->mb + (16 * 16 * 2 << PIXEL_SHIFT),
                            uvlinesize);
                } else {
                    idct_add = h->h264dsp.h264_add_pixels4_clear;
                    for (j = 1; j < 3; j++) {
                        for (i = j * 16; i < j * 16 + 4; i++)
                            if (sl->non_zero_count_cache[scan8[i]] ||
                                    dctcoef_get(sl->mb, PIXEL_SHIFT, i * 16))
                                idct_add(dest[j - 1] + block_offset[i],
                                         sl->mb + (i * 16 << PIXEL_SHIFT),
                                         uvlinesize);
                        if (chroma422) {
                            for (i = j * 16 + 4; i < j * 16 + 8; i++)
                                if (sl->non_zero_count_cache[scan8[i + 4]] ||
                                        dctcoef_get(sl->mb, PIXEL_SHIFT, i * 16))
                                    idct_add(dest[j - 1] + block_offset[i + 4],
                                             sl->mb + (i * 16 << PIXEL_SHIFT),
                                             uvlinesize);
                        }
                    }
                }
            } else {
                if (is_h264) {
                    int qp[2];
                    if (chroma422) {
                        qp[0] = sl->chroma_qp[0] + 3;
                        qp[1] = sl->chroma_qp[1] + 3;
                    } else {
                        qp[0] = sl->chroma_qp[0];
                        qp[1] = sl->chroma_qp[1];
                    }
                    if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]])
                        h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 1 << PIXEL_SHIFT),
                                                               h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
                    if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]])
                        h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 2 << PIXEL_SHIFT),
                                                               h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
                    h->h264dsp.h264_idct_add8(dest, block_offset,
                                              sl->mb, uvlinesize,
                                              sl->non_zero_count_cache);
                } else if (CONFIG_SVQ3_DECODER) {
                    h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + 16 * 16 * 1,
                                                           h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][sl->chroma_qp[0]][0]);
                    h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + 16 * 16 * 2,
                                                           h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][sl->chroma_qp[1]][0]);
                    for (j = 1; j < 3; j++) {
                        for (i = j * 16; i < j * 16 + 4; i++)
                            if (sl->non_zero_count_cache[scan8[i]] || sl->mb[i * 16]) {
                                uint8_t *const ptr = dest[j - 1] + block_offset[i];
                                ff_svq3_add_idct_c(ptr, sl->mb + i * 16,
                                                   uvlinesize,
                                                   ff_h264_chroma_qp[0][sl->qscale + 12] - 12, 2);
                            }
                    }
                }
            }
        }
    }
}
Example #15
static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MPCContext *c = avctx->priv_data;
    GetBitContext gb2, *gb = &gb2;
    int i, j, k, ch, cnt, res, t;
    Band *bands = c->bands;
    int off;
    int maxband, keyframe;
    int last[2];

    /* get output buffer */
    c->frame.nb_samples = MPC_FRAME_SIZE;
    if ((res = avctx->get_buffer(avctx, &c->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    }

    keyframe = c->cur_frame == 0;

    if(keyframe){
        memset(c->Q, 0, sizeof(c->Q));
        c->last_bits_used = 0;
    }
    init_get_bits(gb, buf, buf_size * 8);
    skip_bits(gb, c->last_bits_used & 7);

    if(keyframe)
        maxband = mpc8_get_mod_golomb(gb, c->maxbands + 1);
    else{
        maxband = c->last_max_band + get_vlc2(gb, band_vlc.table, MPC8_BANDS_BITS, 2);
        if(maxband > 32) maxband -= 33;
    }
    if(maxband > c->maxbands + 1)
        return AVERROR_INVALIDDATA;
    c->last_max_band = maxband;

    /* read subband indexes */
    if(maxband){
        last[0] = last[1] = 0;
        for(i = maxband - 1; i >= 0; i--){
            for(ch = 0; ch < 2; ch++){
                last[ch] = get_vlc2(gb, res_vlc[last[ch] > 2].table, MPC8_RES_BITS, 2) + last[ch];
                if(last[ch] > 15) last[ch] -= 17;
                bands[i].res[ch] = last[ch];
            }
        }
        if(c->MSS){
            int mask;

            cnt = 0;
            for(i = 0; i < maxband; i++)
                if(bands[i].res[0] || bands[i].res[1])
                    cnt++;
            t = mpc8_get_mod_golomb(gb, cnt);
            mask = mpc8_get_mask(gb, cnt, t);
            for(i = maxband - 1; i >= 0; i--)
                if(bands[i].res[0] || bands[i].res[1]){
                    bands[i].msf = mask & 1;
                    mask >>= 1;
                }
        }
Example #16
static av_cold int ffat_create_decoder(AVCodecContext *avctx, AVPacket *pkt)
{
    ATDecodeContext *at = avctx->priv_data;
    OSStatus status;
    int i;

    enum AVSampleFormat sample_fmt = (avctx->bits_per_raw_sample == 32) ?
                                     AV_SAMPLE_FMT_S32 : AV_SAMPLE_FMT_S16;

    AudioStreamBasicDescription in_format = {
        .mFormatID = ffat_get_format_id(avctx->codec_id, avctx->profile),
        .mBytesPerPacket = (avctx->codec_id == AV_CODEC_ID_ILBC) ? avctx->block_align : 0,
    };
    AudioStreamBasicDescription out_format = {
        .mFormatID = kAudioFormatLinearPCM,
        .mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked,
        .mFramesPerPacket = 1,
        .mBitsPerChannel = av_get_bytes_per_sample(sample_fmt) * 8,
    };

    avctx->sample_fmt = sample_fmt;

    if (ffat_usable_extradata(avctx)) {
        UInt32 format_size = sizeof(in_format);
        UInt32 cookie_size;
        uint8_t *cookie = ffat_get_magic_cookie(avctx, &cookie_size);
        if (!cookie)
            return AVERROR(ENOMEM);
        status = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                        cookie_size, cookie, &format_size, &in_format);
        if (cookie != at->extradata)
            av_free(cookie);
        if (status != 0) {
            av_log(avctx, AV_LOG_ERROR, "AudioToolbox header-parse error: %i\n", (int)status);
            return AVERROR_UNKNOWN;
        }
#if CONFIG_MP1_AT_DECODER || CONFIG_MP2_AT_DECODER || CONFIG_MP3_AT_DECODER
    } else if (pkt && pkt->size >= 4 &&
               (avctx->codec_id == AV_CODEC_ID_MP1 ||
                avctx->codec_id == AV_CODEC_ID_MP2 ||
                avctx->codec_id == AV_CODEC_ID_MP3)) {
        enum AVCodecID codec_id;
        int bit_rate;
        if (ff_mpa_decode_header(AV_RB32(pkt->data), &avctx->sample_rate,
                                 &in_format.mChannelsPerFrame, &avctx->frame_size,
                                 &bit_rate, &codec_id) < 0)
            return AVERROR_INVALIDDATA;
        avctx->bit_rate = bit_rate;
        in_format.mSampleRate = avctx->sample_rate;
#endif
#if CONFIG_AC3_AT_DECODER || CONFIG_EAC3_AT_DECODER
    } else if (pkt && pkt->size >= 7 &&
               (avctx->codec_id == AV_CODEC_ID_AC3 ||
                avctx->codec_id == AV_CODEC_ID_EAC3)) {
        AC3HeaderInfo hdr, *phdr = &hdr;
        GetBitContext gbc;
        init_get_bits(&gbc, pkt->data, pkt->size * 8); /* init_get_bits() expects the size in bits */
        if (avpriv_ac3_parse_header(&gbc, &phdr) < 0)
            return AVERROR_INVALIDDATA;
        in_format.mSampleRate = hdr.sample_rate;
        in_format.mChannelsPerFrame = hdr.channels;
        avctx->frame_size = hdr.num_blocks * 256;
        avctx->bit_rate = hdr.bit_rate;
#endif
    } else {
        in_format.mSampleRate = avctx->sample_rate ? avctx->sample_rate : 44100;
        in_format.mChannelsPerFrame = avctx->channels ? avctx->channels : 1;
    }

    avctx->sample_rate = out_format.mSampleRate = in_format.mSampleRate;
    avctx->channels = out_format.mChannelsPerFrame = in_format.mChannelsPerFrame;

    if (avctx->codec_id == AV_CODEC_ID_ADPCM_IMA_QT)
        in_format.mFramesPerPacket = 64;

    status = AudioConverterNew(&in_format, &out_format, &at->converter);

    if (status != 0) {
        av_log(avctx, AV_LOG_ERROR, "AudioToolbox init error: %i\n", (int)status);
        return AVERROR_UNKNOWN;
    }

    if ((status = ffat_set_extradata(avctx)) < 0)
        return status;

    for (i = 0; i < (sizeof(at->channel_map) / sizeof(at->channel_map[0])); i++)
        at->channel_map[i] = i;

    ffat_update_ctx(avctx);

    if(!(at->decoded_data = av_malloc(av_get_bytes_per_sample(avctx->sample_fmt)
                                      * avctx->frame_size * avctx->channels)))
        return AVERROR(ENOMEM);

    at->last_pts = AV_NOPTS_VALUE;

    return 0;
}

static av_cold int ffat_init_decoder(AVCodecContext *avctx)
{
    ATDecodeContext *at = avctx->priv_data;
    at->extradata = avctx->extradata;
    at->extradata_size = avctx->extradata_size;

    if ((avctx->channels && avctx->sample_rate) || ffat_usable_extradata(avctx))
        return ffat_create_decoder(avctx, NULL);
    else
        return 0;
}

static OSStatus ffat_decode_callback(AudioConverterRef converter, UInt32 *nb_packets,
                                     AudioBufferList *data,
                                     AudioStreamPacketDescription **packets,
                                     void *inctx)
{
    AVCodecContext *avctx = inctx;
    ATDecodeContext *at = avctx->priv_data;

    if (at->eof) {
        *nb_packets = 0;
        if (packets) {
            *packets = &at->pkt_desc;
            at->pkt_desc.mDataByteSize = 0;
        }
        return 0;
    }

    av_packet_unref(&at->in_pkt);
    av_packet_move_ref(&at->in_pkt, &at->new_in_pkt);

    if (!at->in_pkt.data) {
        *nb_packets = 0;
        return 1;
    }

    data->mNumberBuffers              = 1;
    data->mBuffers[0].mNumberChannels = 0;
    data->mBuffers[0].mDataByteSize   = at->in_pkt.size;
    data->mBuffers[0].mData           = at->in_pkt.data;
    *nb_packets = 1;

    if (packets) {
        *packets = &at->pkt_desc;
        at->pkt_desc.mDataByteSize = at->in_pkt.size;
    }

    return 0;
}
Example #17
int avpriv_mpeg4audio_get_config(MPEG4AudioConfig *c, const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    int specific_config_bitindex;

    init_get_bits(&gb, buf, buf_size*8);
    c->object_type = get_object_type(&gb);
    c->sample_rate = get_sample_rate(&gb, &c->sampling_index);
    c->chan_config = get_bits(&gb, 4);
    if (c->chan_config < FF_ARRAY_ELEMS(ff_mpeg4audio_channels))
        c->channels = ff_mpeg4audio_channels[c->chan_config];
    c->sbr = -1;
    c->ps  = -1;
    if (c->object_type == AOT_SBR || (c->object_type == AOT_PS &&
        // check for W6132 Annex YYYY draft MP3onMP4
        !(show_bits(&gb, 3) & 0x03 && !(show_bits(&gb, 9) & 0x3F)))) {
        if (c->object_type == AOT_PS)
            c->ps = 1;
        c->ext_object_type = AOT_SBR;
        c->sbr = 1;
        c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index);
        c->object_type = get_object_type(&gb);
        if (c->object_type == AOT_ER_BSAC)
            c->ext_chan_config = get_bits(&gb, 4);
    } else {
        c->ext_object_type = AOT_NULL;
        c->ext_sample_rate = 0;
    }
    specific_config_bitindex = get_bits_count(&gb);

    if (c->object_type == AOT_ALS) {
        skip_bits(&gb, 5);
        if (show_bits_long(&gb, 24) != MKBETAG('\0','A','L','S'))
            skip_bits_long(&gb, 24);

        specific_config_bitindex = get_bits_count(&gb);

        if (parse_config_ALS(&gb, c))
            return -1;
    }

    if (c->ext_object_type != AOT_SBR) {
        while (get_bits_left(&gb) > 15) {
            if (show_bits(&gb, 11) == 0x2b7) { // sync extension
                get_bits(&gb, 11);
                c->ext_object_type = get_object_type(&gb);
                if (c->ext_object_type == AOT_SBR && (c->sbr = get_bits1(&gb)) == 1)
                    c->ext_sample_rate = get_sample_rate(&gb, &c->ext_sampling_index);
                if (get_bits_left(&gb) > 11 && get_bits(&gb, 11) == 0x548)
                    c->ps = get_bits1(&gb);
                break;
            } else
                get_bits1(&gb); // skip 1 bit
        }
    }

    //PS requires SBR
    if (!c->sbr)
        c->ps = 0;
    //Limit implicit PS to the HE-AACv2 Profile
    if ((c->ps == -1 && c->object_type != AOT_AAC_LC) || c->channels & ~0x01)
        c->ps = 0;

    return specific_config_bitindex;
}
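
A minimal usage sketch for the parser above. It assumes the file is built inside the FFmpeg/Libav tree so that libavcodec/mpeg4audio.h provides MPEG4AudioConfig and this byte-sized buf_size prototype; it feeds the classic two-byte AudioSpecificConfig for AAC-LC, 44100 Hz, stereo:

#include <stdio.h>
#include <stdint.h>
#include "libavcodec/mpeg4audio.h" /* MPEG4AudioConfig, avpriv_mpeg4audio_get_config() */

int main(void)
{
    /* AudioSpecificConfig: object type 2 (AAC-LC), sampling index 4 (44100 Hz),
     * channel configuration 2 -> bytes 0x12 0x10 */
    static const uint8_t asc[2] = { 0x12, 0x10 };
    MPEG4AudioConfig cfg = { 0 };

    int bits = avpriv_mpeg4audio_get_config(&cfg, asc, sizeof(asc));
    if (bits < 0) {
        fprintf(stderr, "invalid AudioSpecificConfig\n");
        return 1;
    }
    /* with the implementation above this reports object_type=2,
     * sample_rate=44100, channels=2 and 13 header bits consumed */
    printf("object_type=%d sample_rate=%d channels=%d (%d header bits)\n",
           cfg.object_type, cfg.sample_rate, cfg.channels, bits);
    return 0;
}
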
Example #18
static int mp_decode_frame(AVCodecContext *avctx,
                                 void *data, int *got_frame,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MotionPixelsContext *mp = avctx->priv_data;
    GetBitContext gb;
    int i, count1, count2, sz, ret;

    if ((ret = ff_reget_buffer(avctx, &mp->frame)) < 0)
        return ret;

    /* le32 bitstream msb first */
    av_fast_malloc(&mp->bswapbuf, &mp->bswapbuf_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!mp->bswapbuf)
        return AVERROR(ENOMEM);
    mp->dsp.bswap_buf((uint32_t *)mp->bswapbuf, (const uint32_t *)buf, buf_size / 4);
    if (buf_size & 3)
        memcpy(mp->bswapbuf + (buf_size & ~3), buf + (buf_size & ~3), buf_size & 3);
    memset(mp->bswapbuf + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    init_get_bits(&gb, mp->bswapbuf, buf_size * 8);

    memset(mp->changes_map, 0, avctx->width * avctx->height);
    for (i = !(avctx->extradata[1] & 2); i < 2; ++i) {
        count1 = get_bits(&gb, 12);
        count2 = get_bits(&gb, 12);
        mp_read_changes_map(mp, &gb, count1, 8, i);
        mp_read_changes_map(mp, &gb, count2, 4, i);
    }

    mp->codes_count = get_bits(&gb, 4);
    if (mp->codes_count == 0)
        goto end;

    if (mp->changes_map[0] == 0) {
        *(uint16_t *)mp->frame.data[0] = get_bits(&gb, 15);
        mp->changes_map[0] = 1;
    }
    if (mp_read_codes_table(mp, &gb) < 0)
        goto end;

    sz = get_bits(&gb, 18);
    if (avctx->extradata[0] != 5)
        sz += get_bits(&gb, 18);
    if (sz == 0)
        goto end;

    if (mp->max_codes_bits <= 0)
        goto end;
    if (init_vlc(&mp->vlc, mp->max_codes_bits, mp->codes_count,
                 &mp->codes[0].size, sizeof(HuffCode), 1,
                 &mp->codes[0].code, sizeof(HuffCode), 4, 0))
        goto end;
    mp_decode_frame_helper(mp, &gb);
    ff_free_vlc(&mp->vlc);

end:
    if ((ret = av_frame_ref(data, &mp->frame)) < 0)
        return ret;
    *got_frame       = 1;
    return buf_size;
}
Example #19
static int pcx_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            AVPacket *avpkt) {
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    PCXContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p = &s->picture;
    int compressed, xmin, ymin, xmax, ymax;
    unsigned int w, h, bits_per_pixel, bytes_per_line, nplanes, stride, y, x,
                 bytes_per_scanline;
    uint8_t *ptr;
    uint8_t const *bufstart = buf;
    uint8_t *scanline;
    int ret = -1;

    if (buf[0] != 0x0a || buf[1] > 5) {
        av_log(avctx, AV_LOG_ERROR, "this is not PCX encoded data\n");
        return -1;
    }

    compressed = buf[2];
    xmin = AV_RL16(buf+ 4);
    ymin = AV_RL16(buf+ 6);
    xmax = AV_RL16(buf+ 8);
    ymax = AV_RL16(buf+10);

    if (xmax < xmin || ymax < ymin) {
        av_log(avctx, AV_LOG_ERROR, "invalid image dimensions\n");
        return -1;
    }

    w = xmax - xmin + 1;
    h = ymax - ymin + 1;

    bits_per_pixel     = buf[3];
    bytes_per_line     = AV_RL16(buf+66);
    nplanes            = buf[65];
    bytes_per_scanline = nplanes * bytes_per_line;

    if (bytes_per_scanline < w * bits_per_pixel * nplanes / 8) {
        av_log(avctx, AV_LOG_ERROR, "PCX data is corrupted\n");
        return -1;
    }

    switch ((nplanes<<8) + bits_per_pixel) {
        case 0x0308:
            avctx->pix_fmt = PIX_FMT_RGB24;
            break;
        case 0x0108:
        case 0x0104:
        case 0x0102:
        case 0x0101:
        case 0x0401:
        case 0x0301:
        case 0x0201:
            avctx->pix_fmt = PIX_FMT_PAL8;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid PCX file\n");
            return -1;
    }

    buf += 128;

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    if (avcodec_check_dimensions(avctx, w, h))
        return -1;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    if (avctx->get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    p->pict_type = FF_I_TYPE;

    ptr    = p->data[0];
    stride = p->linesize[0];

    scanline = av_malloc(bytes_per_scanline);
    if (!scanline)
        return AVERROR(ENOMEM);

    if (nplanes == 3 && bits_per_pixel == 8) {
        for (y=0; y<h; y++) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);

            for (x=0; x<w; x++) {
                ptr[3*x  ] = scanline[x                    ];
                ptr[3*x+1] = scanline[x+ bytes_per_line    ];
                ptr[3*x+2] = scanline[x+(bytes_per_line<<1)];
            }

            ptr += stride;
        }

    } else if (nplanes == 1 && bits_per_pixel == 8) {
        const uint8_t *palstart = bufstart + buf_size - 769;

        for (y=0; y<h; y++, ptr+=stride) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);
            memcpy(ptr, scanline, w);
        }

        if (buf != palstart) {
            av_log(avctx, AV_LOG_WARNING, "image data possibly corrupted\n");
            buf = palstart;
        }
        if (*buf++ != 12) {
            av_log(avctx, AV_LOG_ERROR, "expected palette after image data\n");
            goto end;
        }

    } else if (nplanes == 1) {   /* all packed formats, max. 16 colors */
        GetBitContext s;

        for (y=0; y<h; y++) {
            init_get_bits(&s, scanline, bytes_per_scanline<<3);

            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);

            for (x=0; x<w; x++)
                ptr[x] = get_bits(&s, bits_per_pixel);
            ptr += stride;
        }

    } else {    /* planar, 4, 8 or 16 colors */
        int i;

        for (y=0; y<h; y++) {
            buf = pcx_rle_decode(buf, scanline, bytes_per_scanline, compressed);

            for (x=0; x<w; x++) {
                int m = 0x80 >> (x&7), v = 0;
                for (i=nplanes - 1; i>=0; i--) {
                    v <<= 1;
                    v  += !!(scanline[i*bytes_per_line + (x>>3)] & m);
                }
                ptr[x] = v;
            }
            ptr += stride;
        }
    }

    if (nplanes == 1 && bits_per_pixel == 8) {
        pcx_palette(&buf, (uint32_t *) p->data[1], 256);
    } else if (bits_per_pixel < 8) {
        const uint8_t *palette = bufstart+16;
        pcx_palette(&palette, (uint32_t *) p->data[1], 16);
    }

    *picture = s->picture;
    *data_size = sizeof(AVFrame);

    ret = buf - bufstart;
end:
    av_free(scanline);
    return ret;
}
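
A small standalone sketch (planar_pixel is a hypothetical helper, not FFmpeg API) of what the planar branch above does for a single pixel: plane i contributes bit i of the palette index, selected by the mask 0x80 >> (x & 7):

#include <stdio.h>
#include <stdint.h>

/* Combine one bit from each of nplanes bit planes into a palette index,
 * mirroring the inner loop of the planar branch above. */
static uint8_t planar_pixel(const uint8_t *scanline, int bytes_per_line,
                            int nplanes, int x)
{
    int m = 0x80 >> (x & 7), v = 0, i;
    for (i = nplanes - 1; i >= 0; i--) {
        v <<= 1;
        v  += !!(scanline[i * bytes_per_line + (x >> 3)] & m);
    }
    return v;
}

int main(void)
{
    /* 4 planes, 1 byte per line: pixel 0 is set in planes 0 and 2 -> index 5 */
    uint8_t scanline[4] = { 0x80, 0x00, 0x80, 0x00 };
    printf("palette index of pixel 0: %d\n", planar_pixel(scanline, 1, 4, 0));
    return 0;
}
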
Example #20
// the two bits[p] > 32 checks should be redundant, all calling code should
// already ensure that, but since it allows overwriting the stack it seems
// reasonable to check redundantly.
int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num)
{
    uint32_t exit_at_level[33] = { 404 };

    unsigned i, j, p, code;

#ifdef DEBUG
    GetBitContext gb;
#endif

    for (p = 0; (p < num) && (bits[p] == 0); ++p) // check the index before reading bits[p]
        ;
    if (p == num) {
//        av_log(vc->avccontext, AV_LOG_INFO, "An empty codebook. Heh?! \n");
        return 0;
    }

    codes[p] = 0;
    if (bits[p] > 32)
        return 1;
    for (i = 0; i < bits[p]; ++i)
        exit_at_level[i+1] = 1 << i;

#ifdef DEBUG
    av_log(NULL, AV_LOG_INFO, " %u. of %u code len %d code %d - ", p, num, bits[p], codes[p]);
    init_get_bits(&gb, (uint8_t *)&codes[p], bits[p]);
    for (i = 0; i < bits[p]; ++i)
        av_log(NULL, AV_LOG_INFO, "%s", get_bits1(&gb) ? "1" : "0");
    av_log(NULL, AV_LOG_INFO, "\n");
#endif

    ++p;

    for (; p < num; ++p) {
        if (bits[p] > 32)
             return 1;
        if (bits[p] == 0)
             continue;
        // find corresponding exit(node which the tree can grow further from)
        for (i = bits[p]; i > 0; --i)
            if (exit_at_level[i])
                break;
        if (!i) // overspecified tree
             return 1;
        code = exit_at_level[i];
        exit_at_level[i] = 0;
        // construct code (append 0s to end) and introduce new exits
        for (j = i + 1 ;j <= bits[p]; ++j)
            exit_at_level[j] = code + (1 << (j - 1));
        codes[p] = code;

#ifdef DEBUG
        av_log(NULL, AV_LOG_INFO, " %d. code len %d code %d - ", p, bits[p], codes[p]);
        init_get_bits(&gb, (uint8_t *)&codes[p], bits[p]);
        for (i = 0; i < bits[p]; ++i)
            av_log(NULL, AV_LOG_INFO, "%s", get_bits1(&gb) ? "1" : "0");
        av_log(NULL, AV_LOG_INFO, "\n");
#endif

    }

    //no exits should be left (underspecified tree - ie. unused valid vlcs - not allowed by SPEC)
    for (p = 1; p < 33; p++)
        if (exit_at_level[p])
            return 1;

    return 0;
}
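
A minimal usage sketch, assuming ff_vorbis_len2vlc() from the snippet above is compiled in (its prototype is repeated here only for the example). It assigns canonical codewords to a toy set of code lengths:

#include <stdio.h>
#include <stdint.h>

int ff_vorbis_len2vlc(uint8_t *bits, uint32_t *codes, unsigned num); /* from the snippet above */

int main(void)
{
    /* code lengths for a toy, fully specified 5-entry codebook */
    uint8_t  bits[5]  = { 1, 3, 3, 3, 3 };
    uint32_t codes[5] = { 0 };

    if (ff_vorbis_len2vlc(bits, codes, 5)) {
        fprintf(stderr, "invalid codebook\n");
        return 1;
    }
    /* with the implementation above: codes 0x0, 0x1, 0x5, 0x3, 0x7 */
    for (unsigned i = 0; i < 5; i++)
        printf("entry %u: len %u code 0x%x\n",
               i, (unsigned)bits[i], (unsigned)codes[i]);
    return 0;
}
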
Example #21
static av_cold int mpc8_decode_init(AVCodecContext * avctx)
{
	int i;
	MPCContext *c = avctx->priv_data;
	GetBitContext gb;
	static int vlc_initialized = 0;
	int channels;

	static VLC_TYPE band_table[542][2];
	static VLC_TYPE q1_table[520][2];
	static VLC_TYPE q9up_table[524][2];
	static VLC_TYPE scfi0_table[1 << MPC8_SCFI0_BITS][2];
	static VLC_TYPE scfi1_table[1 << MPC8_SCFI1_BITS][2];
	static VLC_TYPE dscf0_table[560][2];
	static VLC_TYPE dscf1_table[598][2];
	static VLC_TYPE q3_0_table[512][2];
	static VLC_TYPE q3_1_table[516][2];
	static VLC_TYPE codes_table[5708][2];

	if(avctx->extradata_size < 2)
	{
		av_log(avctx, AV_LOG_ERROR, "Too small extradata size (%i)!\n", avctx->extradata_size);
		return -1;
	}
	memset(c->oldDSCF, 0, sizeof(c->oldDSCF));
	av_lfg_init(&c->rnd, 0xDEADBEEF);
	dsputil_init(&c->dsp, avctx);

	ff_mpc_init();

	init_get_bits(&gb, avctx->extradata, 16);

	skip_bits(&gb, 3);//sample rate
	c->maxbands = get_bits(&gb, 5) + 1;
	channels = get_bits(&gb, 4) + 1;
	if (channels > 2)
	{
		av_log_missing_feature(avctx, "Multichannel MPC SV8", 1);
		return -1;
	}
	c->MSS = get_bits1(&gb);
	c->frames = 1 << (get_bits(&gb, 3) * 2);

	avctx->sample_fmt = AV_SAMPLE_FMT_S16;
	avctx->channel_layout = (avctx->channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;

	if(vlc_initialized) return 0;
	av_log(avctx, AV_LOG_DEBUG, "Initing VLC\n");

	band_vlc.table = band_table;
	band_vlc.table_allocated = 542;
	init_vlc(&band_vlc, MPC8_BANDS_BITS, MPC8_BANDS_SIZE,
	         mpc8_bands_bits,  1, 1,
	         mpc8_bands_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

	q1_vlc.table = q1_table;
	q1_vlc.table_allocated = 520;
	init_vlc(&q1_vlc, MPC8_Q1_BITS, MPC8_Q1_SIZE,
	         mpc8_q1_bits,  1, 1,
	         mpc8_q1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
	q9up_vlc.table = q9up_table;
	q9up_vlc.table_allocated = 524;
	init_vlc(&q9up_vlc, MPC8_Q9UP_BITS, MPC8_Q9UP_SIZE,
	         mpc8_q9up_bits,  1, 1,
	         mpc8_q9up_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

	scfi_vlc[0].table = scfi0_table;
	scfi_vlc[0].table_allocated = 1 << MPC8_SCFI0_BITS;
	init_vlc(&scfi_vlc[0], MPC8_SCFI0_BITS, MPC8_SCFI0_SIZE,
	         mpc8_scfi0_bits,  1, 1,
	         mpc8_scfi0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
	scfi_vlc[1].table = scfi1_table;
	scfi_vlc[1].table_allocated = 1 << MPC8_SCFI1_BITS;
	init_vlc(&scfi_vlc[1], MPC8_SCFI1_BITS, MPC8_SCFI1_SIZE,
	         mpc8_scfi1_bits,  1, 1,
	         mpc8_scfi1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

	dscf_vlc[0].table = dscf0_table;
	dscf_vlc[0].table_allocated = 560;
	init_vlc(&dscf_vlc[0], MPC8_DSCF0_BITS, MPC8_DSCF0_SIZE,
	         mpc8_dscf0_bits,  1, 1,
	         mpc8_dscf0_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);
	dscf_vlc[1].table = dscf1_table;
	dscf_vlc[1].table_allocated = 598;
	init_vlc(&dscf_vlc[1], MPC8_DSCF1_BITS, MPC8_DSCF1_SIZE,
	         mpc8_dscf1_bits,  1, 1,
	         mpc8_dscf1_codes, 1, 1, INIT_VLC_USE_NEW_STATIC);

	q3_vlc[0].table = q3_0_table;
	q3_vlc[0].table_allocated = 512;
	init_vlc_sparse(&q3_vlc[0], MPC8_Q3_BITS, MPC8_Q3_SIZE,
	                mpc8_q3_bits,  1, 1,
	                mpc8_q3_codes, 1, 1,
	                mpc8_q3_syms,  1, 1, INIT_VLC_USE_NEW_STATIC);
	q3_vlc[1].table = q3_1_table;
	q3_vlc[1].table_allocated = 516;
	init_vlc_sparse(&q3_vlc[1], MPC8_Q4_BITS, MPC8_Q4_SIZE,
	                mpc8_q4_bits,  1, 1,
	                mpc8_q4_codes, 1, 1,
	                mpc8_q4_syms,  1, 1, INIT_VLC_USE_NEW_STATIC);

	for(i = 0; i < 2; i++)
	{
		res_vlc[i].table = &codes_table[vlc_offsets[0+i]];
		res_vlc[i].table_allocated = vlc_offsets[1+i] - vlc_offsets[0+i];
		init_vlc(&res_vlc[i], MPC8_RES_BITS, MPC8_RES_SIZE,
		         &mpc8_res_bits[i],  1, 1,
		         &mpc8_res_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

		q2_vlc[i].table = &codes_table[vlc_offsets[2+i]];
		q2_vlc[i].table_allocated = vlc_offsets[3+i] - vlc_offsets[2+i];
		init_vlc(&q2_vlc[i], MPC8_Q2_BITS, MPC8_Q2_SIZE,
		         &mpc8_q2_bits[i],  1, 1,
		         &mpc8_q2_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);

		quant_vlc[0][i].table = &codes_table[vlc_offsets[4+i]];
		quant_vlc[0][i].table_allocated = vlc_offsets[5+i] - vlc_offsets[4+i];
		init_vlc(&quant_vlc[0][i], MPC8_Q5_BITS, MPC8_Q5_SIZE,
		         &mpc8_q5_bits[i],  1, 1,
		         &mpc8_q5_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
		quant_vlc[1][i].table = &codes_table[vlc_offsets[6+i]];
		quant_vlc[1][i].table_allocated = vlc_offsets[7+i] - vlc_offsets[6+i];
		init_vlc(&quant_vlc[1][i], MPC8_Q6_BITS, MPC8_Q6_SIZE,
		         &mpc8_q6_bits[i],  1, 1,
		         &mpc8_q6_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
		quant_vlc[2][i].table = &codes_table[vlc_offsets[8+i]];
		quant_vlc[2][i].table_allocated = vlc_offsets[9+i] - vlc_offsets[8+i];
		init_vlc(&quant_vlc[2][i], MPC8_Q7_BITS, MPC8_Q7_SIZE,
		         &mpc8_q7_bits[i],  1, 1,
		         &mpc8_q7_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
		quant_vlc[3][i].table = &codes_table[vlc_offsets[10+i]];
		quant_vlc[3][i].table_allocated = vlc_offsets[11+i] - vlc_offsets[10+i];
		init_vlc(&quant_vlc[3][i], MPC8_Q8_BITS, MPC8_Q8_SIZE,
		         &mpc8_q8_bits[i],  1, 1,
		         &mpc8_q8_codes[i], 1, 1, INIT_VLC_USE_NEW_STATIC);
	}
	vlc_initialized = 1;
	return 0;
}
Example #22
/**
 * Decode a single frame
 * @param avctx decoder context
 * @param data decoded frame
 * @param data_size size of the decoded frame
 * @param buf input buffer
 * @param buf_size input buffer size
 * @return 0 success, -1 on error
 */
static int escape130_decode_frame(AVCodecContext *avctx,
                                  void *data, int *data_size,
                                  AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Escape130Context *s = avctx->priv_data;

    GetBitContext gb;
    unsigned i;

    uint8_t *old_y, *old_cb, *old_cr,
            *new_y, *new_cb, *new_cr;
    unsigned old_y_stride, old_cb_stride, old_cr_stride,
             new_y_stride, new_cb_stride, new_cr_stride;
    unsigned total_blocks = avctx->width * avctx->height / 4,
             block_index, row_index = 0;
    unsigned y[4] = {0}, cb = 16, cr = 16;
    unsigned skip = -1;
    unsigned y_base = 0;
    uint8_t *yb= s->bases;

    AVFrame new_frame = { { 0 } };

    init_get_bits(&gb, buf, buf_size * 8);

    if (get_bits_left(&gb) < 128)
        return -1;

    // Header; no useful information in here
    skip_bits_long(&gb, 128);

    new_frame.reference = 3;
    if (avctx->get_buffer(avctx, &new_frame)) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    new_y = new_frame.data[0];
    new_cb = new_frame.data[1];
    new_cr = new_frame.data[2];
    new_y_stride = new_frame.linesize[0];
    new_cb_stride = new_frame.linesize[1];
    new_cr_stride = new_frame.linesize[2];
    old_y = s->frame.data[0];
    old_cb = s->frame.data[1];
    old_cr = s->frame.data[2];
    old_y_stride = s->frame.linesize[0];
    old_cb_stride = s->frame.linesize[1];
    old_cr_stride = s->frame.linesize[2];

    av_log(avctx, AV_LOG_DEBUG,
           "Strides: %i, %i\n",
           new_y_stride, new_cb_stride);

    for (block_index = 0; block_index < total_blocks; block_index++) {
        // Note that this call will make us skip the rest of the blocks
        // if the frame prematurely ends
        if (skip == -1)
            skip = decode_skip_count(&gb);

        if (skip) {
            if (old_y) {
                y[0] = old_y[0] / 4;
                y[1] = old_y[1] / 4;
                y[2] = old_y[old_y_stride] / 4;
                y[3] = old_y[old_y_stride+1] / 4;
                y_base= yb[0];
                cb = old_cb[0] / 8;
                cr = old_cr[0] / 8;
            } else {
                y_base=y[0] = y[1] = y[2] = y[3] = 0;
                cb = cr = 16;
            }
        } else {
            if (get_bits1(&gb)) {
                static const uint8_t offset_table[] = {2, 4, 10, 20};
                static const int8_t sign_table[64][4] =
                    { {0, 0, 0, 0},
                      {-1, 1, 0, 0},
                      {1, -1, 0, 0},
                      {-1, 0, 1, 0},
                      {-1, 1, 1, 0},
                      {0, -1, 1, 0},
                      {1, -1, 1, 0},
                      {-1, -1, 1, 0},
                      {1, 0, -1, 0},
                      {0, 1, -1, 0},
                      {1, 1, -1, 0},
                      {-1, 1, -1, 0},
                      {1, -1, -1, 0},
                      {-1, 0, 0, 1},
                      {-1, 1, 0, 1},
                      {0, -1, 0, 1},

                      {0, 0, 0, 0},
                      {1, -1, 0, 1},
                      {-1, -1, 0, 1},
                      {-1, 0, 1, 1},
                      {-1, 1, 1, 1},
                      {0, -1, 1, 1},
                      {1, -1, 1, 1},
                      {-1, -1, 1, 1},
                      {0, 0, -1, 1},
                      {1, 0, -1, 1},
                      {-1, 0, -1, 1},
                      {0, 1, -1, 1},
                      {1, 1, -1, 1},
                      {-1, 1, -1, 1},
                      {0, -1, -1, 1},
                      {1, -1, -1, 1},

                      {0, 0, 0, 0},
                      {-1, -1, -1, 1},
                      {1, 0, 0, -1},
                      {0, 1, 0, -1},
                      {1, 1, 0, -1},
                      {-1, 1, 0, -1},
                      {1, -1, 0, -1},
                      {0, 0, 1, -1},
                      {1, 0, 1, -1},
                      {-1, 0, 1, -1},
                      {0, 1, 1, -1},
                      {1, 1, 1, -1},
                      {-1, 1, 1, -1},
                      {0, -1, 1, -1},
                      {1, -1, 1, -1},
                      {-1, -1, 1, -1},

                      {0, 0, 0, 0},
                      {1, 0, -1, -1},
                      {0, 1, -1, -1},
                      {1, 1, -1, -1},
                      {-1, 1, -1, -1},
                      {1, -1, -1, -1} };
                unsigned sign_selector = get_bits(&gb, 6);
                unsigned difference_selector = get_bits(&gb, 2);
                y_base = 2 * get_bits(&gb, 5);
                for (i = 0; i < 4; i++) {
                    y[i] = av_clip((int)y_base + offset_table[difference_selector] *
                                            sign_table[sign_selector][i], 0, 63);
                }
            } else if (get_bits1(&gb)) {
                if (get_bits1(&gb)) {
                    y_base = get_bits(&gb, 6);
                } else {
                    unsigned adjust_index = get_bits(&gb, 3);
                    static const int8_t adjust[] = {-4, -3, -2, -1, 1, 2, 3, 4};
                    y_base = (y_base + adjust[adjust_index]) & 63;
                }
                for (i = 0; i < 4; i++)
                    y[i] = y_base;
            }

            if (get_bits1(&gb)) {
                if (get_bits1(&gb)) {
                    cb = get_bits(&gb, 5);
                    cr = get_bits(&gb, 5);
                } else {
                    unsigned adjust_index = get_bits(&gb, 3);
                    static const int8_t adjust[2][8] =
                        {  { 1, 1, 0, -1, -1, -1,  0,  1 },
                           { 0, 1, 1,  1,  0, -1, -1, -1 } };
                    cb = (cb + adjust[0][adjust_index]) & 31;
                    cr = (cr + adjust[1][adjust_index]) & 31;
                }
            }
        }
        *yb++= y_base;

        new_y[0] = y[0] * 4;
        new_y[1] = y[1] * 4;
        new_y[new_y_stride] = y[2] * 4;
        new_y[new_y_stride + 1] = y[3] * 4;
        *new_cb = cb * 8;
        *new_cr = cr * 8;

        if (old_y)
            old_y += 2, old_cb++, old_cr++;
        new_y += 2, new_cb++, new_cr++;
        row_index++;
        if (avctx->width / 2 == row_index) {
            row_index = 0;
            if (old_y) {
                old_y  += old_y_stride * 2  - avctx->width;
                old_cb += old_cb_stride - avctx->width / 2;
                old_cr += old_cr_stride - avctx->width / 2;
            }
            new_y  += new_y_stride * 2  - avctx->width;
            new_cb += new_cb_stride - avctx->width / 2;
            new_cr += new_cr_stride - avctx->width / 2;
        }

        skip--;
    }

    av_log(avctx, AV_LOG_DEBUG,
           "Escape sizes: %i, %i\n",
           buf_size, get_bits_count(&gb) / 8);

    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    *(AVFrame*)data = s->frame = new_frame;
    *data_size = sizeof(AVFrame);

    return buf_size;
}
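
A standalone sketch of the luma reconstruction used in the first branch above: each of the four 2x2 samples is y_base plus a per-sample sign from sign_table[sign_selector] scaled by offset_table[difference_selector], clipped to 0..63. The clip() helper is a stand-in for av_clip(); the selector values are just an illustrative choice:

#include <stdio.h>

static int clip(int v, int lo, int hi) /* stand-in for av_clip() */
{
    return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
    static const unsigned char offset_table[] = { 2, 4, 10, 20 };
    /* one row of the 64-entry sign table above, e.g. selector 1 -> {-1, 1, 0, 0} */
    static const signed char signs[4] = { -1, 1, 0, 0 };
    unsigned y_base = 40;             /* 2 * get_bits(&gb, 5) in the decoder */
    unsigned difference_selector = 2; /* offset 10 */
    unsigned y[4];

    for (int i = 0; i < 4; i++)
        y[i] = clip((int)y_base + offset_table[difference_selector] * signs[i], 0, 63);

    /* the decoder stores y[i] * 4 as the final 8-bit luma value */
    printf("y = { %u, %u, %u, %u } -> luma { %u, %u, %u, %u }\n",
           y[0], y[1], y[2], y[3], y[0] * 4, y[1] * 4, y[2] * 4, y[3] * 4);
    return 0;
}
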
Example #23
int
ff_rdt_parse_header(const uint8_t *buf, int len,
                    int *pset_id, int *pseq_no, int *pstream_id,
                    int *pis_keyframe, uint32_t *ptimestamp)
{
    GetBitContext gb;
    int consumed = 0, set_id, seq_no, stream_id, is_keyframe,
        len_included, need_reliable;
    uint32_t timestamp;

    /* skip status packets */
    while (len >= 5 && buf[1] == 0xFF /* status packet */) {
        int pkt_len;

        if (!(buf[0] & 0x80))
            return -1; /* not followed by a data packet */

        pkt_len = AV_RB16(buf+3);
        buf += pkt_len;
        len -= pkt_len;
        consumed += pkt_len;
    }
    if (len < 16)
        return -1;
    /**
     * Layout of the header (in bits):
     * 1:  len_included
     *     Flag indicating whether this header includes a length field;
     *     this can be used to concatenate multiple RDT packets in a
     *     single UDP/TCP data frame and is used to precede RDT data
     *     by stream status packets
     * 1:  need_reliable
     *     Flag indicating whether this header includes a "reliable
     *     sequence number"; these are apparently sequence numbers of
     *     data packets alone. For data packets, this flag is always
     *     set, according to the Real documentation [1]
     * 5:  set_id
     *     ID of a set of streams of identical content, possibly with
     *     different codecs or bitrates
     * 1:  is_reliable
     *     Flag set for certain streams deemed less tolerable for packet
     *     loss
     * 16: seq_no
     *     Packet sequence number; if >=0xFF00, this is a non-data packet
     *     containing stream status info, the second byte indicates the
     *     type of status packet (see wireshark docs / source code [2])
     * if (len_included) {
     *     16: packet_len
     * } else {
     *     packet_len = remainder of UDP/TCP frame
     * }
     * 1:  is_back_to_back
     *     Back-to-Back flag; used for timing, set for one in every 10
     *     packets, according to the Real documentation [1]
     * 1:  is_slow_data
     *     Slow-data flag; currently unused, according to Real docs [1]
     * 5:  stream_id
     *     ID of the stream within this particular set of streams
     * 1:  is_no_keyframe
     *     Non-keyframe flag (unset if packet belongs to a keyframe)
     * 32: timestamp (PTS)
     * if (set_id == 0x1F) {
     *     16: set_id (extended set-of-streams ID; see set_id)
     * }
     * if (need_reliable) {
     *     16: reliable_seq_no
     *         Reliable sequence number (see need_reliable)
     * }
     * if (stream_id == 0x1F) {
     *     16: stream_id (extended stream ID; see stream_id)
     * }
     * [1] https://protocol.helixcommunity.org/files/2005/devdocs/RDT_Feature_Level_20.txt
     * [2] http://www.wireshark.org/docs/dfref/r/rdt.html and
     *     http://anonsvn.wireshark.org/viewvc/trunk/epan/dissectors/packet-rdt.c
     */
    init_get_bits(&gb, buf, len << 3);
    len_included  = get_bits1(&gb);
    need_reliable = get_bits1(&gb);
    set_id        = get_bits(&gb, 5);
    skip_bits(&gb, 1);
    seq_no        = get_bits(&gb, 16);
    if (len_included)
        skip_bits(&gb, 16);
    skip_bits(&gb, 2);
    stream_id     = get_bits(&gb, 5);
    is_keyframe   = !get_bits1(&gb);
    timestamp     = get_bits_long(&gb, 32);
    if (set_id == 0x1f)
        set_id    = get_bits(&gb, 16);
    if (need_reliable)
        skip_bits(&gb, 16);
    if (stream_id == 0x1f)
        stream_id = get_bits(&gb, 16);

    if (pset_id)      *pset_id      = set_id;
    if (pseq_no)      *pseq_no      = seq_no;
    if (pstream_id)   *pstream_id   = stream_id;
    if (pis_keyframe) *pis_keyframe = is_keyframe;
    if (ptimestamp)   *ptimestamp   = timestamp;

    return consumed + (get_bits_count(&gb) >> 3);
}
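
A minimal calling sketch, assuming ff_rdt_parse_header() from the snippet above is linked in (the prototype is repeated only for the example). An all-zero 16-byte payload is a legal minimal header for this layout: no length field, set/stream/sequence 0, keyframe flag set, and the call returns the 8 header bytes it consumed:

#include <stdio.h>
#include <stdint.h>

int ff_rdt_parse_header(const uint8_t *buf, int len,
                        int *pset_id, int *pseq_no, int *pstream_id,
                        int *pis_keyframe, uint32_t *ptimestamp); /* from the snippet above */

int main(void)
{
    uint8_t pkt[16] = { 0 };   /* pretend this came from the RTSP/RDT transport */
    int set_id, seq_no, stream_id, is_keyframe;
    uint32_t ts;

    int hdr_len = ff_rdt_parse_header(pkt, sizeof(pkt), &set_id, &seq_no,
                                      &stream_id, &is_keyframe, &ts);
    if (hdr_len < 0) {
        fprintf(stderr, "not an RDT data packet\n");
        return 1;
    }
    printf("set %d stream %d seq %d keyframe %d pts %u, payload starts at byte %d\n",
           set_id, stream_id, seq_no, is_keyframe, (unsigned)ts, hdr_len);
    return 0;
}
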
Example #24
static int h261_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    H261Context *h= avctx->priv_data;
    MpegEncContext *s = &h->s;
    int ret;
    AVFrame *pict = data;

    dprintf(avctx, "*****frame %d size=%d\n", avctx->frame_number, buf_size);
    dprintf(avctx, "bytes=%x %x %x %x\n", buf[0], buf[1], buf[2], buf[3]);
    s->flags= avctx->flags;
    s->flags2= avctx->flags2;

    h->gob_start_code_skipped=0;

retry:

    init_get_bits(&s->gb, buf, buf_size*8);

    if(!s->context_initialized){
        if (MPV_common_init(s) < 0) //we need the idct permutation for reading a custom matrix
            return -1;
    }

    //we need to set current_picture_ptr before reading the header, otherwise we cannot store anything in there
    if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
        int i= ff_find_unused_picture(s, 0);
        s->current_picture_ptr= &s->picture[i];
    }

    ret = h261_decode_picture_header(h);

    /* skip if the header was thrashed */
    if (ret < 0){
        av_log(s->avctx, AV_LOG_ERROR, "header damaged\n");
        return -1;
    }

    if (s->width != avctx->coded_width || s->height != avctx->coded_height){
        ParseContext pc= s->parse_context; //FIXME move this demuxing hack to libavformat
        s->parse_context.buffer=0;
        MPV_common_end(s);
        s->parse_context= pc;
    }
    if (!s->context_initialized) {
        avcodec_set_dimensions(avctx, s->width, s->height);

        goto retry;
    }

    // for hurry_up==5
    s->current_picture.pict_type= s->pict_type;
    s->current_picture.key_frame= s->pict_type == FF_I_TYPE;

    /* skip everything if we are in a hurry>=5 */
    if(avctx->hurry_up>=5) return get_consumed_bytes(s, buf_size);
    if(  (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==FF_B_TYPE)
       ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=FF_I_TYPE)
       || avctx->skip_frame >= AVDISCARD_ALL)
        return get_consumed_bytes(s, buf_size);

    if(MPV_frame_start(s, avctx) < 0)
        return -1;

    ff_er_frame_start(s);

    /* decode each macroblock */
    s->mb_x=0;
    s->mb_y=0;

    while(h->gob_number < (s->mb_height==18 ? 12 : 5)){
        if(ff_h261_resync(h)<0)
            break;
        h261_decode_gob(h);
    }
    MPV_frame_end(s);

    assert(s->current_picture.pict_type == s->current_picture_ptr->pict_type);
    assert(s->current_picture.pict_type == s->pict_type);
    *pict= *(AVFrame*)s->current_picture_ptr;
    ff_print_debug_info(s, pict);

    *data_size = sizeof(AVFrame);

    return get_consumed_bytes(s, buf_size);
}
Example #25
/**
 * Decode Smacker audio data
 */
static int smka_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    GetBitContext gb;
    HuffContext h[4] = { { 0 } };
    VLC vlc[4]       = { { 0 } };
    int16_t *samples;
    uint8_t *samples8;
    int val;
    int i, res, ret;
    int unp_size;
    int bits, stereo;
    int pred[2] = {0, 0};

    if (buf_size <= 4) {
        av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
        return AVERROR(EINVAL);
    }

    unp_size = AV_RL32(buf);

    if (unp_size > (1U<<24)) {
        av_log(avctx, AV_LOG_ERROR, "packet is too big\n");
        return AVERROR_INVALIDDATA;
    }

    init_get_bits(&gb, buf + 4, (buf_size - 4) * 8);

    if(!get_bits1(&gb)){
        av_log(avctx, AV_LOG_INFO, "Sound: no data\n");
        *got_frame_ptr = 0;
        return 1;
    }
    stereo = get_bits1(&gb);
    bits = get_bits1(&gb);
    if (stereo ^ (avctx->channels != 1)) {
        av_log(avctx, AV_LOG_ERROR, "channels mismatch\n");
        return AVERROR(EINVAL);
    }
    if (bits && avctx->sample_fmt == AV_SAMPLE_FMT_U8) {
        av_log(avctx, AV_LOG_ERROR, "sample format mismatch\n");
        return AVERROR(EINVAL);
    }

    /* get output buffer */
    frame->nb_samples = unp_size / (avctx->channels * (bits + 1));
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    samples  = (int16_t *)frame->data[0];
    samples8 =            frame->data[0];

    // Initialize
    for(i = 0; i < (1 << (bits + stereo)); i++) {
        h[i].length = 256;
        h[i].maxlength = 0;
        h[i].current = 0;
        h[i].bits = av_mallocz(256 * 4);
        h[i].lengths = av_mallocz(256 * sizeof(int));
        h[i].values = av_mallocz(256 * sizeof(int));
        skip_bits1(&gb);
        res = smacker_decode_tree(&gb, &h[i], 0, 0);
        if (res < 0)
            return res;
        skip_bits1(&gb);
        if(h[i].current > 1) {
            res = init_vlc(&vlc[i], SMKTREE_BITS, h[i].length,
                    h[i].lengths, sizeof(int), sizeof(int),
                    h[i].bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
            if(res < 0) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
                return AVERROR_INVALIDDATA;
            }
        }
    }
    if(bits) { //decode 16-bit data
        for(i = stereo; i >= 0; i--)
            pred[i] = sign_extend(av_bswap16(get_bits(&gb, 16)), 16);
        for(i = 0; i <= stereo; i++)
            *samples++ = pred[i];
        for(; i < unp_size / 2; i++) {
            if(get_bits_left(&gb)<0)
                return AVERROR_INVALIDDATA;
            if(i & stereo) {
                if(vlc[2].table)
                    res = get_vlc2(&gb, vlc[2].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                val  = h[2].values[res];
                if(vlc[3].table)
                    res = get_vlc2(&gb, vlc[3].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                val |= h[3].values[res] << 8;
                pred[1] += sign_extend(val, 16);
                *samples++ = pred[1];
            } else {
                if(vlc[0].table)
                    res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                val  = h[0].values[res];
                if(vlc[1].table)
                    res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                val |= h[1].values[res] << 8;
                pred[0] += sign_extend(val, 16);
                *samples++ = pred[0];
            }
        }
    } else { //8-bit data
        for(i = stereo; i >= 0; i--)
            pred[i] = get_bits(&gb, 8);
        for(i = 0; i <= stereo; i++)
            *samples8++ = pred[i];
        for(; i < unp_size; i++) {
            if(get_bits_left(&gb)<0)
                return AVERROR_INVALIDDATA;
            if(i & stereo){
                if(vlc[1].table)
                    res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                pred[1] += sign_extend(h[1].values[res], 8);
                *samples8++ = av_clip_uint8(pred[1]);
            } else {
                if(vlc[0].table)
                    res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                pred[0] += sign_extend(h[0].values[res], 8);
                *samples8++ = av_clip_uint8(pred[0]);
            }
        }
    }

    for(i = 0; i < 4; i++) {
        if(vlc[i].table)
            ff_free_vlc(&vlc[i]);
        av_free(h[i].bits);
        av_free(h[i].lengths);
        av_free(h[i].values);
    }

    *got_frame_ptr = 1;

    return buf_size;
}
Example #26
/**
 * ffdshow custom stuff (based on decode_nal_units)
 * Copyright (c) 2008-2012 Haruhiko Yamagata (except the parts copied from decode_nal_units)
 *
 * @param[out] recovery_frame_cnt. Valid only if GDR.
 * @return   0: no recovery point, 1:I-frame 2:Recovery Point SEI (GDR), 3:IDR, -1:error
 */
int avcodec_h264_search_recovery_point(AVCodecContext *avctx,
                         const uint8_t *buf, int buf_size, int *recovery_frame_cnt)
{
    H264Context *h;
    MpegEncContext *s;
    int buf_index = 0;
    int found = 0; // 0: no recovery point, 1:Recovery Point SEI (GDR), 2:IDR
    AVCodecContext *users_MpegEncContext_avctx;

    avctx = get_thread0_avctx(avctx); // Next frame will start on thread 0, and we want to store SPS and PPS in the context of thread 0.
    h = avctx->priv_data;
    h->is_avc = avcodec_h264_decode_init_is_avc(avctx);
    s = &h->s;
    users_MpegEncContext_avctx = s->avctx; // save it and write back before return.

    if (s->avctx == NULL)
        s->avctx = avctx; // Hack, this function can be used before decoding, so we can't expect everything initialized.

    h->nal_length_size = avctx->nal_length_size ? avctx->nal_length_size : 4;

    for(;;){
        int consumed;
        int dst_length;
        int bit_length;
        const uint8_t *ptr;
        int i, nalsize = 0;

        if(h->is_avc) {
            if(buf_index >= buf_size) break;
            nalsize = 0;
            for(i = 0; i < h->nal_length_size; i++)
                nalsize = (nalsize << 8) | buf[buf_index++];
            if(nalsize <= 1 || (nalsize+buf_index > buf_size)){
                if(nalsize == 1){
                    buf_index++;
                    continue;
                }else{
                    av_log(h->s.avctx, AV_LOG_ERROR, "AVC: nal size %d\n", nalsize);
                    break;
                }
            }
        } else {
            // start code prefix search
            for(; buf_index + 3 < buf_size; buf_index++){
                // This should always succeed in the first iteration.
                if(buf[buf_index] == 0 && buf[buf_index+1] == 0 && buf[buf_index+2] == 1)
                    break;
            }

            if(buf_index+3 >= buf_size) break;

            buf_index+=3;
        }

        ptr= ff_h264_decode_nal(h, buf + buf_index, &dst_length, &consumed, h->is_avc ? nalsize : buf_size - buf_index);
        if (ptr==NULL || dst_length < 0){
            found = -1;
            goto end;
        }
        while (dst_length > 0 && ptr[dst_length - 1] == 0) // check the length before reading ptr[dst_length - 1]
            dst_length--;
        bit_length= !dst_length ? 0 : (8*dst_length - decode_rbsp_trailing(h, ptr + dst_length - 1));

        if (h->is_avc && (nalsize != consumed)){
            av_log(h->s.avctx, AV_LOG_ERROR, "AVC: Consumed only %d bytes instead of %d\n", consumed, nalsize);
            consumed= nalsize;
        }

        buf_index += consumed;


        switch(h->nal_unit_type){
        case NAL_SEI:
            if (ptr[0] == 6/* Recovery Point SEI */){
                init_get_bits(&s->gb, ptr, bit_length);
                ff_h264_decode_sei(h);
                if (found < 2)
                    found = 2;
                break;
            }
            break;
        case NAL_SPS:
            init_get_bits(&s->gb, ptr, bit_length);
            ff_h264_decode_seq_parameter_set(h);

            if(s->flags& CODEC_FLAG_LOW_DELAY)
                s->low_delay=1;

            if(avctx->has_b_frames < 2)
                avctx->has_b_frames= !s->low_delay;
            break;
        case NAL_PPS:
            init_get_bits(&s->gb, ptr, bit_length);

            ff_h264_decode_picture_parameter_set(h, bit_length);

            break;
        case NAL_AUD:
        {
            int primary_pic_type;
            init_get_bits(&s->gb, ptr, bit_length);
            primary_pic_type = get_bits(&s->gb, 3);
            if (found == 0 && (primary_pic_type == 0 || primary_pic_type == 3)) // I-frame (all I/SI slices)
                found = 1;
            break;
        }
        case NAL_IDR_SLICE:
        case NAL_SLICE:
        case NAL_DPA:
            // decode part of slice header and find I frame
            init_get_bits(&s->gb, ptr, bit_length);
            int first_mb_in_slice = get_ue_golomb(&s->gb);
            unsigned int slice_type= get_ue_golomb(&s->gb);
            if (!is_I_slice(slice_type))
                goto end;

            // at least one slice must start at the left top corner.
            // ASO should work, while FMO may not. FMO is not supported by current Libav anyway.
            if (first_mb_in_slice == 0) {
                if (h->nal_unit_type == NAL_IDR_SLICE) {
                    found = 3;
                    goto end;
                } else {
                    if (found == 0) {
                        found = 1;
                        goto end;
                    }
                }
            }
            break;
        case NAL_DPB:
        case NAL_DPC:
        case NAL_END_SEQUENCE:
        case NAL_END_STREAM:
        case NAL_FILLER_DATA:
        case NAL_SPS_EXT:
        case NAL_AUXILIARY_SLICE:
            break;
        default:
            av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n", h->nal_unit_type, bit_length);
        }

    }
    *recovery_frame_cnt = h->sei_recovery_frame_cnt;
end:
    s->avctx = users_MpegEncContext_avctx;
    return found;
}
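
A sketch of how a caller might use the return convention documented above. can_start_decoding_here() is a hypothetical helper; it assumes the surrounding file already includes the ffdshow libavcodec headers that declare AVCodecContext and avcodec_h264_search_recovery_point(), and that buf holds one demuxed access unit:

/* Hypothetical helper: decide whether decoding can safely (re)start on this
 * access unit, based on avcodec_h264_search_recovery_point() above. */
static int can_start_decoding_here(AVCodecContext *avctx,
                                   const uint8_t *buf, int buf_size)
{
    int recovery_frame_cnt = 0;
    int r = avcodec_h264_search_recovery_point(avctx, buf, buf_size,
                                               &recovery_frame_cnt);
    switch (r) {
    case 3: /* IDR: always a clean start */
    case 1: /* I-frame (all I/SI slices) */
        return 1;
    case 2: /* Recovery Point SEI (GDR): output is correct after
             * recovery_frame_cnt further frames */
        return recovery_frame_cnt == 0;
    case 0: /* no recovery point in this access unit */
    default: /* -1: parse error */
        return 0;
    }
}
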
Example #27
static int flac_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    FLACContext *s = avctx->priv_data;
    int bytes_read = 0;
    int ret;

    *got_frame_ptr = 0;

    if (s->max_framesize == 0) {
        s->max_framesize =
            ff_flac_get_max_frame_size(s->max_blocksize ? s->max_blocksize : FLAC_MAX_BLOCKSIZE,
                                       FLAC_MAX_CHANNELS, 32);
    }

    /* check that there is at least the smallest decodable amount of data.
       this amount corresponds to the smallest valid FLAC frame possible.
       FF F8 69 02 00 00 9A 00 00 34 46 */
    if (buf_size < FLAC_MIN_FRAME_SIZE)
        return buf_size;

    /* check for inline header */
    if (AV_RB32(buf) == MKBETAG('f','L','a','C')) {
        if (!s->got_streaminfo && parse_streaminfo(s, buf, buf_size)) {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header\n");
            return -1;
        }
        return get_metadata_size(buf, buf_size);
    }

    /* decode frame */
    init_get_bits(&s->gb, buf, buf_size*8);
    if (decode_frame(s) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "decode_frame() failed\n");
        return -1;
    }
    bytes_read = (get_bits_count(&s->gb)+7)/8;

    /* get output buffer */
    s->frame.nb_samples = s->blocksize;
    if ((ret = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    s->dsp.decorrelate[s->ch_mode](s->frame.data, s->decoded, s->channels,
                                   s->blocksize, s->sample_shift);

    if (bytes_read > buf_size) {
        av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", bytes_read - buf_size);
        return -1;
    }
    if (bytes_read < buf_size) {
        av_log(s->avctx, AV_LOG_DEBUG, "underread: %d orig size: %d\n",
               buf_size - bytes_read, buf_size);
    }

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;

    return bytes_read;
}
Example #28
static int rv10_decode_packet(AVCodecContext *avctx,
                             const uint8_t *buf, int buf_size, int buf_size2)
{
    MpegEncContext *s = avctx->priv_data;
    int mb_count, mb_pos, left, start_mb_x;

    init_get_bits(&s->gb, buf, buf_size*8);
    if(s->codec_id ==CODEC_ID_RV10)
        mb_count = rv10_decode_picture_header(s);
    else
        mb_count = rv20_decode_picture_header(s);
    if (mb_count < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "HEADER ERROR\n");
        return -1;
    }

    if (s->mb_x >= s->mb_width ||
        s->mb_y >= s->mb_height) {
        av_log(s->avctx, AV_LOG_ERROR, "POS ERROR %d %d\n", s->mb_x, s->mb_y);
        return -1;
    }
    mb_pos = s->mb_y * s->mb_width + s->mb_x;
    left = s->mb_width * s->mb_height - mb_pos;
    if (mb_count > left) {
        av_log(s->avctx, AV_LOG_ERROR, "COUNT ERROR\n");
        return -1;
    }

    if ((s->mb_x == 0 && s->mb_y == 0) || s->current_picture_ptr==NULL) {
        if(s->current_picture_ptr){ //FIXME write parser so we always have complete frames?
            ff_er_frame_end(s);
            MPV_frame_end(s);
            s->mb_x= s->mb_y = s->resync_mb_x = s->resync_mb_y= 0;
        }
        if(MPV_frame_start(s, avctx) < 0)
            return -1;
        ff_er_frame_start(s);
    }

    av_dlog(avctx, "qscale=%d\n", s->qscale);

    /* default quantization values */
    if(s->codec_id== CODEC_ID_RV10){
        if(s->mb_y==0) s->first_slice_line=1;
    }else{
        s->first_slice_line=1;
        s->resync_mb_x= s->mb_x;
    }
    start_mb_x= s->mb_x;
    s->resync_mb_y= s->mb_y;
    if(s->h263_aic){
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_aic_dc_scale_table;
    }else{
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
    }

    if(s->modified_quant)
        s->chroma_qscale_table= ff_h263_chroma_qscale_table;

    ff_set_qscale(s, s->qscale);

    s->rv10_first_dc_coded[0] = 0;
    s->rv10_first_dc_coded[1] = 0;
    s->rv10_first_dc_coded[2] = 0;
    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;
    ff_init_block_index(s);
    /* decode each macroblock */

    for(s->mb_num_left= mb_count; s->mb_num_left>0; s->mb_num_left--) {
        int ret;
        ff_update_block_index(s);
        av_dlog(avctx, "**mb x=%d y=%d\n", s->mb_x, s->mb_y);

        s->mv_dir = MV_DIR_FORWARD;
        s->mv_type = MV_TYPE_16X16;
        ret=ff_h263_decode_mb(s, s->block);

        if (ret != SLICE_ERROR && s->gb.size_in_bits < get_bits_count(&s->gb) && 8*buf_size2 >= get_bits_count(&s->gb)){
            av_log(avctx, AV_LOG_DEBUG, "update size from %d to %d\n", s->gb.size_in_bits, 8*buf_size2);
            s->gb.size_in_bits= 8*buf_size2;
            ret= SLICE_OK;
        }

        if (ret == SLICE_ERROR || s->gb.size_in_bits < get_bits_count(&s->gb)) {
            av_log(s->avctx, AV_LOG_ERROR, "ERROR at MB %d %d\n", s->mb_x, s->mb_y);
            return -1;
        }
        if(s->pict_type != FF_B_TYPE)
            ff_h263_update_motion_val(s);
        MPV_decode_mb(s, s->block);
        if(s->loop_filter)
            ff_h263_loop_filter(s);

        if (++s->mb_x == s->mb_width) {
            s->mb_x = 0;
            s->mb_y++;
            ff_init_block_index(s);
        }
        if(s->mb_x == s->resync_mb_x)
            s->first_slice_line=0;
        if(ret == SLICE_END) break;
    }

    ff_er_add_slice(s, start_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, AC_END|DC_END|MV_END);

    return s->gb.size_in_bits;
}
Example #29
static int sp5x_decode_frame(AVCodecContext *avctx,
                              void *data, int *data_size,
                              uint8_t *buf, int buf_size)
{
#if 0
    MJpegDecodeContext *s = avctx->priv_data;
#endif
    const int qscale = 5;
    uint8_t *buf_ptr, *buf_end, *recoded;
    int i = 0, j = 0;

    if (!avctx->width || !avctx->height)
        return -1;

    buf_ptr = buf;
    buf_end = buf + buf_size;

#if 1
    recoded = av_mallocz(buf_size + 1024);
    if (!recoded)
        return -1;

    /* SOI */
    recoded[j++] = 0xFF;
    recoded[j++] = 0xD8;

    memcpy(recoded+j, &sp5x_data_dqt[0], sizeof(sp5x_data_dqt));
    memcpy(recoded+j+5, &sp5x_quant_table[qscale * 2], 64);
    memcpy(recoded+j+70, &sp5x_quant_table[(qscale * 2) + 1], 64);
    j += sizeof(sp5x_data_dqt);

    memcpy(recoded+j, &sp5x_data_dht[0], sizeof(sp5x_data_dht));
    j += sizeof(sp5x_data_dht);

    memcpy(recoded+j, &sp5x_data_sof[0], sizeof(sp5x_data_sof));
    AV_WB16(recoded+j+5, avctx->coded_height);
    AV_WB16(recoded+j+7, avctx->coded_width);
    j += sizeof(sp5x_data_sof);

    memcpy(recoded+j, &sp5x_data_sos[0], sizeof(sp5x_data_sos));
    j += sizeof(sp5x_data_sos);

    for (i = 14; i < buf_size && j < buf_size+1024-2; i++)
    {
        recoded[j++] = buf[i];
        if (buf[i] == 0xff)
            recoded[j++] = 0;
    }

    /* EOI */
    recoded[j++] = 0xFF;
    recoded[j++] = 0xD9;

    i = ff_mjpeg_decode_frame(avctx, data, data_size, recoded, j);

    av_free(recoded);

#else
    /* SOF */
    s->bits = 8;
    s->width  = avctx->coded_width;
    s->height = avctx->coded_height;
    s->nb_components = 3;
    s->component_id[0] = 0;
    s->h_count[0] = 2;
    s->v_count[0] = 2;
    s->quant_index[0] = 0;
    s->component_id[1] = 1;
    s->h_count[1] = 1;
    s->v_count[1] = 1;
    s->quant_index[1] = 1;
    s->component_id[2] = 2;
    s->h_count[2] = 1;
    s->v_count[2] = 1;
    s->quant_index[2] = 1;
    s->h_max = 2;
    s->v_max = 2;

    s->qscale_table = av_mallocz((s->width+15)/16);
    avctx->pix_fmt = s->cs_itu601 ? PIX_FMT_YUV420P : PIX_FMT_YUVJ420P;
    s->interlaced = 0;

    s->picture.reference = 0;
    if (avctx->get_buffer(avctx, &s->picture) < 0)
    {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    s->picture.pict_type = I_TYPE;
    s->picture.key_frame = 1;

    for (i = 0; i < 3; i++)
        s->linesize[i] = s->picture.linesize[i] << s->interlaced;

    /* DQT */
    for (i = 0; i < 64; i++)
    {
        j = s->scantable.permutated[i];
        s->quant_matrixes[0][j] = sp5x_quant_table[(qscale * 2) + i];
    }
    s->qscale[0] = FFMAX(
        s->quant_matrixes[0][s->scantable.permutated[1]],
        s->quant_matrixes[0][s->scantable.permutated[8]]) >> 1;

    for (i = 0; i < 64; i++)
    {
        j = s->scantable.permutated[i];
        s->quant_matrixes[1][j] = sp5x_quant_table[(qscale * 2) + 1 + i];
    }
    s->qscale[1] = FFMAX(
        s->quant_matrixes[1][s->scantable.permutated[1]],
        s->quant_matrixes[1][s->scantable.permutated[8]]) >> 1;

    /* DHT */

    /* SOS */
    s->comp_index[0] = 0;
    s->nb_blocks[0] = s->h_count[0] * s->v_count[0];
    s->h_scount[0] = s->h_count[0];
    s->v_scount[0] = s->v_count[0];
    s->dc_index[0] = 0;
    s->ac_index[0] = 0;

    s->comp_index[1] = 1;
    s->nb_blocks[1] = s->h_count[1] * s->v_count[1];
    s->h_scount[1] = s->h_count[1];
    s->v_scount[1] = s->v_count[1];
    s->dc_index[1] = 1;
    s->ac_index[1] = 1;

    s->comp_index[2] = 2;
    s->nb_blocks[2] = s->h_count[2] * s->v_count[2];
    s->h_scount[2] = s->h_count[2];
    s->v_scount[2] = s->v_count[2];
    s->dc_index[2] = 1;
    s->ac_index[2] = 1;

    for (i = 0; i < 3; i++)
        s->last_dc[i] = 1024;

    s->mb_width = (s->width * s->h_max * 8 -1) / (s->h_max * 8);
    s->mb_height = (s->height * s->v_max * 8 -1) / (s->v_max * 8);

    init_get_bits(&s->gb, buf+14, (buf_size-14)*8);

    return mjpeg_decode_scan(s);
#endif

    return i;
}
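
A standalone sketch of the byte-stuffing step in the recoding loop above: SP5X payload bytes are copied into the rebuilt JPEG stream, and every 0xFF must be followed by a 0x00 stuffing byte so it is not read as a marker. jpeg_stuff_bytes() is a hypothetical helper, not part of the decoder:

#include <stdio.h>
#include <stdint.h>

/* Copy n entropy-coded bytes into dst, inserting a 0x00 after each 0xFF,
 * exactly like the loop in sp5x_decode_frame(). Returns the number of
 * bytes written; dst must have room for up to 2 * n bytes. */
static int jpeg_stuff_bytes(uint8_t *dst, const uint8_t *src, int n)
{
    int i, j = 0;
    for (i = 0; i < n; i++) {
        dst[j++] = src[i];
        if (src[i] == 0xFF)
            dst[j++] = 0x00;
    }
    return j;
}

int main(void)
{
    const uint8_t in[4] = { 0x12, 0xFF, 0x34, 0xFF };
    uint8_t out[8];
    int n = jpeg_stuff_bytes(out, in, 4);

    for (int i = 0; i < n; i++)
        printf("%02X ", out[i]);   /* expected: 12 FF 00 34 FF 00 */
    printf("\n");
    return 0;
}
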
Example #30
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ASV1Context *const a = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    AVFrame *const p = data;
    int mb_x, mb_y, ret;

    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size,
                          buf_size);
    if (!a->bitstream_buffer)
        return AVERROR(ENOMEM);

    if (avctx->codec_id == AV_CODEC_ID_ASV1) {
        a->bbdsp.bswap_buf((uint32_t *) a->bitstream_buffer,
                           (const uint32_t *) buf, buf_size / 4);
    } else {
        int i;
        for (i = 0; i < buf_size; i++)
            a->bitstream_buffer[i] = ff_reverse[buf[i]];
    }

    init_get_bits(&a->gb, a->bitstream_buffer, buf_size * 8);

    for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
        for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
            if ((ret = decode_mb(a, a->block)) < 0)
                return ret;

            idct_put(a, p, mb_x, mb_y);
        }
    }

    if (a->mb_width2 != a->mb_width) {
        mb_x = a->mb_width2;
        for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
            if ((ret = decode_mb(a, a->block)) < 0)
                return ret;

            idct_put(a, p, mb_x, mb_y);
        }
    }

    if (a->mb_height2 != a->mb_height) {
        mb_y = a->mb_height2;
        for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
            if ((ret = decode_mb(a, a->block)) < 0)
                return ret;

            idct_put(a, p, mb_x, mb_y);
        }
    }

    *got_frame = 1;

    emms_c();

    return (get_bits_count(&a->gb) + 31) / 32 * 4;
}