static int codec_reinit(AVCodecContext *avctx, int width, int height, int quality) {
    NuvContext *c = (NuvContext *)avctx->priv_data;
    width  = FFALIGN(width,  2);
    height = FFALIGN(height, 2);
    if (quality >= 0)
        get_quant_quality(c, quality);
    if (width != c->width || height != c->height) {
        // also reserve space for a possible additional header
        int buf_size = 24 + height * width * 3 / 2 + AV_LZO_OUTPUT_PADDING;
        if (av_image_check_size(height, width, 0, avctx) < 0 ||
            buf_size > INT_MAX/8)
            return -1;
        avctx->width = c->width = width;
        avctx->height = c->height = height;
        av_fast_malloc(&c->decomp_buf, &c->decomp_size, buf_size);
        if (!c->decomp_buf) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return AVERROR(ENOMEM);
        }
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
        return 1;
    } else if (quality != c->quality)
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
    return 0;
}
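All the examples on this page share one pattern: av_fast_malloc() only reallocates when the requested size exceeds the tracked allocation, so on the per-frame fast path it is a no-op. A minimal sketch of that contract (the Ctx struct and ensure_buffer() are hypothetical, not part of FFmpeg):

#include <stdint.h>
#include <libavutil/mem.h>
#include <libavutil/error.h>

typedef struct {            /* hypothetical decoder context */
    uint8_t     *buf;
    unsigned int buf_size;  /* av_fast_malloc() tracks the allocation here */
} Ctx;

static int ensure_buffer(Ctx *c, size_t needed)
{
    /* no-op if buf_size >= needed; otherwise frees and reallocates
     * (old contents are NOT preserved, unlike av_fast_realloc()) */
    av_fast_malloc(&c->buf, &c->buf_size, needed);
    if (!c->buf)
        return AVERROR(ENOMEM); /* on failure buf is NULL and buf_size is 0 */
    return 0;
}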
Example #2
/*
 * ZSL
 * Despite its name, this function does not actually decode anything: it takes
 * an af (a Frame holding an audio frame) out of is->sampq, runs the data in
 * af->frame (an AVFrame) through swr_convert(), and stores the result in
 * is->audio_buf. Returns the number of bytes stored (i.e. the size after
 * resampling).
 */
int audio_decode_frame(VideoState *is) {
	int resampled_data_size, out_size;
	Frame *af;
	af = frame_queue_peek_readable(&is->sampq);
	if (!af)
		return -1;
	frame_queue_next(&is->sampq);
	if (!is->swr_ctx) {
		is->swr_ctx = swr_alloc_set_opts(NULL,
			AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, is->audio_ctx->sample_rate,
			av_get_default_channel_layout(is->audio_ctx->channels), is->audio_ctx->sample_fmt, is->audio_ctx->sample_rate,
			0, NULL);
		if (!is->swr_ctx || swr_init(is->swr_ctx) < 0)
			return -1;
	}
	const uint8_t **in = (const uint8_t **)af->frame->extended_data;
	uint8_t **out = &is->audio_buf;
	out_size = av_samples_get_buffer_size(NULL, 2, af->frame->nb_samples, AV_SAMPLE_FMT_S16, 1);
	if (out_size < 0) { /* e.g. when af->frame->nb_samples == 0; handle it here, or av_fast_malloc() below will misbehave */
		av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
		return -1;
	}
	int len2;
	av_fast_malloc(&is->audio_buf, &is->audio_buf_size, out_size);
	if (!is->audio_buf)
		return -1;
	len2 = swr_convert(is->swr_ctx, out, af->frame->nb_samples, in, af->frame->nb_samples);
	if (len2 < 0)
		return -1;
	resampled_data_size = len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
	return resampled_data_size;
}
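Because the converter above keeps the sample rate unchanged, passing nb_samples as the output count is safe. If the rates differed, an upper bound from swr_get_out_samples() would be needed; a hedged sketch (swr_get_out_samples() exists in newer libswresample; treat the exact version requirement as an assumption):

/* sketch: upper-bound byte size of the S16 stereo output for one frame */
static int out_bytes_needed(SwrContext *swr, int in_samples)
{
    int max_out = swr_get_out_samples(swr, in_samples); /* includes buffered input */
    if (max_out < 0)
        return max_out;
    return av_samples_get_buffer_size(NULL, 2, max_out, AV_SAMPLE_FMT_S16, 1);
}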
Example #3
static int pmp_packet(AVFormatContext *s, AVPacket *pkt) {
    PMPContext *pmp = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    int i;

    if (url_feof(pb))
        return AVERROR_EOF;
    if (pmp->cur_stream == 0) {
        int num_packets;
        pmp->audio_packets = avio_r8(pb);
        if (!pmp->audio_packets) {
            av_log(s, AV_LOG_ERROR, "No audio packets.\n");
            return AVERROR_INVALIDDATA;
        }
        num_packets = (pmp->num_streams - 1) * pmp->audio_packets + 1;
        avio_skip(pb, 8);
        pmp->current_packet = 0;
        av_fast_malloc(&pmp->packet_sizes,
                       &pmp->packet_sizes_alloc,
                       num_packets * sizeof(*pmp->packet_sizes));
        if (!pmp->packet_sizes_alloc) {
            av_log(s, AV_LOG_ERROR, "Cannot (re)allocate packet buffer\n");
            return AVERROR(ENOMEM);
        }
        for (i = 0; i < num_packets; i++)
            pmp->packet_sizes[i] = avio_rl32(pb);
    }
    ret = av_get_packet(pb, pkt, pmp->packet_sizes[pmp->current_packet]);
    if (ret >= 0) {
        ret = 0;
        // FIXME: this is a hack that should be removed once
        // compute_pkt_fields() can handle timestamps properly
        if (pmp->cur_stream == 0)
            pkt->dts = s->streams[0]->cur_dts++;
        pkt->stream_index = pmp->cur_stream;
    }
    if (pmp->current_packet % pmp->audio_packets == 0)
        pmp->cur_stream = (pmp->cur_stream + 1) % pmp->num_streams;
    pmp->current_packet++;
    return ret;
}
Example #4
static int mp_decode_frame(AVCodecContext *avctx,
                           void *data, int *data_size,
                           AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MotionPixelsContext *mp = avctx->priv_data;
    GetBitContext gb;
    int i, count1, count2, sz;

    mp->frame.reference = 1;
    mp->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &mp->frame)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    /* le32 bitstream msb first */
    av_fast_malloc(&mp->bswapbuf, &mp->bswapbuf_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!mp->bswapbuf)
        return AVERROR(ENOMEM);
    mp->dsp.bswap_buf((uint32_t *)mp->bswapbuf, (const uint32_t *)buf, buf_size / 4);
    if (buf_size & 3)
        memcpy(mp->bswapbuf + (buf_size & ~3), buf + (buf_size & ~3), buf_size & 3);
    memset(mp->bswapbuf + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    init_get_bits(&gb, mp->bswapbuf, buf_size * 8);

    memset(mp->changes_map, 0, avctx->width * avctx->height);
    for (i = !(avctx->extradata[1] & 2); i < 2; ++i) {
        count1 = get_bits(&gb, 12);
        count2 = get_bits(&gb, 12);
        mp_read_changes_map(mp, &gb, count1, 8, i);
        mp_read_changes_map(mp, &gb, count2, 4, i);
    }

    mp->codes_count = get_bits(&gb, 4);
    if (mp->codes_count == 0)
        goto end;

    if (mp->changes_map[0] == 0) {
        *(uint16_t *)mp->frame.data[0] = get_bits(&gb, 15);
        mp->changes_map[0] = 1;
    }
    mp_read_codes_table(mp, &gb);

    sz = get_bits(&gb, 18);
    if (avctx->extradata[0] != 5)
        sz += get_bits(&gb, 18);
    if (sz == 0)
        goto end;

    init_vlc(&mp->vlc, mp->max_codes_bits, mp->codes_count, &mp->codes[0].size, sizeof(HuffCode), 1, &mp->codes[0].code, sizeof(HuffCode), 4, 0);
    mp_decode_frame_helper(mp, &gb);
    free_vlc(&mp->vlc);

end:
    *data_size = sizeof(AVFrame);
    *(AVFrame *)data = mp->frame;
    return buf_size;
}
Example #5
static int gif_read_header1(GifState *s)
{
    uint8_t sig[6];
    int v, n;
    int background_color_index;

    if (bytestream2_get_bytes_left(&s->gb) < 13)
        return AVERROR_INVALIDDATA;

    /* read gif signature */
    bytestream2_get_bufferu(&s->gb, sig, 6);
    if (memcmp(sig, gif87a_sig, 6) != 0 &&
        memcmp(sig, gif89a_sig, 6) != 0)
        return AVERROR_INVALIDDATA;

    /* read screen header */
    s->transparent_color_index = -1;
    s->screen_width = bytestream2_get_le16u(&s->gb);
    s->screen_height = bytestream2_get_le16u(&s->gb);
    if(   (unsigned)s->screen_width  > 32767
       || (unsigned)s->screen_height > 32767){
        av_log(s->avctx, AV_LOG_ERROR, "picture size too large\n");
        return AVERROR_INVALIDDATA;
    }

    av_fast_malloc(&s->idx_line, &s->idx_line_size, s->screen_width);
    if (!s->idx_line)
        return AVERROR(ENOMEM);

    v = bytestream2_get_byteu(&s->gb);
    s->color_resolution = ((v & 0x70) >> 4) + 1;
    s->has_global_palette = (v & 0x80);
    s->bits_per_pixel = (v & 0x07) + 1;
    background_color_index = bytestream2_get_byteu(&s->gb);
    n = bytestream2_get_byteu(&s->gb);
    if (n) {
        s->avctx->sample_aspect_ratio.num = n + 15;
        s->avctx->sample_aspect_ratio.den = 64;
    }

    av_dlog(s->avctx, "screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
           s->screen_width, s->screen_height, s->bits_per_pixel,
           s->has_global_palette);

    if (s->has_global_palette) {
        s->background_color_index = background_color_index;
        n = 1 << s->bits_per_pixel;
        if (bytestream2_get_bytes_left(&s->gb) < n * 3)
            return AVERROR_INVALIDDATA;

        gif_read_palette(s, s->global_palette, n);
        s->bg_color = s->global_palette[s->background_color_index];
    } else
        s->background_color_index = -1;

    return 0;
}
Example #6
/**
 * Parses the picture segment packet.
 *
 * The picture segment contains details on the sequence id,
 * width, height and Run Length Encoded (RLE) bitmap data.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Enable support for RLE data over multiple packets
 */
static int parse_picture_segment(AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    uint8_t sequence_desc;
    unsigned int rle_bitmap_len, width, height;

    /* skip 3 unknown bytes: Object ID (2 bytes), Version Number */
    buf += 3;

    /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
    sequence_desc = bytestream_get_byte(&buf);

    if (!(sequence_desc & 0x80)) {
        av_log(avctx, AV_LOG_ERROR, "Decoder does not support object data over multiple packets.\n");
        return -1;
    }

    /* Decode rle bitmap length */
    rle_bitmap_len = bytestream_get_be24(&buf);

    /* Check to ensure we have enough data for rle_bitmap_length if just a single packet */
    if (rle_bitmap_len > buf_size - 7) {
        av_log(avctx, AV_LOG_ERROR, "Not enough RLE data for specified length of %d.\n", rle_bitmap_len);
        return -1;
    }

    ctx->picture.rle_data_len = rle_bitmap_len;

    /* Get bitmap dimensions from data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);

    /* Make sure the bitmap is not too large */
    if (ctx->presentation.video_w < width || ctx->presentation.video_h < height) {
        av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger then video.\n");
        return -1;
    }

    ctx->picture.w = width;
    ctx->picture.h = height;

    av_fast_malloc(&ctx->picture.rle, &ctx->picture.rle_buffer_size, rle_bitmap_len);

    if (!ctx->picture.rle)
        return -1;

    memcpy(ctx->picture.rle, buf, rle_bitmap_len);

    return 0;
}
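One caveat in the length check above: rle_bitmap_len is unsigned, so when buf_size < 7 the expression buf_size - 7 is converted to a huge unsigned value and the guard never fires. Example #14 below validates buf_size before subtracting; a minimal sketch of the same guard:

    if (buf_size <= 7)   /* reject before the subtraction can wrap */
        return -1;
    if (rle_bitmap_len > (unsigned)(buf_size - 7)) {
        av_log(avctx, AV_LOG_ERROR,
               "Not enough RLE data for specified length of %d.\n", rle_bitmap_len);
        return -1;
    }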
Example #7
// Filter data through filter
static af_data_t* play(struct af_instance_s* af, af_data_t* data)
{
  af_resample_t *s = af->setup;
  int ret;
  int8_t *in = (int8_t*)data->audio;
  int8_t *out;
  int chans   = data->nch;
  int in_len  = data->len;
  int out_len = in_len * af->mul + 10;

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
      return NULL;

  av_fast_malloc(&s->tmp[0], &s->tmp_alloc, FFALIGN(out_len,32));
  if(s->tmp[0] == NULL) return NULL;

  out= (int8_t*)af->data->audio;

  out_len= FFMIN(out_len, af->data->len);

  av_fast_malloc(&s->in[0], &s->in_alloc, FFALIGN(in_len,32));
  if(s->in[0] == NULL) return NULL;

  memcpy(s->in[0], in, in_len);

  ret = swr_convert(s->swrctx, &s->tmp[0], out_len/chans/2, &s->in[0], in_len/chans/2);
  if (ret < 0) return NULL;
  out_len= ret*chans*2;

  memcpy(out, s->tmp[0], out_len);

  data->audio = af->data->audio;
  data->len   = out_len;
  data->rate  = af->data->rate;
  return data;
}
Example #8
static int allocate_buffers(FLACContext *s)
{
    int buf_size;

    buf_size = av_samples_get_buffer_size(NULL, s->channels, s->max_blocksize,
                                          AV_SAMPLE_FMT_S32P, 0);
    if (buf_size < 0)
        return buf_size;

    av_fast_malloc(&s->decoded_buffer, &s->decoded_buffer_size, buf_size);
    if (!s->decoded_buffer)
        return AVERROR(ENOMEM);

    return av_samples_fill_arrays((uint8_t **)s->decoded, NULL,
                                  s->decoded_buffer, s->channels,
                                  s->max_blocksize, AV_SAMPLE_FMT_S32P, 0);
}
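For reference, here is what the two helpers compute for a hypothetical stereo stream with max_blocksize 4096 and AV_SAMPLE_FMT_S32P (planar, 4 bytes per sample), assuming buf points at an allocation of at least size bytes:

    uint8_t *planes[2];
    int size = av_samples_get_buffer_size(NULL, 2, 4096, AV_SAMPLE_FMT_S32P, 0);
    /* size == 2 channels * 4096 samples * 4 bytes == 32768 */
    av_samples_fill_arrays(planes, NULL, buf, 2, 4096, AV_SAMPLE_FMT_S32P, 0);
    /* planar layout: planes[0] == buf, planes[1] == buf + 16384 */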
Example #9
static int pmp_packet(AVFormatContext *s, AVPacket *pkt)
{
    PMPContext *pmp = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    int i;

    if (url_feof(pb))
        return AVERROR_EOF;
    if (pmp->cur_stream == 0) {
        int num_packets;
        pmp->audio_packets = avio_r8(pb);
        if (!pmp->audio_packets) {
            avpriv_request_sample(s, "0 audio packets");
            return AVERROR_PATCHWELCOME;
        }
        num_packets = (pmp->num_streams - 1) * pmp->audio_packets + 1;
        avio_skip(pb, 8);
        pmp->current_packet = 0;
        av_fast_malloc(&pmp->packet_sizes,
                       &pmp->packet_sizes_alloc,
                       num_packets * sizeof(*pmp->packet_sizes));
        if (!pmp->packet_sizes_alloc) {
            av_log(s, AV_LOG_ERROR, "Cannot (re)allocate packet buffer\n");
            return AVERROR(ENOMEM);
        }
        for (i = 0; i < num_packets; i++)
            pmp->packet_sizes[i] = avio_rl32(pb);
    }
    ret = av_get_packet(pb, pkt, pmp->packet_sizes[pmp->current_packet]);
    if (ret >= 0) {
        ret = 0;
        // FIXME: this is a hack that should be removed once
        // compute_pkt_fields() can handle timestamps properly
        if (pmp->cur_stream == 0)
            pkt->dts = s->streams[0]->cur_dts++;
        pkt->stream_index = pmp->cur_stream;
    }
    if (pmp->current_packet % pmp->audio_packets == 0)
        pmp->cur_stream = (pmp->cur_stream + 1) % pmp->num_streams;
    pmp->current_packet++;
    return ret;
}
Example #10
static av_cold int truemotion1_decode_init(AVCodecContext *avctx)
{
    TrueMotion1Context *s = avctx->priv_data;

    s->avctx = avctx;

    // FIXME: it may change?
//    if (avctx->bits_per_sample == 24)
//        avctx->pix_fmt = PIX_FMT_RGB24;
//    else
//        avctx->pix_fmt = PIX_FMT_RGB555;

    s->frame.data[0] = NULL;

    /* there is a vertical predictor for each pixel in a line; each vertical
     * predictor is 0 to start with */
    av_fast_malloc(&s->vert_pred, &s->vert_pred_size, s->avctx->width * sizeof(unsigned int));
    if (!s->vert_pred)
        return AVERROR(ENOMEM);

    return 0;
}
Example #11
static int pmp_packet(AVFormatContext *s, AVPacket *pkt)
{
    PMPContext *pmp = s->priv_data;
    AVIOContext *pb = s->pb;
    int ret = 0;
    int i;

    if (avio_feof(pb))
        return AVERROR_EOF;
    if (pmp->cur_stream == 0) {
        int num_packets;
        pmp->audio_packets = avio_r8(pb);

        if (!pmp->audio_packets) {
            av_log(s, AV_LOG_ERROR, "No audio packets.\n");
            return AVERROR_INVALIDDATA;
        }

        num_packets = (pmp->num_streams - 1) * pmp->audio_packets + 1;
        avio_skip(pb, 8);
        pmp->current_packet = 0;
        av_fast_malloc(&pmp->packet_sizes,
                       &pmp->packet_sizes_alloc,
                       num_packets * sizeof(*pmp->packet_sizes));
        if (!pmp->packet_sizes_alloc) {
            av_log(s, AV_LOG_ERROR, "Cannot (re)allocate packet buffer\n");
            return AVERROR(ENOMEM);
        }
        for (i = 0; i < num_packets; i++)
            pmp->packet_sizes[i] = avio_rl32(pb);
    }
    ret = av_get_packet(pb, pkt, pmp->packet_sizes[pmp->current_packet]);
    if (ret >= 0) {
        ret = 0;
        pkt->stream_index = pmp->cur_stream;
    }
    if (pmp->current_packet % pmp->audio_packets == 0)
        pmp->cur_stream = (pmp->cur_stream + 1) % pmp->num_streams;
    pmp->current_packet++;
    return ret;
}
Example #12
static int codec_reinit(AVCodecContext *avctx, int width, int height, int quality) {
    NuvContext *c = avctx->priv_data;
    width = (width + 1) & ~1;
    height = (height + 1) & ~1;
    if (quality >= 0)
        get_quant_quality(c, quality);
    if (width != c->width || height != c->height) {
        if (av_image_check_size(height, width, 0, avctx) < 0)
            return 0;
        avctx->width = c->width = width;
        avctx->height = c->height = height;
        av_fast_malloc(&c->decomp_buf, &c->decomp_size, c->height * c->width * 3 / 2);
        if (!c->decomp_buf) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return 0;
        }
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
    } else if (quality != c->quality)
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
    return 1;
}
Example #13
static int codec_reinit(AVCodecContext *avctx, int width, int height,
                        int quality)
{
    NuvContext *c = avctx->priv_data;
    int ret;

    width  = FFALIGN(width,  2);
    height = FFALIGN(height, 2);
    if (quality >= 0)
        get_quant_quality(c, quality);
    if (width != c->width || height != c->height) {
        // also reserve space for a possible additional header
        int buf_size = height * width * 3 / 2
                     + FFMAX(AV_LZO_OUTPUT_PADDING, FF_INPUT_BUFFER_PADDING_SIZE)
                     + RTJPEG_HEADER_SIZE;
        if (buf_size > INT_MAX/8)
            return -1;
        if ((ret = av_image_check_size(height, width, 0, avctx)) < 0)
            return ret;
        avctx->width  = c->width  = width;
        avctx->height = c->height = height;
        av_fast_malloc(&c->decomp_buf, &c->decomp_size,
                       buf_size);
        if (!c->decomp_buf) {
            av_log(avctx, AV_LOG_ERROR,
                   "Can't allocate decompression buffer.\n");
            return AVERROR(ENOMEM);
        }
        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);
        av_frame_unref(c->pic);
        return 1;
    } else if (quality != c->quality)
        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);

    return 0;
}
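The 3/2 factor in every codec_reinit variant is plain YUV 4:2:0 sizing: one full-resolution luma plane plus two quarter-resolution chroma planes. Worked through for hypothetical 640x480 input:

    /* sketch of the YUV 4:2:0 arithmetic for a 640x480 frame */
    int w = 640, h = 480;
    int luma   = w * h;                 /* 307200 bytes, full resolution      */
    int chroma = 2 * (w / 2) * (h / 2); /* 153600 bytes, U and V quarter-res  */
    int total  = w * h * 3 / 2;         /* 460800 == luma + chroma, + padding */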
Example #14
/**
 * Parse the picture segment packet.
 *
 * The picture segment contains details on the sequence id,
 * width, height and Run Length Encoded (RLE) bitmap data.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo TODO: Enable support for RLE data over multiple packets
 */
static int parse_picture_segment(AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    uint8_t sequence_desc;
    unsigned int rle_bitmap_len, width, height;

    if (buf_size <= 4)
        return -1;
    buf_size -= 4;

    /* skip 3 unknown bytes: Object ID (2 bytes), Version Number */
    buf += 3;

    /* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
    sequence_desc = bytestream_get_byte(&buf);

    if (!(sequence_desc & 0x80)) {
        /* Additional RLE data */
        if (buf_size > ctx->picture.rle_remaining_len)
            return -1;

        memcpy(ctx->picture.rle + ctx->picture.rle_data_len, buf, buf_size);
        ctx->picture.rle_data_len += buf_size;
        ctx->picture.rle_remaining_len -= buf_size;

        return 0;
    }

    if (buf_size <= 7)
        return -1;
    buf_size -= 7;

    /* Decode rle bitmap length, stored size includes width/height data */
    rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;

    /* Get bitmap dimensions from data */
    width  = bytestream_get_be16(&buf);
    height = bytestream_get_be16(&buf);

    /* Make sure the bitmap is not too large */
    if (avctx->width < width || avctx->height < height) {
        av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions larger then video.\n");
        return -1;
    }

    ctx->picture.w = width;
    ctx->picture.h = height;

    av_fast_malloc(&ctx->picture.rle, &ctx->picture.rle_buffer_size, rle_bitmap_len);

    if (!ctx->picture.rle)
        return -1;

    memcpy(ctx->picture.rle, buf, buf_size);
    ctx->picture.rle_data_len = buf_size;
    ctx->picture.rle_remaining_len = rle_bitmap_len - buf_size;

    return 0;
}
Example #15
static int submit_packet(PerThreadContext *p, AVPacket *avpkt)
{
    FrameThreadContext *fctx = p->parent;
    PerThreadContext *prev_thread = fctx->prev_thread;
    AVCodec *codec = p->avctx->codec;
    uint8_t *buf = p->avpkt.data;

    if (!avpkt->size && !(codec->capabilities & CODEC_CAP_DELAY)) return 0;

    pthread_mutex_lock(&p->mutex);

    release_delayed_buffers(p);

    if (prev_thread) {
        int err;
        if (prev_thread->state == STATE_SETTING_UP) {
            pthread_mutex_lock(&prev_thread->progress_mutex);
            while (prev_thread->state == STATE_SETTING_UP)
                pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
            pthread_mutex_unlock(&prev_thread->progress_mutex);
        }

        err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
        if (err) {
            pthread_mutex_unlock(&p->mutex);
            return err;
        }
    }

    av_fast_malloc(&buf, &p->allocated_buf_size, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!buf) {
        pthread_mutex_unlock(&p->mutex);
        return AVERROR(ENOMEM);
    }
    p->avpkt = *avpkt;
    p->avpkt.data = buf;
    memcpy(buf, avpkt->data, avpkt->size);
    memset(buf + avpkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    p->state = STATE_SETTING_UP;
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);

    /*
     * If the client doesn't have a thread-safe get_buffer(),
     * then decoding threads call back to the main thread,
     * and it calls back to the client here.
     */

    if (!p->avctx->thread_safe_callbacks &&
         p->avctx->get_buffer != avcodec_default_get_buffer) {
        while (p->state != STATE_SETUP_FINISHED && p->state != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (p->state == STATE_SETTING_UP)
                pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

            if (p->state == STATE_GET_BUFFER) {
                p->result = p->avctx->get_buffer(p->avctx, p->requested_frame);
                p->state  = STATE_SETTING_UP;
                pthread_cond_signal(&p->progress_cond);
            }
            pthread_mutex_unlock(&p->progress_mutex);
        }
    }

    fctx->prev_thread = p;
    fctx->next_decoding++;

    return 0;
}
Example #16
/* Returns the number of bytes consumed from the bytestream. Returns -1 if
 * there was an error while decoding the header */
static int truemotion1_decode_header(TrueMotion1Context *s)
{
    int i;
    int width_shift = 0;
    int new_pix_fmt;
    struct frame_header header;
    uint8_t header_buffer[128];  /* logical maximum size of the header */
    const uint8_t *sel_vector_table;

    header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f;
    if (s->buf[0] < 0x10)
    {
        av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]);
        return -1;
    }

    /* unscramble the header bytes with a XOR operation */
    memset(header_buffer, 0, 128);
    for (i = 1; i < header.header_size; i++)
        header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1];

    header.compression = header_buffer[0];
    header.deltaset = header_buffer[1];
    header.vectable = header_buffer[2];
    header.ysize = AV_RL16(&header_buffer[3]);
    header.xsize = AV_RL16(&header_buffer[5]);
    header.checksum = AV_RL16(&header_buffer[7]);
    header.version = header_buffer[9];
    header.header_type = header_buffer[10];
    header.flags = header_buffer[11];
    header.control = header_buffer[12];

    /* Version 2 */
    if (header.version >= 2)
    {
        if (header.header_type > 3)
        {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type);
            return -1;
        } else if ((header.header_type == 2) || (header.header_type == 3)) {
            s->flags = header.flags;
            if (!(s->flags & FLAG_INTERFRAME))
                s->flags |= FLAG_KEYFRAME;
        } else
            s->flags = FLAG_KEYFRAME;
    } else /* Version 1 */
        s->flags = FLAG_KEYFRAME;

    if (s->flags & FLAG_SPRITE) {
        av_log(s->avctx, AV_LOG_INFO, "SPRITE frame found, please report the sample to the developers\n");
        /* FIXME header.width, height, xoffset and yoffset aren't initialized */
#if 0
        s->w = header.width;
        s->h = header.height;
        s->x = header.xoffset;
        s->y = header.yoffset;
#else
        return -1;
#endif
    } else {
        s->w = header.xsize;
        s->h = header.ysize;
        if (header.header_type < 2) {
            if ((s->w < 213) && (s->h >= 176))
            {
                s->flags |= FLAG_INTERPOLATED;
                av_log(s->avctx, AV_LOG_INFO, "INTERPOLATION selected, please report the sample to the developers\n");
            }
        }
    }

    if (header.compression >= 17) {
        av_log(s->avctx, AV_LOG_ERROR, "invalid compression type (%d)\n", header.compression);
        return -1;
    }

    if ((header.deltaset != s->last_deltaset) ||
        (header.vectable != s->last_vectable))
        select_delta_tables(s, header.deltaset);

    if ((header.compression & 1) && header.header_type)
        sel_vector_table = pc_tbl2;
    else {
        if (header.vectable > 0 && header.vectable < 4)
            sel_vector_table = tables[header.vectable - 1];
        else {
            av_log(s->avctx, AV_LOG_ERROR, "invalid vector table id (%d)\n", header.vectable);
            return -1;
        }
    }

    if (compression_types[header.compression].algorithm == ALGO_RGB24H) {
        new_pix_fmt = PIX_FMT_RGB32;
        width_shift = 1;
    } else
        new_pix_fmt = PIX_FMT_RGB555; // RGB565 is supported as well

    s->w >>= width_shift;
    if (av_image_check_size(s->w, s->h, 0, s->avctx) < 0)
        return -1;

    if (s->w != s->avctx->width || s->h != s->avctx->height ||
        new_pix_fmt != s->avctx->pix_fmt) {
        if (s->frame.data[0])
            s->avctx->release_buffer(s->avctx, &s->frame);
        s->avctx->sample_aspect_ratio = (AVRational){ 1 << width_shift, 1 };
        s->avctx->pix_fmt = new_pix_fmt;
        avcodec_set_dimensions(s->avctx, s->w, s->h);
        av_fast_malloc(&s->vert_pred, &s->vert_pred_size, s->avctx->width * sizeof(unsigned int));
    }

    /* There is 1 change bit per 4 pixels, so each change byte represents
     * 32 pixels; divide width by 4 to obtain the number of change bits and
     * then round up to the nearest byte. */
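    /* Worked example (hypothetical input): width 320, width_shift 0 gives
     * 320 >> 2 = 80 change bits per row, and (80 + 7) >> 3 = 10 bytes per row. */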
    s->mb_change_bits_row_size = ((s->avctx->width >> (2 - width_shift)) + 7) >> 3;

    if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable))
    {
        if (compression_types[header.compression].algorithm == ALGO_RGB24H)
            gen_vector_table24(s, sel_vector_table);
        else
        if (s->avctx->pix_fmt == PIX_FMT_RGB555)
            gen_vector_table15(s, sel_vector_table);
        else
            gen_vector_table16(s, sel_vector_table);
    }

    /* set up pointers to the other key data chunks */
    s->mb_change_bits = s->buf + header.header_size;
    if (s->flags & FLAG_KEYFRAME) {
        /* no change bits specified for a keyframe; only index bytes */
        s->index_stream = s->mb_change_bits;
    } else {
        /* one change bit per 4x4 block */
        s->index_stream = s->mb_change_bits +
            (s->mb_change_bits_row_size * (s->avctx->height >> 2));
    }
    s->index_stream_size = s->size - (s->index_stream - s->buf);

    s->last_deltaset = header.deltaset;
    s->last_vectable = header.vectable;
    s->compression = header.compression;
    s->block_width = compression_types[header.compression].block_width;
    s->block_height = compression_types[header.compression].block_height;
    s->block_type = compression_types[header.compression].block_type;

    if (s->avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(s->avctx, AV_LOG_INFO, "tables: %d / %d c:%d %dx%d t:%d %s%s%s%s\n",
            s->last_deltaset, s->last_vectable, s->compression, s->block_width,
            s->block_height, s->block_type,
            s->flags & FLAG_KEYFRAME ? " KEY" : "",
            s->flags & FLAG_INTERFRAME ? " INTER" : "",
            s->flags & FLAG_SPRITE ? " SPRITE" : "",
            s->flags & FLAG_INTERPOLATED ? " INTERPOL" : "");

    return header.header_size;
}
Example #17
/**
 * Find the estimated global motion for a scene given the most likely shift
 * for each block in the frame. The global motion is estimated to be the
 * same as the motion from most blocks in the frame, so if most blocks
 * move one pixel to the right and two pixels down, this would yield a
 * motion vector (1, -2).
 */
static void find_motion(DeshakeContext *deshake, uint8_t *src1, uint8_t *src2,
                        int width, int height, int stride, Transform *t)
{
    int x, y;
    IntMotionVector mv = {0, 0};
    int count_max_value = 0;
    int contrast;

    int pos;
    int center_x = 0, center_y = 0;
    double p_x, p_y;

    av_fast_malloc(&deshake->angles, &deshake->angles_size, width * height / (16 * deshake->blocksize) * sizeof(*deshake->angles));

    // Reset counts to zero
    for (x = 0; x < deshake->rx * 2 + 1; x++) {
        for (y = 0; y < deshake->ry * 2 + 1; y++) {
            deshake->counts[x][y] = 0;
        }
    }

    pos = 0;
    // Find motion for every block and store the motion vector in the counts
    for (y = deshake->ry; y < height - deshake->ry - (deshake->blocksize * 2); y += deshake->blocksize * 2) {
        // We use a width of 16 here to match the sad function
        for (x = deshake->rx; x < width - deshake->rx - 16; x += 16) {
            // If the contrast is too low, just skip this block as it probably
            // won't be very useful to us.
            contrast = block_contrast(src2, x, y, stride, deshake->blocksize);
            if (contrast > deshake->contrast) {
                //av_log(NULL, AV_LOG_ERROR, "%d\n", contrast);
                find_block_motion(deshake, src1, src2, x, y, stride, &mv);
                if (mv.x != -1 && mv.y != -1) {
                    deshake->counts[mv.x + deshake->rx][mv.y + deshake->ry] += 1;
                    if (x > deshake->rx && y > deshake->ry)
                        deshake->angles[pos++] = block_angle(x, y, 0, 0, &mv);

                    center_x += mv.x;
                    center_y += mv.y;
                }
            }
        }
    }

    if (pos) {
         center_x /= pos;
         center_y /= pos;
         t->angle = clean_mean(deshake->angles, pos);
         if (t->angle < 0.001)
              t->angle = 0;
    } else {
         t->angle = 0;
    }

    // Find the most common motion vector in the frame and use it as the gmv
    for (y = deshake->ry * 2; y >= 0; y--) {
        for (x = 0; x < deshake->rx * 2 + 1; x++) {
            //av_log(NULL, AV_LOG_ERROR, "%5d ", deshake->counts[x][y]);
            if (deshake->counts[x][y] > count_max_value) {
                t->vec.x = x - deshake->rx;
                t->vec.y = y - deshake->ry;
                count_max_value = deshake->counts[x][y];
            }
        }
        //av_log(NULL, AV_LOG_ERROR, "\n");
    }

    p_x = (center_x - width / 2.0);
    p_y = (center_y - height / 2.0);
    t->vec.x += (cos(t->angle)-1)*p_x  - sin(t->angle)*p_y;
    t->vec.y += sin(t->angle)*p_x  + (cos(t->angle)-1)*p_y;

    // Clamp max shift & rotation?
    t->vec.x = av_clipf(t->vec.x, -deshake->rx * 2, deshake->rx * 2);
    t->vec.y = av_clipf(t->vec.y, -deshake->ry * 2, deshake->ry * 2);
    t->angle = av_clipf(t->angle, -0.1, 0.1);

    //av_log(NULL, AV_LOG_ERROR, "%d x %d\n", avg->x, avg->y);
}
Example #18
/* FIXME: This is adapted from ff_h264_decode_nal, avoiding duplication
 * between these functions would be nice. */
int ff_hevc_extract_rbsp(HEVCContext *s, const uint8_t *src, int length,
                         HEVCNAL *nal)
{
    int i, si, di;
    uint8_t *dst;

    if (s)
        s->skipped_bytes = 0;
#define STARTCODE_TEST                                                  \
        if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) {     \
            if (src[i + 2] != 3) {                                      \
                /* startcode, so we must be past the end */             \
                length = i;                                             \
            }                                                           \
            break;                                                      \
        }
#if HAVE_FAST_UNALIGNED
#define FIND_FIRST_ZERO                                                 \
        if (i > 0 && !src[i])                                           \
            i--;                                                        \
        while (src[i])                                                  \
            i++
#if HAVE_FAST_64BIT
    for (i = 0; i + 1 < length; i += 9) {
        if (!((~AV_RN64A(src + i) &
               (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
              0x8000800080008080ULL))
            continue;
        FIND_FIRST_ZERO;
        STARTCODE_TEST;
        i -= 7;
    }
#else
    for (i = 0; i + 1 < length; i += 5) {
        if (!((~AV_RN32A(src + i) &
               (AV_RN32A(src + i) - 0x01000101U)) &
              0x80008080U))
            continue;
        FIND_FIRST_ZERO;
        STARTCODE_TEST;
        i -= 3;
    }
#endif /* HAVE_FAST_64BIT */
#else
    for (i = 0; i + 1 < length; i += 2) {
        if (src[i])
            continue;
        if (i > 0 && src[i - 1] == 0)
            i--;
        STARTCODE_TEST;
    }
#endif /* HAVE_FAST_UNALIGNED */

    if (i >= length - 1) { // no escaped 0
        nal->data     =
        nal->raw_data = src;
        nal->size     =
        nal->raw_size = length;
        return length;
    }

    av_fast_malloc(&nal->rbsp_buffer, &nal->rbsp_buffer_size,
                   length + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!nal->rbsp_buffer)
        return AVERROR(ENOMEM);

    dst = nal->rbsp_buffer;

    memcpy(dst, src, i);
    si = di = i;
    while (si + 2 < length) {
        // remove escapes (very rare 1:2^22)
        if (src[si + 2] > 3) {
            dst[di++] = src[si++];
            dst[di++] = src[si++];
        } else if (src[si] == 0 && src[si + 1] == 0) {
            if (src[si + 2] == 3) { // escape
                dst[di++] = 0;
                dst[di++] = 0;
                si       += 3;

                if (s) {
                    s->skipped_bytes++;
                    if (s->skipped_bytes_pos_size < s->skipped_bytes) {
                        s->skipped_bytes_pos_size *= 2;
                        av_reallocp_array(&s->skipped_bytes_pos,
                                          s->skipped_bytes_pos_size,
                                          sizeof(*s->skipped_bytes_pos));
                        if (!s->skipped_bytes_pos)
                            return AVERROR(ENOMEM);
                    }
                    if (s->skipped_bytes_pos)
                        s->skipped_bytes_pos[s->skipped_bytes - 1] = di - 1;
                }
                continue;
            } else // next start code
                goto nsc;
        }

        dst[di++] = src[si++];
    }
    while (si < length)
        dst[di++] = src[si++];

nsc:
    memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);

    nal->data = dst;
    nal->size = di;
    nal->raw_data = src;
    nal->raw_size = si;
    return si;
}
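The inner loop above strips H.264/HEVC emulation-prevention bytes: every 0x00 0x00 0x03 sequence in the escaped bitstream becomes 0x00 0x00 in the RBSP. A self-contained sketch of just that transform (the real function additionally detects start codes and records skipped-byte positions):

#include <stdio.h>
#include <stdint.h>

static size_t unescape_rbsp(const uint8_t *src, size_t len, uint8_t *dst)
{
    size_t si = 0, di = 0;
    while (si < len) {
        if (si + 2 < len && !src[si] && !src[si + 1] && src[si + 2] == 3) {
            dst[di++] = 0;
            dst[di++] = 0;
            si += 3;                  /* drop the 0x03 escape byte */
        } else {
            dst[di++] = src[si++];
        }
    }
    return di;
}

int main(void)
{
    const uint8_t escaped[] = { 0x42, 0x00, 0x00, 0x03, 0x01, 0x7f };
    uint8_t rbsp[sizeof(escaped)];
    printf("unescaped %zu bytes\n", unescape_rbsp(escaped, sizeof(escaped), rbsp));
    return 0;                         /* prints: unescaped 5 bytes */
}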
Example #19
static int opus_decode_frame(OpusStreamContext *s, const uint8_t *data, int size)
{
    int samples    = s->packet.frame_duration;
    int redundancy = 0;
    int redundancy_size, redundancy_pos;
    int ret, i, consumed;
    int delayed_samples = s->delayed_samples;

    ret = ff_opus_rc_dec_init(&s->rc, data, size);
    if (ret < 0)
        return ret;

    /* decode the silk frame */
    if (s->packet.mode == OPUS_MODE_SILK || s->packet.mode == OPUS_MODE_HYBRID) {
#if CONFIG_SWRESAMPLE
        if (!swr_is_initialized(s->swr)) {
#elif CONFIG_AVRESAMPLE
        if (!avresample_is_open(s->avr)) {
#endif
            ret = opus_init_resample(s);
            if (ret < 0)
                return ret;
        }

        samples = ff_silk_decode_superframe(s->silk, &s->rc, s->silk_output,
                                            FFMIN(s->packet.bandwidth, OPUS_BANDWIDTH_WIDEBAND),
                                            s->packet.stereo + 1,
                                            silk_frame_duration_ms[s->packet.config]);
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding a SILK frame.\n");
            return samples;
        }
#if CONFIG_SWRESAMPLE
        samples = swr_convert(s->swr,
                              (uint8_t**)s->out, s->packet.frame_duration,
                              (const uint8_t**)s->silk_output, samples);
#elif CONFIG_AVRESAMPLE
        samples = avresample_convert(s->avr, (uint8_t**)s->out, s->out_size,
                                     s->packet.frame_duration,
                                     (uint8_t**)s->silk_output,
                                     sizeof(s->silk_buf[0]),
                                     samples);
#endif
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error resampling SILK data.\n");
            return samples;
        }
        av_assert2((samples & 7) == 0);
        s->delayed_samples += s->packet.frame_duration - samples;
    } else
        ff_silk_flush(s->silk);

    // decode redundancy information
    consumed = opus_rc_tell(&s->rc);
    if (s->packet.mode == OPUS_MODE_HYBRID && consumed + 37 <= size * 8)
        redundancy = ff_opus_rc_dec_log(&s->rc, 12);
    else if (s->packet.mode == OPUS_MODE_SILK && consumed + 17 <= size * 8)
        redundancy = 1;

    if (redundancy) {
        redundancy_pos = ff_opus_rc_dec_log(&s->rc, 1);

        if (s->packet.mode == OPUS_MODE_HYBRID)
            redundancy_size = ff_opus_rc_dec_uint(&s->rc, 256) + 2;
        else
            redundancy_size = size - (consumed + 7) / 8;
        size -= redundancy_size;
        if (size < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid redundancy frame size.\n");
            return AVERROR_INVALIDDATA;
        }

        if (redundancy_pos) {
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;
            ff_celt_flush(s->celt);
        }
    }

    /* decode the CELT frame */
    if (s->packet.mode == OPUS_MODE_CELT || s->packet.mode == OPUS_MODE_HYBRID) {
        float *out_tmp[2] = { s->out[0], s->out[1] };
        float **dst = (s->packet.mode == OPUS_MODE_CELT) ?
                      out_tmp : s->celt_output;
        int celt_output_samples = samples;
        int delay_samples = av_audio_fifo_size(s->celt_delay);

        if (delay_samples) {
            if (s->packet.mode == OPUS_MODE_HYBRID) {
                av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, delay_samples);

                for (i = 0; i < s->output_channels; i++) {
                    s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0,
                                                delay_samples);
                    out_tmp[i] += delay_samples;
                }
                celt_output_samples -= delay_samples;
            } else {
                av_log(s->avctx, AV_LOG_WARNING,
                       "Spurious CELT delay samples present.\n");
                av_audio_fifo_drain(s->celt_delay, delay_samples);
                if (s->avctx->err_recognition & AV_EF_EXPLODE)
                    return AVERROR_BUG;
            }
        }

        ff_opus_rc_dec_raw_init(&s->rc, data + size, size);

        ret = ff_celt_decode_frame(s->celt, &s->rc, dst,
                                   s->packet.stereo + 1,
                                   s->packet.frame_duration,
                                   (s->packet.mode == OPUS_MODE_HYBRID) ? 17 : 0,
                                   ff_celt_band_end[s->packet.bandwidth]);
        if (ret < 0)
            return ret;

        if (s->packet.mode == OPUS_MODE_HYBRID) {
            int celt_delay = s->packet.frame_duration - celt_output_samples;
            void *delaybuf[2] = { s->celt_output[0] + celt_output_samples,
                                  s->celt_output[1] + celt_output_samples };

            for (i = 0; i < s->output_channels; i++) {
                s->fdsp->vector_fmac_scalar(out_tmp[i],
                                            s->celt_output[i], 1.0,
                                            celt_output_samples);
            }

            ret = av_audio_fifo_write(s->celt_delay, delaybuf, celt_delay);
            if (ret < 0)
                return ret;
        }
    } else
        ff_celt_flush(s->celt);

    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
            opus_fade(s->out[i], s->out[i],
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
                      ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
        s->redundancy_idx = 0;
    }
    if (redundancy) {
        if (!redundancy_pos) {
            ff_celt_flush(s->celt);
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;

            for (i = 0; i < s->output_channels; i++) {
                opus_fade(s->out[i] + samples - 120 + delayed_samples,
                          s->out[i] + samples - 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          ff_celt_window2, 120 - delayed_samples);
                if (delayed_samples)
                    s->redundancy_idx = 120 - delayed_samples;
            }
        } else {
            for (i = 0; i < s->output_channels; i++) {
                memcpy(s->out[i] + delayed_samples, s->redundancy_output[i], 120 * sizeof(float));
                opus_fade(s->out[i] + 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          s->out[i] + 120 + delayed_samples,
                          ff_celt_window2, 120);
            }
        }
    }

    return samples;
}

static int opus_decode_subpacket(OpusStreamContext *s,
                                 const uint8_t *buf, int buf_size,
                                 float **out, int out_size,
                                 int nb_samples)
{
    int output_samples = 0;
    int flush_needed   = 0;
    int i, j, ret;

    s->out[0]   = out[0];
    s->out[1]   = out[1];
    s->out_size = out_size;

    /* check if we need to flush the resampler */
#if CONFIG_SWRESAMPLE
    if (swr_is_initialized(s->swr)) {
        if (buf) {
            int64_t cur_samplerate;
            av_opt_get_int(s->swr, "in_sample_rate", 0, &cur_samplerate);
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
        } else {
            flush_needed = !!s->delayed_samples;
        }
    }
#elif CONFIG_AVRESAMPLE
    if (avresample_is_open(s->avr)) {
        if (buf) {
            int64_t cur_samplerate;
            av_opt_get_int(s->avr, "in_sample_rate", 0, &cur_samplerate);
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
        } else {
            flush_needed = !!s->delayed_samples;
        }
    }
#endif

    if (!buf && !flush_needed)
        return 0;

    /* use dummy output buffers if the channel is not mapped to anything */
    if (!s->out[0] ||
        (s->output_channels == 2 && !s->out[1])) {
        av_fast_malloc(&s->out_dummy, &s->out_dummy_allocated_size, s->out_size);
        if (!s->out_dummy)
            return AVERROR(ENOMEM);
        if (!s->out[0])
            s->out[0] = s->out_dummy;
        if (!s->out[1])
            s->out[1] = s->out_dummy;
    }

    /* flush the resampler if necessary */
    if (flush_needed) {
        ret = opus_flush_resample(s, s->delayed_samples);
        if (ret < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error flushing the resampler.\n");
            return ret;
        }
#if CONFIG_SWRESAMPLE
        swr_close(s->swr);
#elif CONFIG_AVRESAMPLE
        avresample_close(s->avr);
#endif
        output_samples += s->delayed_samples;
        s->delayed_samples = 0;

        if (!buf)
            goto finish;
    }

    /* decode all the frames in the packet */
    for (i = 0; i < s->packet.frame_count; i++) {
        int size = s->packet.frame_size[i];
        int samples = opus_decode_frame(s, buf + s->packet.frame_offset[i], size);

        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding an Opus frame.\n");
            if (s->avctx->err_recognition & AV_EF_EXPLODE)
                return samples;

            for (j = 0; j < s->output_channels; j++)
                memset(s->out[j], 0, s->packet.frame_duration * sizeof(float));
            samples = s->packet.frame_duration;
        }
        output_samples += samples;

        for (j = 0; j < s->output_channels; j++)
            s->out[j] += samples;
        s->out_size -= samples * sizeof(float);
    }

finish:
    s->out[0] = s->out[1] = NULL;
    s->out_size = 0;

    return output_samples;
}
Example #20
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    GifState *s = avctx->priv_data;
    int ret;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

    s->frame->pts     = avpkt->pts;
    s->frame->pkt_pts = avpkt->pts;
    s->frame->pkt_dts = avpkt->dts;
    av_frame_set_pkt_duration(s->frame, avpkt->duration);

    if (avpkt->size >= 6) {
        s->keyframe = memcmp(avpkt->data, gif87a_sig, 6) == 0 ||
                      memcmp(avpkt->data, gif89a_sig, 6) == 0;
    } else {
        s->keyframe = 0;
    }

    if (s->keyframe) {
        s->keyframe_ok = 0;
        s->gce_prev_disposal = GCE_DISPOSAL_NONE;
        if ((ret = gif_read_header1(s)) < 0)
            return ret;

        if ((ret = ff_set_dimensions(avctx, s->screen_width, s->screen_height)) < 0)
            return ret;

        av_frame_unref(s->frame);
        if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
            return ret;

        av_fast_malloc(&s->idx_line, &s->idx_line_size, s->screen_width);
        if (!s->idx_line)
            return AVERROR(ENOMEM);

        s->frame->pict_type = AV_PICTURE_TYPE_I;
        s->frame->key_frame = 1;
        s->keyframe_ok = 1;
    } else {
        if (!s->keyframe_ok) {
            av_log(avctx, AV_LOG_ERROR, "cannot decode frame without keyframe\n");
            return AVERROR_INVALIDDATA;
        }

        if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
            return ret;

        s->frame->pict_type = AV_PICTURE_TYPE_P;
        s->frame->key_frame = 0;
    }

    ret = gif_parse_next_image(s, s->frame);
    if (ret < 0)
        return ret;

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;
    *got_frame = 1;

    return bytestream2_tell(&s->gb);
}
Example #21
static int dcadec_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame_ptr, AVPacket *avpkt)
{
    DCAContext *s = avctx->priv_data;
    AVFrame *frame = data;
    uint8_t *input = avpkt->data;
    int input_size = avpkt->size;
    int i, ret, prev_packet = s->packet;

    if (input_size < MIN_PACKET_SIZE || input_size > MAX_PACKET_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid packet size\n");
        return AVERROR_INVALIDDATA;
    }

    av_fast_malloc(&s->buffer, &s->buffer_size,
                   FFALIGN(input_size, 4096) + DCA_BUFFER_PADDING_SIZE);
    if (!s->buffer)
        return AVERROR(ENOMEM);

    for (i = 0, ret = AVERROR_INVALIDDATA; i < input_size - MIN_PACKET_SIZE + 1 && ret < 0; i++)
        ret = convert_bitstream(input + i, input_size - i, s->buffer, s->buffer_size);

    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Not a valid DCA frame\n");
        return ret;
    }

    input      = s->buffer;
    input_size = ret;

    s->packet = 0;

    // Parse backward compatible core sub-stream
    if (AV_RB32(input) == DCA_SYNCWORD_CORE_BE) {
        int frame_size;

        if ((ret = ff_dca_core_parse(&s->core, input, input_size)) < 0) {
            s->core_residual_valid = 0;
            return ret;
        }

        s->packet |= DCA_PACKET_CORE;

        // EXSS data must be aligned on 4-byte boundary
        frame_size = FFALIGN(s->core.frame_size, 4);
        if (input_size - 4 > frame_size) {
            input      += frame_size;
            input_size -= frame_size;
        }
    }

    if (!s->core_only) {
        DCAExssAsset *asset = NULL;

        // Parse extension sub-stream (EXSS)
        if (AV_RB32(input) == DCA_SYNCWORD_SUBSTREAM) {
            if ((ret = ff_dca_exss_parse(&s->exss, input, input_size)) < 0) {
                if (avctx->err_recognition & AV_EF_EXPLODE)
                    return ret;
            } else {
                s->packet |= DCA_PACKET_EXSS;
                asset = &s->exss.assets[0];
            }
        }

        // Parse XLL component in EXSS
        if (asset && (asset->extension_mask & DCA_EXSS_XLL)) {
            if ((ret = ff_dca_xll_parse(&s->xll, input, asset)) < 0) {
                // Conceal XLL synchronization error
                if (ret == AVERROR(EAGAIN)
                    && (prev_packet & DCA_PACKET_XLL)
                    && (s->packet & DCA_PACKET_CORE))
                    s->packet |= DCA_PACKET_XLL | DCA_PACKET_RECOVERY;
                else if (ret == AVERROR(ENOMEM) || (avctx->err_recognition & AV_EF_EXPLODE))
                    return ret;
            } else {
                s->packet |= DCA_PACKET_XLL;
            }
        }

        // Parse LBR component in EXSS
        if (asset && (asset->extension_mask & DCA_EXSS_LBR)) {
            if ((ret = ff_dca_lbr_parse(&s->lbr, input, asset)) < 0) {
                if (ret == AVERROR(ENOMEM) || (avctx->err_recognition & AV_EF_EXPLODE))
                    return ret;
            } else {
                s->packet |= DCA_PACKET_LBR;
            }
        }

        // Parse core extensions in EXSS or backward compatible core sub-stream
        if ((s->packet & DCA_PACKET_CORE)
            && (ret = ff_dca_core_parse_exss(&s->core, input, asset)) < 0)
            return ret;
    }

    // Filter the frame
    if (s->packet & DCA_PACKET_LBR) {
        if ((ret = ff_dca_lbr_filter_frame(&s->lbr, frame)) < 0)
            return ret;
    } else if (s->packet & DCA_PACKET_XLL) {
        if (s->packet & DCA_PACKET_CORE) {
            int x96_synth = -1;

            // Enable X96 synthesis if needed
            if (s->xll.chset[0].freq == 96000 && s->core.sample_rate == 48000)
                x96_synth = 1;

            if ((ret = ff_dca_core_filter_fixed(&s->core, x96_synth)) < 0) {
                s->core_residual_valid = 0;
                return ret;
            }

            // Force lossy downmixed output on the first core frame filtered.
            // This prevents audible clicks when seeking and is consistent with
            // what reference decoder does when there are multiple channel sets.
            if (!s->core_residual_valid) {
                if (s->xll.nreschsets > 0 && s->xll.nchsets > 1)
                    s->packet |= DCA_PACKET_RECOVERY;
                s->core_residual_valid = 1;
            }
        }

        if ((ret = ff_dca_xll_filter_frame(&s->xll, frame)) < 0) {
            // Fall back to core unless hard error
            if (!(s->packet & DCA_PACKET_CORE))
                return ret;
            if (ret != AVERROR_INVALIDDATA || (avctx->err_recognition & AV_EF_EXPLODE))
                return ret;
            if ((ret = ff_dca_core_filter_frame(&s->core, frame)) < 0) {
                s->core_residual_valid = 0;
                return ret;
            }
        }
    } else if (s->packet & DCA_PACKET_CORE) {
        if ((ret = ff_dca_core_filter_frame(&s->core, frame)) < 0) {
            s->core_residual_valid = 0;
            return ret;
        }
        s->core_residual_valid = !!(s->core.filter_mode & DCA_FILTER_MODE_FIXED);
    } else {
        av_log(avctx, AV_LOG_ERROR, "No valid DCA sub-stream found\n");
        if (s->core_only)
            av_log(avctx, AV_LOG_WARNING, "Consider disabling 'core_only' option\n");
        return AVERROR_INVALIDDATA;
    }

    *got_frame_ptr = 1;

    return avpkt->size;
}
Example #22
static int opus_decode_subpacket(OpusStreamContext *s,
                                 const uint8_t *buf, int buf_size,
                                 int nb_samples)
{
    int output_samples = 0;
    int flush_needed   = 0;
    int i, j, ret;

    /* check if we need to flush the resampler */
    if (swr_is_initialized(s->swr)) {
        if (buf) {
            int64_t cur_samplerate;
            av_opt_get_int(s->swr, "in_sample_rate", 0, &cur_samplerate);
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
        } else {
            flush_needed = !!s->delayed_samples;
        }
    }

    if (!buf && !flush_needed)
        return 0;

    /* use dummy output buffers if the channel is not mapped to anything */
    if (!s->out[0] ||
            (s->output_channels == 2 && !s->out[1])) {
        av_fast_malloc(&s->out_dummy, &s->out_dummy_allocated_size, s->out_size);
        if (!s->out_dummy)
            return AVERROR(ENOMEM);
        if (!s->out[0])
            s->out[0] = s->out_dummy;
        if (!s->out[1])
            s->out[1] = s->out_dummy;
    }

    /* flush the resampler if necessary */
    if (flush_needed) {
        ret = opus_flush_resample(s, s->delayed_samples);
        if (ret < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error flushing the resampler.\n");
            return ret;
        }
        swr_close(s->swr);
        output_samples += s->delayed_samples;
        s->delayed_samples = 0;

        if (!buf)
            goto finish;
    }

    /* decode all the frames in the packet */
    for (i = 0; i < s->packet.frame_count; i++) {
        int size = s->packet.frame_size[i];
        int samples = opus_decode_frame(s, buf + s->packet.frame_offset[i], size);

        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding an Opus frame.\n");
            if (s->avctx->err_recognition & AV_EF_EXPLODE)
                return samples;

            for (j = 0; j < s->output_channels; j++)
                memset(s->out[j], 0, s->packet.frame_duration * sizeof(float));
            samples = s->packet.frame_duration;
        }
        output_samples += samples;

        for (j = 0; j < s->output_channels; j++)
            s->out[j] += samples;
        s->out_size -= samples * sizeof(float);
    }

finish:
    s->out[0] = s->out[1] = NULL;
    s->out_size = 0;

    return output_samples;
}
Example #23
static int submit_packet(PerThreadContext *p, AVPacket *avpkt)
{
    FrameThreadContext *fctx = p->parent;
    PerThreadContext *prev_thread = fctx->prev_thread;
    const AVCodec *codec = p->avctx->codec;

    if (!avpkt->size && !(codec->capabilities & CODEC_CAP_DELAY)) return 0;

    pthread_mutex_lock(&p->mutex);

    release_delayed_buffers(p);

    if (prev_thread) {
        int err;
        if (prev_thread->state == STATE_SETTING_UP) {
            pthread_mutex_lock(&prev_thread->progress_mutex);
            while (prev_thread->state == STATE_SETTING_UP)
                pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
            pthread_mutex_unlock(&prev_thread->progress_mutex);
        }

        err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
        if (err) {
            pthread_mutex_unlock(&p->mutex);
            return err;
        }
    }

    av_buffer_unref(&p->avpkt.buf);
    p->avpkt = *avpkt;
    if (avpkt->buf)
        p->avpkt.buf = av_buffer_ref(avpkt->buf);
    else {
        av_fast_malloc(&p->buf, &p->allocated_buf_size, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!p->buf) {
            pthread_mutex_unlock(&p->mutex);
            return AVERROR(ENOMEM);
        }
        p->avpkt.data = p->buf;
        memcpy(p->buf, avpkt->data, avpkt->size);
        memset(p->buf + avpkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    }

    p->state = STATE_SETTING_UP;
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);

    /*
     * If the client doesn't have a thread-safe get_buffer(),
     * then decoding threads call back to the main thread,
     * and it calls back to the client here.
     */

FF_DISABLE_DEPRECATION_WARNINGS
    if (!p->avctx->thread_safe_callbacks && (
         p->avctx->get_format != avcodec_default_get_format ||
#if FF_API_GET_BUFFER
         p->avctx->get_buffer ||
#endif
         p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
FF_ENABLE_DEPRECATION_WARNINGS
        while (p->state != STATE_SETUP_FINISHED && p->state != STATE_INPUT_READY) {
            int call_done = 1;
            pthread_mutex_lock(&p->progress_mutex);
            while (p->state == STATE_SETTING_UP)
                pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

            switch (p->state) {
            case STATE_GET_BUFFER:
                p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
                break;
            case STATE_GET_FORMAT:
                p->result_format = p->avctx->get_format(p->avctx, p->available_formats);
                break;
            default:
                call_done = 0;
                break;
            }
            if (call_done) {
                p->state  = STATE_SETTING_UP;
                pthread_cond_signal(&p->progress_cond);
            }
            pthread_mutex_unlock(&p->progress_mutex);
        }
    }

    fctx->prev_thread = p;
    fctx->next_decoding++;

    return 0;
}
Example #24
static int mp_decode_frame(AVCodecContext *avctx,
                           void *data, int *got_frame,
                           AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MotionPixelsContext *mp = avctx->priv_data;
    GetBitContext gb;
    int i, count1, count2, sz, ret;

    if ((ret = ff_reget_buffer(avctx, &mp->frame)) < 0)
        return ret;

    /* The bitstream is stored as little-endian 32-bit words read MSB-first, so byte-swap it into a scratch buffer before bit parsing. */
    av_fast_malloc(&mp->bswapbuf, &mp->bswapbuf_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!mp->bswapbuf)
        return AVERROR(ENOMEM);
    mp->dsp.bswap_buf((uint32_t *)mp->bswapbuf, (const uint32_t *)buf, buf_size / 4);
    if (buf_size & 3)
        memcpy(mp->bswapbuf + (buf_size & ~3), buf + (buf_size & ~3), buf_size & 3);
    memset(mp->bswapbuf + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    init_get_bits(&gb, mp->bswapbuf, buf_size * 8);

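    /* Read the changed-region maps: two 12-bit region counts, with fields coded on 8 and 4 bits; the first pass is present only when extradata[1] bit 1 is set. */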
    memset(mp->changes_map, 0, avctx->width * avctx->height);
    for (i = !(avctx->extradata[1] & 2); i < 2; ++i) {
        count1 = get_bits(&gb, 12);
        count2 = get_bits(&gb, 12);
        mp_read_changes_map(mp, &gb, count1, 8, i);
        mp_read_changes_map(mp, &gb, count2, 4, i);
    }

    mp->codes_count = get_bits(&gb, 4);
    if (mp->codes_count == 0)
        goto end;

    if (mp->changes_map[0] == 0) {
        *(uint16_t *)mp->frame.data[0] = get_bits(&gb, 15);
        mp->changes_map[0] = 1;
    }
    if (mp_read_codes_table(mp, &gb) < 0)
        goto end;

    sz = get_bits(&gb, 18);
    if (avctx->extradata[0] != 5)
        sz += get_bits(&gb, 18);
    if (sz == 0)
        goto end;

    if (mp->max_codes_bits <= 0)
        goto end;
    if (init_vlc(&mp->vlc, mp->max_codes_bits, mp->codes_count, &mp->codes[0].size, sizeof(HuffCode), 1, &mp->codes[0].code, sizeof(HuffCode), 4, 0))
        goto end;
    mp_decode_frame_helper(mp, &gb);
    ff_free_vlc(&mp->vlc);

end:
    if ((ret = av_frame_ref(data, &mp->frame)) < 0)
        return ret;
    *got_frame       = 1;
    return buf_size;
}
Example #25
static int gif_read_image(GifState *s)
{
    int left, top, width, height, bits_per_pixel, code_size, flags;
    int is_interleaved, has_local_palette, y, pass, y1, linesize, pal_size;
    uint32_t *ptr, *pal, *px, *pr, *ptr1;
    int ret;
    uint8_t *idx;

    /* At least 9 bytes of Image Descriptor. */
    if (s->bytestream_end < s->bytestream + 9)
        return AVERROR_INVALIDDATA;

    left = bytestream_get_le16(&s->bytestream);
    top = bytestream_get_le16(&s->bytestream);
    width = bytestream_get_le16(&s->bytestream);
    height = bytestream_get_le16(&s->bytestream);
    flags = bytestream_get_byte(&s->bytestream);
    is_interleaved = flags & 0x40;
    has_local_palette = flags & 0x80;
    bits_per_pixel = (flags & 0x07) + 1;

    av_dlog(s->avctx, "image x=%d y=%d w=%d h=%d\n", left, top, width, height);

    if (has_local_palette) {
        pal_size = 1 << bits_per_pixel;

        if (s->bytestream_end < s->bytestream + pal_size * 3)
            return AVERROR_INVALIDDATA;

        gif_read_palette(&s->bytestream, s->local_palette, pal_size);
        pal = s->local_palette;
    } else {
        if (!s->has_global_palette) {
            av_log(s->avctx, AV_LOG_FATAL, "picture doesn't have either global or local palette.\n");
            return AVERROR_INVALIDDATA;
        }

        pal = s->global_palette;
    }

    if (s->keyframe) {
        if (s->transparent_color_index == -1 && s->has_global_palette) {
            /* transparency wasn't set before the first frame, fill with background color */
            gif_fill(&s->picture, s->bg_color);
        } else {
            /* otherwise fill with transparent color.
             * this is necessary since by default picture filled with 0x80808080. */
            gif_fill(&s->picture, s->trans_color);
        }
    }

    /* verify that all the image is inside the screen dimensions */
    if (left + width > s->screen_width ||
        top + height > s->screen_height)
        return AVERROR(EINVAL);

    /* process disposal method */
    if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
        gif_fill_rect(&s->picture, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    } else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) {
        gif_copy_img_rect(s->stored_img, (uint32_t *)s->picture.data[0],
            s->picture.linesize[0] / sizeof(uint32_t), s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    }

    s->gce_prev_disposal = s->gce_disposal;

    if (s->gce_disposal != GCE_DISPOSAL_NONE) {
        s->gce_l = left;  s->gce_t = top;
        s->gce_w = width; s->gce_h = height;

        if (s->gce_disposal == GCE_DISPOSAL_BACKGROUND) {
            if (s->background_color_index == s->transparent_color_index)
                s->stored_bg_color = s->trans_color;
            else
                s->stored_bg_color = s->bg_color;
        } else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) {
            av_fast_malloc(&s->stored_img, &s->stored_img_size, s->picture.linesize[0] * s->picture.height);
            if (!s->stored_img)
                return AVERROR(ENOMEM);

            gif_copy_img_rect((uint32_t *)s->picture.data[0], s->stored_img,
                s->picture.linesize[0] / sizeof(uint32_t), left, top, width, height);
        }
    }

    /* Expect at least 2 bytes: 1 for lzw code size and 1 for block size. */
    if (s->bytestream_end < s->bytestream + 2)
        return AVERROR_INVALIDDATA;

    /* now get the image data */
    code_size = bytestream_get_byte(&s->bytestream);
    if ((ret = ff_lzw_decode_init(s->lzw, code_size, s->bytestream,
                       s->bytestream_end - s->bytestream, FF_LZW_GIF)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "LZW init failed\n");
        return ret;
    }

    /* read all the image */
    linesize = s->picture.linesize[0] / sizeof(uint32_t);
    ptr1 = (uint32_t *)s->picture.data[0] + top * linesize + left;
    ptr = ptr1;
    pass = 0;
    y1 = 0;
    for (y = 0; y < height; y++) {
        if (ff_lzw_decode(s->lzw, s->idx_line, width) == 0)
            goto decode_tail;

        pr = ptr + width;

        for (px = ptr, idx = s->idx_line; px < pr; px++, idx++) {
            if (*idx != s->transparent_color_index)
                *px = pal[*idx];
        }

        if (is_interleaved) {
            switch(pass) {
            default:
            case 0:
            case 1:
                y1 += 8;
                ptr += linesize * 8;
                if (y1 >= height) {
                    y1 = pass ? 2 : 4;
                    ptr = ptr1 + linesize * y1;
                    pass++;
                }
                break;
            case 2:
                y1 += 4;
                ptr += linesize * 4;
                if (y1 >= height) {
                    y1 = 1;
                    ptr = ptr1 + linesize;
                    pass++;
                }
                break;
            case 3:
                y1 += 2;
                ptr += linesize * 2;
                break;
            }
        } else {
            ptr += linesize;
        }
    }

 decode_tail:
    /* read the garbage data until end marker is found */
    ff_lzw_decode_tail(s->lzw);
    s->bytestream = ff_lzw_cur_ptr(s->lzw);

    /* Graphic Control Extension's scope is single frame.
     * Remove its influence. */
    s->transparent_color_index = -1;
    s->gce_disposal = GCE_DISPOSAL_NONE;

    return 0;
}
Example #26
static int rscc_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    RsccContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    GetByteContext tiles_gbc;
    AVFrame *frame = data;
    const uint8_t *pixels, *raw;
    uint8_t *inflated_tiles = NULL;
    int tiles_nb, packed_size, pixel_size = 0;
    int i, ret = 0;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    /* Size check */
    if (bytestream2_get_bytes_left(gbc) < 12) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    /* Read number of tiles, and allocate the array */
    tiles_nb = bytestream2_get_le16(gbc);
    av_fast_malloc(&ctx->tiles, &ctx->tiles_size,
                   tiles_nb * sizeof(*ctx->tiles));
    if (!ctx->tiles) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    av_log(avctx, AV_LOG_DEBUG, "Frame with %d tiles.\n", tiles_nb);

    /* When there are more than 5 tiles, they are packed together with
     * a size header. When that size does not match the number of tiles
     * times the tile size, it means it needs to be inflated as well */
    if (tiles_nb > 5) {
        uLongf packed_tiles_size;

        if (tiles_nb < 32)
            packed_tiles_size = bytestream2_get_byte(gbc);
        else
            packed_tiles_size = bytestream2_get_le16(gbc);

        ff_dlog(avctx, "packed tiles of size %lu.\n", packed_tiles_size);

        /* If necessary, uncompress tiles, and hijack the bytestream reader */
        if (packed_tiles_size != tiles_nb * TILE_SIZE) {
            uLongf length = tiles_nb * TILE_SIZE;
            inflated_tiles = av_malloc(length);
            if (!inflated_tiles) {
                ret = AVERROR(ENOMEM);
                goto end;
            }

            ret = uncompress(inflated_tiles, &length,
                             gbc->buffer, packed_tiles_size);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "Tile deflate error %d.\n", ret);
                ret = AVERROR_UNKNOWN;
                goto end;
            }

            /* Skip the compressed tile section in the main byte reader,
             * and point it to read the newly uncompressed data */
            bytestream2_skip(gbc, packed_tiles_size);
            bytestream2_init(&tiles_gbc, inflated_tiles, length);
            gbc = &tiles_gbc;
        }
    }

    /* Fill in array of tiles, keeping track of how many pixels are updated */
    for (i = 0; i < tiles_nb; i++) {
        ctx->tiles[i].x = bytestream2_get_le16(gbc);
        ctx->tiles[i].w = bytestream2_get_le16(gbc);
        ctx->tiles[i].y = bytestream2_get_le16(gbc);
        ctx->tiles[i].h = bytestream2_get_le16(gbc);

        pixel_size += ctx->tiles[i].w * ctx->tiles[i].h * ctx->component_size;

        ff_dlog(avctx, "tile %d orig(%d,%d) %dx%d.\n", i,
                ctx->tiles[i].x, ctx->tiles[i].y,
                ctx->tiles[i].w, ctx->tiles[i].h);

        if (ctx->tiles[i].w == 0 || ctx->tiles[i].h == 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "invalid tile %d at (%d.%d) with size %dx%d.\n", i,
                   ctx->tiles[i].x, ctx->tiles[i].y,
                   ctx->tiles[i].w, ctx->tiles[i].h);
            ret = AVERROR_INVALIDDATA;
            goto end;
        } else if (ctx->tiles[i].x + ctx->tiles[i].w > avctx->width ||
                   ctx->tiles[i].y + ctx->tiles[i].h > avctx->height) {
            av_log(avctx, AV_LOG_ERROR,
                   "out of bounds tile %d at (%d.%d) with size %dx%d.\n", i,
                   ctx->tiles[i].x, ctx->tiles[i].y,
                   ctx->tiles[i].w, ctx->tiles[i].h);
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
    }

    /* Reset the reader in case it had been modified before */
    gbc = &ctx->gbc;

    /* Extract how much pixel data the tiles contain */
    if (pixel_size < 0x100)
        packed_size = bytestream2_get_byte(gbc);
    else if (pixel_size < 0x10000)
        packed_size = bytestream2_get_le16(gbc);
    else if (pixel_size < 0x1000000)
        packed_size = bytestream2_get_le24(gbc);
    else
        packed_size = bytestream2_get_le32(gbc);

    ff_dlog(avctx, "pixel_size %d packed_size %d.\n", pixel_size, packed_size);

    if (packed_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid tile size %d\n", packed_size);
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    /* Get pixels buffer, it may be deflated or just raw */
    if (pixel_size == packed_size) {
        if (bytestream2_get_bytes_left(gbc) < pixel_size) {
            av_log(avctx, AV_LOG_ERROR, "Insufficient input for %d\n", pixel_size);
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        pixels = gbc->buffer;
    } else {
        uLongf len = ctx->inflated_size;
        if (bytestream2_get_bytes_left(gbc) < packed_size) {
            av_log(avctx, AV_LOG_ERROR, "Insufficient input for %d\n", packed_size);
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        ret = uncompress(ctx->inflated_buf, &len, gbc->buffer, packed_size);
        if (ret) {
            av_log(avctx, AV_LOG_ERROR, "Pixel deflate error %d.\n", ret);
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        pixels = ctx->inflated_buf;
    }

    /* Allocate when needed */
    ret = ff_reget_buffer(avctx, ctx->reference);
    if (ret < 0)
        goto end;

    /* Pointer to actual pixels, will be updated when data is consumed */
    raw = pixels;
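    /* Tiles are stored bottom-up: start at the tile's last row and copy with a negative stride. */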
    for (i = 0; i < tiles_nb; i++) {
        uint8_t *dst = ctx->reference->data[0] + ctx->reference->linesize[0] *
                       (avctx->height - ctx->tiles[i].y - 1) +
                       ctx->tiles[i].x * ctx->component_size;
        av_image_copy_plane(dst, -1 * ctx->reference->linesize[0],
                            raw, ctx->tiles[i].w * ctx->component_size,
                            ctx->tiles[i].w * ctx->component_size,
                            ctx->tiles[i].h);
        raw += ctx->tiles[i].w * ctx->component_size * ctx->tiles[i].h;
    }

    /* Frame is ready to be output */
    ret = av_frame_ref(frame, ctx->reference);
    if (ret < 0)
        goto end;

    /* Keyframe when the number of pixels updated matches the whole surface */
    if (pixel_size == ctx->inflated_size) {
        frame->pict_type = AV_PICTURE_TYPE_I;
        frame->key_frame = 1;
    } else {
        frame->pict_type = AV_PICTURE_TYPE_P;
    }
    *got_frame = 1;

end:
    av_free(inflated_tiles);
    return ret;
}
Example #27
static int gif_read_image(GifState *s, AVFrame *frame)
{
    int left, top, width, height, bits_per_pixel, code_size, flags, pw;
    int is_interleaved, has_local_palette, y, pass, y1, linesize, pal_size;
    uint32_t *ptr, *pal, *px, *pr, *ptr1;
    int ret;
    uint8_t *idx;

    /* At least 9 bytes of Image Descriptor. */
    if (bytestream2_get_bytes_left(&s->gb) < 9)
        return AVERROR_INVALIDDATA;

    left   = bytestream2_get_le16u(&s->gb);
    top    = bytestream2_get_le16u(&s->gb);
    width  = bytestream2_get_le16u(&s->gb);
    height = bytestream2_get_le16u(&s->gb);
    flags  = bytestream2_get_byteu(&s->gb);
    is_interleaved = flags & 0x40;
    has_local_palette = flags & 0x80;
    bits_per_pixel = (flags & 0x07) + 1;

    av_dlog(s->avctx, "image x=%d y=%d w=%d h=%d\n", left, top, width, height);

    if (has_local_palette) {
        pal_size = 1 << bits_per_pixel;

        if (bytestream2_get_bytes_left(&s->gb) < pal_size * 3)
            return AVERROR_INVALIDDATA;

        gif_read_palette(s, s->local_palette, pal_size);
        pal = s->local_palette;
    } else {
        if (!s->has_global_palette) {
            av_log(s->avctx, AV_LOG_ERROR, "picture doesn't have either global or local palette.\n");
            return AVERROR_INVALIDDATA;
        }

        pal = s->global_palette;
    }

    if (s->keyframe) {
        if (s->transparent_color_index == -1 && s->has_global_palette) {
            /* transparency wasn't set before the first frame, fill with background color */
            gif_fill(frame, s->bg_color);
        } else {
            /* otherwise fill with transparent color.
             * this is necessary since by default picture filled with 0x80808080. */
            gif_fill(frame, s->trans_color);
        }
    }

    /* verify that all the image is inside the screen dimensions */
    if (!width || width > s->screen_width || left >= s->screen_width) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid image width.\n");
        return AVERROR_INVALIDDATA;
    }
    if (!height || height > s->screen_height || top >= s->screen_height) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid image height.\n");
        return AVERROR_INVALIDDATA;
    }
    if (left + width > s->screen_width) {
        /* width must be kept around to avoid lzw vs line desync */
        pw = s->screen_width - left;
        av_log(s->avctx, AV_LOG_WARNING, "Image too wide by %d, truncating.\n",
               left + width - s->screen_width);
    } else {
        pw = width;
    }
    if (top + height > s->screen_height) {
        /* we don't care about the extra invisible lines */
        av_log(s->avctx, AV_LOG_WARNING, "Image too high by %d, truncating.\n",
               top + height - s->screen_height);
        height = s->screen_height - top;
    }

    /* process disposal method */
    if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
        gif_fill_rect(frame, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    } else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) {
        gif_copy_img_rect(s->stored_img, (uint32_t *)frame->data[0],
            frame->linesize[0] / sizeof(uint32_t), s->gce_l, s->gce_t, s->gce_w, s->gce_h);
    }

    s->gce_prev_disposal = s->gce_disposal;

    if (s->gce_disposal != GCE_DISPOSAL_NONE) {
        s->gce_l = left;  s->gce_t = top;
        s->gce_w = pw;    s->gce_h = height;

        if (s->gce_disposal == GCE_DISPOSAL_BACKGROUND) {
            if (s->transparent_color_index >= 0)
                s->stored_bg_color = s->trans_color;
            else
                s->stored_bg_color = s->bg_color;
        } else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) {
            av_fast_malloc(&s->stored_img, &s->stored_img_size, frame->linesize[0] * frame->height);
            if (!s->stored_img)
                return AVERROR(ENOMEM);

            gif_copy_img_rect((uint32_t *)frame->data[0], s->stored_img,
                frame->linesize[0] / sizeof(uint32_t), left, top, pw, height);
        }
    }

    /* Expect at least 2 bytes: 1 for lzw code size and 1 for block size. */
    if (bytestream2_get_bytes_left(&s->gb) < 2)
        return AVERROR_INVALIDDATA;

    /* now get the image data */
    code_size = bytestream2_get_byteu(&s->gb);
    if ((ret = ff_lzw_decode_init(s->lzw, code_size, s->gb.buffer,
                                  bytestream2_get_bytes_left(&s->gb), FF_LZW_GIF)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "LZW init failed\n");
        return ret;
    }

    /* read all the image */
    linesize = frame->linesize[0] / sizeof(uint32_t);
    ptr1 = (uint32_t *)frame->data[0] + top * linesize + left;
    ptr = ptr1;
    pass = 0;
    y1 = 0;
    for (y = 0; y < height; y++) {
        int count = ff_lzw_decode(s->lzw, s->idx_line, width);
        if (count != width) {
            if (count)
                av_log(s->avctx, AV_LOG_ERROR, "LZW decode failed\n");
            goto decode_tail;
        }

        pr = ptr + pw;

        for (px = ptr, idx = s->idx_line; px < pr; px++, idx++) {
            if (*idx != s->transparent_color_index)
                *px = pal[*idx];
        }

        if (is_interleaved) {
            switch(pass) {
            default:
            case 0:
            case 1:
                y1 += 8;
                ptr += linesize * 8;
                if (y1 >= height) {
                    y1 = pass ? 2 : 4;
                    ptr = ptr1 + linesize * y1;
                    pass++;
                }
                break;
            case 2:
                y1 += 4;
                ptr += linesize * 4;
                if (y1 >= height) {
                    y1 = 1;
                    ptr = ptr1 + linesize;
                    pass++;
                }
                break;
            case 3:
                y1 += 2;
                ptr += linesize * 2;
                break;
            }
        } else {
            ptr += linesize;
        }
    }

 decode_tail:
    /* read the garbage data until end marker is found */
    ff_lzw_decode_tail(s->lzw);

    /* Graphic Control Extension's scope is single frame.
     * Remove its influence. */
    s->transparent_color_index = -1;
    s->gce_disposal = GCE_DISPOSAL_NONE;

    return 0;
}
Example #28
static int magy_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    MagicYUVContext *s = avctx->priv_data;
    ThreadFrame frame = { .f = data };
    AVFrame *p = data;
    GetByteContext gbyte;
    GetBitContext gbit;
    uint32_t first_offset, offset, next_offset, header_size, slice_width;
    int width, height, format, version, table_size;
    int ret, i, j;

    bytestream2_init(&gbyte, avpkt->data, avpkt->size);
    if (bytestream2_get_le32(&gbyte) != MKTAG('M', 'A', 'G', 'Y'))
        return AVERROR_INVALIDDATA;

    header_size = bytestream2_get_le32(&gbyte);
    if (header_size < 32 || header_size >= avpkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "header or packet too small %"PRIu32"\n", header_size);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_byte(&gbyte);
    if (version != 7) {
        avpriv_request_sample(avctx, "Version %d", version);
        return AVERROR_PATCHWELCOME;
    }

    s->hshift[1] =
    s->vshift[1] =
    s->hshift[2] =
    s->vshift[2] = 0;
    s->decorrelate = 0;
    s->max = 256;
    s->huff_build = huff_build;
    s->magy_decode_slice = magy_decode_slice;

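    /* Map the format byte to a pixel format; 10-bit variants also switch to the 10-bit Huffman builder and slice decoder. */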
    format = bytestream2_get_byte(&gbyte);
    switch (format) {
    case 0x65:
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        s->decorrelate = 1;
        break;
    case 0x66:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        s->decorrelate = 1;
        break;
    case 0x67:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        break;
    case 0x68:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        s->hshift[1] =
        s->hshift[2] = 1;
        break;
    case 0x69:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        break;
    case 0x6a:
        avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
        break;
    case 0x6b:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        break;
    case 0x6c:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        s->hshift[1] =
        s->hshift[2] = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    case 0x6d:
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        s->decorrelate = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    case 0x6e:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        s->decorrelate = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    case 0x73:
        avctx->pix_fmt = AV_PIX_FMT_GRAY10;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    default:
        avpriv_request_sample(avctx, "Format 0x%X", format);
        return AVERROR_PATCHWELCOME;
    }
    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    bytestream2_skip(&gbyte, 2);
    s->interlaced = !!(bytestream2_get_byte(&gbyte) & 2);
    bytestream2_skip(&gbyte, 3);

    width  = bytestream2_get_le32(&gbyte);
    height = bytestream2_get_le32(&gbyte);
    ret = ff_set_dimensions(avctx, width, height);
    if (ret < 0)
        return ret;

    slice_width = bytestream2_get_le32(&gbyte);
    if (slice_width != avctx->coded_width) {
        avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
        return AVERROR_PATCHWELCOME;
    }
    s->slice_height = bytestream2_get_le32(&gbyte);
    if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid slice height: %d\n", s->slice_height);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&gbyte, 4);

    s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
    if (s->nb_slices > INT_MAX / sizeof(Slice)) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of slices: %d\n", s->nb_slices);
        return AVERROR_INVALIDDATA;
    }

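    /* For each plane, turn the table of little-endian slice offsets into (start, size) pairs. */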
    for (i = 0; i < s->planes; i++) {
        av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
        if (!s->slices[i])
            return AVERROR(ENOMEM);

        offset = bytestream2_get_le32(&gbyte);
        if (offset >= avpkt->size - header_size)
            return AVERROR_INVALIDDATA;

        if (i == 0)
            first_offset = offset;

        for (j = 0; j < s->nb_slices - 1; j++) {
            s->slices[i][j].start = offset + header_size;

            next_offset = bytestream2_get_le32(&gbyte);
            if (next_offset <= offset || next_offset >= avpkt->size - header_size)
                return AVERROR_INVALIDDATA;

            s->slices[i][j].size = next_offset - offset;
            offset = next_offset;
        }

        s->slices[i][j].start = offset + header_size;
        s->slices[i][j].size  = avpkt->size - s->slices[i][j].start;
    }

    if (bytestream2_get_byte(&gbyte) != s->planes)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(&gbyte, s->nb_slices * s->planes);

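    /* Everything between the current position and the first slice payload is the Huffman table. */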
    table_size = header_size + first_offset - bytestream2_tell(&gbyte);
    if (table_size < 2)
        return AVERROR_INVALIDDATA;

    ret = init_get_bits8(&gbit, avpkt->data + bytestream2_tell(&gbyte), table_size);
    if (ret < 0)
        return ret;

    ret = build_huffman(avctx, &gbit, s->max);
    if (ret < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    s->buf = avpkt->data;
    s->p = p;
    avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);

    if (avctx->pix_fmt == AV_PIX_FMT_GBRP   ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP  ||
        avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP10) {
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
    }