Example #1
static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    int buf_size = avpkt->size;
    DPCMContext *s = avctx->priv_data;
    AVFrame *frame = data;
    int out = 0, ret;
    int predictor[2];
    int ch = 0;
    int stereo = avctx->channels - 1;
    int16_t *output_samples, *samples_end;
    GetByteContext gb;

    if (stereo && (buf_size & 1))
        buf_size--;
    bytestream2_init(&gb, avpkt->data, buf_size);

    /* calculate output size */
    switch(avctx->codec->id) {
    case AV_CODEC_ID_ROQ_DPCM:
        out = buf_size - 8;
        break;
    case AV_CODEC_ID_INTERPLAY_DPCM:
        out = buf_size - 6 - avctx->channels;
        break;
    case AV_CODEC_ID_XAN_DPCM:
        out = buf_size - 2 * avctx->channels;
        break;
    case AV_CODEC_ID_SOL_DPCM:
        if (avctx->codec_tag != 3)
            out = buf_size * 2;
        else
            out = buf_size;
        break;
    case AV_CODEC_ID_SDX2_DPCM:
        out = buf_size;
        break;
    }
    if (out <= 0) {
        av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
        return AVERROR(EINVAL);
    }
    if (out % avctx->channels) {
        av_log(avctx, AV_LOG_WARNING, "channels have differing number of samples\n");
    }

    /* get output buffer */
    frame->nb_samples = (out + avctx->channels - 1) / avctx->channels;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    output_samples = (int16_t *)frame->data[0];
    samples_end = output_samples + out;

    switch(avctx->codec->id) {

    case AV_CODEC_ID_ROQ_DPCM:
        bytestream2_skipu(&gb, 6);

        if (stereo) {
            predictor[1] = sign_extend(bytestream2_get_byteu(&gb) << 8, 16);
            predictor[0] = sign_extend(bytestream2_get_byteu(&gb) << 8, 16);
        } else {
            predictor[0] = sign_extend(bytestream2_get_le16u(&gb), 16);
        }

        /* decode the samples */
        while (output_samples < samples_end) {
            predictor[ch] += s->square_array[bytestream2_get_byteu(&gb)];
            predictor[ch]  = av_clip_int16(predictor[ch]);
            *output_samples++ = predictor[ch];

            /* toggle channel */
            ch ^= stereo;
        }
        break;

    case AV_CODEC_ID_INTERPLAY_DPCM:
        bytestream2_skipu(&gb, 6);  /* skip over the stream mask and stream length */

        for (ch = 0; ch < avctx->channels; ch++) {
            predictor[ch] = sign_extend(bytestream2_get_le16u(&gb), 16);
            *output_samples++ = predictor[ch];
        }

        ch = 0;
        while (output_samples < samples_end) {
            predictor[ch] += interplay_delta_table[bytestream2_get_byteu(&gb)];
            predictor[ch]  = av_clip_int16(predictor[ch]);
            *output_samples++ = predictor[ch];

            /* toggle channel */
            ch ^= stereo;
        }
        break;

    case AV_CODEC_ID_XAN_DPCM:
    {
        int shift[2] = { 4, 4 };

        for (ch = 0; ch < avctx->channels; ch++)
            predictor[ch] = sign_extend(bytestream2_get_le16u(&gb), 16);

        ch = 0;
        while (output_samples < samples_end) {
            int diff = bytestream2_get_byteu(&gb);
            int n    = diff & 3;

            if (n == 3)
                shift[ch]++;
            else
                shift[ch] -= (2 * n);
            diff = sign_extend((diff &~ 3) << 8, 16);

            /* saturate the shifter to a lower limit of 0 */
            if (shift[ch] < 0)
                shift[ch] = 0;

            diff >>= shift[ch];
            predictor[ch] += diff;

            predictor[ch] = av_clip_int16(predictor[ch]);
            *output_samples++ = predictor[ch];

            /* toggle channel */
            ch ^= stereo;
        }
        break;
    }
    case AV_CODEC_ID_SOL_DPCM:
        if (avctx->codec_tag != 3) {
            uint8_t *output_samples_u8 = frame->data[0],
                    *samples_end_u8 = output_samples_u8 + out;
            while (output_samples_u8 < samples_end_u8) {
                int n = bytestream2_get_byteu(&gb);

                s->sample[0] += s->sol_table[n >> 4];
                s->sample[0]  = av_clip_uint8(s->sample[0]);
                *output_samples_u8++ = s->sample[0];

                s->sample[stereo] += s->sol_table[n & 0x0F];
                s->sample[stereo]  = av_clip_uint8(s->sample[stereo]);
                *output_samples_u8++ = s->sample[stereo];
            }
        } else {
            while (output_samples < samples_end) {
Example #2
static int submit_packet(PerThreadContext *p, AVPacket *avpkt)
{
    FrameThreadContext *fctx = p->parent;
    PerThreadContext *prev_thread = fctx->prev_thread;
    const AVCodec *codec = p->avctx->codec;

    if (!avpkt->size && !(codec->capabilities & CODEC_CAP_DELAY)) return 0;

    pthread_mutex_lock(&p->mutex);

    release_delayed_buffers(p);

    if (prev_thread) {
        int err;
        if (prev_thread->state == STATE_SETTING_UP) {
            pthread_mutex_lock(&prev_thread->progress_mutex);
            while (prev_thread->state == STATE_SETTING_UP)
                pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
            pthread_mutex_unlock(&prev_thread->progress_mutex);
        }

        err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
        if (err) {
            pthread_mutex_unlock(&p->mutex);
            return err;
        }
    }

    av_buffer_unref(&p->avpkt.buf);
    p->avpkt = *avpkt;
    if (avpkt->buf)
        p->avpkt.buf = av_buffer_ref(avpkt->buf);
    else {
        av_fast_malloc(&p->buf, &p->allocated_buf_size, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
        p->avpkt.data = p->buf;
        memcpy(p->buf, avpkt->data, avpkt->size);
        memset(p->buf + avpkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    }

    p->state = STATE_SETTING_UP;
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);

    /*
     * If the client doesn't have a thread-safe get_buffer(),
     * then decoding threads call back to the main thread,
     * and it calls back to the client here.
     */

FF_DISABLE_DEPRECATION_WARNINGS
    if (!p->avctx->thread_safe_callbacks && (
#if FF_API_GET_BUFFER
         p->avctx->get_buffer ||
#endif
         p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
FF_ENABLE_DEPRECATION_WARNINGS
        while (p->state != STATE_SETUP_FINISHED && p->state != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (p->state == STATE_SETTING_UP)
                pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

            if (p->state == STATE_GET_BUFFER) {
                p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
                p->state  = STATE_SETTING_UP;
                pthread_cond_signal(&p->progress_cond);
            }
            pthread_mutex_unlock(&p->progress_mutex);
        }
    }

    fctx->prev_thread = p;
    fctx->next_decoding++;

    return 0;
}
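One detail of submit_packet worth underlining: both waits above re-test their predicate in a while loop rather than an if, because pthread_cond_wait() may return spuriously. A minimal, self-contained sketch of that pattern (the names here are illustrative, not taken from this file):

#include <pthread.h>

static pthread_mutex_t mtx   = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond  = PTHREAD_COND_INITIALIZER;
static int             state = 0;   /* illustrative shared state */

static void wait_while_setting_up(int setting_up)
{
    pthread_mutex_lock(&mtx);
    while (state == setting_up)          /* a while, never a plain if */
        pthread_cond_wait(&cond, &mtx);  /* atomically unlocks, sleeps, relocks */
    pthread_mutex_unlock(&mtx);
}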
Example #3
static int libschroedinger_decode_frame(AVCodecContext *avccontext,
                                        void *data, int *got_frame,
                                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int64_t pts  = avpkt->pts;
    SchroTag *tag;

    SchroDecoderParams *p_schro_params = avccontext->priv_data;
    SchroDecoder *decoder = p_schro_params->decoder;
    SchroBuffer *enc_buf;
    SchroFrame* frame;
    int state;
    int go = 1;
    int outer = 1;
    SchroParseUnitContext parse_ctx;
    LibSchroFrameContext *framewithpts = NULL;

    *got_frame = 0;

    parse_context_init(&parse_ctx, buf, buf_size);
    if (!buf_size) {
        if (!p_schro_params->eos_signalled) {
            state = schro_decoder_push_end_of_stream(decoder);
            p_schro_params->eos_signalled = 1;
        }
    }

    /* Loop through all the individual parse units in the input buffer */
    do {
        if ((enc_buf = find_next_parse_unit(&parse_ctx))) {
            /* Set Schrotag with the pts to be recovered after decoding*/
            enc_buf->tag = schro_tag_new(av_malloc(sizeof(int64_t)), av_free);
            if (!enc_buf->tag->value) {
                av_log(avccontext, AV_LOG_ERROR, "Unable to allocate SchroTag\n");
                return AVERROR(ENOMEM);
            }
            AV_WN(64, enc_buf->tag->value, pts);
            /* Push buffer into decoder. */
            if (SCHRO_PARSE_CODE_IS_PICTURE(enc_buf->data[4]) &&
                SCHRO_PARSE_CODE_NUM_REFS(enc_buf->data[4]) > 0)
                avccontext->has_b_frames = 1;
            state = schro_decoder_push(decoder, enc_buf);
            if (state == SCHRO_DECODER_FIRST_ACCESS_UNIT)
                libschroedinger_handle_first_access_unit(avccontext);
            go = 1;
        } else
            outer = 0;

        while (go) {
            /* Parse data and process result. */
            state = schro_decoder_wait(decoder);
            switch (state) {
            case SCHRO_DECODER_FIRST_ACCESS_UNIT:
                libschroedinger_handle_first_access_unit(avccontext);
                break;

            case SCHRO_DECODER_NEED_BITS:
                /* Need more input data - stop iterating over what we have. */
                go = 0;
                break;

            case SCHRO_DECODER_NEED_FRAME:
                /* Decoder needs a frame - create one and push it in. */
                frame = ff_create_schro_frame(avccontext,
                                              p_schro_params->frame_format);
                schro_decoder_add_output_picture(decoder, frame);
                break;

            case SCHRO_DECODER_OK:
                /* Pull a frame out of the decoder. */
                tag   = schro_decoder_get_picture_tag(decoder);
                frame = schro_decoder_pull(decoder);

                if (frame) {
                    /* Add relation between schroframe and pts. */
                    framewithpts = av_malloc(sizeof(LibSchroFrameContext));
                    if (!framewithpts) {
                        av_log(avccontext, AV_LOG_ERROR, "Unable to allocate FrameWithPts\n");
                        return AVERROR(ENOMEM);
                    }
                    framewithpts->frame = frame;
                    framewithpts->pts   = AV_RN64(tag->value);
                    ff_schro_queue_push_back(&p_schro_params->dec_frame_queue,
                                             framewithpts);
                }
                break;
            case SCHRO_DECODER_EOS:
                go = 0;
                p_schro_params->eos_pulled = 1;
                schro_decoder_reset(decoder);
                outer = 0;
                break;

            case SCHRO_DECODER_ERROR:
                return -1;
            }
        }
    } while (outer);

    /* Grab next frame to be returned from the top of the queue. */
    framewithpts = ff_schro_queue_pop(&p_schro_params->dec_frame_queue);

    if (framewithpts && framewithpts->frame) {
        if (p_schro_params->dec_frame.data[0])
            avccontext->release_buffer(avccontext, &p_schro_params->dec_frame);
        if (ff_get_buffer(avccontext, &p_schro_params->dec_frame) < 0) {
            av_log(avccontext, AV_LOG_ERROR, "Unable to allocate buffer\n");
            return AVERROR(ENOMEM);
        }

        memcpy(p_schro_params->dec_frame.data[0],
               framewithpts->frame->components[0].data,
               framewithpts->frame->components[0].length);

        memcpy(p_schro_params->dec_frame.data[1],
               framewithpts->frame->components[1].data,
               framewithpts->frame->components[1].length);

        memcpy(p_schro_params->dec_frame.data[2],
               framewithpts->frame->components[2].data,
               framewithpts->frame->components[2].length);

        /* Fill frame with current buffer data from Schroedinger. */
        p_schro_params->dec_frame.format  = -1; /* Unknown -1 */
        p_schro_params->dec_frame.width   = framewithpts->frame->width;
        p_schro_params->dec_frame.height  = framewithpts->frame->height;
        p_schro_params->dec_frame.pkt_pts = framewithpts->pts;
        p_schro_params->dec_frame.linesize[0] = framewithpts->frame->components[0].stride;
        p_schro_params->dec_frame.linesize[1] = framewithpts->frame->components[1].stride;
        p_schro_params->dec_frame.linesize[2] = framewithpts->frame->components[2].stride;

        *(AVFrame*)data = p_schro_params->dec_frame;
        *got_frame      = 1;

        /* Now free the frame resources. */
        libschroedinger_decode_frame_free(framewithpts->frame);
        av_free(framewithpts);
    } else {
        data       = NULL;
        *got_frame = 0;
    }
    return buf_size;
}
Example #4
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    GifState *s = avctx->priv_data;
    int ret;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

    s->frame->pts     = avpkt->pts;
    s->frame->pkt_pts = avpkt->pts;
    s->frame->pkt_dts = avpkt->dts;
    av_frame_set_pkt_duration(s->frame, avpkt->duration);

    if (avpkt->size >= 6) {
        s->keyframe = memcmp(avpkt->data, gif87a_sig, 6) == 0 ||
                      memcmp(avpkt->data, gif89a_sig, 6) == 0;
    } else {
        s->keyframe = 0;
    }

    if (s->keyframe) {
        s->keyframe_ok = 0;
        s->gce_prev_disposal = GCE_DISPOSAL_NONE;
        if ((ret = gif_read_header1(s)) < 0)
            return ret;

        if ((ret = ff_set_dimensions(avctx, s->screen_width, s->screen_height)) < 0)
            return ret;

        av_frame_unref(s->frame);
        if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
            return ret;

        av_fast_malloc(&s->idx_line, &s->idx_line_size, s->screen_width);
        if (!s->idx_line)
            return AVERROR(ENOMEM);

        s->frame->pict_type = AV_PICTURE_TYPE_I;
        s->frame->key_frame = 1;
        s->keyframe_ok = 1;
    } else {
        if (!s->keyframe_ok) {
            av_log(avctx, AV_LOG_ERROR, "cannot decode frame without keyframe\n");
            return AVERROR_INVALIDDATA;
        }

        if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
            return ret;

        s->frame->pict_type = AV_PICTURE_TYPE_P;
        s->frame->key_frame = 0;
    }

    ret = gif_parse_next_image(s, s->frame);
    if (ret < 0)
        return ret;

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;
    *got_frame = 1;

    return bytestream2_tell(&s->gb);
}
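For context, a decode callback of this shape is not called directly; it is exported through an AVCodec table. A minimal sketch of what the registration typically looks like for this vintage of the API (the init/close helper names are assumptions, not verified against gifdec.c):

AVCodec ff_gif_decoder = {
    .name           = "gif",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_GIF,
    .priv_data_size = sizeof(GifState),
    .init           = gif_decode_init,   /* assumed helper */
    .close          = gif_decode_close,  /* assumed helper */
    .decode         = gif_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
};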
Example #5
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    SgiState *s = avctx->priv_data;
    AVFrame *p = data;
    unsigned int dimension, rle;
    int ret = 0;
    uint8_t *out_buf, *out_end;

    bytestream2_init(&s->g, avpkt->data, avpkt->size);
    if (bytestream2_get_bytes_left(&s->g) < SGI_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "buf_size too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    /* Test for SGI magic. */
    if (bytestream2_get_be16u(&s->g) != SGI_MAGIC) {
        av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
        return AVERROR_INVALIDDATA;
    }

    rle                  = bytestream2_get_byteu(&s->g);
    s->bytes_per_channel = bytestream2_get_byteu(&s->g);
    dimension            = bytestream2_get_be16u(&s->g);
    s->width             = bytestream2_get_be16u(&s->g);
    s->height            = bytestream2_get_be16u(&s->g);
    s->depth             = bytestream2_get_be16u(&s->g);

    if (s->bytes_per_channel != 1 && (s->bytes_per_channel != 2 || rle)) {
        av_log(avctx, AV_LOG_ERROR, "wrong channel number\n");
        return AVERROR_INVALIDDATA;
    }

    /* Check for supported image dimensions. */
    if (dimension != 2 && dimension != 3) {
        av_log(avctx, AV_LOG_ERROR, "wrong dimension number\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->depth == SGI_GRAYSCALE) {
        avctx->pix_fmt = s->bytes_per_channel == 2 ? AV_PIX_FMT_GRAY16BE : AV_PIX_FMT_GRAY8;
    } else if (s->depth == SGI_RGB) {
        avctx->pix_fmt = s->bytes_per_channel == 2 ? AV_PIX_FMT_RGB48BE : AV_PIX_FMT_RGB24;
    } else if (s->depth == SGI_RGBA) {
        avctx->pix_fmt = s->bytes_per_channel == 2 ? AV_PIX_FMT_RGBA64BE : AV_PIX_FMT_RGBA;
    } else {
        av_log(avctx, AV_LOG_ERROR, "wrong picture format\n");
        return AVERROR_INVALIDDATA;
    }

    if (av_image_check_size(s->width, s->height, 0, avctx))
        return AVERROR_INVALIDDATA;
    avcodec_set_dimensions(avctx, s->width, s->height);

    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;
    out_buf = p->data[0];

    out_end = out_buf + p->linesize[0] * s->height;

    s->linesize = p->linesize[0];

    /* Skip header. */
    bytestream2_seek(&s->g, SGI_HEADER_SIZE, SEEK_SET);
    if (rle) {
        ret = read_rle_sgi(out_end, s);
    } else {
        ret = read_uncompressed_sgi(out_buf, s);
    }
    if (ret)
        return ret;

    *got_frame = 1;
    return avpkt->size;
}
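The header parsing above shows the bytestream2 convention used throughout these decoders: one explicit bytestream2_get_bytes_left() check up front, after which the unchecked *_u() getters are safe. A minimal sketch of the pattern under that assumption (magic value and field layout are illustrative):

#include "libavcodec/bytestream.h"

static int parse_header_sketch(const uint8_t *data, int size,
                               unsigned *w, unsigned *h)
{
    GetByteContext g;

    bytestream2_init(&g, data, size);
    if (bytestream2_get_bytes_left(&g) < 6)      /* checked once up front... */
        return AVERROR_INVALIDDATA;
    if (bytestream2_get_be16u(&g) != 0x01DA)     /* ...then 'u' reads are safe */
        return AVERROR_INVALIDDATA;
    *w = bytestream2_get_be16u(&g);
    *h = bytestream2_get_be16u(&g);
    return 0;
}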
Example #6
static int pnm_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf   = avpkt->data;
    int buf_size         = avpkt->size;
    PNMContext * const s = avctx->priv_data;
    AVFrame * const p    = data;
    int i, j, k, n, linesize, h, upgrade = 0, is_mono = 0;
    unsigned char *ptr;
    int components, sample_len, ret;

    s->bytestream_start =
    s->bytestream       = (uint8_t *)buf;
    s->bytestream_end   = (uint8_t *)buf + buf_size;

    if ((ret = ff_pnm_decode_header(avctx, s)) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;
    avctx->bits_per_raw_sample = av_log2(s->maxval) + 1;

    switch (avctx->pix_fmt) {
    default:
        return AVERROR(EINVAL);
    case AV_PIX_FMT_RGBA64:
        n = avctx->width * 8;
        components=4;
        sample_len=16;
        if (s->maxval < 65535)
            upgrade = 2;
        goto do_read;
    case AV_PIX_FMT_RGB48:
        n = avctx->width * 6;
        components=3;
        sample_len=16;
        if (s->maxval < 65535)
            upgrade = 2;
        goto do_read;
    case AV_PIX_FMT_RGBA:
        n = avctx->width * 4;
        components=4;
        sample_len=8;
        goto do_read;
    case AV_PIX_FMT_RGB24:
        n = avctx->width * 3;
        components=3;
        sample_len=8;
        if (s->maxval < 255)
            upgrade = 1;
        goto do_read;
    case AV_PIX_FMT_GRAY8:
        n = avctx->width;
        components=1;
        sample_len=8;
        if (s->maxval < 255)
            upgrade = 1;
        goto do_read;
    case AV_PIX_FMT_GRAY8A:
        n = avctx->width * 2;
        components=2;
        sample_len=8;
        goto do_read;
    case AV_PIX_FMT_GRAY16:
        n = avctx->width * 2;
        components=1;
        sample_len=16;
        if (s->maxval < 65535)
            upgrade = 2;
        goto do_read;
    case AV_PIX_FMT_YA16:
        n =  avctx->width * 4;
        components=2;
        sample_len=16;
        if (s->maxval < 65535)
            upgrade = 2;
        goto do_read;
    case AV_PIX_FMT_MONOWHITE:
    case AV_PIX_FMT_MONOBLACK:
        n = (avctx->width + 7) >> 3;
        components=1;
        sample_len=1;
        is_mono = 1;
    do_read:
        ptr      = p->data[0];
        linesize = p->linesize[0];
        if (n * avctx->height > s->bytestream_end - s->bytestream)
            return AVERROR_INVALIDDATA;
        if(s->type < 4 || (is_mono && s->type==7)){
            for (i=0; i<avctx->height; i++) {
                PutBitContext pb;
                init_put_bits(&pb, ptr, linesize);
                for(j=0; j<avctx->width * components; j++){
                    unsigned int c=0;
                    int v=0;
                    if(s->type < 4)
                    while(s->bytestream < s->bytestream_end && (*s->bytestream < '0' || *s->bytestream > '9' ))
                        s->bytestream++;
                    if(s->bytestream >= s->bytestream_end)
                        return AVERROR_INVALIDDATA;
                    if (is_mono) {
                        /* read a single digit */
                        v = (*s->bytestream++)&1;
                    } else {
                        /* read a sequence of digits */
                        for (k = 0; k < 5 && c <= 9; k += 1) {
                            v = 10*v + c;
                            c = (*s->bytestream++) - '0';
                        }
                        if (v > s->maxval) {
                            av_log(avctx, AV_LOG_ERROR, "value %d larger than maxval %d\n", v, s->maxval);
                            return AVERROR_INVALIDDATA;
                        }
                    }
                    if (sample_len == 16) {
                        ((uint16_t*)ptr)[j] = (((1<<sample_len)-1)*v + (s->maxval>>1))/s->maxval;
                    } else
                        put_bits(&pb, sample_len, (((1<<sample_len)-1)*v + (s->maxval>>1))/s->maxval);
                }
                if (sample_len != 16)
                    flush_put_bits(&pb);
                ptr+= linesize;
            }
        }else{
Example #7
static int cook_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    COOKContext *q = avctx->priv_data;
    float **samples = NULL;
    int i, ret;
    int offset = 0;
    int chidx = 0;

    if (buf_size < avctx->block_align)
        return buf_size;

    /* get output buffer */
    if (q->discarded_packets >= 2) {
        frame->nb_samples = q->samples_per_channel;
        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
            return ret;
        samples = (float **)frame->extended_data;
    }

    /* estimate subpacket sizes */
    q->subpacket[0].size = avctx->block_align;

    for (i = 1; i < q->num_subpackets; i++) {
        q->subpacket[i].size = 2 * buf[avctx->block_align - q->num_subpackets + i];
        q->subpacket[0].size -= q->subpacket[i].size + 1;
        if (q->subpacket[0].size < 0) {
            av_log(avctx, AV_LOG_DEBUG,
                   "frame subpacket size total > avctx->block_align!\n");
            return AVERROR_INVALIDDATA;
        }
    }

    /* decode subpackets */
    for (i = 0; i < q->num_subpackets; i++) {
        q->subpacket[i].bits_per_subpacket = (q->subpacket[i].size * 8) >>
                                              q->subpacket[i].bits_per_subpdiv;
        q->subpacket[i].ch_idx = chidx;
        av_log(avctx, AV_LOG_DEBUG,
               "subpacket[%i] size %i js %i %i block_align %i\n",
               i, q->subpacket[i].size, q->subpacket[i].joint_stereo, offset,
               avctx->block_align);

        if ((ret = decode_subpacket(q, &q->subpacket[i], buf + offset, samples)) < 0)
            return ret;
        offset += q->subpacket[i].size;
        chidx += q->subpacket[i].num_channels;
        av_log(avctx, AV_LOG_DEBUG, "subpacket[%i] %i %i\n",
               i, q->subpacket[i].size * 8, get_bits_count(&q->gb));
    }

    /* Discard the first two frames: no valid audio. */
    if (q->discarded_packets < 2) {
        q->discarded_packets++;
        *got_frame_ptr = 0;
        return avctx->block_align;
    }

    *got_frame_ptr = 1;

    return avctx->block_align;
}
Example #8
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint32_t *src = (const uint32_t *)avpkt->data;
    AVFrame *pic        = data;
    int width           = avctx->width;
    int y               = 0;
    uint16_t *ydst, *udst, *vdst, *yend;
    int ret;

    if (avpkt->size < avctx->width * avctx->height * 8 / 3) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small\n");
        return AVERROR_INVALIDDATA;
    }

    if (avpkt->size > avctx->width * avctx->height * 8 / 3) {
        avpriv_request_sample(avctx, "(Probably) padded data");
    }

    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    ydst = (uint16_t *)pic->data[0];
    udst = (uint16_t *)pic->data[1];
    vdst = (uint16_t *)pic->data[2];
    yend = ydst + width;
    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;

    for (;;) {
        uint32_t v = av_be2ne32(*src++);
        *udst++ = (v >> 16) & 0xFFC0;
        *ydst++ = (v >> 6 ) & 0xFFC0;
        *vdst++ = (v << 4 ) & 0xFFC0;

        v       = av_be2ne32(*src++);
        *ydst++ = (v >> 16) & 0xFFC0;

        if (ydst >= yend) {
            ydst += pic->linesize[0] / 2 - width;
            udst += pic->linesize[1] / 2 - width / 2;
            vdst += pic->linesize[2] / 2 - width / 2;
            yend = ydst + width;
            if (++y >= avctx->height)
                break;
        }

        *udst++ = (v >> 6 ) & 0xFFC0;
        *ydst++ = (v << 4 ) & 0xFFC0;

        v = av_be2ne32(*src++);
        *vdst++ = (v >> 16) & 0xFFC0;
        *ydst++ = (v >> 6 ) & 0xFFC0;

        if (ydst >= yend) {
            ydst += pic->linesize[0] / 2 - width;
            udst += pic->linesize[1] / 2 - width / 2;
            vdst += pic->linesize[2] / 2 - width / 2;
            yend  = ydst + width;
            if (++y >= avctx->height)
                break;
        }

        *udst++ = (v << 4 ) & 0xFFC0;

        v = av_be2ne32(*src++);
        *ydst++ = (v >> 16) & 0xFFC0;
        *vdst++ = (v >> 6 ) & 0xFFC0;
        *ydst++ = (v << 4 ) & 0xFFC0;
        if (ydst >= yend) {
            ydst += pic->linesize[0] / 2 - width;
            udst += pic->linesize[1] / 2 - width / 2;
            vdst += pic->linesize[2] / 2 - width / 2;
            yend  = ydst + width;
            if (++y >= avctx->height)
                break;
        }
    }

    *got_frame = 1;

    return avpkt->size;
}
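A note on where the 8/3 in the size checks above comes from: each 32-bit big-endian word carries three 10-bit components, and one pass through the loop body consumes four words while emitting 6 luma and 3+3 chroma samples, i.e. 6 pixels of 4:2:2 per 16 bytes. That is 16/6 = 8/3 bytes per pixel, hence the width * height * 8 / 3 bound.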
Example #9
static int xwd_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    AVFrame *p = data;
    const uint8_t *buf = avpkt->data;
    int i, ret, buf_size = avpkt->size;
    uint32_t version, header_size, vclass, ncolors;
    uint32_t xoffset, be, bpp, lsize, rsize;
    uint32_t pixformat, pixdepth, bunit, bitorder, bpad;
    uint32_t rgb[3];
    uint8_t *ptr;
    GetByteContext gb;

    if (buf_size < XWD_HEADER_SIZE)
        return AVERROR_INVALIDDATA;

    bytestream2_init(&gb, buf, buf_size);
    header_size = bytestream2_get_be32u(&gb);

    version = bytestream2_get_be32u(&gb);
    if (version != XWD_VERSION) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version\n");
        return AVERROR_INVALIDDATA;
    }

    if (buf_size < header_size || header_size < XWD_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "invalid header size\n");
        return AVERROR_INVALIDDATA;
    }

    pixformat     = bytestream2_get_be32u(&gb);
    pixdepth      = bytestream2_get_be32u(&gb);
    avctx->width  = bytestream2_get_be32u(&gb);
    avctx->height = bytestream2_get_be32u(&gb);
    xoffset       = bytestream2_get_be32u(&gb);
    be            = bytestream2_get_be32u(&gb);
    bunit         = bytestream2_get_be32u(&gb);
    bitorder      = bytestream2_get_be32u(&gb);
    bpad          = bytestream2_get_be32u(&gb);
    bpp           = bytestream2_get_be32u(&gb);
    lsize         = bytestream2_get_be32u(&gb);
    vclass        = bytestream2_get_be32u(&gb);
    rgb[0]        = bytestream2_get_be32u(&gb);
    rgb[1]        = bytestream2_get_be32u(&gb);
    rgb[2]        = bytestream2_get_be32u(&gb);
    bytestream2_skipu(&gb, 8);
    ncolors       = bytestream2_get_be32u(&gb);
    bytestream2_skipu(&gb, header_size - (XWD_HEADER_SIZE - 20));

    av_log(avctx, AV_LOG_DEBUG,
           "pixformat %"PRIu32", pixdepth %"PRIu32", bunit %"PRIu32", bitorder %"PRIu32", bpad %"PRIu32"\n",
           pixformat, pixdepth, bunit, bitorder, bpad);
    av_log(avctx, AV_LOG_DEBUG,
           "vclass %"PRIu32", ncolors %"PRIu32", bpp %"PRIu32", be %"PRIu32", lsize %"PRIu32", xoffset %"PRIu32"\n",
           vclass, ncolors, bpp, be, lsize, xoffset);
    av_log(avctx, AV_LOG_DEBUG,
           "red %0"PRIx32", green %0"PRIx32", blue %0"PRIx32"\n",
           rgb[0], rgb[1], rgb[2]);

    if (pixformat > XWD_Z_PIXMAP) {
        av_log(avctx, AV_LOG_ERROR, "invalid pixmap format\n");
        return AVERROR_INVALIDDATA;
    }

    if (pixdepth == 0 || pixdepth > 32) {
        av_log(avctx, AV_LOG_ERROR, "invalid pixmap depth\n");
        return AVERROR_INVALIDDATA;
    }

    if (xoffset) {
        avpriv_request_sample(avctx, "xoffset %"PRIu32"", xoffset);
        return AVERROR_PATCHWELCOME;
    }

    if (be > 1) {
        av_log(avctx, AV_LOG_ERROR, "invalid byte order\n");
        return AVERROR_INVALIDDATA;
    }

    if (bitorder > 1) {
        av_log(avctx, AV_LOG_ERROR, "invalid bitmap bit order\n");
        return AVERROR_INVALIDDATA;
    }

    if (bunit != 8 && bunit != 16 && bunit != 32) {
        av_log(avctx, AV_LOG_ERROR, "invalid bitmap unit\n");
        return AVERROR_INVALIDDATA;
    }

    if (bpad != 8 && bpad != 16 && bpad != 32) {
        av_log(avctx, AV_LOG_ERROR, "invalid bitmap scan-line pad\n");
        return AVERROR_INVALIDDATA;
    }

    if (bpp == 0 || bpp > 32) {
        av_log(avctx, AV_LOG_ERROR, "invalid bits per pixel\n");
        return AVERROR_INVALIDDATA;
    }

    if (ncolors > 256) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of entries in colormap\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret = av_image_check_size(avctx->width, avctx->height, 0, NULL)) < 0)
        return ret;

    rsize = FFALIGN(avctx->width * bpp, bpad) / 8;
    if (lsize < rsize) {
        av_log(avctx, AV_LOG_ERROR, "invalid bytes per scan-line\n");
        return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_bytes_left(&gb) < ncolors * XWD_CMAP_SIZE + (uint64_t)avctx->height * lsize) {
        av_log(avctx, AV_LOG_ERROR, "input buffer too small\n");
        return AVERROR_INVALIDDATA;
    }

    if (pixformat != XWD_Z_PIXMAP) {
        avpriv_report_missing_feature(avctx, "Pixmap format %"PRIu32, pixformat);
        return AVERROR_PATCHWELCOME;
    }

    avctx->pix_fmt = AV_PIX_FMT_NONE;
    switch (vclass) {
    case XWD_STATIC_GRAY:
    case XWD_GRAY_SCALE:
        if (bpp != 1 && bpp != 8)
            return AVERROR_INVALIDDATA;
        if (bpp == 1 && pixdepth == 1) {
            avctx->pix_fmt = AV_PIX_FMT_MONOWHITE;
        } else if (bpp == 8 && pixdepth == 8) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        }
        break;
    case XWD_STATIC_COLOR:
    case XWD_PSEUDO_COLOR:
        if (bpp == 8)
            avctx->pix_fmt = AV_PIX_FMT_PAL8;
        break;
    case XWD_TRUE_COLOR:
    case XWD_DIRECT_COLOR:
        if (bpp != 16 && bpp != 24 && bpp != 32)
            return AVERROR_INVALIDDATA;
        if (bpp == 16 && pixdepth == 15) {
            if (rgb[0] == 0x7C00 && rgb[1] == 0x3E0 && rgb[2] == 0x1F)
                avctx->pix_fmt = be ? AV_PIX_FMT_RGB555BE : AV_PIX_FMT_RGB555LE;
            else if (rgb[0] == 0x1F && rgb[1] == 0x3E0 && rgb[2] == 0x7C00)
                avctx->pix_fmt = be ? AV_PIX_FMT_BGR555BE : AV_PIX_FMT_BGR555LE;
        } else if (bpp == 16 && pixdepth == 16) {
            if (rgb[0] == 0xF800 && rgb[1] == 0x7E0 && rgb[2] == 0x1F)
                avctx->pix_fmt = be ? AV_PIX_FMT_RGB565BE : AV_PIX_FMT_RGB565LE;
            else if (rgb[0] == 0x1F && rgb[1] == 0x7E0 && rgb[2] == 0xF800)
                avctx->pix_fmt = be ? AV_PIX_FMT_BGR565BE : AV_PIX_FMT_BGR565LE;
        } else if (bpp == 24) {
            if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF)
                avctx->pix_fmt = be ? AV_PIX_FMT_RGB24 : AV_PIX_FMT_BGR24;
            else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000)
                avctx->pix_fmt = be ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24;
        } else if (bpp == 32) {
            if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF)
                avctx->pix_fmt = be ? AV_PIX_FMT_ARGB : AV_PIX_FMT_BGRA;
            else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000)
                avctx->pix_fmt = be ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA;
        }
        bytestream2_skipu(&gb, ncolors * XWD_CMAP_SIZE);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "invalid visual class\n");
        return AVERROR_INVALIDDATA;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
        avpriv_request_sample(avctx,
                              "Unknown file: bpp %"PRIu32", pixdepth %"PRIu32", vclass %"PRIu32"",
                              bpp, pixdepth, vclass);
        return AVERROR_PATCHWELCOME;
    }

    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;

    p->key_frame = 1;
    p->pict_type = AV_PICTURE_TYPE_I;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        uint32_t *dst = (uint32_t *)p->data[1];
        uint8_t red, green, blue;

        for (i = 0; i < ncolors; i++) {

            bytestream2_skipu(&gb, 4); // skip colormap entry number
            red    = bytestream2_get_byteu(&gb);
            bytestream2_skipu(&gb, 1);
            green  = bytestream2_get_byteu(&gb);
            bytestream2_skipu(&gb, 1);
            blue   = bytestream2_get_byteu(&gb);
            bytestream2_skipu(&gb, 3); // skip bitmask flag and padding

            dst[i] = 0xFFU << 24 | red << 16 | green << 8 | blue;
        }
    }

    ptr = p->data[0];
    for (i = 0; i < avctx->height; i++) {
        bytestream2_get_bufferu(&gb, ptr, rsize);
        bytestream2_skipu(&gb, lsize - rsize);
        ptr += p->linesize[0];
    }

    *got_frame       = 1;

    return buf_size;
}
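On the scan-line math above: rsize = FFALIGN(width * bpp, bpad) / 8 is the payload bytes per line once each row is padded to the declared pad unit. For example, width 13 at bpp 1 with bpad 32 gives FFALIGN(13, 32) / 8 = 4 bytes per line. The stored line size lsize must be at least rsize, and the copy loop reads rsize bytes then skips the remaining lsize - rsize bytes per row.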
Example #10
File: 8bps.c Project: 0MasteR0/xbmc
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    EightBpsContext * const c = avctx->priv_data;
    const unsigned char *encoded = buf;
    unsigned char *pixptr, *pixptr_end;
    unsigned int height = avctx->height; // Real image height
    unsigned int dlen, p, row;
    const unsigned char *lp, *dp;
    unsigned char count;
    unsigned int planes     = c->planes;
    unsigned char *planemap = c->planemap;
    int ret;

    if (c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    c->pic.reference    = 0;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID;
    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* Set data pointer after line lengths */
    dp = encoded + planes * (height << 1);

    for (p = 0; p < planes; p++) {
        /* Lines length pointer for this plane */
        lp = encoded + p * (height << 1);

        /* Decode a plane */
        for (row = 0; row < height; row++) {
            pixptr = c->pic.data[0] + row * c->pic.linesize[0] + planemap[p];
            pixptr_end = pixptr + c->pic.linesize[0];
            if(lp - encoded + row*2 + 1 >= buf_size)
                return -1;
            dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
            /* Decode a row of this plane */
            while (dlen > 0) {
                if (dp + 1 >= buf + buf_size)
                    return AVERROR_INVALIDDATA;
                if ((count = *dp++) <= 127) {
                    count++;
                    dlen -= count + 1;
                    if (pixptr + count * planes > pixptr_end)
                        break;
                    if (dp + count > buf + buf_size)
                        return AVERROR_INVALIDDATA;
                    while (count--) {
                        *pixptr = *dp++;
                        pixptr += planes;
                    }
                } else {
                    count = 257 - count;
                    if (pixptr + count * planes > pixptr_end)
                        break;
                    while (count--) {
                        *pixptr = *dp;
                        pixptr += planes;
                    }
                    dp++;
                    dlen -= 2;
                }
            }
        }
    }

    if (avctx->bits_per_coded_sample <= 8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt,
                                                     AV_PKT_DATA_PALETTE,
                                                     NULL);
        if (pal) {
            c->pic.palette_has_changed = 1;
            memcpy(c->pal, pal, AVPALETTE_SIZE);
        }

        memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
    }

    *got_frame = 1;
    *(AVFrame*)data = c->pic;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
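The inner loop above is PackBits-style RLE with one twist: output pixels are strided by the plane count so each decoded byte lands in its interleaved position. A self-contained sketch of the plain byte-oriented scheme, without the plane interleave (hypothetical helper, not part of 8bps.c):

#include <stdint.h>

/* PackBits-style RLE: n <= 127 is a literal run of n + 1 bytes,
 * n >= 128 repeats the following byte 257 - n times. */
static int unpack_rle_row(const uint8_t *src, int src_len,
                          uint8_t *dst, int dst_len)
{
    const uint8_t *src_end = src + src_len;
    int out = 0;

    while (src < src_end && out < dst_len) {
        int n = *src++;
        if (n <= 127) {                        /* literal run */
            n++;
            if (src_end - src < n || dst_len - out < n)
                return -1;
            while (n--)
                dst[out++] = *src++;
        } else {                               /* repeat run */
            n = 257 - n;
            if (src >= src_end || dst_len - out < n)
                return -1;
            while (n--)
                dst[out++] = *src;
            src++;
        }
    }
    return out;                                /* bytes produced */
}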
Example #11
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    GifState *s = avctx->priv_data;
    AVFrame *picture = data;
    int ret;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

    s->picture.pts          = avpkt->pts;
    s->picture.pkt_pts      = avpkt->pts;
    s->picture.pkt_dts      = avpkt->dts;
    s->picture.pkt_duration = avpkt->duration;

    if (avpkt->size >= 6) {
        s->keyframe = memcmp(avpkt->data, gif87a_sig, 6) == 0 ||
                      memcmp(avpkt->data, gif89a_sig, 6) == 0;
    } else {
        s->keyframe = 0;
    }

    if (s->keyframe) {
        s->keyframe_ok = 0;
        if ((ret = gif_read_header1(s)) < 0)
            return ret;

        if ((ret = av_image_check_size(s->screen_width, s->screen_height, 0, avctx)) < 0)
            return ret;
        avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);

        if (s->picture.data[0])
            avctx->release_buffer(avctx, &s->picture);

        if ((ret = ff_get_buffer(avctx, &s->picture)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        s->picture.pict_type = AV_PICTURE_TYPE_I;
        s->picture.key_frame = 1;
        s->keyframe_ok = 1;
    } else {
        if (!s->keyframe_ok) {
            av_log(avctx, AV_LOG_ERROR, "cannot decode frame without keyframe\n");
            return AVERROR_INVALIDDATA;
        }

        if ((ret = avctx->reget_buffer(avctx, &s->picture)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return ret;
        }

        s->picture.pict_type = AV_PICTURE_TYPE_P;
        s->picture.key_frame = 0;
    }

    ret = gif_parse_next_image(s, got_frame);
    if (ret < 0)
        return ret;
    else if (*got_frame)
        *picture = s->picture;

    return avpkt->size;
}
Example #12
static int opus_decode_packet(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{
    OpusContext *c      = avctx->priv_data;
    AVFrame *frame      = data;
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    int coded_samples   = 0;
    int decoded_samples = INT_MAX;
    int delayed_samples = 0;
    int i, ret;

    /* calculate the number of delayed samples */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        s->out[0] =
        s->out[1] = NULL;
        delayed_samples = FFMAX(delayed_samples,
                                s->delayed_samples + av_audio_fifo_size(c->sync_buffers[i]));
    }

    /* decode the header of the first sub-packet to find out the sample count */
    if (buf) {
        OpusPacket *pkt = &c->streams[0].packet;
        ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
            return ret;
        }
        coded_samples += pkt->frame_count * pkt->frame_duration;
        c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
    }

    frame->nb_samples = coded_samples + delayed_samples;

    /* no input or buffered data => nothing to do */
    if (!frame->nb_samples) {
        *got_frame_ptr = 0;
        return 0;
    }

    /* setup the data buffers */
    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;
    frame->nb_samples = 0;

    memset(c->out, 0, c->nb_streams * 2 * sizeof(*c->out));
    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];
        if (!map->copy)
            c->out[2 * map->stream_idx + map->channel_idx] = (float*)frame->extended_data[i];
    }

    /* read the data from the sync buffers */
    for (i = 0; i < c->nb_streams; i++) {
        float          **out = c->out + 2 * i;
        int sync_size = av_audio_fifo_size(c->sync_buffers[i]);

        float sync_dummy[32];
        int out_dummy = (!out[0]) | ((!out[1]) << 1);

        if (!out[0])
            out[0] = sync_dummy;
        if (!out[1])
            out[1] = sync_dummy;
        if (out_dummy && sync_size > FF_ARRAY_ELEMS(sync_dummy))
            return AVERROR_BUG;

        ret = av_audio_fifo_read(c->sync_buffers[i], (void**)out, sync_size);
        if (ret < 0)
            return ret;

        if (out_dummy & 1)
            out[0] = NULL;
        else
            out[0] += ret;
        if (out_dummy & 2)
            out[1] = NULL;
        else
            out[1] += ret;

        c->out_size[i] = frame->linesize[0] - ret * sizeof(float);
    }

    /* decode each sub-packet */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        if (i && buf) {
            ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
                return ret;
            }
            if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
                av_log(avctx, AV_LOG_ERROR,
                       "Mismatching coded sample count in substream %d.\n", i);
                return AVERROR_INVALIDDATA;
            }

            s->silk_samplerate = get_silk_samplerate(s->packet.config);
        }

        ret = opus_decode_subpacket(&c->streams[i], buf, s->packet.data_size,
                                    c->out + 2 * i, c->out_size[i], coded_samples);
        if (ret < 0)
            return ret;
        c->decoded_samples[i] = ret;
        decoded_samples       = FFMIN(decoded_samples, ret);

        buf      += s->packet.packet_size;
        buf_size -= s->packet.packet_size;
    }

    /* buffer the extra samples */
    for (i = 0; i < c->nb_streams; i++) {
        int buffer_samples = c->decoded_samples[i] - decoded_samples;
        if (buffer_samples) {
            float *buf[2] = { c->out[2 * i + 0] ? c->out[2 * i + 0] : (float*)frame->extended_data[0],
                              c->out[2 * i + 1] ? c->out[2 * i + 1] : (float*)frame->extended_data[0] };
            buf[0] += decoded_samples;
            buf[1] += decoded_samples;
            ret = av_audio_fifo_write(c->sync_buffers[i], (void**)buf, buffer_samples);
            if (ret < 0)
                return ret;
        }
    }

    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];

        /* handle copied channels */
        if (map->copy) {
            memcpy(frame->extended_data[i],
                   frame->extended_data[map->copy_idx],
                   frame->linesize[0]);
        } else if (map->silence) {
            memset(frame->extended_data[i], 0, frame->linesize[0]);
        }

        if (c->gain_i && decoded_samples > 0) {
            c->fdsp->vector_fmul_scalar((float*)frame->extended_data[i],
                                       (float*)frame->extended_data[i],
                                       c->gain, FFALIGN(decoded_samples, 8));
        }
    }

    frame->nb_samples = decoded_samples;
    *got_frame_ptr    = !!decoded_samples;

    return avpkt->size;
}
Example #13
static int cyuv_decode_frame(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    CyuvDecodeContext *s=avctx->priv_data;
    AVFrame *frame = data;

    unsigned char *y_plane;
    unsigned char *u_plane;
    unsigned char *v_plane;
    int y_ptr;
    int u_ptr;
    int v_ptr;

    /* prediction error tables (make it clear that they are signed values) */
    const signed char *y_table = (const signed char*)buf +  0;
    const signed char *u_table = (const signed char*)buf + 16;
    const signed char *v_table = (const signed char*)buf + 32;

    unsigned char y_pred, u_pred, v_pred;
    int stream_ptr;
    unsigned char cur_byte;
    int pixel_groups;
    int rawsize = s->height * FFALIGN(s->width,2) * 2;
    int ret;

    if (avctx->codec_id == AV_CODEC_ID_AURA) {
        y_table = u_table;
        u_table = v_table;
    }
    /* sanity check the buffer size: a buffer has three 16-byte tables
     * followed by (height) lines each with 3 bytes to represent groups
     * of 4 pixels. Thus, the total size of the buffer ought to be:
     *    (3 * 16) + height * (width * 3 / 4) */
    if (buf_size == 48 + s->height * (s->width * 3 / 4)) {
        avctx->pix_fmt = AV_PIX_FMT_YUV411P;
    } else if(buf_size == rawsize ) {
        avctx->pix_fmt = AV_PIX_FMT_UYVY422;
    } else {
        av_log(avctx, AV_LOG_ERROR, "got a buffer with %d bytes when %d were expected\n",
               buf_size, 48 + s->height * (s->width * 3 / 4));
        return AVERROR_INVALIDDATA;
    }

    /* pixel data starts 48 bytes in, after 3x16-byte tables */
    stream_ptr = 48;

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    y_plane = frame->data[0];
    u_plane = frame->data[1];
    v_plane = frame->data[2];

    if (buf_size == rawsize) {
        int linesize = FFALIGN(s->width,2) * 2;
        y_plane += frame->linesize[0] * s->height;
        for (stream_ptr = 0; stream_ptr < rawsize; stream_ptr += linesize) {
            y_plane -= frame->linesize[0];
            memcpy(y_plane, buf+stream_ptr, linesize);
        }
    } else {

    /* iterate through each line in the height */
    for (y_ptr = 0, u_ptr = 0, v_ptr = 0;
         y_ptr < (s->height * frame->linesize[0]);
         y_ptr += frame->linesize[0] - s->width,
         u_ptr += frame->linesize[1] - s->width / 4,
         v_ptr += frame->linesize[2] - s->width / 4) {

        /* reset predictors */
        cur_byte = buf[stream_ptr++];
        u_plane[u_ptr++] = u_pred = cur_byte & 0xF0;
        y_plane[y_ptr++] = y_pred = (cur_byte & 0x0F) << 4;

        cur_byte = buf[stream_ptr++];
        v_plane[v_ptr++] = v_pred = cur_byte & 0xF0;
        y_pred += y_table[cur_byte & 0x0F];
        y_plane[y_ptr++] = y_pred;

        cur_byte = buf[stream_ptr++];
        y_pred += y_table[cur_byte & 0x0F];
        y_plane[y_ptr++] = y_pred;
        y_pred += y_table[(cur_byte & 0xF0) >> 4];
        y_plane[y_ptr++] = y_pred;

        /* iterate through the remaining pixel groups (4 pixels/group) */
        pixel_groups = s->width / 4 - 1;
        while (pixel_groups--) {

            cur_byte = buf[stream_ptr++];
            u_pred += u_table[(cur_byte & 0xF0) >> 4];
            u_plane[u_ptr++] = u_pred;
            y_pred += y_table[cur_byte & 0x0F];
            y_plane[y_ptr++] = y_pred;

            cur_byte = buf[stream_ptr++];
            v_pred += v_table[(cur_byte & 0xF0) >> 4];
            v_plane[v_ptr++] = v_pred;
            y_pred += y_table[cur_byte & 0x0F];
            y_plane[y_ptr++] = y_pred;

            cur_byte = buf[stream_ptr++];
            y_pred += y_table[cur_byte & 0x0F];
            y_plane[y_ptr++] = y_pred;
            y_pred += y_table[(cur_byte & 0xF0) >> 4];
            y_plane[y_ptr++] = y_pred;

        }
    }
    }

    *got_frame = 1;

    return buf_size;
}
Example #14
static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *pkt)
{
    CDXLVideoContext *c = avctx->priv_data;
    AVFrame * const p = &c->frame;
    int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
    const uint8_t *buf = pkt->data;

    if (buf_size < 32)
        return AVERROR_INVALIDDATA;
    encoding        = buf[1] & 7;
    c->format       = buf[1] & 0xE0;
    w               = AV_RB16(&buf[14]);
    h               = AV_RB16(&buf[16]);
    c->bpp          = buf[19];
    c->palette_size = AV_RB16(&buf[20]);
    c->palette      = buf + 32;
    c->video        = c->palette + c->palette_size;
    c->video_size   = buf_size - c->palette_size - 32;

    if (c->palette_size > 512)
        return AVERROR_INVALIDDATA;
    if (buf_size < c->palette_size + 32)
        return AVERROR_INVALIDDATA;
    if (c->bpp < 1)
        return AVERROR_INVALIDDATA;
    if (c->format != BIT_PLANAR && c->format != BIT_LINE) {
        av_log_ask_for_sample(avctx, "unsupported pixel format: 0x%x\n", c->format);
        return AVERROR_PATCHWELCOME;
    }

    if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
        return ret;
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);

    aligned_width = FFALIGN(c->avctx->width, 16);
    c->padded_bits  = aligned_width - c->avctx->width;
    if (c->video_size < aligned_width * avctx->height * c->bpp / 8)
        return AVERROR_INVALIDDATA;
    if (!encoding && c->palette_size && c->bpp <= 8) {
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
    } else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) {
        if (c->palette_size != (1 << (c->bpp - 1)))
            return AVERROR_INVALIDDATA;
        avctx->pix_fmt = AV_PIX_FMT_BGR24;
    } else {
        av_log_ask_for_sample(avctx, "unsupported encoding %d and bpp %d\n",
                              encoding, c->bpp);
        return AVERROR_PATCHWELCOME;
    }

    if (p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference = 0;
    if ((ret = ff_get_buffer(avctx, p)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    p->pict_type = AV_PICTURE_TYPE_I;

    if (encoding) {
        av_fast_padded_malloc(&c->new_video, &c->new_video_size,
                              h * w + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!c->new_video)
            return AVERROR(ENOMEM);
        if (c->bpp == 8)
            cdxl_decode_ham8(c);
        else
            cdxl_decode_ham6(c);
    } else {
        cdxl_decode_rgb(c);
    }
    *got_frame = 1;
    *(AVFrame*)data = c->frame;

    return buf_size;
}
Example #15
static int mediacodec_wrap_sw_buffer(AVCodecContext *avctx,
                                  MediaCodecDecContext *s,
                                  uint8_t *data,
                                  size_t size,
                                  ssize_t index,
                                  FFAMediaCodecBufferInfo *info,
                                  AVFrame *frame)
{
    int ret = 0;
    int status = 0;

    frame->width = avctx->width;
    frame->height = avctx->height;
    frame->format = avctx->pix_fmt;

    /* MediaCodec buffers need to be copied to our own refcounted buffers
     * because the flush command invalidates all input and output buffers.
     */
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate buffer\n");
        goto done;
    }

    /* Override frame->pkt_pts as ff_get_buffer will override its value based
     * on the last avpacket received, which is not in sync with the frame:
     *   * N avpackets can be pushed before 1 frame is actually returned
     *   * 0-sized avpackets are pushed to flush remaining frames at EOS */
    if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
        frame->pts = av_rescale_q(info->presentationTimeUs,
                                      av_make_q(1, 1000000),
                                      avctx->pkt_timebase);
    } else {
        frame->pts = info->presentationTimeUs;
    }
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = frame->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pkt_dts = AV_NOPTS_VALUE;

    av_log(avctx, AV_LOG_TRACE,
            "Frame: width=%d stride=%d height=%d slice-height=%d "
            "crop-top=%d crop-bottom=%d crop-left=%d crop-right=%d encoder=%s\n"
            "destination linesizes=%d,%d,%d\n" ,
            avctx->width, s->stride, avctx->height, s->slice_height,
            s->crop_top, s->crop_bottom, s->crop_left, s->crop_right, s->codec_name,
            frame->linesize[0], frame->linesize[1], frame->linesize[2]);

    switch (s->color_format) {
    case COLOR_FormatYUV420Planar:
        ff_mediacodec_sw_buffer_copy_yuv420_planar(avctx, s, data, size, info, frame);
        break;
    case COLOR_FormatYUV420SemiPlanar:
    case COLOR_QCOM_FormatYUV420SemiPlanar:
    case COLOR_QCOM_FormatYUV420SemiPlanar32m:
        ff_mediacodec_sw_buffer_copy_yuv420_semi_planar(avctx, s, data, size, info, frame);
        break;
    case COLOR_TI_FormatYUV420PackedSemiPlanar:
    case COLOR_TI_FormatYUV420PackedSemiPlanarInterlaced:
        ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar(avctx, s, data, size, info, frame);
        break;
    case COLOR_QCOM_FormatYUV420PackedSemiPlanar64x32Tile2m8ka:
        ff_mediacodec_sw_buffer_copy_yuv420_packed_semi_planar_64x32Tile2m8ka(avctx, s, data, size, info, frame);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported color format 0x%x (value=%d)\n",
            s->color_format, s->color_format);
        ret = AVERROR(EINVAL);
        goto done;
    }

    ret = 0;
done:
    status = ff_AMediaCodec_releaseOutputBuffer(s->codec, index, 0);
    if (status < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to release output buffer\n");
        ret = AVERROR_EXTERNAL;
    }

    return ret;
}
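The pts handling above is the standard av_rescale_q() idiom: it converts a value between time bases, here from microseconds (1/1000000) into avctx->pkt_timebase. As a worked example, with a packet timebase of 1/90000, presentationTimeUs = 40000 rescales to 40000 * 90000 / 1000000 = 3600 ticks.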
Example #16
static int adx_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame      = data;
    int buf_size        = avpkt->size;
    ADXContext *c       = avctx->priv_data;
    int16_t **samples;
    int samples_offset;
    const uint8_t *buf  = avpkt->data;
    const uint8_t *buf_end = buf + avpkt->size;
    int num_blocks, ch, ret;

    if (c->eof) {
        *got_frame_ptr = 0;
        return buf_size;
    }

    if (!c->header_parsed && buf_size >= 2 && AV_RB16(buf) == 0x8000) {
        int header_size;
        if ((ret = ff_adx_decode_header(avctx, buf, buf_size, &header_size,
                                        c->coeff)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "error parsing ADX header\n");
            return AVERROR_INVALIDDATA;
        }
        c->channels      = avctx->channels;
        c->header_parsed = 1;
        if (buf_size < header_size)
            return AVERROR_INVALIDDATA;
        buf      += header_size;
        buf_size -= header_size;
    }
    if (!c->header_parsed)
        return AVERROR_INVALIDDATA;

    /* calculate number of blocks in the packet */
    num_blocks = buf_size / (BLOCK_SIZE * c->channels);

    /* if the packet is not an even multiple of BLOCK_SIZE, check for an EOF
       packet */
    if (!num_blocks || buf_size % (BLOCK_SIZE * avctx->channels)) {
        if (buf_size >= 4 && (AV_RB16(buf) & 0x8000)) {
            c->eof = 1;
            *got_frame_ptr = 0;
            return avpkt->size;
        }
        return AVERROR_INVALIDDATA;
    }

    /* get output buffer */
    frame->nb_samples = num_blocks * BLOCK_SAMPLES;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    samples = (int16_t **)frame->extended_data;
    samples_offset = 0;

    while (num_blocks--) {
        for (ch = 0; ch < c->channels; ch++) {
            if (buf_end - buf < BLOCK_SIZE || adx_decode(c, samples[ch], samples_offset, buf, ch)) {
                c->eof = 1;
                buf = avpkt->data + avpkt->size;
                break;
            }
            buf_size -= BLOCK_SIZE;
            buf      += BLOCK_SIZE;
        }
        if (!c->eof)
            samples_offset += BLOCK_SAMPLES;
    }

    frame->nb_samples = samples_offset;
    *got_frame_ptr = 1;

    return buf - avpkt->data;
}
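Worth noting from this example the return-value convention these decode callbacks share: the return value is the number of bytes consumed from the packet (here buf - avpkt->data, which may be less than avpkt->size when a partial block is left over), a negative AVERROR code signals failure, and whether a frame was actually produced is reported only through got_frame_ptr.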
Example #17
/* Decoder for mpff format. Copies data from the passed in    *AVPacket to the data pointer argument
*/
static int mpff_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt)
{
    //Pointer to the AVPacket's data
    const uint8_t *buf = avpkt->data;
    //The buffer size is the size of the AVPacket
    int buf_size       = avpkt->size;
    //Set up an AVFrame pointer onto the data argument
    AVFrame *p         = data;
    int width, height, linesize, ret;
    int i, j;
    uint8_t *ptr, *ptr0;
    
    //Check that the header magic indicates mpff format
    if (bytestream_get_byte(&buf) != 'M' || bytestream_get_byte(&buf) != 'P' ||
        bytestream_get_byte(&buf) != 'F' || bytestream_get_byte(&buf) != 'F') {
        av_log(avctx, AV_LOG_ERROR, "bad magic number\n");
        return AVERROR_INVALIDDATA;
    }
    //Get the width out of the buffer
    width  = bytestream_get_le32(&buf);
    //Get the height out of the buffer
    height = bytestream_get_le32(&buf);

    //Set the image dimensions on the codec context
    avctx->width  = width;
    avctx->height = height;
    //RGB8 is the only pixel format supported (see the codec structure)
    avctx->pix_fmt = AV_PIX_FMT_RGB8;
    //Allocate the output frame buffer
    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;
    //ptr points at the frame's pixel data; linesize is the number of
    //bytes per row of the frame (it may exceed the image width)
    ptr = p->data[0];
    ptr0 = ptr;
    linesize = p->linesize[0];
    
    //Loop over each line of the image
    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < avctx->width; j++) {
            //Copy one pixel from the buffer to the frame,
            //advancing both pointers
            bytestream_put_byte(&ptr, bytestream_get_byte(&buf));
        }
        //Jump to the start of the next frame line; linesize may
        //include padding beyond the image width
        ptr = ptr0 + linesize;
        ptr0 = ptr;
    }
	
    *got_frame = 1;
    return buf_size;
}
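For symmetry, a sketch of the 12-byte header the decoder above expects: the "MPFF" magic followed by little-endian 32-bit width and height. mpff is a teaching format, so this writer is inferred purely from the parsing code:

#include <stdint.h>

static void put_le32(uint8_t **p, uint32_t v)
{
    *(*p)++ = v & 0xFF;         *(*p)++ = (v >> 8) & 0xFF;
    *(*p)++ = (v >> 16) & 0xFF; *(*p)++ = (v >> 24) & 0xFF;
}

static int write_mpff_header(uint8_t *buf, uint32_t width, uint32_t height)
{
    uint8_t *p = buf;
    *p++ = 'M'; *p++ = 'P'; *p++ = 'F'; *p++ = 'F';
    put_le32(&p, width);   /* stored before height, matching the decoder */
    put_le32(&p, height);
    return (int)(p - buf); /* 12 bytes */
}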
Example #18
0
static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end;
    int buf_size = avpkt->size;
    VmdAudioContext *s = avctx->priv_data;
    int block_type, silent_chunks, audio_chunks;
    int ret;
    uint8_t *output_samples_u8;
    int16_t *output_samples_s16;

    if (buf_size < 16) {
        av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
        *got_frame_ptr = 0;
        return buf_size;
    }

    block_type = buf[6];
    if (block_type < BLOCK_TYPE_AUDIO || block_type > BLOCK_TYPE_SILENCE) {
        av_log(avctx, AV_LOG_ERROR, "unknown block type: %d\n", block_type);
        return AVERROR(EINVAL);
    }
    buf      += 16;
    buf_size -= 16;

    /* get number of silent chunks */
    silent_chunks = 0;
    if (block_type == BLOCK_TYPE_INITIAL) {
        uint32_t flags;
        if (buf_size < 4) {
            av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
            return AVERROR(EINVAL);
        }
        flags         = AV_RB32(buf);
        silent_chunks = av_popcount(flags);
        buf      += 4;
        buf_size -= 4;
    } else if (block_type == BLOCK_TYPE_SILENCE) {
        silent_chunks = 1;
        buf_size = 0; // should already be zero but set it just to be sure
    }

    /* calculate the number of complete audio chunks */
    audio_chunks = buf_size / s->chunk_size;

    /* drop any trailing incomplete chunk */
    buf_size     = audio_chunks * s->chunk_size;

    /* get output buffer */
    frame->nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) /
                        avctx->channels;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    output_samples_u8  =            frame->data[0];
    output_samples_s16 = (int16_t *)frame->data[0];

    /* decode silent chunks */
    if (silent_chunks > 0) {
        int silent_size = FFMIN(avctx->block_align * silent_chunks,
                                frame->nb_samples * avctx->channels);
        if (s->out_bps == 2) {
            memset(output_samples_s16, 0x00, silent_size * 2);
            output_samples_s16 += silent_size;
        } else {
            memset(output_samples_u8,  0x80, silent_size);
            output_samples_u8 += silent_size;
        }
    }

    /* decode audio chunks */
    if (audio_chunks > 0) {
        buf_end = buf + (buf_size & ~(avctx->channels > 1)); /* force an even byte count for stereo */
        while (buf + s->chunk_size <= buf_end) {
            if (s->out_bps == 2) {
                decode_audio_s16(output_samples_s16, buf, s->chunk_size,
                                 avctx->channels);
                output_samples_s16 += avctx->block_align;
            } else {
                memcpy(output_samples_u8, buf, s->chunk_size);
                output_samples_u8  += avctx->block_align;
            }
            buf += s->chunk_size;
        }
    }

    *got_frame_ptr = 1;

    return avpkt->size;
}
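The two memset values above are the digital-silence levels of the respective sample formats: 0x00 for signed 16-bit and 0x80 for unsigned 8-bit, whose midpoint is 128. A minimal standalone illustration:

#include <stdint.h>
#include <string.h>

static void fill_silence_u8(uint8_t *dst, size_t nb_samples)
{
    memset(dst, 0x80, nb_samples);                 /* unsigned midpoint */
}

static void fill_silence_s16(int16_t *dst, size_t nb_samples)
{
    memset(dst, 0x00, nb_samples * sizeof(*dst));  /* signed zero */
}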
Example #19
0
static int brpix_decode_frame(AVCodecContext *avctx,
                              void *data, int *got_frame,
                              AVPacket *avpkt)
{
    BRPixContext *s = avctx->priv_data;
    AVFrame *frame_out = data;

    int ret;
    GetByteContext gb;

    unsigned int bytes_pp;

    unsigned int magic[4];
    unsigned int chunk_type;
    unsigned int data_len;
    BRPixHeader hdr;

    bytestream2_init(&gb, avpkt->data, avpkt->size);

    magic[0] = bytestream2_get_be32(&gb);
    magic[1] = bytestream2_get_be32(&gb);
    magic[2] = bytestream2_get_be32(&gb);
    magic[3] = bytestream2_get_be32(&gb);

    if (magic[0] != 0x12 ||
        magic[1] != 0x8 ||
        magic[2] != 0x2 ||
        magic[3] != 0x2) {
        av_log(avctx, AV_LOG_ERROR, "Not a BRender PIX file\n");
        return AVERROR_INVALIDDATA;
    }

    chunk_type = bytestream2_get_be32(&gb);
    if (chunk_type != 0x3 && chunk_type != 0x3d) {
        av_log(avctx, AV_LOG_ERROR, "Invalid chunk type %d\n", chunk_type);
        return AVERROR_INVALIDDATA;
    }

    ret = brpix_decode_header(&hdr, &gb);
    if (!ret) {
        av_log(avctx, AV_LOG_ERROR, "Invalid header length\n");
        return AVERROR_INVALIDDATA;
    }
    switch (hdr.format) {
    case 3:
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
        bytes_pp = 1;
        break;
    case 4:
        avctx->pix_fmt = AV_PIX_FMT_RGB555BE;
        bytes_pp = 2;
        break;
    case 5:
        avctx->pix_fmt = AV_PIX_FMT_RGB565BE;
        bytes_pp = 2;
        break;
    case 6:
        avctx->pix_fmt = AV_PIX_FMT_RGB24;
        bytes_pp = 3;
        break;
    case 7:
        avctx->pix_fmt = AV_PIX_FMT_0RGB;
        bytes_pp = 4;
        break;
    case 18:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8A;
        bytes_pp = 2;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Format %d is not supported\n",
                                    hdr.format);
        return AVERROR_PATCHWELCOME;
    }

    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    if (av_image_check_size(hdr.width, hdr.height, 0, avctx) < 0)
        return AVERROR_INVALIDDATA;

    if (hdr.width != avctx->width || hdr.height != avctx->height)
        avcodec_set_dimensions(avctx, hdr.width, hdr.height);

    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    chunk_type = bytestream2_get_be32(&gb);

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
        (chunk_type == 0x3 || chunk_type == 0x3d)) {
        BRPixHeader palhdr;
        uint32_t *pal_out = (uint32_t *)s->frame.data[1];
        int i;

        ret = brpix_decode_header(&palhdr, &gb);
        if (!ret) {
            av_log(avctx, AV_LOG_ERROR, "Invalid palette header length\n");
            return AVERROR_INVALIDDATA;
        }
        if (palhdr.format != 7) {
            av_log(avctx, AV_LOG_ERROR, "Palette is not in 0RGB format\n");
            return AVERROR_INVALIDDATA;
        }

        chunk_type = bytestream2_get_be32(&gb);
        data_len = bytestream2_get_be32(&gb);
        bytestream2_skip(&gb, 8);
        if (chunk_type != 0x21 || data_len != 1032 ||
            bytestream2_get_bytes_left(&gb) < 1032) {
            av_log(avctx, AV_LOG_ERROR, "Invalid palette data\n");
            return AVERROR_INVALIDDATA;
        }
        // convert 0RGB to machine endian format (ARGB32)
        for (i = 0; i < 256; ++i) {
            bytestream2_skipu(&gb, 1);
            *pal_out++ = (0xFFU << 24) | bytestream2_get_be24u(&gb);
        }
        bytestream2_skip(&gb, 8);

        s->frame.palette_has_changed = 1;

        chunk_type = bytestream2_get_be32(&gb);
    }

    data_len = bytestream2_get_be32(&gb);
    bytestream2_skip(&gb, 8);

    // read the image data to the buffer
    {
        unsigned int bytes_per_scanline = bytes_pp * hdr.width;
        unsigned int bytes_left = bytestream2_get_bytes_left(&gb);

        if (chunk_type != 0x21 || data_len != bytes_left ||
            bytes_left / bytes_per_scanline < hdr.height)
        {
            av_log(avctx, AV_LOG_ERROR, "Invalid image data\n");
            return AVERROR_INVALIDDATA;
        }

        av_image_copy_plane(s->frame.data[0], s->frame.linesize[0],
                            avpkt->data + bytestream2_tell(&gb),
                            bytes_per_scanline,
                            bytes_per_scanline, hdr.height);
    }

    *frame_out = s->frame;
    *got_frame = 1;

    return avpkt->size;
}
Example #20
0
static int escape124_decode_frame(AVCodecContext *avctx,
                                  void *data, int *got_frame,
                                  AVPacket *avpkt)
{
    int buf_size = avpkt->size;
    Escape124Context *s = avctx->priv_data;
    AVFrame *frame = data;

    GetBitContext gb;
    unsigned frame_flags, frame_size;
    unsigned i;

    unsigned superblock_index, cb_index = 1,
             superblock_col_index = 0,
             superblocks_per_row = avctx->width / 8, skip = -1;

    uint16_t* old_frame_data, *new_frame_data;
    unsigned old_stride, new_stride;

    int ret;

    if ((ret = init_get_bits8(&gb, avpkt->data, avpkt->size)) < 0)
        return ret;

    // This call also guards the potential depth reads for the
    // codebook unpacking.
    if (get_bits_left(&gb) < 64)
        return -1;

    frame_flags = get_bits_long(&gb, 32);
    frame_size  = get_bits_long(&gb, 32);

    // Leave last frame unchanged
    // FIXME: Is this necessary?  I haven't seen it in any real samples
    if (!(frame_flags & 0x114) || !(frame_flags & 0x7800000)) {
        if (!s->frame->data[0])
            return AVERROR_INVALIDDATA;

        av_log(avctx, AV_LOG_DEBUG, "Skipping frame\n");

        *got_frame = 1;
        if ((ret = av_frame_ref(frame, s->frame)) < 0)
            return ret;

        return frame_size;
    }

    for (i = 0; i < 3; i++) {
        if (frame_flags & (1 << (17 + i))) {
            unsigned cb_depth, cb_size;
            if (i == 2) {
                // This codebook can be cut off at places other than
                // powers of 2, leaving some of the entries undefined.
                cb_size = get_bits_long(&gb, 20);
                cb_depth = av_log2(cb_size - 1) + 1;
            } else {
                cb_depth = get_bits(&gb, 4);
                if (i == 0) {
                    // This is the most basic codebook: pow(2,depth) entries
                    // for a depth-length key
                    cb_size = 1 << cb_depth;
                } else {
                    // This codebook varies per superblock
                    // FIXME: I don't think this handles integer overflow
                    // properly
                    cb_size = s->num_superblocks << cb_depth;
                }
            }
            av_free(s->codebooks[i].blocks);
            s->codebooks[i] = unpack_codebook(&gb, cb_depth, cb_size);
            if (!s->codebooks[i].blocks)
                return -1;
        }
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    new_frame_data = (uint16_t*)frame->data[0];
    new_stride = frame->linesize[0] / 2;
    old_frame_data = (uint16_t*)s->frame->data[0];
    old_stride = s->frame->linesize[0] / 2;

    for (superblock_index = 0; superblock_index < s->num_superblocks;
         superblock_index++) {
        MacroBlock mb;
        SuperBlock sb;
        unsigned multi_mask = 0;

        if (skip == -1) {
            // Note that this call will make us skip the rest of the blocks
            // if the frame prematurely ends
            skip = decode_skip_count(&gb);
        }

        if (skip) {
            copy_superblock(new_frame_data, new_stride,
                            old_frame_data, old_stride);
        } else {
            copy_superblock(sb.pixels, 8,
                            old_frame_data, old_stride);

            while (get_bits_left(&gb) >= 1 && !get_bits1(&gb)) {
                unsigned mask;
                mb = decode_macroblock(s, &gb, &cb_index, superblock_index);
                mask = get_bits(&gb, 16);
                multi_mask |= mask;
                for (i = 0; i < 16; i++) {
                    if (mask & mask_matrix[i]) {
                        insert_mb_into_sb(&sb, mb, i);
                    }
                }
            }

            if (!get_bits1(&gb)) {
                unsigned inv_mask = get_bits(&gb, 4);
                for (i = 0; i < 4; i++) {
                    if (inv_mask & (1 << i)) {
                        multi_mask ^= 0xF << i*4;
                    } else {
                        multi_mask ^= get_bits(&gb, 4) << i*4;
                    }
                }

                for (i = 0; i < 16; i++) {
                    if (multi_mask & mask_matrix[i]) {
                        mb = decode_macroblock(s, &gb, &cb_index,
                                               superblock_index);
                        insert_mb_into_sb(&sb, mb, i);
                    }
                }
            } else if (frame_flags & (1 << 16)) {
                while (get_bits_left(&gb) >= 1 && !get_bits1(&gb)) {
                    mb = decode_macroblock(s, &gb, &cb_index, superblock_index);
                    insert_mb_into_sb(&sb, mb, get_bits(&gb, 4));
                }
            }

            copy_superblock(new_frame_data, new_stride, sb.pixels, 8);
        }

        superblock_col_index++;
        new_frame_data += 8;
        if (old_frame_data)
            old_frame_data += 8;
        if (superblock_col_index == superblocks_per_row) {
            new_frame_data += new_stride * 8 - superblocks_per_row * 8;
            if (old_frame_data)
                old_frame_data += old_stride * 8 - superblocks_per_row * 8;
            superblock_col_index = 0;
        }
        skip--;
    }

    av_log(avctx, AV_LOG_DEBUG,
           "Escape sizes: %i, %i, %i\n",
           frame_size, buf_size, get_bits_count(&gb) / 8);

    av_frame_unref(s->frame);
    if ((ret = av_frame_ref(s->frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    return frame_size;
}
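A sketch of what copy_superblock() plausibly does, given how it is called above: move an 8x8 block of 16-bit pixels between strided buffers (the strides are in uint16_t units, matching linesize[0] / 2). Only the call signature comes from the code above; the body, including the NULL handling for a missing previous frame, is an assumption:

#include <stdint.h>
#include <string.h>

static void copy_superblock(uint16_t *dst, unsigned dst_stride,
                            const uint16_t *src, unsigned src_stride)
{
    unsigned y;
    if (!src)
        return;                       /* no previous frame to copy from */
    for (y = 0; y < 8; y++)           /* 8 rows of 8 pixels each */
        memcpy(dst + y * dst_stride, src + y * src_stride,
               8 * sizeof(*dst));
}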
Example #21
0
/**
 * Decode Smacker audio data
 */
static int smka_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    GetBitContext gb;
    HuffContext h[4] = { { 0 } };
    VLC vlc[4]       = { { 0 } };
    int16_t *samples;
    uint8_t *samples8;
    int val;
    int i, res, ret;
    int unp_size;
    int bits, stereo;
    int pred[2] = {0, 0};

    if (buf_size <= 4) {
        av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
        return AVERROR(EINVAL);
    }

    unp_size = AV_RL32(buf);

    if (unp_size > (1U<<24)) {
        av_log(avctx, AV_LOG_ERROR, "packet is too big\n");
        return AVERROR_INVALIDDATA;
    }

    init_get_bits(&gb, buf + 4, (buf_size - 4) * 8);

    if(!get_bits1(&gb)){
        av_log(avctx, AV_LOG_INFO, "Sound: no data\n");
        *got_frame_ptr = 0;
        return 1;
    }
    stereo = get_bits1(&gb);
    bits = get_bits1(&gb);
    if (stereo ^ (avctx->channels != 1)) {
        av_log(avctx, AV_LOG_ERROR, "channels mismatch\n");
        return AVERROR(EINVAL);
    }
    if (bits && avctx->sample_fmt == AV_SAMPLE_FMT_U8) {
        av_log(avctx, AV_LOG_ERROR, "sample format mismatch\n");
        return AVERROR(EINVAL);
    }

    /* get output buffer */
    frame->nb_samples = unp_size / (avctx->channels * (bits + 1));
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    samples  = (int16_t *)frame->data[0];
    samples8 =            frame->data[0];

    // Initialize
    for(i = 0; i < (1 << (bits + stereo)); i++) {
        h[i].length = 256;
        h[i].maxlength = 0;
        h[i].current = 0;
        h[i].bits = av_mallocz(256 * 4);
        h[i].lengths = av_mallocz(256 * sizeof(int));
        h[i].values = av_mallocz(256 * sizeof(int));
        skip_bits1(&gb);
        res = smacker_decode_tree(&gb, &h[i], 0, 0);
        if (res < 0)
            return res;
        skip_bits1(&gb);
        if(h[i].current > 1) {
            res = init_vlc(&vlc[i], SMKTREE_BITS, h[i].length,
                    h[i].lengths, sizeof(int), sizeof(int),
                    h[i].bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
            if(res < 0) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
                return AVERROR_INVALIDDATA;
            }
        }
    }
    if(bits) { //decode 16-bit data
        for(i = stereo; i >= 0; i--)
            pred[i] = sign_extend(av_bswap16(get_bits(&gb, 16)), 16);
        for(i = 0; i <= stereo; i++)
            *samples++ = pred[i];
        for(; i < unp_size / 2; i++) {
            if(get_bits_left(&gb)<0)
                return AVERROR_INVALIDDATA;
            if(i & stereo) {
                if(vlc[2].table)
                    res = get_vlc2(&gb, vlc[2].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                val  = h[2].values[res];
                if(vlc[3].table)
                    res = get_vlc2(&gb, vlc[3].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                val |= h[3].values[res] << 8;
                pred[1] += sign_extend(val, 16);
                *samples++ = pred[1];
            } else {
                if(vlc[0].table)
                    res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                val  = h[0].values[res];
                if(vlc[1].table)
                    res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                val |= h[1].values[res] << 8;
                pred[0] += sign_extend(val, 16);
                *samples++ = pred[0];
            }
        }
    } else { //8-bit data
        for(i = stereo; i >= 0; i--)
            pred[i] = get_bits(&gb, 8);
        for(i = 0; i <= stereo; i++)
            *samples8++ = pred[i];
        for(; i < unp_size; i++) {
            if(get_bits_left(&gb)<0)
                return AVERROR_INVALIDDATA;
            if(i & stereo){
                if(vlc[1].table)
                    res = get_vlc2(&gb, vlc[1].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                pred[1] += sign_extend(h[1].values[res], 8);
                *samples8++ = av_clip_uint8(pred[1]);
            } else {
                if(vlc[0].table)
                    res = get_vlc2(&gb, vlc[0].table, SMKTREE_BITS, 3);
                else
                    res = 0;
                if (res < 0) {
                    av_log(avctx, AV_LOG_ERROR, "invalid vlc\n");
                    return AVERROR_INVALIDDATA;
                }
                pred[0] += sign_extend(h[0].values[res], 8);
                *samples8++ = av_clip_uint8(pred[0]);
            }
        }
    }

    for(i = 0; i < 4; i++) {
        if(vlc[i].table)
            ff_free_vlc(&vlc[i]);
        av_free(h[i].bits);
        av_free(h[i].lengths);
        av_free(h[i].values);
    }

    *got_frame_ptr = 1;

    return buf_size;
}
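sign_extend(), used above for both the 8- and 16-bit prediction deltas, widens an n-bit two's-complement value to a full int. A standalone version equivalent to the libavutil helper:

static inline int sign_extend(int val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int) - bits;
    union { unsigned u; int s; } v = { (unsigned)val << shift };
    return v.s >> shift;              /* arithmetic shift restores the sign */
}

/* sign_extend(0xFF, 8) == -1, sign_extend(0x7F, 8) == 127 */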
Example #22
0
File: ansi.c Project: AVbin/libav
/**
 * Execute ANSI escape code
 * @return 0 on success, negative on error
 */
static int execute_code(AVCodecContext * avctx, int c)
{
    AnsiContext *s = avctx->priv_data;
    int ret, i, width, height;
    switch(c) {
    case 'A': //Cursor Up
        s->y = FFMAX(s->y - (s->nb_args > 0 ? s->args[0]*s->font_height : s->font_height), 0);
        break;
    case 'B': //Cursor Down
        s->y = FFMIN(s->y + (s->nb_args > 0 ? s->args[0]*s->font_height : s->font_height), avctx->height - s->font_height);
        break;
    case 'C': //Cursor Right
        s->x = FFMIN(s->x + (s->nb_args > 0 ? s->args[0]*FONT_WIDTH : FONT_WIDTH), avctx->width  - FONT_WIDTH);
        break;
    case 'D': //Cursor Left
        s->x = FFMAX(s->x - (s->nb_args > 0 ? s->args[0]*FONT_WIDTH : FONT_WIDTH), 0);
        break;
    case 'H': //Cursor Position
    case 'f': //Horizontal and Vertical Position
        s->y = s->nb_args > 0 ? av_clip((s->args[0] - 1)*s->font_height, 0, avctx->height - s->font_height) : 0;
        s->x = s->nb_args > 1 ? av_clip((s->args[1] - 1)*FONT_WIDTH,     0, avctx->width  - FONT_WIDTH) : 0;
        break;
    case 'h': //set screen mode
    case 'l': //reset screen mode
        if (s->nb_args < 2)
            s->args[0] = DEFAULT_SCREEN_MODE;
        switch(s->args[0]) {
        case 0: case 1: case 4: case 5: case 13: case 19: //320x200 (25 rows)
            s->font = ff_cga_font;
            s->font_height = 8;
            width  = 40<<3;
            height = 25<<3;
            break;
        case 2: case 3: //640x400 (25 rows)
            s->font = ff_vga16_font;
            s->font_height = 16;
            width  = 80<<3;
            height = 25<<4;
            break;
        case 6: case 14: //640x200 (25 rows)
            s->font = ff_cga_font;
            s->font_height = 8;
            width  = 80<<3;
            height = 25<<3;
            break;
        case 7: //set line wrapping
            break;
        case 15: case 16: //640x350 (43 rows)
            s->font = ff_cga_font;
            s->font_height = 8;
            width  = 80<<3;
            height = 43<<3;
            break;
        case 17: case 18: //640x480 (60 rows)
            s->font = ff_cga_font;
            s->font_height = 8;
            width  = 80<<3;
            height = 60<<3;
            break;
        default:
            avpriv_request_sample(avctx, "Unsupported screen mode");
        }
        if (width != avctx->width || height != avctx->height) {
            av_frame_unref(s->frame);
            avcodec_set_dimensions(avctx, width, height);
            ret = ff_get_buffer(avctx, s->frame, AV_GET_BUFFER_FLAG_REF);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                return ret;
            }
            s->frame->pict_type           = AV_PICTURE_TYPE_I;
            s->frame->palette_has_changed = 1;
            memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);
            erase_screen(avctx);
        } else if (c == 'l') {
            erase_screen(avctx);
        }
        break;
    case 'J': //Erase in Page
        switch (s->args[0]) {
        case 0:
            erase_line(avctx, s->x, avctx->width - s->x);
            if (s->y < avctx->height - s->font_height)
                memset(s->frame->data[0] + (s->y + s->font_height)*s->frame->linesize[0],
                    DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame->linesize[0]);
            break;
        case 1:
            erase_line(avctx, 0, s->x);
            if (s->y > 0)
                memset(s->frame->data[0], DEFAULT_BG_COLOR, s->y * s->frame->linesize[0]);
            break;
        case 2:
            erase_screen(avctx);
        }
        break;
    case 'K': //Erase in Line
        switch(s->args[0]) {
        case 0:
            erase_line(avctx, s->x, avctx->width - s->x);
            break;
        case 1:
            erase_line(avctx, 0, s->x);
            break;
        case 2:
            erase_line(avctx, 0, avctx->width);
        }
        break;
    case 'm': //Select Graphics Rendition
        if (s->nb_args == 0) {
            s->nb_args = 1;
            s->args[0] = 0;
        }
        for (i = 0; i < FFMIN(s->nb_args, MAX_NB_ARGS); i++) {
            int m = s->args[i];
            if (m == 0) {
                s->attributes = 0;
                s->fg = DEFAULT_FG_COLOR;
                s->bg = DEFAULT_BG_COLOR;
            } else if (m == 1 || m == 2 || m == 4 || m == 5 || m == 7 || m == 8) {
                s->attributes |= 1 << (m - 1);
            } else if (m >= 30 && m <= 38) {
                s->fg = ansi_to_cga[m - 30];
            } else if (m == 39) {
                s->fg = ansi_to_cga[DEFAULT_FG_COLOR];
            } else if (m >= 40 && m <= 47) {
                s->bg = ansi_to_cga[m - 40];
            } else if (m == 49) {
                s->bg = ansi_to_cga[DEFAULT_BG_COLOR];
            } else {
                avpriv_request_sample(avctx, "Unsupported rendition parameter");
            }
        }
        break;
    case 'n': //Device Status Report
    case 'R': //report current line and column
        /* ignore */
        break;
    case 's': //Save Cursor Position
        s->sx = s->x;
        s->sy = s->y;
        break;
    case 'u': //Restore Cursor Position
        s->x = av_clip(s->sx, 0, avctx->width  - FONT_WIDTH);
        s->y = av_clip(s->sy, 0, avctx->height - s->font_height);
        break;
    default:
        avpriv_request_sample(avctx, "Unknown escape code");
        break;
    }
    return 0;
}
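A minimal sketch of how CSI parameters reach execute_code() above: decimal arguments separated by ';' accumulate into args[], and the final byte selects the switch case. This parser is an illustration, not the decoder's actual state machine, and the MAX_NB_ARGS value here is an assumption:

#include <ctype.h>

#define MAX_NB_ARGS 4

static int parse_csi(const char *p, int *args, int *nb_args)
{
    *nb_args = 0;
    while (isdigit((unsigned char)*p)) {
        int v = 0;
        while (isdigit((unsigned char)*p))
            v = v * 10 + (*p++ - '0');
        if (*nb_args < MAX_NB_ARGS)
            args[(*nb_args)++] = v;
        if (*p == ';')
            p++;
    }
    return *p;  /* final byte, e.g. parse_csi("12;40H", ...) returns 'H' */
}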
Example #23
0
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
  V210DecContext *s = avctx->priv_data;

  int h, w, ret, stride, aligned_input;
  AVFrame *pic = data;
  const uint8_t *psrc = avpkt->data;
  uint16_t *y, *u, *v;

  if (s->custom_stride)
    stride = s->custom_stride;
  else {
    int aligned_width = ((avctx->width + 47) / 48) * 48;
    stride = aligned_width * 8 / 3;
  }

  if (avpkt->size < stride * avctx->height) {
    if ((((avctx->width + 23) / 24) * 24 * 8) / 3 * avctx->height == avpkt->size) {
      stride = avpkt->size / avctx->height;
      if (!s->stride_warning_shown)
        av_log(avctx, AV_LOG_WARNING, "Broken v210 with too small padding (64 byte) detected\n");
      s->stride_warning_shown = 1;
    } else {
      av_log(avctx, AV_LOG_ERROR, "packet too small\n");
      return AVERROR_INVALIDDATA;
    }
  }

  aligned_input = !((uintptr_t)psrc & 0xf) && !(stride & 0xf);
  if (aligned_input != s->aligned_input) {
    s->aligned_input = aligned_input;
    if (HAVE_MMX)
      v210_x86_init(s);
  }

  if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
    return ret;

  y = (uint16_t*)pic->data[0];
  u = (uint16_t*)pic->data[1];
  v = (uint16_t*)pic->data[2];
  pic->pict_type = AV_PICTURE_TYPE_I;
  pic->key_frame = 1;

  for (h = 0; h < avctx->height; h++) {
    const uint32_t *src = (const uint32_t*)psrc;
    uint32_t val;

    w = (avctx->width / 6) * 6;
    s->unpack_frame(src, y, u, v, w);

    y += w;
    u += w >> 1;
    v += w >> 1;
    src += (w << 1) / 3;

    if (w < avctx->width - 1) {
      READ_PIXELS(u, y, v);

      val  = av_le2ne32(*src++);
      *y++ =  val & 0x3FF;
      if (w < avctx->width - 3) {
        *u++ = (val >> 10) & 0x3FF;
        *y++ = (val >> 20) & 0x3FF;

        val  = av_le2ne32(*src++);
        *v++ =  val & 0x3FF;
        *y++ = (val >> 10) & 0x3FF;
      }
    }
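The stride computation above follows from v210's packing: groups of 6 pixels are stored in four 32-bit words (16 bytes), and each row is padded to a multiple of 48 pixels (128 bytes). A standalone check:

#include <stdio.h>

static int v210_stride(int width)
{
    int aligned_width = ((width + 47) / 48) * 48;
    return aligned_width * 8 / 3;      /* bytes per row */
}

int main(void)
{
    printf("%d\n", v210_stride(1280)); /* 3456: 1280 padded to 1296 pixels */
    printf("%d\n", v210_stride(1920)); /* 5120: already a multiple of 48 */
    return 0;
}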
Example #24
0
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    PicContext *s = avctx->priv_data;
    uint32_t *palette;
    int bits_per_plane, bpp, etype, esize, npal, pos_after_pal;
    int i, x, y, plane, tmp;

    bytestream2_init(&s->g, avpkt->data, avpkt->size);

    if (bytestream2_get_bytes_left(&s->g) < 11)
        return AVERROR_INVALIDDATA;

    if (bytestream2_get_le16u(&s->g) != 0x1234)
        return AVERROR_INVALIDDATA;

    s->width       = bytestream2_get_le16u(&s->g);
    s->height      = bytestream2_get_le16u(&s->g);
    bytestream2_skip(&s->g, 4);
    tmp            = bytestream2_get_byteu(&s->g);
    bits_per_plane = tmp & 0xF;
    s->nb_planes   = (tmp >> 4) + 1;
    bpp            = bits_per_plane * s->nb_planes;
    if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
        av_log_ask_for_sample(avctx, "unsupported bit depth\n");
        return AVERROR_PATCHWELCOME;
    }

    if (bytestream2_peek_byte(&s->g) == 0xFF) {
        bytestream2_skip(&s->g, 2);
        etype = bytestream2_get_le16(&s->g);
        esize = bytestream2_get_le16(&s->g);
        if (bytestream2_get_bytes_left(&s->g) < esize)
            return AVERROR_INVALIDDATA;
    } else {
        etype = -1;
        esize = 0;
    }

    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    if (s->width != avctx->width || s->height != avctx->height) {
        if (av_image_check_size(s->width, s->height, 0, avctx) < 0)
            return -1;
        avcodec_set_dimensions(avctx, s->width, s->height);
        if (s->frame.data[0])
            avctx->release_buffer(avctx, &s->frame);
    }

    if (ff_get_buffer(avctx, &s->frame) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]);
    s->frame.pict_type           = AV_PICTURE_TYPE_I;
    s->frame.palette_has_changed = 1;

    pos_after_pal = bytestream2_tell(&s->g) + esize;
    palette = (uint32_t*)s->frame.data[1];
    if (etype == 1 && esize > 1 && bytestream2_peek_byte(&s->g) < 6) {
        int idx = bytestream2_get_byte(&s->g);
        npal = 4;
        for (i = 0; i < npal; i++)
            palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ];
    } else if (etype == 2) {
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++) {
            int pal_idx = bytestream2_get_byte(&s->g);
            palette[i]  = ff_cga_palette[FFMIN(pal_idx, 15)];
        }
    } else if (etype == 3) {
        npal = FFMIN(esize, 16);
        for (i = 0; i < npal; i++) {
            int pal_idx = bytestream2_get_byte(&s->g);
            palette[i]  = ff_ega_palette[FFMIN(pal_idx, 63)];
        }
    } else if (etype == 4 || etype == 5) {
        npal = FFMIN(esize / 3, 256);
        for (i = 0; i < npal; i++)
            palette[i] = bytestream2_get_be24(&s->g) << 2;
    } else {
        if (bpp == 1) {
            npal = 2;
            palette[0] = 0x000000;
            palette[1] = 0xFFFFFF;
        } else if (bpp == 2) {
            npal = 4;
            for (i = 0; i < npal; i++)
                palette[i] = ff_cga_palette[ cga_mode45_index[0][i] ];
        } else {
            npal = 16;
            memcpy(palette, ff_cga_palette, npal * 4);
        }
    }
    // fill remaining palette entries
    memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4);
    // skip remaining palette bytes
    bytestream2_seek(&s->g, pos_after_pal, SEEK_SET);

    x = 0;
    y = s->height - 1;
    plane = 0;
    if (bytestream2_get_le16(&s->g)) {
        while (bytestream2_get_bytes_left(&s->g) >= 6) {
            int stop_size, marker, t1, t2;

            t1        = bytestream2_get_bytes_left(&s->g);
            t2        = bytestream2_get_le16(&s->g);
            stop_size = t1 - FFMIN(t1, t2);
            // ignore uncompressed block size
            bytestream2_skip(&s->g, 2);
            marker    = bytestream2_get_byte(&s->g);

            while (plane < s->nb_planes &&
                   bytestream2_get_bytes_left(&s->g) > stop_size) {
                int run = 1;
                int val = bytestream2_get_byte(&s->g);
                if (val == marker) {
                    run = bytestream2_get_byte(&s->g);
                    if (run == 0)
                        run = bytestream2_get_le16(&s->g);
                    val = bytestream2_get_byte(&s->g);
                }
                if (!bytestream2_get_bytes_left(&s->g))
                    break;

                if (bits_per_plane == 8) {
                    picmemset_8bpp(s, val, run, &x, &y);
                    if (y < 0)
                        break;
                } else {
                    picmemset(s, val, run, &x, &y, &plane, bits_per_plane);
                }
            }
        }
    } else {
        av_log_ask_for_sample(avctx, "uncompressed image\n");
        return avpkt->size;
    }

    *got_frame      = 1;
    *(AVFrame*)data = s->frame;
    return avpkt->size;
}
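The inner loop above is a marker-based RLE: a byte stands for itself unless it equals the per-block marker, in which case a run count (with a 16-bit little-endian escape when the 8-bit count is 0) and the run value follow. A condensed standalone sketch of just that scheme:

#include <stddef.h>
#include <stdint.h>

static size_t unpack_marker_rle(const uint8_t *src, size_t src_len,
                                uint8_t marker, uint8_t *dst, size_t dst_len)
{
    size_t si = 0, di = 0;
    while (si < src_len && di < dst_len) {
        unsigned run = 1;
        uint8_t val = src[si++];
        if (val == marker) {
            if (si >= src_len) break;
            run = src[si++];
            if (run == 0) {                   /* 16-bit run length escape */
                if (si + 2 > src_len) break;
                run = src[si] | (src[si + 1] << 8);
                si += 2;
            }
            if (si >= src_len) break;
            val = src[si++];
        }
        while (run-- && di < dst_len)         /* expand the run */
            dst[di++] = val;
    }
    return di;                                /* bytes produced */
}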
Example #25
0
File: 8bps.c Project: bavison/libav
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    EightBpsContext * const c = avctx->priv_data;
    const unsigned char *encoded = buf;
    unsigned char *pixptr, *pixptr_end;
    unsigned int height = avctx->height; // Real image height
    unsigned int dlen, p, row;
    const unsigned char *lp, *dp, *ep;
    unsigned char count;
    unsigned int px_inc;
    unsigned int planes     = c->planes;
    unsigned char *planemap = c->planemap;
    int ret;

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    ep = encoded + buf_size;

    /* Set data pointer after line lengths */
    dp = encoded + planes * (height << 1);

    /* Ignore alpha plane, don't know what to do with it */
    if (planes == 4)
        planes--;

    px_inc = planes + (avctx->pix_fmt == AV_PIX_FMT_RGB32);

    for (p = 0; p < planes; p++) {
        /* Lines length pointer for this plane */
        lp = encoded + p * (height << 1);

        /* Decode a plane */
        for (row = 0; row < height; row++) {
            pixptr = frame->data[0] + row * frame->linesize[0] + planemap[p];
            pixptr_end = pixptr + frame->linesize[0];
            if (ep - lp < row * 2 + 2)
                return AVERROR_INVALIDDATA;
            dlen = av_be2ne16(*(const unsigned short *)(lp + row * 2));
            /* Decode a row of this plane */
            while (dlen > 0) {
                if (ep - dp <= 1)
                    return AVERROR_INVALIDDATA;
                if ((count = *dp++) <= 127) {
                    count++;
                    dlen -= count + 1;
                    if (pixptr_end - pixptr < count * px_inc)
                        break;
                    if (ep - dp < count)
                        return AVERROR_INVALIDDATA;
                    while (count--) {
                        *pixptr = *dp++;
                        pixptr += px_inc;
                    }
                } else {
                    count = 257 - count;
                    if (pixptr_end - pixptr < count * px_inc)
                        break;
                    while (count--) {
                        *pixptr = *dp;
                        pixptr += px_inc;
                    }
                    dp++;
                    dlen -= 2;
                }
            }
        }
    }

    if (avctx->bits_per_coded_sample <= 8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt,
                                                     AV_PKT_DATA_PALETTE,
                                                     NULL);
        if (pal) {
            frame->palette_has_changed = 1;
            memcpy(c->pal, pal, AVPALETTE_SIZE);
        }

        memcpy (frame->data[1], c->pal, AVPALETTE_SIZE);
    }

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
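The row decode above is PackBits-style RLE: a count byte c <= 127 means "copy the next c+1 bytes literally", while c >= 128 means "repeat the next byte 257-c times". A standalone sketch of one row, without the plane interleaving done above:

#include <stddef.h>
#include <stdint.h>

static size_t unpack_packbits_row(const uint8_t *src, size_t src_len,
                                  uint8_t *dst, size_t dst_len)
{
    size_t si = 0, di = 0;
    while (si < src_len && di < dst_len) {
        uint8_t c = src[si++];
        if (c <= 127) {                        /* literal run of c+1 bytes */
            size_t n = (size_t)c + 1;
            while (n-- && si < src_len && di < dst_len)
                dst[di++] = src[si++];
        } else {                               /* repeat next byte 257-c times */
            size_t n = 257 - (size_t)c;
            if (si >= src_len)
                break;
            while (n-- && di < dst_len)
                dst[di++] = src[si];
            si++;
        }
    }
    return di;                                 /* bytes produced */
}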
Example #26
0
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
  const uint8_t *buf     = avpkt->data;
  const uint8_t *buf_end = avpkt->data + avpkt->size;
  int buf_size           = avpkt->size;
  AVFrame * const p      = data;
  uint8_t* outdata;
  int colors;
  int i, ret;
  uint32_t *pal;
  int r, g, b;

  if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
    return ret;
  p->pict_type = AV_PICTURE_TYPE_I;
  p->key_frame = 1;

  outdata = p->data[0];

  if (buf_end - buf < 0x68 + 4)
    return AVERROR_INVALIDDATA;
  buf   += 0x68; /* jump to palette */
  colors = AV_RB32(buf);
  buf   += 4;

  if (colors < 0 || colors > 256) {
    av_log(avctx, AV_LOG_ERROR, "Error color count - %i(0x%X)\n", colors, colors);
    return AVERROR_INVALIDDATA;
  }
  if (buf_end - buf < (colors + 1) * 8)
    return AVERROR_INVALIDDATA;

  pal = (uint32_t*)p->data[1];
  for (i = 0; i <= colors; i++) {
    unsigned int idx;
    idx = AV_RB16(buf); /* color index */
    buf += 2;

    if (idx > 255) {
      av_log(avctx, AV_LOG_ERROR, "Palette index out of range: %u\n", idx);
      buf += 6;
      continue;
    }
    r = *buf++;
    buf++;
    g = *buf++;
    buf++;
    b = *buf++;
    buf++;
    pal[idx] = 0xFFU << 24 | r << 16 | g << 8 | b;
  }
  p->palette_has_changed = 1;

  if (buf_end - buf < 18)
    return AVERROR_INVALIDDATA;
  buf += 18; /* skip unneeded data */
  for (i = 0; i < avctx->height; i++) {
    int size, left, code, pix;
    const uint8_t *next;
    uint8_t *out;
    int tsize = 0;

    /* decode line */
    out  = outdata;
    size = AV_RB16(buf); /* size of packed line */
    buf += 2;
    if (buf_end - buf < size)
      return AVERROR_INVALIDDATA;

    left = size;
    next = buf + size;
    while (left > 0) {
      code = *buf++;
      if (code & 0x80) { /* run */
        pix = *buf++;
        if ((out + (257 - code)) > (outdata + p->linesize[0]))
          break;
        memset(out, pix, 257 - code);
        out   += 257 - code;
        tsize += 257 - code;
        left  -= 2;
      } else { /* copy */
        if ((out + code) > (outdata + p->linesize[0]))
          break;
        if (buf_end - buf < code + 1)
          return AVERROR_INVALIDDATA;
        memcpy(out, buf, code + 1);
        out   += code + 1;
        buf   += code + 1;
        left  -= 2 + code;
        tsize += code + 1;
      }
    }
    buf = next;
    outdata += p->linesize[0];
  }

  *got_frame      = 1;

  return buf_size;
}
Example #27
0
static int submit_frame(QSVEncContext *q, const AVFrame *frame,
                        mfxFrameSurface1 **surface)
{
    QSVFrame *qf;
    int ret;

    ret = get_free_frame(q, &qf);
    if (ret < 0)
        return ret;

    if (frame->format == AV_PIX_FMT_QSV) {
        ret = av_frame_ref(qf->frame, frame);
        if (ret < 0)
            return ret;

        qf->surface = (mfxFrameSurface1*)qf->frame->data[3];
        *surface = qf->surface;
        return 0;
    }

    /* make a copy if the input is not padded as libmfx requires */
    if (frame->height & 31 || frame->linesize[0] & 15) {
        qf->frame->height = FFALIGN(frame->height, 32);
        qf->frame->width  = FFALIGN(frame->width, 16);

        ret = ff_get_buffer(q->avctx, qf->frame, AV_GET_BUFFER_FLAG_REF);
        if (ret < 0)
            return ret;

        qf->frame->height = frame->height;
        qf->frame->width  = frame->width;
        ret = av_frame_copy(qf->frame, frame);
        if (ret < 0) {
            av_frame_unref(qf->frame);
            return ret;
        }
    } else {
        ret = av_frame_ref(qf->frame, frame);
        if (ret < 0)
            return ret;
    }

    qf->surface_internal.Info = q->param.mfx.FrameInfo;

    qf->surface_internal.Info.PicStruct =
        !frame->interlaced_frame ? MFX_PICSTRUCT_PROGRESSIVE :
        frame->top_field_first   ? MFX_PICSTRUCT_FIELD_TFF :
                                   MFX_PICSTRUCT_FIELD_BFF;
    if (frame->repeat_pict == 1)
        qf->surface_internal.Info.PicStruct |= MFX_PICSTRUCT_FIELD_REPEATED;
    else if (frame->repeat_pict == 2)
        qf->surface_internal.Info.PicStruct |= MFX_PICSTRUCT_FRAME_DOUBLING;
    else if (frame->repeat_pict == 4)
        qf->surface_internal.Info.PicStruct |= MFX_PICSTRUCT_FRAME_TRIPLING;

    qf->surface_internal.Data.PitchLow  = qf->frame->linesize[0];
    qf->surface_internal.Data.Y         = qf->frame->data[0];
    qf->surface_internal.Data.UV        = qf->frame->data[1];
    qf->surface_internal.Data.TimeStamp = av_rescale_q(frame->pts, q->avctx->time_base, (AVRational){1, 90000});

    qf->surface = &qf->surface_internal;

    *surface = qf->surface;

    return 0;
}
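The padding rule above (frame heights rounded up to 32, line sizes to 16, as libmfx requires) relies on FFALIGN, which rounds up to a power-of-two multiple. An equivalent standalone macro and a quick check:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    printf("%d\n", ALIGN_UP(1080, 32)); /* 1088: 1080-line video gets padded */
    printf("%d\n", ALIGN_UP(1920, 16)); /* 1920: already aligned, unchanged */
    return 0;
}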
Example #28
0
static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame, AVPacket *avpkt)
{
    int line = 0, ret;
    const int width = avctx->width;
    AVFrame *pic = data;
    uint16_t *y, *u, *v;
    const uint8_t *line_end, *src = avpkt->data;
    int stride = avctx->width * 8 / 3;

    if (width == 1) {
        av_log(avctx, AV_LOG_ERROR, "Width 1 not supported.\n");
        return AVERROR_INVALIDDATA;
    }

    if (   avctx->codec_tag == MKTAG('0', '1', '2', 'v')
        && avpkt->size % avctx->height == 0
        && avpkt->size / avctx->height * 3 >= width * 8)
        stride = avpkt->size / avctx->height;

    if (avpkt->size < avctx->height * stride) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small: %d instead of %d\n",
               avpkt->size, avctx->height * stride);
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
        return ret;

    pic->pict_type = AV_PICTURE_TYPE_I;
    pic->key_frame = 1;

    y = (uint16_t *)pic->data[0];
    u = (uint16_t *)pic->data[1];
    v = (uint16_t *)pic->data[2];
    line_end = avpkt->data + stride;

    while (line++ < avctx->height) {
        while (1) {
            uint32_t t = AV_RL32(src);
            src += 4;
            *u++ = t <<  6 & 0xFFC0;
            *y++ = t >>  4 & 0xFFC0;
            *v++ = t >> 14 & 0xFFC0;

            if (src >= line_end - 1) {
                *y = 0x80;
                src++;
                line_end += stride;
                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                break;
            }

            t = AV_RL32(src);
            src += 4;
            *y++ = t <<  6 & 0xFFC0;
            *u++ = t >>  4 & 0xFFC0;
            *y++ = t >> 14 & 0xFFC0;
            if (src >= line_end - 2) {
                if (!(width & 1)) {
                    *y = 0x80;
                    src += 2;
                }
                line_end += stride;
                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                break;
            }

            t = AV_RL32(src);
            src += 4;
            *v++ = t <<  6 & 0xFFC0;
            *y++ = t >>  4 & 0xFFC0;
            *u++ = t >> 14 & 0xFFC0;

            if (src >= line_end - 1) {
                *y = 0x80;
                src++;
                line_end += stride;
                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                break;
            }

            t = AV_RL32(src);
            src += 4;
            *y++ = t <<  6 & 0xFFC0;
            *v++ = t >>  4 & 0xFFC0;
            *y++ = t >> 14 & 0xFFC0;

            if (src >= line_end - 2) {
                if (width & 1) {
                    *y = 0x80;
                    src += 2;
                }
                line_end += stride;
                y = (uint16_t *)(pic->data[0] + line * pic->linesize[0]);
                u = (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
                v = (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
                break;
            }
        }
    }

    *got_frame = 1;

    return avpkt->size;
}
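Each 32-bit little-endian word above packs three 10-bit components; the shift-and-mask pattern (mask 0xFFC0) places each component in bits 15..6, i.e. MSB-aligned 10-bit samples in 16-bit storage. A standalone illustration of the three extractions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t t = (0x3FFu << 20) | (0x200u << 10) | 0x155u;
    uint16_t c0 = t <<  6 & 0xFFC0; /* bits  9..0  -> 0x5540 */
    uint16_t c1 = t >>  4 & 0xFFC0; /* bits 19..10 -> 0x8000 */
    uint16_t c2 = t >> 14 & 0xFFC0; /* bits 29..20 -> 0xFFC0 */

    printf("%04X %04X %04X\n", c0, c1, c2);
    return 0;
}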
Example #29
0
static int opus_decode_packet(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{
    OpusContext *c      = avctx->priv_data;
    AVFrame *frame      = data;
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    int coded_samples   = 0;
    int decoded_samples = 0;
    int i, ret;

    /* decode the header of the first sub-packet to find out the sample count */
    if (buf) {
        OpusPacket *pkt = &c->streams[0].packet;
        ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
            return ret;
        }
        coded_samples += pkt->frame_count * pkt->frame_duration;
        c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
    }

    frame->nb_samples = coded_samples + c->streams[0].delayed_samples;

    /* no input or buffered data => nothing to do */
    if (!frame->nb_samples) {
        *got_frame_ptr = 0;
        return 0;
    }

    /* setup the data buffers */
    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    frame->nb_samples = 0;

    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];
        if (!map->copy)
            c->streams[map->stream_idx].out[map->channel_idx] = (float*)frame->extended_data[i];
    }

    for (i = 0; i < c->nb_streams; i++)
        c->streams[i].out_size = frame->linesize[0];

    /* decode each sub-packet */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        if (i && buf) {
            ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
                return ret;
            }
            if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
                av_log(avctx, AV_LOG_ERROR,
                       "Mismatching coded sample count in substream %d.\n", i);
                return AVERROR_INVALIDDATA;
            }

            s->silk_samplerate = get_silk_samplerate(s->packet.config);
        }

        ret = opus_decode_subpacket(&c->streams[i], buf,
                                    s->packet.data_size, coded_samples);
        if (ret < 0)
            return ret;
        if (decoded_samples && ret != decoded_samples) {
            av_log(avctx, AV_LOG_ERROR, "Different numbers of decoded samples "
                   "in a multi-channel stream\n");
            return AVERROR_INVALIDDATA;
        }
        decoded_samples = ret;
        buf      += s->packet.packet_size;
        buf_size -= s->packet.packet_size;
    }

    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];

        /* handle copied channels */
        if (map->copy) {
            memcpy(frame->extended_data[i],
                   frame->extended_data[map->copy_idx],
                   frame->linesize[0]);
        } else if (map->silence) {
            memset(frame->extended_data[i], 0, frame->linesize[0]);
        }

        if (c->gain_i) {
            c->fdsp.vector_fmul_scalar((float*)frame->extended_data[i],
                                       (float*)frame->extended_data[i],
                                       c->gain, FFALIGN(decoded_samples, 8));
        }
    }

    frame->nb_samples = decoded_samples;
    *got_frame_ptr    = !!decoded_samples;

    return avpkt->size;
}
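The per-channel gain applied above comes from the Opus header's output gain, a Q7.8 fixed-point value in decibels; the float multiplier is assumed (consistent with the Opus spec) to be 10^(gain_i / (20 * 256)). A standalone check:

#include <math.h>
#include <stdio.h>

int main(void)
{
    int gain_i = 256;  /* +1.0 dB in Q7.8 */
    double gain = pow(10.0, gain_i / (20.0 * 256.0));

    printf("%f\n", gain);  /* ~1.122018 */
    return 0;
}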
Example #30
0
// decodes the frame
static int utah_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    UTAHContext *context = avctx->priv_data;
    const uint8_t *buffer = avpkt->data;
    uint8_t* pic_buffer;
    int buffer_size = avpkt->size;
    int height = 0;
    int width = 0;
    int ret;
    int hsize = 14;
    int line = 0;
    int n_bytes = 0;
    AVFrame *picture = data;
    AVFrame *pic = &context->picture;
    avctx->pix_fmt = AV_PIX_FMT_RGB24;

    // ensure the packet is at least as large as the header
    if(buffer_size < hsize)
    {
        av_log(avctx, AV_LOG_ERROR, "Image is not a .utah image (invalid header size)\n");
        return AVERROR_INVALIDDATA;
    }

    // ensure the image is a utah image
    if(bytestream_get_byte(&buffer) != 'U' || bytestream_get_byte(&buffer)!='T')
    {
        av_log(avctx, AV_LOG_ERROR, "Invalid .utah image\n");
        return AVERROR_INVALIDDATA;
    }

    height = bytestream_get_le32(&buffer); // get the height from the packet buffer
    width = bytestream_get_le32(&buffer);  // get the width from the packet buffer
    avctx->height = height;

    line = bytestream_get_le32(&buffer);   // number of bytes per image line
    avctx->width = width;

    // the expected packet size is the pixel data plus the header
    n_bytes = height*line + hsize;
    if(n_bytes != buffer_size)
    {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
        return AVERROR_INVALIDDATA;
    }

    if (pic->data[0])
    {
        avctx->release_buffer(avctx, pic);
    }

    pic->reference = 0;
    if ((ret = ff_get_buffer(avctx, pic)) < 0) {
        return ret;
    }

    memset(pic->data[0], 0, height*pic->linesize[0]);
    pic_buffer = pic->data[0];

    for (int row = 0; row < height; row++)
    {
        memcpy(pic_buffer, buffer, line);
        pic_buffer += pic->linesize[0];
        buffer += line;
    }

    *picture = context->picture;
    *got_frame = 1;

    return buffer_size;
}
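For symmetry with the mpff example, a sketch of the 14-byte header this decoder parses: the "UT" magic, then little-endian 32-bit height, width, and bytes-per-line. utah is likewise a teaching format, so this writer is inferred purely from the parsing code:

#include <stdint.h>

static int write_utah_header(uint8_t *buf, uint32_t height, uint32_t width,
                             uint32_t bytes_per_line)
{
    uint8_t *p = buf;
    uint32_t v[3] = { height, width, bytes_per_line }; /* decoder's read order */
    int i;

    *p++ = 'U'; *p++ = 'T';
    for (i = 0; i < 3; i++) {
        *p++ = v[i] & 0xFF;         *p++ = (v[i] >> 8) & 0xFF;
        *p++ = (v[i] >> 16) & 0xFF; *p++ = (v[i] >> 24) & 0xFF;
    }
    return (int)(p - buf); /* 14 bytes, matching hsize above */
}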