Example #1
static int dxv_decompress_dxt1(AVCodecContext *avctx)
{
    DXVContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    uint32_t value, prev, op;
    int idx = 0, state = 0;
    int pos = 2;

    /* Copy the first two elements */
    AV_WL32(ctx->tex_data, bytestream2_get_le32(gbc));
    AV_WL32(ctx->tex_data + 4, bytestream2_get_le32(gbc));

    /* Process input until the whole texture has been filled */
    while (pos < ctx->tex_size / 4) {
        CHECKPOINT(2);

        /* Copy two elements from a previous offset or from the input buffer */
        if (op) {
            prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;

            prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;
        } else {
            CHECKPOINT(2);

            if (op)
                prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            else
                prev = bytestream2_get_le32(gbc);
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;

            CHECKPOINT(2);

            if (op)
                prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            else
                prev = bytestream2_get_le32(gbc);
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;
        }
    }

    return 0;
}
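
The CHECKPOINT(2) macro used above is defined elsewhere in dxv.c and is not part of this excerpt; Example #12 further down (dxv_decompress_dxt5) shows the same mechanism expanded inline. The following is a minimal standalone sketch of that mechanism, in plain C without the FFmpeg bytestream API: a little-endian 32-bit word is refilled every 16 opcodes and each opcode is popped from its two lowest bits. The input bytes are made up for illustration.

/* Sketch: 2-bit opcode stream, 16 opcodes per little-endian 32-bit refill,
 * mirroring the inline expansion visible in dxv_decompress_dxt5() below. */
#include <stdint.h>
#include <stdio.h>

static uint32_t read_le32(const uint8_t *p)
{
    return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
    const uint8_t input[4] = { 0x1B, 0x00, 0x00, 0x00 }; /* ops 3, 2, 1, 0, ... */
    const uint8_t *src = input;
    uint32_t value = 0;
    int state = 0;

    for (int i = 0; i < 16; i++) {
        if (state == 0) {          /* refill: one LE32 word holds 16 opcodes */
            value = read_le32(src);
            src  += 4;
            state = 16;
        }
        unsigned op = value & 0x3; /* the two lowest bits are the next opcode */
        value >>= 2;
        state--;
        printf("op[%2d] = %u\n", i, op);
    }
    return 0;
}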
Example #2
static int hq_hqa_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    HQContext *ctx = avctx->priv_data;
    AVFrame *pic = data;
    uint32_t info_tag;
    unsigned int data_size;
    int ret;
    unsigned tag;

    bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);
    if (bytestream2_get_bytes_left(&ctx->gbc) < 4 + 4) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    info_tag = bytestream2_peek_le32(&ctx->gbc);
    if (info_tag == MKTAG('I', 'N', 'F', 'O')) {
        int info_size;
        bytestream2_skip(&ctx->gbc, 4);
        info_size = bytestream2_get_le32(&ctx->gbc);
        if (bytestream2_get_bytes_left(&ctx->gbc) < info_size) {
            av_log(avctx, AV_LOG_ERROR, "Invalid INFO size (%d).\n", info_size);
            return AVERROR_INVALIDDATA;
        }
        ff_canopus_parse_info_tag(avctx, ctx->gbc.buffer, info_size);

        bytestream2_skip(&ctx->gbc, info_size);
    }

    data_size = bytestream2_get_bytes_left(&ctx->gbc);
    if (data_size < 4) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", data_size);
        return AVERROR_INVALIDDATA;
    }

    /* HQ defines dimensions and number of slices, and thus slice traversal
     * order. HQA has no size constraint and a fixed number of slices, so it
     * needs a separate scheme. */
    tag = bytestream2_get_le32(&ctx->gbc);
    if ((tag & 0x00FFFFFF) == (MKTAG('U', 'V', 'C', ' ') & 0x00FFFFFF)) {
        ret = hq_decode_frame(ctx, pic, tag >> 24, data_size);
    } else if (tag == MKTAG('H', 'Q', 'A', '1')) {
Example #3
/*
 * To be a valid APNG file, we mandate, in this order:
 *     PNGSIG
 *     IHDR
 *     ...
 *     acTL
 *     ...
 *     IDAT
 */
static int apng_probe(AVProbeData *p)
{
    GetByteContext gb;
    int state = 0;
    uint32_t len, tag;

    bytestream2_init(&gb, p->buf, p->buf_size);

    if (bytestream2_get_be64(&gb) != PNGSIG)
        return 0;

    for (;;) {
        len = bytestream2_get_be32(&gb);
        if (len > 0x7fffffff)
            return 0;

        tag = bytestream2_get_le32(&gb);
        /* we don't check IDAT size, as this is the last tag
         * we check, and it may be larger than the probe buffer */
        if (tag != MKTAG('I', 'D', 'A', 'T') &&
            len > bytestream2_get_bytes_left(&gb))
            return 0;

        switch (tag) {
        case MKTAG('I', 'H', 'D', 'R'):
            if (len != 13)
                return 0;
            if (av_image_check_size(bytestream2_get_be32(&gb), bytestream2_get_be32(&gb), 0, NULL))
                return 0;
            bytestream2_skip(&gb, 9);
            state++;
            break;
        case MKTAG('a', 'c', 'T', 'L'):
            if (state != 1 ||
                len != 8 ||
                bytestream2_get_be32(&gb) == 0) /* 0 is not a valid value for number of frames */
                return 0;
            bytestream2_skip(&gb, 8);
            state++;
            break;
        case MKTAG('I', 'D', 'A', 'T'):
            if (state != 2)
                return 0;
            goto end;
        default:
            /* skip other tags */
            bytestream2_skip(&gb, len + 4);
            break;
        }
    }

end:
    return AVPROBE_SCORE_MAX;
}
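
For context, a PNG or APNG stream after the 8-byte signature is a sequence of chunks, each consisting of a big-endian 32-bit length, a 4-byte tag, the payload, and a 4-byte CRC; that layout is why the default case above skips len + 4 bytes. A minimal standalone sketch of that chunk walk in plain C, over a made-up buffer holding the signature and a single empty chunk:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read_be32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

static void walk_chunks(const uint8_t *buf, size_t size)
{
    size_t pos = 8;                       /* skip the 8-byte PNG signature */
    while (pos + 8 <= size) {
        uint32_t len = read_be32(buf + pos);
        char tag[5] = { 0 };
        memcpy(tag, buf + pos + 4, 4);
        printf("chunk %s, %u payload bytes\n", tag, (unsigned)len);
        pos += 8 + len + 4;               /* length + tag, payload, CRC */
    }
}

int main(void)
{
    /* PNG signature followed by one empty chunk tagged "acTL" and a dummy CRC */
    const uint8_t demo[] = {
        0x89, 'P', 'N', 'G', '\r', '\n', 0x1a, '\n',
        0, 0, 0, 0,  'a', 'c', 'T', 'L',  0, 0, 0, 0
    };
    walk_chunks(demo, sizeof(demo));
    return 0;
}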
Example #4
/* The first three bytes are the size of the section past the header, or zero
 * if the length is stored in the next long word. The fourth byte in the first
 * long word indicates the type of the current section. */
static int parse_section_header(GetByteContext *gbc, int *section_size,
                                enum HapSectionType *section_type)
{
    if (bytestream2_get_bytes_left(gbc) < 4)
        return AVERROR_INVALIDDATA;

    *section_size = bytestream2_get_le24(gbc);
    *section_type = bytestream2_get_byte(gbc);

    if (*section_size == 0) {
        if (bytestream2_get_bytes_left(gbc) < 4)
            return AVERROR_INVALIDDATA;

        *section_size = bytestream2_get_le32(gbc);
    }

    if (*section_size > bytestream2_get_bytes_left(gbc) || *section_size < 0)
        return AVERROR_INVALIDDATA;
    else
        return 0;
}
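
A standalone sketch of the section header layout the comment above describes: a 24-bit little-endian size followed by a type byte, with a zero size meaning the real size follows as a 32-bit little-endian word. Plain C without the FFmpeg API; the helper name read_hap_header and the demo bytes are made up for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int read_hap_header(const uint8_t *p, size_t avail,
                           uint32_t *size, unsigned *type, size_t *hdr_len)
{
    if (avail < 4)
        return -1;
    *size    = p[0] | p[1] << 8 | (uint32_t)p[2] << 16;   /* LE24 size */
    *type    = p[3];
    *hdr_len = 4;
    if (*size == 0) {                    /* extended header: LE32 size follows */
        if (avail < 8)
            return -1;
        *size    = p[4] | p[5] << 8 | (uint32_t)p[6] << 16 | (uint32_t)p[7] << 24;
        *hdr_len = 8;
    }
    return 0;
}

int main(void)
{
    const uint8_t demo[] = { 0x00, 0x00, 0x00, 0xBB,      /* size 0 -> extended */
                             0x10, 0x00, 0x00, 0x00 };    /* real size: 16 */
    uint32_t size;
    unsigned type;
    size_t hdr;

    if (!read_hap_header(demo, sizeof(demo), &size, &type, &hdr))
        printf("type 0x%02X, section size %u, header %zu bytes\n",
               type, (unsigned)size, hdr);
    return 0;
}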
Example #5
/* The first three bytes are the size of the section past the header, or zero
 * if the length is stored in the next long word. The fourth byte in the first
 * long word indicates the type of the current section. */
static int parse_section_header(AVCodecContext *avctx)
{
    HapContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    int length;

    if (bytestream2_get_bytes_left(gbc) < 4)
        return AVERROR_INVALIDDATA;

    length = bytestream2_get_le24(gbc);

    ctx->section_type = bytestream2_get_byte(gbc);

    if (length == 0) {
        if (bytestream2_get_bytes_left(gbc) < 4)
            return AVERROR_INVALIDDATA;
        length = bytestream2_get_le32(gbc);
    }

    if (length > bytestream2_get_bytes_left(gbc) || length == 0)
        return AVERROR_INVALIDDATA;

    return length;
}
Example #6
static int decode_frame(AVCodecContext *avctx, void *data,
                            int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    C93DecoderContext * const c93 = avctx->priv_data;
    AVFrame * const newpic = &c93->pictures[c93->currentpic];
    AVFrame * const oldpic = &c93->pictures[c93->currentpic^1];
    AVFrame *picture = data;
    GetByteContext gb;
    uint8_t *out;
    int stride, i, x, y, b, bt = 0;

    c93->currentpic ^= 1;

    newpic->reference = 3;
    newpic->buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                         FF_BUFFER_HINTS_REUSABLE | FF_BUFFER_HINTS_READABLE;
    if (avctx->reget_buffer(avctx, newpic)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    stride = newpic->linesize[0];

    bytestream2_init(&gb, buf, buf_size);
    b = bytestream2_get_byte(&gb);
    if (b & C93_FIRST_FRAME) {
        newpic->pict_type = AV_PICTURE_TYPE_I;
        newpic->key_frame = 1;
    } else {
        newpic->pict_type = AV_PICTURE_TYPE_P;
        newpic->key_frame = 0;
    }

    for (y = 0; y < HEIGHT; y += 8) {
        out = newpic->data[0] + y * stride;
        for (x = 0; x < WIDTH; x += 8) {
            uint8_t *copy_from = oldpic->data[0];
            unsigned int offset, j;
            uint8_t cols[4], grps[4];
            C93BlockType block_type;

            if (!bt)
                bt = bytestream2_get_byte(&gb);

            block_type= bt & 0x0F;
            switch (block_type) {
            case C93_8X8_FROM_PREV:
                offset = bytestream2_get_le16(&gb);
                if (copy_block(avctx, out, copy_from, offset, 8, stride))
                    return -1;
                break;

            case C93_4X4_FROM_CURR:
                copy_from = newpic->data[0];
            case C93_4X4_FROM_PREV:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        offset = bytestream2_get_le16(&gb);
                        if (copy_block(avctx, &out[j*stride+i],
                                           copy_from, offset, 4, stride))
                            return -1;
                    }
                }
                break;

            case C93_8X8_2COLOR:
                bytestream2_get_buffer(&gb, cols, 2);
                for (i = 0; i < 8; i++) {
                    draw_n_color(out + i*stride, stride, 8, 1, 1, cols,
                                     NULL, bytestream2_get_byte(&gb));
                }

                break;

            case C93_4X4_2COLOR:
            case C93_4X4_4COLOR:
            case C93_4X4_4COLOR_GRP:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        if (block_type == C93_4X4_2COLOR) {
                            bytestream2_get_buffer(&gb, cols, 2);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                    1, cols, NULL, bytestream2_get_le16(&gb));
                        } else if (block_type == C93_4X4_4COLOR) {
                            bytestream2_get_buffer(&gb, cols, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                    2, cols, NULL, bytestream2_get_le32(&gb));
                        } else {
                            bytestream2_get_buffer(&gb, grps, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                    1, cols, grps, bytestream2_get_le16(&gb));
                        }
                    }
                }
                break;

            case C93_NOOP:
                break;

            case C93_8X8_INTRA:
                for (j = 0; j < 8; j++)
                    bytestream2_get_buffer(&gb, out + j*stride, 8);
                break;

            default:
                av_log(avctx, AV_LOG_ERROR, "unexpected type %x at %dx%d\n",
                       block_type, x, y);
                return -1;
            }
            bt >>= 4;
            out += 8;
        }
    }

    if (b & C93_HAS_PALETTE) {
        uint32_t *palette = (uint32_t *) newpic->data[1];
        for (i = 0; i < 256; i++) {
            palette[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
        }
    } else {
        if (oldpic->data[1])
            memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
    }

    *picture = *newpic;
    *data_size = sizeof(AVFrame);

    return buf_size;
}
Example #7
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    C93DecoderContext * const c93 = avctx->priv_data;
    AVFrame * const newpic = c93->pictures[c93->currentpic];
    AVFrame * const oldpic = c93->pictures[c93->currentpic^1];
    GetByteContext gb;
    uint8_t *out;
    int stride, ret, i, x, y, b, bt = 0;

    if ((ret = ff_set_dimensions(avctx, WIDTH, HEIGHT)) < 0)
        return ret;

    c93->currentpic ^= 1;

    if ((ret = ff_reget_buffer(avctx, newpic)) < 0)
        return ret;

    stride = newpic->linesize[0];

    bytestream2_init(&gb, buf, buf_size);
    b = bytestream2_get_byte(&gb);
    if (b & C93_FIRST_FRAME) {
        newpic->pict_type = AV_PICTURE_TYPE_I;
        newpic->key_frame = 1;
    } else {
        newpic->pict_type = AV_PICTURE_TYPE_P;
        newpic->key_frame = 0;
    }

    for (y = 0; y < HEIGHT; y += 8) {
        out = newpic->data[0] + y * stride;
        for (x = 0; x < WIDTH; x += 8) {
            uint8_t *copy_from = oldpic->data[0];
            unsigned int offset, j;
            uint8_t cols[4], grps[4];
            C93BlockType block_type;

            if (!bt)
                bt = bytestream2_get_byte(&gb);

            block_type= bt & 0x0F;
            switch (block_type) {
            case C93_8X8_FROM_PREV:
                offset = bytestream2_get_le16(&gb);
                if ((ret = copy_block(avctx, out, copy_from, offset, 8, stride)) < 0)
                    return ret;
                break;

            case C93_4X4_FROM_CURR:
                copy_from = newpic->data[0];
            case C93_4X4_FROM_PREV:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        int offset = bytestream2_get_le16(&gb);
                        int from_x = offset % WIDTH;
                        int from_y = offset / WIDTH;
                        if (block_type == C93_4X4_FROM_CURR && from_y == y+j &&
                            (FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) {
                            avpriv_request_sample(avctx, "block overlap %d %d %d %d\n", from_x, x+i, from_y, y+j);
                            return AVERROR_INVALIDDATA;
                        }
                        if ((ret = copy_block(avctx, &out[j*stride+i],
                                              copy_from, offset, 4, stride)) < 0)
                            return ret;
                    }
                }
                break;

            case C93_8X8_2COLOR:
                bytestream2_get_buffer(&gb, cols, 2);
                for (i = 0; i < 8; i++) {
                    draw_n_color(out + i*stride, stride, 8, 1, 1, cols,
                                     NULL, bytestream2_get_byte(&gb));
                }

                break;

            case C93_4X4_2COLOR:
            case C93_4X4_4COLOR:
            case C93_4X4_4COLOR_GRP:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        if (block_type == C93_4X4_2COLOR) {
                            bytestream2_get_buffer(&gb, cols, 2);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                    1, cols, NULL, bytestream2_get_le16(&gb));
                        } else if (block_type == C93_4X4_4COLOR) {
                            bytestream2_get_buffer(&gb, cols, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                    2, cols, NULL, bytestream2_get_le32(&gb));
                        } else {
                            bytestream2_get_buffer(&gb, grps, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                    1, cols, grps, bytestream2_get_le16(&gb));
                        }
                    }
                }
                break;

            case C93_NOOP:
                break;

            case C93_8X8_INTRA:
                for (j = 0; j < 8; j++)
                    bytestream2_get_buffer(&gb, out + j*stride, 8);
                break;

            default:
                av_log(avctx, AV_LOG_ERROR, "unexpected type %x at %dx%d\n",
                       block_type, x, y);
                return AVERROR_INVALIDDATA;
            }
            bt >>= 4;
            out += 8;
        }
    }

    if (b & C93_HAS_PALETTE) {
        uint32_t *palette = (uint32_t *) newpic->data[1];
        for (i = 0; i < 256; i++) {
            palette[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
        }
        newpic->palette_has_changed = 1;
    } else {
        if (oldpic->data[1])
            memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
    }

    if ((ret = av_frame_ref(data, newpic)) < 0)
        return ret;
    *got_frame = 1;

    return buf_size;
}
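
In both C93 examples, each control byte read from the bytestream packs two 4-bit block types, consumed low nibble first (bt & 0x0F, then bt >>= 4), so a new control byte is fetched once bt reaches zero. A tiny standalone sketch of that consumption order, with made-up control bytes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint8_t control[] = { 0x52, 0x31 };   /* block types 2, 5, 1, 3 */
    int bt = 0, in = 0;

    for (int block = 0; block < 4; block++) {
        if (!bt)                     /* refetch once both nibbles are consumed */
            bt = control[in++];      /* (or when the remaining nibble is zero) */
        int block_type = bt & 0x0F;  /* low nibble first, as in the decoders   */
        printf("block %d -> type %d\n", block, block_type);
        bt >>= 4;
    }
    return 0;
}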
Example #8
unsigned ff_tget_long(GetByteContext *gb, int le)
{
    unsigned v = le ? bytestream2_get_le32(gb) : bytestream2_get_be32(gb);
    return v;
}
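
This helper picks the read order from the le flag; in TIFF-style files that flag normally comes from the byte-order mark at the start of the file ("II" for little-endian, "MM" for big-endian). A standalone sketch of the same switch in plain C, without the FFmpeg bytestream API:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_long(const uint8_t *p, int le)
{
    if (le)
        return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

int main(void)
{
    const uint8_t bytes[4] = { 0x12, 0x34, 0x56, 0x78 };

    printf("LE: 0x%08X\n", (unsigned)get_long(bytes, 1));  /* 0x78563412 */
    printf("BE: 0x%08X\n", (unsigned)get_long(bytes, 0));  /* 0x12345678 */
    return 0;
}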
Example #9
int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
    const uint64_t fuzz_tag = FUZZ_TAG;
    FuzzDataBuffer buffer;
    const uint8_t *last = data;
    const uint8_t *end = data + size;
    uint32_t it = 0;
    int (*decode_handler)(AVCodecContext *avctx, AVFrame *picture,
                          int *got_picture_ptr,
                          const AVPacket *avpkt) = NULL;

    if (!c) {
#ifdef FFMPEG_DECODER
#define DECODER_SYMBOL0(CODEC) ff_##CODEC##_decoder
#define DECODER_SYMBOL(CODEC) DECODER_SYMBOL0(CODEC)
        extern AVCodec DECODER_SYMBOL(FFMPEG_DECODER);
        avcodec_register(&DECODER_SYMBOL(FFMPEG_DECODER));

        c = &DECODER_SYMBOL(FFMPEG_DECODER);
#else
        avcodec_register_all();
        c = AVCodecInitialize(FFMPEG_CODEC);  // Done once.
#endif
        av_log_set_level(AV_LOG_PANIC);
    }

    // Unsupported
    if (c->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
        return 0;

    switch (c->type) {
    case AVMEDIA_TYPE_AUDIO   : decode_handler = avcodec_decode_audio4; break;
    case AVMEDIA_TYPE_VIDEO   : decode_handler = avcodec_decode_video2; break;
    case AVMEDIA_TYPE_SUBTITLE: decode_handler = subtitle_handler     ; break;
    }

    AVCodecContext* ctx = avcodec_alloc_context3(NULL);
    if (!ctx)
        error("Failed memory allocation");

    ctx->max_pixels = 4096 * 4096; // To reduce false-positive OOMs and hangs

    if (size > 1024) {
        GetByteContext gbc;
        bytestream2_init(&gbc, data + size - 1024, 1024);
        ctx->width                              = bytestream2_get_le32(&gbc);
        ctx->height                             = bytestream2_get_le32(&gbc);
        ctx->bit_rate                           = bytestream2_get_le64(&gbc);
        ctx->bits_per_coded_sample              = bytestream2_get_le32(&gbc);
        if (av_image_check_size(ctx->width, ctx->height, 0, ctx))
            ctx->width = ctx->height = 0;
        size -= 1024;
    }

    int res = avcodec_open2(ctx, c, NULL);
    if (res < 0) {
        av_free(ctx);
        return 0; // Failure of avcodec_open2() does not imply that an issue was found
    }

    FDBCreate(&buffer);
    int got_frame;
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        error("Failed memory allocation");

    // Read very simple container
    AVPacket avpkt;
    while (data < end && it < maxiteration) {
        // Search for the TAG
        while (data + sizeof(fuzz_tag) < end) {
            if (data[0] == (fuzz_tag & 0xFF) && AV_RN64(data) == fuzz_tag)
                break;
            data++;
        }
        if (data + sizeof(fuzz_tag) > end)
            data = end;

        FDBPrepare(&buffer, &avpkt, last, data - last);
        data += sizeof(fuzz_tag);
        last = data;

        // Iterate through all data
        while (avpkt.size > 0 && it++ < maxiteration) {
            av_frame_unref(frame);
            int ret = decode_handler(ctx, frame, &got_frame, &avpkt);

            if (it > 20)
                ctx->error_concealment = 0;

            if (ret <= 0 || ret > avpkt.size)
               break;
            if (ctx->codec_type != AVMEDIA_TYPE_AUDIO)
                ret = avpkt.size;
            avpkt.data += ret;
            avpkt.size -= ret;
        }
    }

    av_init_packet(&avpkt);
    avpkt.data = NULL;
    avpkt.size = 0;

    do {
        got_frame = 0;
        decode_handler(ctx, frame, &got_frame, &avpkt);
    } while (got_frame == 1 && it++ < maxiteration);

    av_frame_free(&frame);
    avcodec_free_context(&ctx);
    av_freep(&ctx);
    FDBDesroy(&buffer);
    return 0;
}
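
When the input is larger than 1024 bytes, the fuzzer above treats its last 1024 bytes as a trailer whose leading fields set width and height (LE32), bit_rate (LE64) and bits_per_coded_sample (LE32). A small sketch that writes such a trailer to stdout, handy for hand-crafting reproducer inputs; plain C, field values chosen arbitrarily:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
    p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

static void put_le64(uint8_t *p, uint64_t v)
{
    put_le32(p,     (uint32_t)v);
    put_le32(p + 4, (uint32_t)(v >> 32));
}

int main(void)
{
    uint8_t trailer[1024];

    memset(trailer, 0, sizeof(trailer));
    put_le32(trailer +  0, 320);        /* ctx->width                 */
    put_le32(trailer +  4, 240);        /* ctx->height                */
    put_le64(trailer +  8, 1000000);    /* ctx->bit_rate              */
    put_le32(trailer + 16, 8);          /* ctx->bits_per_coded_sample */
    fwrite(trailer, 1, sizeof(trailer), stdout);
    return 0;
}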
Example #10
static int parse_pixel_format(AVCodecContext *avctx)
{
    DDSContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    char buf[32];
    uint32_t flags, fourcc, gimp_tag;
    enum DDSDXGIFormat dxgi;
    int size, bpp, r, g, b, a;
    int alpha_exponent, ycocg_classic, ycocg_scaled, normal_map, array;

    /* Alternative DDS implementations use reserved1 as custom header. */
    bytestream2_skip(gbc, 4 * 3);
    gimp_tag = bytestream2_get_le32(gbc);
    alpha_exponent = gimp_tag == MKTAG('A', 'E', 'X', 'P');
    ycocg_classic  = gimp_tag == MKTAG('Y', 'C', 'G', '1');
    ycocg_scaled   = gimp_tag == MKTAG('Y', 'C', 'G', '2');
    bytestream2_skip(gbc, 4 * 7);

    /* Now the real DDPF starts. */
    size = bytestream2_get_le32(gbc);
    if (size != 32) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format header %d.\n", size);
        return AVERROR_INVALIDDATA;
    }
    flags = bytestream2_get_le32(gbc);
    ctx->compressed = flags & DDPF_FOURCC;
    ctx->paletted   = flags & DDPF_PALETTE;
    normal_map      = flags & DDPF_NORMALMAP;
    fourcc = bytestream2_get_le32(gbc);

    if (ctx->compressed && ctx->paletted) {
        av_log(avctx, AV_LOG_WARNING,
               "Disabling invalid palette flag for compressed dds.\n");
        ctx->paletted = 0;
    }

    bpp = bytestream2_get_le32(gbc); // rgbbitcount
    r   = bytestream2_get_le32(gbc); // rbitmask
    g   = bytestream2_get_le32(gbc); // gbitmask
    b   = bytestream2_get_le32(gbc); // bbitmask
    a   = bytestream2_get_le32(gbc); // abitmask

    bytestream2_skip(gbc, 4); // caps
    bytestream2_skip(gbc, 4); // caps2
    bytestream2_skip(gbc, 4); // caps3
    bytestream2_skip(gbc, 4); // caps4
    bytestream2_skip(gbc, 4); // reserved2

    av_get_codec_tag_string(buf, sizeof(buf), fourcc);
    av_log(avctx, AV_LOG_VERBOSE, "fourcc %s bpp %d "
           "r 0x%x g 0x%x b 0x%x a 0x%x\n", buf, bpp, r, g, b, a);
    if (gimp_tag) {
        av_get_codec_tag_string(buf, sizeof(buf), gimp_tag);
        av_log(avctx, AV_LOG_VERBOSE, "and GIMP-DDS tag %s\n", buf);
    }

    if (ctx->compressed)
        avctx->pix_fmt = AV_PIX_FMT_RGBA;

    if (ctx->compressed) {
        switch (fourcc) {
        case MKTAG('D', 'X', 'T', '1'):
            ctx->tex_ratio = 8;
            ctx->tex_funct = ctx->texdsp.dxt1a_block;
            break;
        case MKTAG('D', 'X', 'T', '2'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxt2_block;
            break;
        case MKTAG('D', 'X', 'T', '3'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxt3_block;
            break;
        case MKTAG('D', 'X', 'T', '4'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxt4_block;
            break;
        case MKTAG('D', 'X', 'T', '5'):
            ctx->tex_ratio = 16;
            if (ycocg_scaled)
                ctx->tex_funct = ctx->texdsp.dxt5ys_block;
            else if (ycocg_classic)
                ctx->tex_funct = ctx->texdsp.dxt5y_block;
            else
                ctx->tex_funct = ctx->texdsp.dxt5_block;
            break;
        case MKTAG('R', 'X', 'G', 'B'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxt5_block;
            /* This format may be considered as a normal map,
             * but it is handled differently in a separate postproc. */
            ctx->postproc = DDS_SWIZZLE_RXGB;
            normal_map = 0;
            break;
        case MKTAG('A', 'T', 'I', '1'):
        case MKTAG('B', 'C', '4', 'U'):
            ctx->tex_ratio = 8;
            ctx->tex_funct = ctx->texdsp.rgtc1u_block;
            break;
        case MKTAG('B', 'C', '4', 'S'):
            ctx->tex_ratio = 8;
            ctx->tex_funct = ctx->texdsp.rgtc1s_block;
            break;
        case MKTAG('A', 'T', 'I', '2'):
            /* RGTC2 variant with swapped R and G (3Dc) */
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.dxn3dc_block;
            break;
        case MKTAG('B', 'C', '5', 'U'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.rgtc2u_block;
            break;
        case MKTAG('B', 'C', '5', 'S'):
            ctx->tex_ratio = 16;
            ctx->tex_funct = ctx->texdsp.rgtc2s_block;
            break;
        case MKTAG('U', 'Y', 'V', 'Y'):
            ctx->compressed = 0;
            avctx->pix_fmt = AV_PIX_FMT_UYVY422;
            break;
        case MKTAG('Y', 'U', 'Y', '2'):
            ctx->compressed = 0;
            avctx->pix_fmt = AV_PIX_FMT_YUYV422;
            break;
        case MKTAG('P', '8', ' ', ' '):
            /* ATI Palette8, same as normal palette */
            ctx->compressed = 0;
            ctx->paletted   = 1;
            avctx->pix_fmt  = AV_PIX_FMT_PAL8;
            break;
        case MKTAG('D', 'X', '1', '0'):
            /* DirectX 10 extra header */
            dxgi = bytestream2_get_le32(gbc);
            bytestream2_skip(gbc, 4); // resourceDimension
            bytestream2_skip(gbc, 4); // miscFlag
            array = bytestream2_get_le32(gbc);
            bytestream2_skip(gbc, 4); // miscFlag2

            if (array != 0)
                av_log(avctx, AV_LOG_VERBOSE,
                       "Found array of size %d (ignored).\n", array);

            /* Only BC[1-5] are actually compressed. */
            ctx->compressed = (dxgi >= 70) && (dxgi <= 84);

            av_log(avctx, AV_LOG_VERBOSE, "DXGI format %d.\n", dxgi);
            switch (dxgi) {
            /* RGB types. */
            case DXGI_FORMAT_R16G16B16A16_TYPELESS:
            case DXGI_FORMAT_R16G16B16A16_FLOAT:
            case DXGI_FORMAT_R16G16B16A16_UNORM:
            case DXGI_FORMAT_R16G16B16A16_UINT:
            case DXGI_FORMAT_R16G16B16A16_SNORM:
            case DXGI_FORMAT_R16G16B16A16_SINT:
                avctx->pix_fmt = AV_PIX_FMT_BGRA64;
                break;
            case DXGI_FORMAT_R8G8B8A8_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_R8G8B8A8_TYPELESS:
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            case DXGI_FORMAT_R8G8B8A8_UINT:
            case DXGI_FORMAT_R8G8B8A8_SNORM:
            case DXGI_FORMAT_R8G8B8A8_SINT:
                avctx->pix_fmt = AV_PIX_FMT_BGRA;
                break;
            case DXGI_FORMAT_B8G8R8A8_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_B8G8R8A8_TYPELESS:
            case DXGI_FORMAT_B8G8R8A8_UNORM:
                avctx->pix_fmt = AV_PIX_FMT_RGBA;
                break;
            case DXGI_FORMAT_B8G8R8X8_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_B8G8R8X8_TYPELESS:
            case DXGI_FORMAT_B8G8R8X8_UNORM:
                avctx->pix_fmt = AV_PIX_FMT_RGBA; // opaque
                break;
            case DXGI_FORMAT_B5G6R5_UNORM:
                avctx->pix_fmt = AV_PIX_FMT_RGB565LE;
                break;
            /* Texture types. */
            case DXGI_FORMAT_BC1_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_BC1_TYPELESS:
            case DXGI_FORMAT_BC1_UNORM:
                ctx->tex_ratio = 8;
                ctx->tex_funct = ctx->texdsp.dxt1a_block;
                break;
            case DXGI_FORMAT_BC2_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_BC2_TYPELESS:
            case DXGI_FORMAT_BC2_UNORM:
                ctx->tex_ratio = 16;
                ctx->tex_funct = ctx->texdsp.dxt3_block;
                break;
            case DXGI_FORMAT_BC3_UNORM_SRGB:
                avctx->colorspace = AVCOL_SPC_RGB;
            case DXGI_FORMAT_BC3_TYPELESS:
            case DXGI_FORMAT_BC3_UNORM:
                ctx->tex_ratio = 16;
                ctx->tex_funct = ctx->texdsp.dxt5_block;
                break;
            case DXGI_FORMAT_BC4_TYPELESS:
            case DXGI_FORMAT_BC4_UNORM:
                ctx->tex_ratio = 8;
                ctx->tex_funct = ctx->texdsp.rgtc1u_block;
                break;
            case DXGI_FORMAT_BC4_SNORM:
                ctx->tex_ratio = 8;
                ctx->tex_funct = ctx->texdsp.rgtc1s_block;
                break;
            case DXGI_FORMAT_BC5_TYPELESS:
            case DXGI_FORMAT_BC5_UNORM:
                ctx->tex_ratio = 16;
                ctx->tex_funct = ctx->texdsp.rgtc2u_block;
                break;
            case DXGI_FORMAT_BC5_SNORM:
                ctx->tex_ratio = 16;
                ctx->tex_funct = ctx->texdsp.rgtc2s_block;
                break;
            default:
                av_log(avctx, AV_LOG_ERROR,
                       "Unsupported DXGI format %d.\n", dxgi);
                return AVERROR_INVALIDDATA;
            }
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported %s fourcc.\n", buf);
            return AVERROR_INVALIDDATA;
        }
    } else if (ctx->paletted) {
        if (bpp == 8) {
            avctx->pix_fmt = AV_PIX_FMT_PAL8;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported palette bpp %d.\n", bpp);
            return AVERROR_INVALIDDATA;
        }
    } else {
        /*  8 bpp */
        if (bpp == 8 && r == 0xff && g == 0 && b == 0 && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        /* 16 bpp */
        else if (bpp == 16 && r == 0xff && g == 0 && b == 0 && a == 0xff00)
            avctx->pix_fmt = AV_PIX_FMT_YA8;
        else if (bpp == 16 && r == 0xffff && g == 0 && b == 0 && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
        else if (bpp == 16 && r == 0xf800 && g == 0x7e0 && b == 0x1f && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_RGB565LE;
        /* 24 bpp */
        else if (bpp == 24 && r == 0xff0000 && g == 0xff00 && b == 0xff && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_BGR24;
        /* 32 bpp */
        else if (bpp == 32 && r == 0xff0000 && g == 0xff00 && b == 0xff && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_BGR0; // opaque
        else if (bpp == 32 && r == 0xff && g == 0xff00 && b == 0xff0000 && a == 0)
            avctx->pix_fmt = AV_PIX_FMT_RGB0; // opaque
        else if (bpp == 32 && r == 0xff0000 && g == 0xff00 && b == 0xff && a == 0xff000000)
            avctx->pix_fmt = AV_PIX_FMT_BGRA;
        else if (bpp == 32 && r == 0xff && g == 0xff00 && b == 0xff0000 && a == 0xff000000)
            avctx->pix_fmt = AV_PIX_FMT_RGBA;
        /* give up */
        else {
            av_log(avctx, AV_LOG_ERROR, "Unknown pixel format "
                   "[bpp %d r 0x%x g 0x%x b 0x%x a 0x%x].\n", bpp, r, g, b, a);
            return AVERROR_INVALIDDATA;
        }
    }

    /* Set any remaining post-proc that should happen before frame is ready. */
    if (alpha_exponent)
        ctx->postproc = DDS_ALPHA_EXP;
    else if (normal_map)
        ctx->postproc = DDS_NORMAL_MAP;
    else if (ycocg_classic && !ctx->compressed)
        ctx->postproc = DDS_RAW_YCOCG;
    else if (avctx->pix_fmt == AV_PIX_FMT_YA8)
        ctx->postproc = DDS_SWAP_ALPHA;

    /* ATI/NVidia variants sometimes add swizzling in bpp. */
    switch (bpp) {
    case MKTAG('A', '2', 'X', 'Y'):
        ctx->postproc = DDS_SWIZZLE_A2XY;
        break;
    case MKTAG('x', 'G', 'B', 'R'):
        ctx->postproc = DDS_SWIZZLE_XGBR;
        break;
    case MKTAG('x', 'R', 'B', 'G'):
        ctx->postproc = DDS_SWIZZLE_XRBG;
        break;
    case MKTAG('R', 'B', 'x', 'G'):
        ctx->postproc = DDS_SWIZZLE_RBXG;
        break;
    case MKTAG('R', 'G', 'x', 'B'):
        ctx->postproc = DDS_SWIZZLE_RGXB;
        break;
    case MKTAG('R', 'x', 'B', 'G'):
        ctx->postproc = DDS_SWIZZLE_RXBG;
        break;
    case MKTAG('x', 'G', 'x', 'R'):
        ctx->postproc = DDS_SWIZZLE_XGXR;
        break;
    case MKTAG('A', '2', 'D', '5'):
        ctx->postproc = DDS_NORMAL_MAP;
        break;
    }

    return 0;
}
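
The tex_ratio values set above appear to be the byte cost of one 4x4 texel block (8 bytes for DXT1/BC4-class formats, 16 for DXT3/DXT5/BC5-class ones), which matches the standard S3TC/RGTC block sizes. Under that assumption, the compressed texture size follows from rounding the dimensions up to whole blocks; a small sketch:

#include <stdio.h>

/* Bytes needed for a block-compressed texture, assuming tex_ratio is the
 * byte size of one 4x4 block. */
static int compressed_size(int width, int height, int tex_ratio)
{
    int blocks_w = (width  + 3) / 4;   /* round up to whole 4x4 blocks */
    int blocks_h = (height + 3) / 4;
    return blocks_w * blocks_h * tex_ratio;
}

int main(void)
{
    printf("256x256 DXT1: %d bytes\n", compressed_size(256, 256, 8));   /* 32768 */
    printf("256x256 DXT5: %d bytes\n", compressed_size(256, 256, 16));  /* 65536 */
    return 0;
}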
Example #11
static int flic_decode_frame_8BPP(AVCodecContext *avctx,
                                  void *data, int *got_frame,
                                  const uint8_t *buf, int buf_size)
{
    FlicDecodeContext *s = avctx->priv_data;

    GetByteContext g2;
    int stream_ptr_after_color_chunk;
    int pixel_ptr;
    int palette_ptr;
    unsigned char palette_idx1;
    unsigned char palette_idx2;

    unsigned int frame_size;
    int num_chunks;

    unsigned int chunk_size;
    int chunk_type;

    int i, j, ret;

    int color_packets;
    int color_changes;
    int color_shift;
    unsigned char r, g, b;

    int lines;
    int compressed_lines;
    int starting_line;
    signed short line_packets;
    int y_ptr;
    int byte_run;
    int pixel_skip;
    int pixel_countdown;
    unsigned char *pixels;
    unsigned int pixel_limit;

    bytestream2_init(&g2, buf, buf_size);

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }

    pixels = s->frame->data[0];
    pixel_limit = s->avctx->height * s->frame->linesize[0];
    frame_size = bytestream2_get_le32(&g2);
    bytestream2_skip(&g2, 2); /* skip the magic number */
    num_chunks = bytestream2_get_le16(&g2);
    bytestream2_skip(&g2, 8);  /* skip padding */

    frame_size -= 16;

    /* iterate through the chunks */
    while ((frame_size > 0) && (num_chunks > 0)) {
        chunk_size = bytestream2_get_le32(&g2);
        chunk_type = bytestream2_get_le16(&g2);

        switch (chunk_type) {
        case FLI_256_COLOR:
        case FLI_COLOR:
            stream_ptr_after_color_chunk = bytestream2_tell(&g2) + chunk_size - 6;

            /* check special case: If this file is from the Magic Carpet
             * game and uses 6-bit colors even though it reports 256-color
             * chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during
             * initialization) */
            if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE))
                color_shift = 0;
            else
                color_shift = 2;
            /* set up the palette */
            color_packets = bytestream2_get_le16(&g2);
            palette_ptr = 0;
            for (i = 0; i < color_packets; i++) {
                /* first byte is how many colors to skip */
                palette_ptr += bytestream2_get_byte(&g2);

                /* next byte indicates how many entries to change */
                color_changes = bytestream2_get_byte(&g2);

                /* if there are 0 color changes, there are actually 256 */
                if (color_changes == 0)
                    color_changes = 256;

                for (j = 0; j < color_changes; j++) {
                    unsigned int entry;

                    /* wrap around, for good measure */
                    if ((unsigned)palette_ptr >= 256)
                        palette_ptr = 0;

                    r = bytestream2_get_byte(&g2) << color_shift;
                    g = bytestream2_get_byte(&g2) << color_shift;
                    b = bytestream2_get_byte(&g2) << color_shift;
                    entry = (r << 16) | (g << 8) | b;
                    if (s->palette[palette_ptr] != entry)
                        s->new_palette = 1;
                    s->palette[palette_ptr++] = entry;
                }
            }

            /* color chunks sometimes have weird 16-bit alignment issues;
             * therefore, take the hardline approach and skip
             * to the value calculated w.r.t. the size specified by the color
             * chunk header */
            if (stream_ptr_after_color_chunk - bytestream2_tell(&g2) > 0)
                bytestream2_skip(&g2, stream_ptr_after_color_chunk - bytestream2_tell(&g2));

            break;

        case FLI_DELTA:
            y_ptr = 0;
            compressed_lines = bytestream2_get_le16(&g2);
            while (compressed_lines > 0) {
                line_packets = bytestream2_get_le16(&g2);
                if ((line_packets & 0xC000) == 0xC000) {
                    // line skip opcode
                    line_packets = -line_packets;
                    y_ptr += line_packets * s->frame->linesize[0];
                } else if ((line_packets & 0xC000) == 0x4000) {
                    av_log(avctx, AV_LOG_ERROR, "Undefined opcode (%x) in DELTA_FLI\n", line_packets);
                } else if ((line_packets & 0xC000) == 0x8000) {
                    // "last byte" opcode
                    pixel_ptr= y_ptr + s->frame->linesize[0] - 1;
                    CHECK_PIXEL_PTR(0);
                    pixels[pixel_ptr] = line_packets & 0xff;
                } else {
                    compressed_lines--;
                    pixel_ptr = y_ptr;
                    CHECK_PIXEL_PTR(0);
                    pixel_countdown = s->avctx->width;
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        pixel_skip = bytestream2_get_byte(&g2);
                        pixel_ptr += pixel_skip;
                        pixel_countdown -= pixel_skip;
                        byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                        if (byte_run < 0) {
                            byte_run = -byte_run;
                            palette_idx1 = bytestream2_get_byte(&g2);
                            palette_idx2 = bytestream2_get_byte(&g2);
                            CHECK_PIXEL_PTR(byte_run * 2);
                            for (j = 0; j < byte_run; j++, pixel_countdown -= 2) {
                                pixels[pixel_ptr++] = palette_idx1;
                                pixels[pixel_ptr++] = palette_idx2;
                            }
                        } else {
                            CHECK_PIXEL_PTR(byte_run * 2);
                            for (j = 0; j < byte_run * 2; j++, pixel_countdown--) {
                                pixels[pixel_ptr++] = bytestream2_get_byte(&g2);
                            }
                        }
                    }

                    y_ptr += s->frame->linesize[0];
                }
            }
            break;

        case FLI_LC:
            /* line compressed */
            starting_line = bytestream2_get_le16(&g2);
            y_ptr = 0;
            y_ptr += starting_line * s->frame->linesize[0];

            compressed_lines = bytestream2_get_le16(&g2);
            while (compressed_lines > 0) {
                pixel_ptr = y_ptr;
                CHECK_PIXEL_PTR(0);
                pixel_countdown = s->avctx->width;
                line_packets = bytestream2_get_byte(&g2);
                if (line_packets > 0) {
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        pixel_skip = bytestream2_get_byte(&g2);
                        pixel_ptr += pixel_skip;
                        pixel_countdown -= pixel_skip;
                        byte_run = sign_extend(bytestream2_get_byte(&g2),8);
                        if (byte_run > 0) {
                            CHECK_PIXEL_PTR(byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                pixels[pixel_ptr++] = bytestream2_get_byte(&g2);
                            }
                        } else if (byte_run < 0) {
                            byte_run = -byte_run;
                            palette_idx1 = bytestream2_get_byte(&g2);
                            CHECK_PIXEL_PTR(byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                pixels[pixel_ptr++] = palette_idx1;
                            }
                        }
                    }
                }

                y_ptr += s->frame->linesize[0];
                compressed_lines--;
            }
            break;

        case FLI_BLACK:
            /* set the whole frame to color 0 (which is usually black) */
            memset(pixels, 0,
                s->frame->linesize[0] * s->avctx->height);
            break;

        case FLI_BRUN:
            /* Byte run compression: This chunk type only occurs in the first
             * FLI frame and it will update the entire frame. */
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                 bytestream2_skip(&g2, 1);
                pixel_countdown = s->avctx->width;
                while (pixel_countdown > 0) {
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (!byte_run) {
                        av_log(avctx, AV_LOG_ERROR, "Invalid byte run value.\n");
                        return AVERROR_INVALIDDATA;
                    }

                    if (byte_run > 0) {
                        palette_idx1 = bytestream2_get_byte(&g2);
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    } else {  /* copy bytes if byte_run < 0 */
                        byte_run = -byte_run;
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = bytestream2_get_byte(&g2);
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    }
                }

                y_ptr += s->frame->linesize[0];
            }
            break;

        case FLI_COPY:
            /* copy the chunk (uncompressed frame) */
            if (chunk_size - 6 > s->avctx->width * s->avctx->height) {
                av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
                       "bigger than image, skipping chunk\n", chunk_size - 6);
                bytestream2_skip(&g2, chunk_size - 6);
            } else {
                for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height;
                     y_ptr += s->frame->linesize[0]) {
                    bytestream2_get_buffer(&g2, &pixels[y_ptr],
                                           s->avctx->width);
                }
            }
            break;

        case FLI_MINI:
            /* some sort of a thumbnail? disregard this chunk... */
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type);
            break;
        }

        frame_size -= chunk_size;
        num_chunks--;
    }

    /* by the end of the chunk, the stream ptr should equal the frame
     * size (minus 1, possibly); if it doesn't, issue a warning */
    if ((bytestream2_get_bytes_left(&g2) != 0) &&
        (bytestream2_get_bytes_left(&g2) != 1))
        av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
               "and final chunk ptr = %d\n", buf_size,
               buf_size - bytestream2_get_bytes_left(&g2));

    /* make the palette available on the way out */
    memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);
    if (s->new_palette) {
        s->frame->palette_has_changed = 1;
        s->new_palette = 0;
    }

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}
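
The FLI_BRUN case above decodes sign-extended byte runs: a positive control byte repeats the next palette index that many times, a negative one copies that many literal bytes, and zero is rejected as invalid. Below is a standalone sketch of that scheme in plain C; the helper name decode_brun and the sample stream are made up, and CHECK_PIXEL_PTR, which is defined outside this excerpt, presumably performs the equivalent output-bounds check.

#include <stdint.h>
#include <stdio.h>

static int decode_brun(const uint8_t *src, int src_size, uint8_t *dst, int dst_size)
{
    int in = 0, out = 0;

    while (in < src_size && out < dst_size) {
        int run = (int8_t)src[in++];            /* sign-extended control byte */
        if (run > 0) {                          /* RLE: repeat one value      */
            uint8_t value;
            if (in >= src_size)
                return -1;
            value = src[in++];
            while (run-- && out < dst_size)
                dst[out++] = value;
        } else if (run < 0) {                   /* literal copy               */
            run = -run;
            while (run-- && in < src_size && out < dst_size)
                dst[out++] = src[in++];
        } else {
            return -1;                          /* a zero run is invalid      */
        }
    }
    return out;
}

int main(void)
{
    const uint8_t stream[] = { 3, 0xAA, (uint8_t)-2, 0x11, 0x22 };
    uint8_t line[8];
    int n = decode_brun(stream, sizeof(stream), line, sizeof(line));

    for (int i = 0; i < n; i++)
        printf("%02X ", line[i]);               /* AA AA AA 11 22 */
    printf("\n");
    return 0;
}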
Example #12
static int dxv_decompress_dxt5(AVCodecContext *avctx)
{
    DXVContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    uint32_t value, op;
    int idx, prev, state = 0;
    int pos = 4;
    int run = 0;
    int probe, check;

    /* Copy the first four elements */
    AV_WL32(ctx->tex_data +  0, bytestream2_get_le32(gbc));
    AV_WL32(ctx->tex_data +  4, bytestream2_get_le32(gbc));
    AV_WL32(ctx->tex_data +  8, bytestream2_get_le32(gbc));
    AV_WL32(ctx->tex_data + 12, bytestream2_get_le32(gbc));

    /* Process input until the whole texture has been filled */
    while (pos < ctx->tex_size / 4) {
        if (run) {
            run--;

            prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;
            prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;
        } else {
            if (state == 0) {
                value = bytestream2_get_le32(gbc);
                state = 16;
            }
            op = value & 0x3;
            value >>= 2;
            state--;

            switch (op) {
            case 0:
                /* Long copy */
                check = bytestream2_get_byte(gbc) + 1;
                if (check == 256) {
                    do {
                        probe = bytestream2_get_le16(gbc);
                        check += probe;
                    } while (probe == 0xFFFF);
                }
                while (check && pos < ctx->tex_size / 4) {
                    prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
                    AV_WL32(ctx->tex_data + 4 * pos, prev);
                    pos++;

                    prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
                    AV_WL32(ctx->tex_data + 4 * pos, prev);
                    pos++;

                    prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
                    AV_WL32(ctx->tex_data + 4 * pos, prev);
                    pos++;

                    prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
                    AV_WL32(ctx->tex_data + 4 * pos, prev);
                    pos++;

                    check--;
                }

                /* Restart (or exit) the loop */
                continue;
                break;
            case 1:
                /* Load new run value */
                run = bytestream2_get_byte(gbc);
                if (run == 255) {
                    do {
                        probe = bytestream2_get_le16(gbc);
                        run += probe;
                    } while (probe == 0xFFFF);
                }

                /* Copy two dwords from previous data */
                prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
                AV_WL32(ctx->tex_data + 4 * pos, prev);
                pos++;

                prev = AV_RL32(ctx->tex_data + 4 * (pos - 4));
                AV_WL32(ctx->tex_data + 4 * pos, prev);
                pos++;
                break;
            case 2:
                /* Copy two dwords from a previous index */
                idx = 8 + bytestream2_get_le16(gbc);
                prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
                AV_WL32(ctx->tex_data + 4 * pos, prev);
                pos++;

                prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
                AV_WL32(ctx->tex_data + 4 * pos, prev);
                pos++;
                break;
            case 3:
                /* Copy two dwords from input */
                prev = bytestream2_get_le32(gbc);
                AV_WL32(ctx->tex_data + 4 * pos, prev);
                pos++;

                prev = bytestream2_get_le32(gbc);
                AV_WL32(ctx->tex_data + 4 * pos, prev);
                pos++;
                break;
            }
        }

        CHECKPOINT(4);

        /* Copy two elements from a previous offset or from the input buffer */
        if (op) {
            prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;

            prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;
        } else {
            CHECKPOINT(4);

            if (op)
                prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            else
                prev = bytestream2_get_le32(gbc);
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;

            CHECKPOINT(4);

            if (op)
                prev = AV_RL32(ctx->tex_data + 4 * (pos - idx));
            else
                prev = bytestream2_get_le32(gbc);
            AV_WL32(ctx->tex_data + 4 * pos, prev);
            pos++;
        }
    }

    return 0;
}
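
The "run" opcode above (case 1) uses an open-ended length code: a byte gives the base count and, when it saturates at 255, LE16 words keep extending it until one is not 0xFFFF (the "long copy" opcode in case 0 is the same idea with the byte biased by one). A standalone sketch of that length code in plain C, reading from a made-up byte array instead of a GetByteContext:

#include <stdint.h>
#include <stdio.h>

static unsigned read_run(const uint8_t **pp)
{
    const uint8_t *p = *pp;
    unsigned run = *p++;

    if (run == 255) {                   /* saturated: extend with LE16 words */
        unsigned probe;
        do {
            probe = p[0] | p[1] << 8;
            p += 2;
            run += probe;
        } while (probe == 0xFFFF);
    }
    *pp = p;
    return run;
}

int main(void)
{
    const uint8_t a[] = { 200 };                         /* short form      */
    const uint8_t b[] = { 255, 0xFF, 0xFF, 0x05, 0x00 }; /* 255 + 65535 + 5 */
    const uint8_t *p = a;

    printf("%u\n", read_run(&p));   /* 200 */
    p = b;
    printf("%u\n", read_run(&p));   /* 65795 */
    return 0;
}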
Example #13
static int magy_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    MagicYUVContext *s = avctx->priv_data;
    ThreadFrame frame = { .f = data };
    AVFrame *p = data;
    GetByteContext gbyte;
    GetBitContext gbit;
    uint32_t first_offset, offset, next_offset, header_size, slice_width;
    int width, height, format, version, table_size;
    int ret, i, j;

    bytestream2_init(&gbyte, avpkt->data, avpkt->size);
    if (bytestream2_get_le32(&gbyte) != MKTAG('M', 'A', 'G', 'Y'))
        return AVERROR_INVALIDDATA;

    header_size = bytestream2_get_le32(&gbyte);
    if (header_size < 32 || header_size >= avpkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "header or packet too small %"PRIu32"\n", header_size);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_byte(&gbyte);
    if (version != 7) {
        avpriv_request_sample(avctx, "Version %d", version);
        return AVERROR_PATCHWELCOME;
    }

    s->hshift[1] =
    s->vshift[1] =
    s->hshift[2] =
    s->vshift[2] = 0;
    s->decorrelate = 0;
    s->max = 256;
    s->huff_build = huff_build;
    s->magy_decode_slice = magy_decode_slice;

    format = bytestream2_get_byte(&gbyte);
    switch (format) {
    case 0x65:
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        s->decorrelate = 1;
        break;
    case 0x66:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        s->decorrelate = 1;
        break;
    case 0x67:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        break;
    case 0x68:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        s->hshift[1] =
        s->hshift[2] = 1;
        break;
    case 0x69:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        break;
    case 0x6a:
        avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
        break;
    case 0x6b:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        break;
    case 0x6c:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        s->hshift[1] =
        s->hshift[2] = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    case 0x6d:
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        s->decorrelate = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    case 0x6e:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        s->decorrelate = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    case 0x73:
        avctx->pix_fmt = AV_PIX_FMT_GRAY10;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    default:
        avpriv_request_sample(avctx, "Format 0x%X", format);
        return AVERROR_PATCHWELCOME;
    }
    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    bytestream2_skip(&gbyte, 2);
    s->interlaced = !!(bytestream2_get_byte(&gbyte) & 2);
    bytestream2_skip(&gbyte, 3);

    width  = bytestream2_get_le32(&gbyte);
    height = bytestream2_get_le32(&gbyte);
    ret = ff_set_dimensions(avctx, width, height);
    if (ret < 0)
        return ret;

    slice_width = bytestream2_get_le32(&gbyte);
    if (slice_width != avctx->coded_width) {
        avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
        return AVERROR_PATCHWELCOME;
    }
    s->slice_height = bytestream2_get_le32(&gbyte);
    if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid slice height: %d\n", s->slice_height);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&gbyte, 4);

    s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
    if (s->nb_slices > INT_MAX / sizeof(Slice)) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of slices: %d\n", s->nb_slices);
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < s->planes; i++) {
        av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
        if (!s->slices[i])
            return AVERROR(ENOMEM);

        offset = bytestream2_get_le32(&gbyte);
        if (offset >= avpkt->size - header_size)
            return AVERROR_INVALIDDATA;

        if (i == 0)
            first_offset = offset;

        for (j = 0; j < s->nb_slices - 1; j++) {
            s->slices[i][j].start = offset + header_size;

            next_offset = bytestream2_get_le32(&gbyte);
            if (next_offset <= offset || next_offset >= avpkt->size - header_size)
                return AVERROR_INVALIDDATA;

            s->slices[i][j].size = next_offset - offset;
            offset = next_offset;
        }

        s->slices[i][j].start = offset + header_size;
        s->slices[i][j].size  = avpkt->size - s->slices[i][j].start;
    }

    if (bytestream2_get_byte(&gbyte) != s->planes)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(&gbyte, s->nb_slices * s->planes);

    table_size = header_size + first_offset - bytestream2_tell(&gbyte);
    if (table_size < 2)
        return AVERROR_INVALIDDATA;

    ret = init_get_bits8(&gbit, avpkt->data + bytestream2_tell(&gbyte), table_size);
    if (ret < 0)
        return ret;

    ret = build_huffman(avctx, &gbit, s->max);
    if (ret < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    s->buf = avpkt->data;
    s->p = p;
    avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);

    if (avctx->pix_fmt == AV_PIX_FMT_GBRP   ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP  ||
        avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP10) {
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
    }
Example #14
static int rscc_decode_frame(AVCodecContext *avctx, void *data,
                                     int *got_frame, AVPacket *avpkt)
{
    RsccContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    GetByteContext tiles_gbc;
    AVFrame *frame = data;
    const uint8_t *pixels, *raw;
    uint8_t *inflated_tiles = NULL;
    int tiles_nb, packed_size, pixel_size = 0;
    int i, ret = 0;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    /* Size check */
    if (bytestream2_get_bytes_left(gbc) < 12) {
        av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    /* Read number of tiles, and allocate the array */
    tiles_nb = bytestream2_get_le16(gbc);
    av_fast_malloc(&ctx->tiles, &ctx->tiles_size,
                   tiles_nb * sizeof(*ctx->tiles));
    if (!ctx->tiles) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    av_log(avctx, AV_LOG_DEBUG, "Frame with %d tiles.\n", tiles_nb);

    /* When there are more than 5 tiles, they are packed together with
     * a size header. When that size does not match the number of tiles
     * times the tile size, the tile list is compressed and needs to be
     * inflated as well. */
    if (tiles_nb > 5) {
        uLongf packed_tiles_size;

        if (tiles_nb < 32)
            packed_tiles_size = bytestream2_get_byte(gbc);
        else
            packed_tiles_size = bytestream2_get_le16(gbc);

        ff_dlog(avctx, "packed tiles of size %lu.\n", packed_tiles_size);

        /* If necessary, uncompress tiles, and hijack the bytestream reader */
        if (packed_tiles_size != tiles_nb * TILE_SIZE) {
            uLongf length = tiles_nb * TILE_SIZE;
            inflated_tiles = av_malloc(length);
            if (!inflated_tiles) {
                ret = AVERROR(ENOMEM);
                goto end;
            }

            ret = uncompress(inflated_tiles, &length,
                             gbc->buffer, packed_tiles_size);
            if (ret) {
                av_log(avctx, AV_LOG_ERROR, "Tile deflate error %d.\n", ret);
                ret = AVERROR_UNKNOWN;
                goto end;
            }

            /* Skip the compressed tile section in the main byte reader,
             * and point it to read the newly uncompressed data */
            bytestream2_skip(gbc, packed_tiles_size);
            bytestream2_init(&tiles_gbc, inflated_tiles, length);
            gbc = &tiles_gbc;
        }
    }

    /* Fill in array of tiles, keeping track of how many pixels are updated */
    for (i = 0; i < tiles_nb; i++) {
        ctx->tiles[i].x = bytestream2_get_le16(gbc);
        ctx->tiles[i].w = bytestream2_get_le16(gbc);
        ctx->tiles[i].y = bytestream2_get_le16(gbc);
        ctx->tiles[i].h = bytestream2_get_le16(gbc);

        pixel_size += ctx->tiles[i].w * ctx->tiles[i].h * ctx->component_size;

        ff_dlog(avctx, "tile %d orig(%d,%d) %dx%d.\n", i,
                ctx->tiles[i].x, ctx->tiles[i].y,
                ctx->tiles[i].w, ctx->tiles[i].h);

        if (ctx->tiles[i].w == 0 || ctx->tiles[i].h == 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "invalid tile %d at (%d.%d) with size %dx%d.\n", i,
                   ctx->tiles[i].x, ctx->tiles[i].y,
                   ctx->tiles[i].w, ctx->tiles[i].h);
            ret = AVERROR_INVALIDDATA;
            goto end;
        } else if (ctx->tiles[i].x + ctx->tiles[i].w > avctx->width ||
                   ctx->tiles[i].y + ctx->tiles[i].h > avctx->height) {
            av_log(avctx, AV_LOG_ERROR,
                   "out of bounds tile %d at (%d.%d) with size %dx%d.\n", i,
                   ctx->tiles[i].x, ctx->tiles[i].y,
                   ctx->tiles[i].w, ctx->tiles[i].h);
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
    }

    /* Reset the reader in case it had been modified before */
    gbc = &ctx->gbc;

    /* Extract how much pixel data the tiles contain */
    if (pixel_size < 0x100)
        packed_size = bytestream2_get_byte(gbc);
    else if (pixel_size < 0x10000)
        packed_size = bytestream2_get_le16(gbc);
    else if (pixel_size < 0x1000000)
        packed_size = bytestream2_get_le24(gbc);
    else
        packed_size = bytestream2_get_le32(gbc);

    ff_dlog(avctx, "pixel_size %d packed_size %d.\n", pixel_size, packed_size);

    if (packed_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid tile size %d\n", packed_size);
        ret = AVERROR_INVALIDDATA;
        goto end;
    }

    /* Get pixels buffer, it may be deflated or just raw */
    if (pixel_size == packed_size) {
        if (bytestream2_get_bytes_left(gbc) < pixel_size) {
            av_log(avctx, AV_LOG_ERROR, "Insufficient input for %d\n", pixel_size);
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        pixels = gbc->buffer;
    } else {
        uLongf len = ctx->inflated_size;
        if (bytestream2_get_bytes_left(gbc) < packed_size) {
            av_log(avctx, AV_LOG_ERROR, "Insufficient input for %d\n", packed_size);
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        ret = uncompress(ctx->inflated_buf, &len, gbc->buffer, packed_size);
        if (ret) {
            av_log(avctx, AV_LOG_ERROR, "Pixel deflate error %d.\n", ret);
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        pixels = ctx->inflated_buf;
    }

    /* Allocate when needed */
    ret = ff_reget_buffer(avctx, ctx->reference);
    if (ret < 0)
        goto end;

    /* Pointer to actual pixels, will be updated when data is consumed */
    raw = pixels;
    for (i = 0; i < tiles_nb; i++) {
        uint8_t *dst = ctx->reference->data[0] + ctx->reference->linesize[0] *
                       (avctx->height - ctx->tiles[i].y - 1) +
                       ctx->tiles[i].x * ctx->component_size;
        av_image_copy_plane(dst, -1 * ctx->reference->linesize[0],
                            raw, ctx->tiles[i].w * ctx->component_size,
                            ctx->tiles[i].w * ctx->component_size,
                            ctx->tiles[i].h);
        raw += ctx->tiles[i].w * ctx->component_size * ctx->tiles[i].h;
    }

    /* Frame is ready to be output */
    ret = av_frame_ref(frame, ctx->reference);
    if (ret < 0)
        goto end;

    /* Keyframe when the number of pixels updated matches the whole surface */
    if (pixel_size == ctx->inflated_size) {
        frame->pict_type = AV_PICTURE_TYPE_I;
        frame->key_frame = 1;
    } else {
        frame->pict_type = AV_PICTURE_TYPE_P;
    }
    *got_frame = 1;

end:
    av_free(inflated_tiles);
    return ret;
}
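
A note on the packed-size read in the middle of rscc_decode_frame above: the width of the size field grows with the amount of pixel data the tiles are expected to cover (1, 2, 3 or 4 little-endian bytes). A minimal sketch of that logic, assuming it is built inside libavcodec where bytestream.h is available; read_packed_size is an illustrative name, not part of the decoder:

#include "bytestream.h"

/* Illustrative helper mirroring the packed-size read above: pick the field
 * width from the expected (unpacked) pixel payload size. */
static unsigned read_packed_size(GetByteContext *gbc, unsigned pixel_size)
{
    if (pixel_size < 0x100)          /* fits in one byte */
        return bytestream2_get_byte(gbc);
    else if (pixel_size < 0x10000)   /* fits in two bytes */
        return bytestream2_get_le16(gbc);
    else if (pixel_size < 0x1000000) /* fits in three bytes */
        return bytestream2_get_le24(gbc);
    else
        return bytestream2_get_le32(gbc);
}
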
Exemple #15
0
static int flic_decode_frame_24BPP(AVCodecContext *avctx,
                                   void *data, int *got_frame,
                                   const uint8_t *buf, int buf_size)
{
    FlicDecodeContext *s = avctx->priv_data;

    GetByteContext g2;
    int pixel_ptr;
    unsigned char palette_idx1;

    unsigned int frame_size;
    int num_chunks;

    unsigned int chunk_size;
    int chunk_type;

    int i, j, ret;

    int lines;
    int compressed_lines;
    signed short line_packets;
    int y_ptr;
    int byte_run;
    int pixel_skip;
    int pixel_countdown;
    unsigned char *pixels;
    int pixel;
    unsigned int pixel_limit;

    bytestream2_init(&g2, buf, buf_size);

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    pixels = s->frame->data[0];
    pixel_limit = s->avctx->height * s->frame->linesize[0];

    frame_size = bytestream2_get_le32(&g2);
    bytestream2_skip(&g2, 2);  /* skip the magic number */
    num_chunks = bytestream2_get_le16(&g2);
    bytestream2_skip(&g2, 8);  /* skip padding */
    if (frame_size > buf_size)
        frame_size = buf_size;

    if (frame_size < 16)
        return AVERROR_INVALIDDATA;
    frame_size -= 16;

    /* iterate through the chunks */
    while ((frame_size > 0) && (num_chunks > 0) &&
            bytestream2_get_bytes_left(&g2) >= 4) {
        int stream_ptr_after_chunk;
        chunk_size = bytestream2_get_le32(&g2);
        if (chunk_size > frame_size) {
            av_log(avctx, AV_LOG_WARNING,
                   "Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
            chunk_size = frame_size;
        }
        stream_ptr_after_chunk = bytestream2_tell(&g2) - 4 + chunk_size;

        chunk_type = bytestream2_get_le16(&g2);


        switch (chunk_type) {
        case FLI_256_COLOR:
        case FLI_COLOR:
            /* For some reason, it seems that non-palettized flics do
             * include one of these chunks in their first frame.
             * Why I do not know, it seems rather extraneous. */
            ff_dlog(avctx,
                    "Unexpected Palette chunk %d in non-palettized FLC\n",
                    chunk_type);
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_DELTA:
        case FLI_DTA_LC:
            y_ptr = 0;
            compressed_lines = bytestream2_get_le16(&g2);
            while (compressed_lines > 0) {
                if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk)
                    break;
                if (y_ptr > pixel_limit)
                    return AVERROR_INVALIDDATA;
                line_packets = bytestream2_get_le16(&g2);
                if (line_packets < 0) {
                    line_packets = -line_packets;
                    if (line_packets > s->avctx->height)
                        return AVERROR_INVALIDDATA;
                    y_ptr += line_packets * s->frame->linesize[0];
                } else {
                    compressed_lines--;
                    pixel_ptr = y_ptr;
                    CHECK_PIXEL_PTR(0);
                    pixel_countdown = s->avctx->width;
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk)
                            break;
                        pixel_skip = bytestream2_get_byte(&g2);
                        pixel_ptr += (pixel_skip*3); /* Pixel is 3 bytes wide */
                        pixel_countdown -= pixel_skip;
                        byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                        if (byte_run < 0) {
                            byte_run = -byte_run;
                            pixel    = bytestream2_get_le24(&g2);
                            CHECK_PIXEL_PTR(3 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown -= 1) {
                                AV_WL24(&pixels[pixel_ptr], pixel);
                                pixel_ptr += 3;
                            }
                        } else {
                            if (bytestream2_tell(&g2) + 2*byte_run > stream_ptr_after_chunk)
                                break;
                            CHECK_PIXEL_PTR(2 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                pixel = bytestream2_get_le24(&g2);
                                AV_WL24(&pixels[pixel_ptr], pixel);
                                pixel_ptr += 3;
                            }
                        }
                    }

                    y_ptr += s->frame->linesize[0];
                }
            }
            break;

        case FLI_LC:
            av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-palettized FLC\n");
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_BLACK:
            /* set the whole frame to 0x00 which is black for 24 bit mode. */
            memset(pixels, 0x00,
                   s->frame->linesize[0] * s->avctx->height);
            break;

        case FLI_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = (s->avctx->width * 3);

                while (pixel_countdown > 0) {
                    if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
                        break;
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        palette_idx1 = bytestream2_get_byte(&g2);
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) (linea%d)\n",
                                       pixel_countdown, lines);
                        }
                    } else {  /* copy bytes if byte_run < 0 */
                        byte_run = -byte_run;
                        if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk)
                            break;
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            palette_idx1 = bytestream2_get_byte(&g2);
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    }
                }

                y_ptr += s->frame->linesize[0];
            }
            break;

        case FLI_DTA_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */

                while (pixel_countdown > 0) {
                    if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
                        break;
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        pixel = bytestream2_get_le24(&g2);
                        CHECK_PIXEL_PTR(3 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            AV_WL24(pixels + pixel_ptr, pixel);
                            pixel_ptr += 3;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    } else {  /* copy pixels if byte_run < 0 */
                        byte_run = -byte_run;
                        if (bytestream2_tell(&g2) + 3 * byte_run > stream_ptr_after_chunk)
                            break;
                        CHECK_PIXEL_PTR(3 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixel = bytestream2_get_le24(&g2);
                            AV_WL24(pixels + pixel_ptr, pixel);
                            pixel_ptr  += 3;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    }
                }

                y_ptr += s->frame->linesize[0];
            }
            break;

        case FLI_COPY:
        case FLI_DTA_COPY:
            /* copy the chunk (uncompressed frame) */
            if (chunk_size - 6 > (unsigned int)(FFALIGN(s->avctx->width, 2) * s->avctx->height)*3) {
                av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
                       "bigger than image, skipping chunk\n", chunk_size - 6);
                bytestream2_skip(&g2, chunk_size - 6);
            } else {
                for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height;
                     y_ptr += s->frame->linesize[0]) {

                    pixel_countdown = s->avctx->width;
                    pixel_ptr = 0;
                    while (pixel_countdown > 0) {
                        pixel = bytestream2_get_le24(&g2);
                        AV_WL24(&pixels[y_ptr + pixel_ptr], pixel);
                        pixel_ptr += 3;
                        pixel_countdown--;
                    }
                    if (s->avctx->width & 1)
                        bytestream2_skip(&g2, 3);
                }
            }
            break;

        case FLI_MINI:
            /* some sort of a thumbnail? disregard this chunk... */
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type);
            break;
        }

        frame_size -= chunk_size;
        num_chunks--;
    }

    /* by the end of the chunk, the stream ptr should equal the frame
     * size (minus 1, possibly); if it doesn't, issue a warning */
    if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1))
        av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
               "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}
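
The FLI_DTA_BRUN case above is a signed run-length scheme over 3-byte pixels: a positive count replicates one literal pixel, a negative count copies that many literal pixels verbatim. A self-contained sketch of the same idea on plain byte buffers (decode_brun24 is an illustrative helper, not an FFmpeg function):

#include <stdint.h>
#include <string.h>

/* Signed-run RLE decoder for 3-byte pixels: count > 0 replicates one pixel,
 * count < 0 copies -count literal pixels. Returns bytes written, or -1 on
 * malformed or truncated input. */
static int decode_brun24(const uint8_t *src, int src_size,
                         uint8_t *dst, int dst_size)
{
    int in = 0, out = 0;

    while (in < src_size && out < dst_size) {
        int run = (int8_t)src[in++];            /* signed byte run */
        if (run > 0) {                          /* replicate one pixel */
            if (in + 3 > src_size || out + 3 * run > dst_size)
                return -1;
            for (int j = 0; j < run; j++, out += 3)
                memcpy(dst + out, src + in, 3);
            in += 3;
        } else if (run < 0) {                   /* copy literal pixels */
            run = -run;
            if (in + 3 * run > src_size || out + 3 * run > dst_size)
                return -1;
            memcpy(dst + out, src + in, 3 * run);
            in  += 3 * run;
            out += 3 * run;
        }
        /* run == 0 encodes nothing and is simply skipped */
    }
    return out;
}
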
Exemple #16
0
static int hap_parse_decode_instructions(HapContext *ctx, int size)
{
    GetByteContext *gbc = &ctx->gbc;
    int section_size;
    enum HapSectionType section_type;
    int is_first_table = 1, had_offsets = 0, had_compressors = 0, had_sizes = 0;
    int i, ret;

    while (size > 0) {
        int stream_remaining = bytestream2_get_bytes_left(gbc);
        ret = parse_section_header(gbc, &section_size, &section_type);
        if (ret != 0)
            return ret;

        size -= stream_remaining - bytestream2_get_bytes_left(gbc);

        switch (section_type) {
            case HAP_ST_COMPRESSOR_TABLE:
                ret = ff_hap_set_chunk_count(ctx, section_size, is_first_table);
                if (ret != 0)
                    return ret;
                for (i = 0; i < section_size; i++) {
                    ctx->chunks[i].compressor = bytestream2_get_byte(gbc) << 4;
                }
                had_compressors = 1;
                is_first_table = 0;
                break;
            case HAP_ST_SIZE_TABLE:
                ret = ff_hap_set_chunk_count(ctx, section_size / 4, is_first_table);
                if (ret != 0)
                    return ret;
                for (i = 0; i < section_size / 4; i++) {
                    ctx->chunks[i].compressed_size = bytestream2_get_le32(gbc);
                }
                had_sizes = 1;
                is_first_table = 0;
                break;
            case HAP_ST_OFFSET_TABLE:
                ret = ff_hap_set_chunk_count(ctx, section_size / 4, is_first_table);
                if (ret != 0)
                    return ret;
                for (i = 0; i < section_size / 4; i++) {
                    ctx->chunks[i].compressed_offset = bytestream2_get_le32(gbc);
                }
                had_offsets = 1;
                is_first_table = 0;
                break;
            default:
                break;
        }
        size -= section_size;
    }

    if (!had_sizes || !had_compressors)
        return AVERROR_INVALIDDATA;

    /* The offsets table is optional. If not present, calculate offsets by
     * summing the sizes of preceding chunks. */
    if (!had_offsets) {
        size_t running_size = 0;
        for (i = 0; i < ctx->chunk_count; i++) {
            ctx->chunks[i].compressed_offset = running_size;
            running_size += ctx->chunks[i].compressed_size;
        }
    }

    return 0;
}
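
When the offset table is absent, the offsets are nothing more than a running prefix sum of the chunk sizes, which is what the fallback loop above computes. The same step in isolation, with a hypothetical chunk record standing in for the decoder's own ctx->chunks:

#include <stddef.h>

/* Hypothetical stand-in for the decoder's chunk records. */
struct chunk {
    size_t compressed_size;
    size_t compressed_offset;
};

/* Derive each offset by summing the sizes of all preceding chunks. */
static void fill_chunk_offsets(struct chunk *chunks, int count)
{
    size_t running = 0;
    for (int i = 0; i < count; i++) {
        chunks[i].compressed_offset = running;
        running += chunks[i].compressed_size;
    }
}
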
Exemple #17
0
static int dxv_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DXVContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    GetByteContext *gbc = &ctx->gbc;
    int (*decompress_tex)(AVCodecContext *avctx);
    const char *msgcomp, *msgtext;
    uint32_t tag;
    int version_major, version_minor = 0;
    int size = 0, old_type = 0;
    int ret;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    tag = bytestream2_get_le32(gbc);
    switch (tag) {
    case MKBETAG('D', 'X', 'T', '1'):
        decompress_tex = dxv_decompress_dxt1;
        ctx->tex_funct = ctx->texdsp.dxt1_block;
        ctx->tex_rat   = 8;
        ctx->tex_step  = 8;
        msgcomp = "DXTR1";
        msgtext = "DXT1";
        break;
    case MKBETAG('D', 'X', 'T', '5'):
        decompress_tex = dxv_decompress_dxt5;
        ctx->tex_funct = ctx->texdsp.dxt5_block;
        ctx->tex_rat   = 4;
        ctx->tex_step  = 16;
        msgcomp = "DXTR5";
        msgtext = "DXT5";
        break;
    case MKBETAG('Y', 'C', 'G', '6'):
    case MKBETAG('Y', 'G', '1', '0'):
        avpriv_report_missing_feature(avctx, "Tag 0x%08X", tag);
        return AVERROR_PATCHWELCOME;
    default:
        /* Old version does not have a real header, just size and type. */
        size = tag & 0x00FFFFFF;
        old_type = tag >> 24;
        version_major = (old_type & 0x0F) - 1;

        if (old_type & 0x80) {
            msgcomp = "RAW";
            decompress_tex = dxv_decompress_raw;
        } else {
            msgcomp = "LZF";
            decompress_tex = dxv_decompress_lzf;
        }

        if (old_type & 0x40) {
            msgtext = "DXT5";

            ctx->tex_funct = ctx->texdsp.dxt5_block;
            ctx->tex_step  = 16;
        } else if (old_type & 0x20 || version_major == 1) {
            msgtext = "DXT1";

            ctx->tex_funct = ctx->texdsp.dxt1_block;
            ctx->tex_step  = 8;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported header (0x%08X)\n.", tag);
            return AVERROR_INVALIDDATA;
        }
        ctx->tex_rat = 1;
        break;
    }

    /* New header is 12 bytes long. */
    if (!old_type) {
        version_major = bytestream2_get_byte(gbc) - 1;
        version_minor = bytestream2_get_byte(gbc);

        /* Encoder copies texture data when compression is not advantageous. */
        if (bytestream2_get_byte(gbc)) {
            msgcomp = "RAW";
            ctx->tex_rat = 1;
            decompress_tex = dxv_decompress_raw;
        }

        bytestream2_skip(gbc, 1); // unknown
        size = bytestream2_get_le32(gbc);
    }
    av_log(avctx, AV_LOG_DEBUG,
           "%s compression with %s texture (version %d.%d)\n",
           msgcomp, msgtext, version_major, version_minor);

    if (size != bytestream2_get_bytes_left(gbc)) {
        av_log(avctx, AV_LOG_ERROR,
               "Incomplete or invalid file (header %d, left %d).\n",
               size, bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    ctx->tex_size = avctx->coded_width * avctx->coded_height * 4 / ctx->tex_rat;
    ret = av_reallocp(&ctx->tex_data, ctx->tex_size);
    if (ret < 0)
        return ret;

    /* Decompress texture out of the intermediate compression. */
    ret = decompress_tex(avctx);
    if (ret < 0)
        return ret;

    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    /* Now decompress the texture with the standard functions. */
    avctx->execute2(avctx, decompress_texture_thread,
                    tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output. */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
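
In the default branch above, a legacy DXV packet has no real header: the 32-bit tag packs a 24-bit payload size plus a flags byte. A hedged sketch of that split (the struct and function names are illustrative, not part of the decoder):

#include <stdint.h>

/* Legacy DXV tag layout as handled above: low 24 bits = payload size,
 * top byte = flags (bit 7: raw copy, bit 6: DXT5, bit 5: DXT1,
 * low nibble: version number + 1). */
struct dxv_old_header {
    uint32_t size;
    int      is_raw;
    int      is_dxt5;
    int      is_dxt1;
    int      version_major;
};

static struct dxv_old_header parse_old_dxv_tag(uint32_t tag)
{
    struct dxv_old_header h;
    uint8_t type = tag >> 24;

    h.size          = tag & 0x00FFFFFF;
    h.is_raw        = !!(type & 0x80);
    h.is_dxt5       = !!(type & 0x40);
    h.is_dxt1       = !!(type & 0x20);
    h.version_major = (type & 0x0F) - 1;
    return h;
}
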
Exemple #18
0
static int flic_decode_frame_15_16BPP(AVCodecContext *avctx,
                                      void *data, int *got_frame,
                                      const uint8_t *buf, int buf_size)
{
    /* Note, the only difference between the 15Bpp and 16Bpp */
    /* Format is the pixel format, the packets are processed the same. */
    FlicDecodeContext *s = avctx->priv_data;

    GetByteContext g2;
    int pixel_ptr;
    unsigned char palette_idx1;

    unsigned int frame_size;
    int num_chunks;

    unsigned int chunk_size;
    int chunk_type;

    int i, j, ret;

    int lines;
    int compressed_lines;
    signed short line_packets;
    int y_ptr;
    int byte_run;
    int pixel_skip;
    int pixel_countdown;
    unsigned char *pixels;
    int pixel;
    unsigned int pixel_limit;

    bytestream2_init(&g2, buf, buf_size);

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }

    pixels = s->frame->data[0];
    pixel_limit = s->avctx->height * s->frame->linesize[0];

    frame_size = bytestream2_get_le32(&g2);
    bytestream2_skip(&g2, 2);  /* skip the magic number */
    num_chunks = bytestream2_get_le16(&g2);
    bytestream2_skip(&g2, 8);  /* skip padding */

    frame_size -= 16;

    /* iterate through the chunks */
    while ((frame_size > 0) && (num_chunks > 0)) {
        chunk_size = bytestream2_get_le32(&g2);
        chunk_type = bytestream2_get_le16(&g2);

        switch (chunk_type) {
        case FLI_256_COLOR:
        case FLI_COLOR:
            /* For some reason, it seems that non-palettized flics do
             * include one of these chunks in their first frame.
             * Why I do not know, it seems rather extraneous. */
            ff_dlog(avctx,
                    "Unexpected Palette chunk %d in non-palettized FLC\n",
                    chunk_type);
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_DELTA:
        case FLI_DTA_LC:
            y_ptr = 0;
            compressed_lines = bytestream2_get_le16(&g2);
            while (compressed_lines > 0) {
                line_packets = bytestream2_get_le16(&g2);
                if (line_packets < 0) {
                    line_packets = -line_packets;
                    y_ptr += line_packets * s->frame->linesize[0];
                } else {
                    compressed_lines--;
                    pixel_ptr = y_ptr;
                    CHECK_PIXEL_PTR(0);
                    pixel_countdown = s->avctx->width;
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        pixel_skip = bytestream2_get_byte(&g2);
                        pixel_ptr += (pixel_skip*2); /* Pixel is 2 bytes wide */
                        pixel_countdown -= pixel_skip;
                        byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                        if (byte_run < 0) {
                            byte_run = -byte_run;
                            pixel    = bytestream2_get_le16(&g2);
                            CHECK_PIXEL_PTR(2 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown -= 2) {
                                *((signed short*)(&pixels[pixel_ptr])) = pixel;
                                pixel_ptr += 2;
                            }
                        } else {
                            CHECK_PIXEL_PTR(2 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2);
                                pixel_ptr += 2;
                            }
                        }
                    }

                    y_ptr += s->frame->linesize[0];
                }
            }
            break;

        case FLI_LC:
            av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-palettized FLC\n");
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_BLACK:
            /* set the whole frame to 0x0000 which is black in both 15Bpp and 16Bpp modes. */
            memset(pixels, 0x0000,
                   s->frame->linesize[0] * s->avctx->height);
            break;

        case FLI_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = (s->avctx->width * 2);

                while (pixel_countdown > 0) {
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        palette_idx1 = bytestream2_get_byte(&g2);
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) (linea%d)\n",
                                       pixel_countdown, lines);
                        }
                    } else {  /* copy bytes if byte_run < 0 */
                        byte_run = -byte_run;
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            palette_idx1 = bytestream2_get_byte(&g2);
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    }
                }

                /* Now FLX is strange, in that it is "byte" as opposed to "pixel" run length compressed.
                 * This does not give us any good opportunity to perform word endian conversion
                 * during decompression. So if it is required (i.e., this is not a LE target), we do
                 * a second pass over the line here, swapping the bytes.
                 */
#if HAVE_BIGENDIAN
                pixel_ptr = y_ptr;
                pixel_countdown = s->avctx->width;
                while (pixel_countdown > 0) {
                    /* byteswap the freshly decoded 16-bit pixels in place */
                    *((signed short*)(&pixels[pixel_ptr])) = AV_RL16(&pixels[pixel_ptr]);
                    pixel_ptr += 2;
                    pixel_countdown--;
                }
#endif
                y_ptr += s->frame->linesize[0];
            }
            break;

        case FLI_DTA_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */

                while (pixel_countdown > 0) {
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        pixel    = bytestream2_get_le16(&g2);
                        CHECK_PIXEL_PTR(2 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            *((signed short*)(&pixels[pixel_ptr])) = pixel;
                            pixel_ptr += 2;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    } else {  /* copy pixels if byte_run < 0 */
                        byte_run = -byte_run;
                        CHECK_PIXEL_PTR(2 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            *((signed short*)(&pixels[pixel_ptr])) = bytestream2_get_le16(&g2);
                            pixel_ptr  += 2;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    }
                }

                y_ptr += s->frame->linesize[0];
            }
            break;

        case FLI_COPY:
        case FLI_DTA_COPY:
            /* copy the chunk (uncompressed frame) */
            if (chunk_size - 6 > (unsigned int)(s->avctx->width * s->avctx->height)*2) {
                av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
                       "bigger than image, skipping chunk\n", chunk_size - 6);
                bytestream2_skip(&g2, chunk_size - 6);
            } else {

                for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height;
                     y_ptr += s->frame->linesize[0]) {

                    pixel_countdown = s->avctx->width;
                    pixel_ptr = 0;
                    while (pixel_countdown > 0) {
                      *((signed short*)(&pixels[y_ptr + pixel_ptr])) = bytestream2_get_le16(&g2);
                      pixel_ptr += 2;
                      pixel_countdown--;
                    }
                }
            }
            break;

        case FLI_MINI:
            /* some sort of a thumbnail? disregard this chunk... */
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type);
            break;
        }

        frame_size -= chunk_size;
        num_chunks--;
    }

    /* by the end of the chunk, the stream ptr should equal the frame
     * size (minus 1, possibly); if it doesn't, issue a warning */
    if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1))
        av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
               "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}
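
The HAVE_BIGENDIAN pass above exists because FLX stores its 15/16-bit pixels byte-wise in little-endian order, so on a big-endian host each freshly decoded row has to be byteswapped in place. A minimal sketch of that second pass (swap_line16 is an illustrative helper, not part of the decoder):

#include <stdint.h>

/* Byteswap one row of 16-bit pixels in place; width is in pixels. */
static void swap_line16(uint8_t *line, int width)
{
    for (int x = 0; x < width; x++) {
        uint8_t lo = line[2 * x];
        line[2 * x]     = line[2 * x + 1];
        line[2 * x + 1] = lo;
    }
}
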
Exemple #19
0
static int flic_decode_frame_8BPP(AVCodecContext *avctx,
                                  void *data, int *data_size,
                                  const uint8_t *buf, int buf_size)
{
    FlicDecodeContext *s = avctx->priv_data;

    GetByteContext g2;
    int pixel_ptr;
    int palette_ptr;
    unsigned char palette_idx1;
    unsigned char palette_idx2;

    unsigned int frame_size;
    int num_chunks;

    unsigned int chunk_size;
    int chunk_type;

    int i, j;

    int color_packets;
    int color_changes;
    int color_shift;
    unsigned char r, g, b;

    int lines;
    int compressed_lines;
    int starting_line;
    signed short line_packets;
    int y_ptr;
    int byte_run;
    int pixel_skip;
    int pixel_countdown;
    unsigned char *pixels;
    unsigned int pixel_limit;

    bytestream2_init(&g2, buf, buf_size);

    s->frame.reference = 3;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    pixels = s->frame.data[0];
    pixel_limit = s->avctx->height * s->frame.linesize[0];
    if (buf_size < 16 || buf_size > INT_MAX - (3 * 256 + FF_INPUT_BUFFER_PADDING_SIZE))
        return AVERROR_INVALIDDATA;
    frame_size = bytestream2_get_le32(&g2);
    if (frame_size > buf_size)
        frame_size = buf_size;
    bytestream2_skip(&g2, 2); /* skip the magic number */
    num_chunks = bytestream2_get_le16(&g2);
    bytestream2_skip(&g2, 8);  /* skip padding */

    frame_size -= 16;

    /* iterate through the chunks */
    while ((frame_size >= 6) && (num_chunks > 0)) {
        int stream_ptr_after_chunk;
        chunk_size = bytestream2_get_le32(&g2);
        if (chunk_size > frame_size) {
            av_log(avctx, AV_LOG_WARNING,
                   "Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
            chunk_size = frame_size;
        }
        stream_ptr_after_chunk = bytestream2_tell(&g2) - 4 + chunk_size;

        chunk_type = bytestream2_get_le16(&g2);

        switch (chunk_type) {
        case FLI_256_COLOR:
        case FLI_COLOR:
            /* check special case: If this file is from the Magic Carpet
             * game and uses 6-bit colors even though it reports 256-color
             * chunks in a 0xAF12-type file (fli_type is set to 0xAF13 during
             * initialization) */
            if ((chunk_type == FLI_256_COLOR) && (s->fli_type != FLC_MAGIC_CARPET_SYNTHETIC_TYPE_CODE))
                color_shift = 0;
            else
                color_shift = 2;
            /* set up the palette */
            color_packets = bytestream2_get_le16(&g2);
            palette_ptr = 0;
            for (i = 0; i < color_packets; i++) {
                /* first byte is how many colors to skip */
                palette_ptr += bytestream2_get_byte(&g2);

                /* next byte indicates how many entries to change */
                color_changes = bytestream2_get_byte(&g2);

                /* if there are 0 color changes, there are actually 256 */
                if (color_changes == 0)
                    color_changes = 256;

                if (bytestream2_tell(&g2) + color_changes * 3 > stream_ptr_after_chunk)
                    break;

                for (j = 0; j < color_changes; j++) {
                    unsigned int entry;

                    /* wrap around, for good measure */
                    if ((unsigned)palette_ptr >= 256)
                        palette_ptr = 0;

                    r = bytestream2_get_byte(&g2) << color_shift;
                    g = bytestream2_get_byte(&g2) << color_shift;
                    b = bytestream2_get_byte(&g2) << color_shift;
                    entry = 0xFF << 24 | r << 16 | g << 8 | b;
                    if (color_shift == 2)
                        entry |= entry >> 6 & 0x30303;
                    if (s->palette[palette_ptr] != entry)
                        s->new_palette = 1;
                    s->palette[palette_ptr++] = entry;
                }
            }
            break;

        case FLI_DELTA:
            y_ptr = 0;
            compressed_lines = bytestream2_get_le16(&g2);
            while (compressed_lines > 0) {
                if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk)
                    break;
                line_packets = bytestream2_get_le16(&g2);
                if ((line_packets & 0xC000) == 0xC000) {
                    // line skip opcode
                    line_packets = -line_packets;
                    y_ptr += line_packets * s->frame.linesize[0];
                } else if ((line_packets & 0xC000) == 0x4000) {
                    av_log(avctx, AV_LOG_ERROR, "Undefined opcode (%x) in DELTA_FLI\n", line_packets);
                } else if ((line_packets & 0xC000) == 0x8000) {
                    // "last byte" opcode
                    pixel_ptr= y_ptr + s->frame.linesize[0] - 1;
                    CHECK_PIXEL_PTR(0);
                    pixels[pixel_ptr] = line_packets & 0xff;
                } else {
                    compressed_lines--;
                    pixel_ptr = y_ptr;
                    CHECK_PIXEL_PTR(0);
                    pixel_countdown = s->avctx->width;
                    for (i = 0; i < line_packets; i++) {
                        if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk)
                            break;
                        /* account for the skip bytes */
                        pixel_skip = bytestream2_get_byte(&g2);
                        pixel_ptr += pixel_skip;
                        pixel_countdown -= pixel_skip;
                        byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                        if (byte_run < 0) {
                            byte_run = -byte_run;
                            palette_idx1 = bytestream2_get_byte(&g2);
                            palette_idx2 = bytestream2_get_byte(&g2);
                            CHECK_PIXEL_PTR(byte_run * 2);
                            for (j = 0; j < byte_run; j++, pixel_countdown -= 2) {
                                pixels[pixel_ptr++] = palette_idx1;
                                pixels[pixel_ptr++] = palette_idx2;
                            }
                        } else {
                            CHECK_PIXEL_PTR(byte_run * 2);
                            if (bytestream2_tell(&g2) + byte_run * 2 > stream_ptr_after_chunk)
                                break;
                            for (j = 0; j < byte_run * 2; j++, pixel_countdown--) {
                                pixels[pixel_ptr++] = bytestream2_get_byte(&g2);
                            }
                        }
                    }

                    y_ptr += s->frame.linesize[0];
                }
            }
            break;

        case FLI_LC:
            /* line compressed */
            starting_line = bytestream2_get_le16(&g2);
            y_ptr = 0;
            y_ptr += starting_line * s->frame.linesize[0];

            compressed_lines = bytestream2_get_le16(&g2);
            while (compressed_lines > 0) {
                pixel_ptr = y_ptr;
                CHECK_PIXEL_PTR(0);
                pixel_countdown = s->avctx->width;
                if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
                    break;
                line_packets = bytestream2_get_byte(&g2);
                if (line_packets > 0) {
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
                            break;
                        pixel_skip = bytestream2_get_byte(&g2);
                        pixel_ptr += pixel_skip;
                        pixel_countdown -= pixel_skip;
                        byte_run = sign_extend(bytestream2_get_byte(&g2),8);
                        if (byte_run > 0) {
                            CHECK_PIXEL_PTR(byte_run);
                            if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk)
                                break;
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                pixels[pixel_ptr++] = bytestream2_get_byte(&g2);
                            }
                        } else if (byte_run < 0) {
                            byte_run = -byte_run;
                            palette_idx1 = bytestream2_get_byte(&g2);
                            CHECK_PIXEL_PTR(byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                pixels[pixel_ptr++] = palette_idx1;
                            }
                        }
                    }
                }

                y_ptr += s->frame.linesize[0];
                compressed_lines--;
            }
            break;

        case FLI_BLACK:
            /* set the whole frame to color 0 (which is usually black) */
            memset(pixels, 0,
                s->frame.linesize[0] * s->avctx->height);
            break;

        case FLI_BRUN:
            /* Byte run compression: This chunk type only occurs in the first
             * FLI frame and it will update the entire frame. */
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                 bytestream2_skip(&g2, 1);
                pixel_countdown = s->avctx->width;
                while (pixel_countdown > 0) {
                    if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
                        break;
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        palette_idx1 = bytestream2_get_byte(&g2);
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    } else {  /* copy bytes if byte_run < 0 */
                        byte_run = -byte_run;
                        CHECK_PIXEL_PTR(byte_run);
                        if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk)
                            break;
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = bytestream2_get_byte(&g2);
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    }
                }

                y_ptr += s->frame.linesize[0];
            }
            break;

        case FLI_COPY:
            /* copy the chunk (uncompressed frame) */
            if (chunk_size - 6 != s->avctx->width * s->avctx->height) {
                av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
                       "has incorrect size, skipping chunk\n", chunk_size - 6);
                bytestream2_skip(&g2, chunk_size - 6);
            } else {
                for (y_ptr = 0; y_ptr < s->frame.linesize[0] * s->avctx->height;
                     y_ptr += s->frame.linesize[0]) {
                    bytestream2_get_buffer(&g2, &pixels[y_ptr],
                                           s->avctx->width);
                }
            }
            break;

        case FLI_MINI:
            /* some sort of a thumbnail? disregard this chunk... */
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type);
            break;
        }

        if (stream_ptr_after_chunk - bytestream2_tell(&g2) > 0)
            bytestream2_skip(&g2, stream_ptr_after_chunk - bytestream2_tell(&g2));

        frame_size -= chunk_size;
        num_chunks--;
    }

    /* by the end of the chunk, the stream ptr should equal the frame
     * size (minus 1 or 2, possibly); if it doesn't, issue a warning */
    if (bytestream2_get_bytes_left(&g2) > 2)
        av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
               "and final chunk ptr = %d\n", buf_size,
               buf_size - bytestream2_get_bytes_left(&g2));

    /* make the palette available on the way out */
    memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
    if (s->new_palette) {
        s->frame.palette_has_changed = 1;
        s->new_palette = 0;
    }

    *data_size=sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    return buf_size;
}
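
The color_shift logic in the FLI_COLOR handler above up-scales 6-bit VGA palette components to 8 bits: each component is shifted left by two, and the packed entry |= entry >> 6 & 0x30303 step then copies the two top bits into the freed low bits so that 0x3F maps to 0xFF rather than 0xFC. The same math per component, as a stand-alone sketch (helper names are illustrative):

#include <stdint.h>

/* Expand one 6-bit VGA palette component (0..63) to 8 bits (0..255) by
 * replicating its two top bits into the low bits, so 0x3F -> 0xFF. */
static uint8_t expand_vga6(uint8_t c6)
{
    return (uint8_t)((c6 << 2) | (c6 >> 4));
}

/* Build one 0xAARRGGBB palette entry from 6-bit r/g/b components. */
static uint32_t vga6_to_argb(uint8_t r, uint8_t g, uint8_t b)
{
    return 0xFFu << 24 | expand_vga6(r) << 16 | expand_vga6(g) << 8 | expand_vga6(b);
}
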
Exemple #20
0
static int dds_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DDSContext *ctx = avctx->priv_data;
    GetByteContext *gbc = &ctx->gbc;
    AVFrame *frame = data;
    int mipmap;
    int ret;

    ff_texturedsp_init(&ctx->texdsp);
    bytestream2_init(gbc, avpkt->data, avpkt->size);

    if (bytestream2_get_bytes_left(gbc) < 128) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n",
               bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_le32(gbc) != MKTAG('D', 'D', 'S', ' ') ||
            bytestream2_get_le32(gbc) != 124) { // header size
        av_log(avctx, AV_LOG_ERROR, "Invalid DDS header.\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(gbc, 4); // flags

    avctx->height = bytestream2_get_le32(gbc);
    avctx->width  = bytestream2_get_le32(gbc);
    ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Since codec is based on 4x4 blocks, size is aligned to 4. */
    avctx->coded_width  = FFALIGN(avctx->width,  TEXTURE_BLOCK_W);
    avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H);

    bytestream2_skip(gbc, 4); // pitch
    bytestream2_skip(gbc, 4); // depth
    mipmap = bytestream2_get_le32(gbc);
    if (mipmap != 0)
        av_log(avctx, AV_LOG_VERBOSE, "Found %d mipmaps (ignored).\n", mipmap);

    /* Extract pixel format information, considering additional elements
     * in reserved1 and reserved2. */
    ret = parse_pixel_format(avctx);
    if (ret < 0)
        return ret;

    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;

    if (ctx->compressed) {
        int size = (avctx->coded_height / TEXTURE_BLOCK_H) *
                   (avctx->coded_width / TEXTURE_BLOCK_W) * ctx->tex_ratio;
        ctx->slice_count = av_clip(avctx->thread_count, 1,
                                   avctx->coded_height / TEXTURE_BLOCK_H);

        if (bytestream2_get_bytes_left(gbc) < size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Compressed Buffer is too small (%d < %d).\n",
                   bytestream2_get_bytes_left(gbc), size);
            return AVERROR_INVALIDDATA;
        }

        /* Use the decompress function on the texture, one block per thread. */
        ctx->tex_data = gbc->buffer;
        avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count);
    } else {
        int linesize = av_image_get_linesize(avctx->pix_fmt, frame->width, 0);

        if (ctx->paletted) {
            int i;
            /* Use the first 1024 bytes as palette, then copy the rest. */
            bytestream2_get_buffer(gbc, frame->data[1], 256 * 4);
            for (i = 0; i < 256; i++)
                AV_WN32(frame->data[1] + i*4,
                        (frame->data[1][2+i*4]<<0)+
                        (frame->data[1][1+i*4]<<8)+
                        (frame->data[1][0+i*4]<<16)+
                        (frame->data[1][3+i*4]<<24)
                       );

            frame->palette_has_changed = 1;
        }

        if (bytestream2_get_bytes_left(gbc) < frame->height * linesize) {
            av_log(avctx, AV_LOG_ERROR, "Buffer is too small (%d < %d).\n",
                   bytestream2_get_bytes_left(gbc), frame->height * linesize);
            return AVERROR_INVALIDDATA;
        }

        av_image_copy_plane(frame->data[0], frame->linesize[0],
                            gbc->buffer, linesize,
                            linesize, frame->height);
    }

    /* Run any post processing here if needed. */
    if (avctx->pix_fmt == AV_PIX_FMT_BGRA ||
            avctx->pix_fmt == AV_PIX_FMT_RGBA ||
            avctx->pix_fmt == AV_PIX_FMT_RGB0 ||
            avctx->pix_fmt == AV_PIX_FMT_BGR0 ||
            avctx->pix_fmt == AV_PIX_FMT_YA8)
        run_postproc(avctx, frame);

    /* Frame is ready to be output. */
    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
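
The compressed-path size check above follows directly from the block layout: the image is padded to whole 4x4 texel blocks and each block costs tex_ratio bytes (8 for DXT1, 16 for DXT5). A small sketch of that arithmetic (dxt_buffer_size and the constants are illustrative, not the decoder's own names):

#include <stdint.h>

#define BLOCK_W 4
#define BLOCK_H 4

/* Bytes required for a block-compressed texture: round each dimension up to
 * a whole number of 4x4 blocks, then multiply by the per-block cost. */
static int64_t dxt_buffer_size(int width, int height, int bytes_per_block)
{
    int64_t blocks_w = (width  + BLOCK_W - 1) / BLOCK_W;
    int64_t blocks_h = (height + BLOCK_H - 1) / BLOCK_H;
    return blocks_w * blocks_h * bytes_per_block;
}

/* Example: a 130x66 DXT1 texture pads to 132x68, i.e. 33*17 blocks * 8 bytes
 * per block = 4488 bytes. */
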
Exemple #21
0
static int dxv_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DXVContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    GetByteContext *gbc = &ctx->gbc;
    int (*decompress_tex)(AVCodecContext *avctx);
    uint32_t tag;
    int channels, size = 0, old_type = 0;
    int ret;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    tag = bytestream2_get_le32(gbc);
    switch (tag) {
    case MKBETAG('D', 'X', 'T', '1'):
        decompress_tex = dxv_decompress_dxt1;
        ctx->tex_funct = ctx->texdsp.dxt1_block;
        ctx->tex_rat   = 8;
        ctx->tex_step  = 8;
        av_log(avctx, AV_LOG_DEBUG, "DXTR1 compression and DXT1 texture ");
        break;
    case MKBETAG('D', 'X', 'T', '5'):
        decompress_tex = dxv_decompress_dxt5;
        ctx->tex_funct = ctx->texdsp.dxt5_block;
        ctx->tex_rat   = 4;
        ctx->tex_step  = 16;
        av_log(avctx, AV_LOG_DEBUG, "DXTR5 compression and DXT5 texture ");
        break;
    case MKBETAG('Y', 'C', 'G', '6'):
    case MKBETAG('Y', 'G', '1', '0'):
        avpriv_report_missing_feature(avctx, "Tag 0x%08X", tag);
        return AVERROR_PATCHWELCOME;
    default:
        /* Old version does not have a real header, just size and type. */
        size = tag & 0x00FFFFFF;
        old_type = tag >> 24;
        channels = old_type & 0x0F;
        if (old_type & 0x40) {
            av_log(avctx, AV_LOG_DEBUG, "LZF compression and DXT5 texture ");
            ctx->tex_funct = ctx->texdsp.dxt5_block;
            ctx->tex_step  = 16;
        } else if (old_type & 0x20) {
            av_log(avctx, AV_LOG_DEBUG, "LZF compression and DXT1 texture ");
            ctx->tex_funct = ctx->texdsp.dxt1_block;
            ctx->tex_step  = 8;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported header (0x%08X)\n.", tag);
            return AVERROR_INVALIDDATA;
        }
        decompress_tex = dxv_decompress_lzf;
        ctx->tex_rat = 1;
        break;
    }

    /* New header is 12 bytes long. */
    if (!old_type) {
        channels = bytestream2_get_byte(gbc);
        bytestream2_skip(gbc, 3); // unknown
        size = bytestream2_get_le32(gbc);
    }
    av_log(avctx, AV_LOG_DEBUG, "(%d channels)\n", channels);

    if (size != bytestream2_get_bytes_left(gbc)) {
        av_log(avctx, AV_LOG_ERROR, "Incomplete or invalid file (%u > %u)\n.",
               size, bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    ctx->tex_size = avctx->coded_width * avctx->coded_height * 4 / ctx->tex_rat;
    ret = av_reallocp(&ctx->tex_data, ctx->tex_size);
    if (ret < 0)
        return ret;

    /* Decompress texture out of the intermediate compression. */
    ret = decompress_tex(avctx);
    if (ret < 0)
        return ret;

    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    /* Now decompress the texture with the standard functions. */
    avctx->execute2(avctx, decompress_texture_thread,
                    tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output. */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}