Example #1
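/* CDXL HAM8 frame decoder (apparently FFmpeg's libavcodec/cdxl.c): each 8-bit
 * code carries a 2-bit opcode and a 6-bit value.  Opcode 0 loads a pixel from
 * the 64-entry palette; opcodes 1-3 keep the previous pixel ("Hold-And-Modify")
 * and replace only its blue, red or green component.  The combined 0xRRGGBB
 * value is written 3 bytes per pixel with AV_WL24. */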
static void cdxl_decode_ham8(CDXLVideoContext *c)
{
    AVCodecContext *avctx = c->avctx;
    uint32_t new_palette[64], r, g, b;
    uint8_t *ptr, *out, index, op;
    int x, y;

    ptr = c->new_video;
    out = c->frame.data[0];

    import_palette(c, new_palette);
    import_format(c, avctx->width, c->new_video);

    for (y = 0; y < avctx->height; y++) {
        r = new_palette[0] & 0xFF0000;
        g = new_palette[0] & 0xFF00;
        b = new_palette[0] & 0xFF;
        for (x = 0; x < avctx->width; x++) {
            index  = *ptr++;
            op     = index >> 6;
            index &= 63;
            switch (op) {
            case 0:
                r = new_palette[index] & 0xFF0000;
                g = new_palette[index] & 0xFF00;
                b = new_palette[index] & 0xFF;
                break;
            case 1:
                b = (index <<  2) | (b & 3);
                break;
            case 2:
                r = (index << 18) | (r & (3 << 16));
                break;
            case 3:
                g = (index << 10) | (g & (3 << 8));
                break;
            }
            AV_WL24(out + x * 3, r | g | b);
        }
        out += c->frame.linesize[0];
    }
}
Example #2
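/* HAM6 variant of the same scheme: a 16-entry palette and 4-bit component
 * values.  A modified component is scaled from 4 to 8 bits by multiplying
 * by 0x11 (so 0xF becomes 0xFF) before being merged into the held pixel. */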
static void cdxl_decode_ham6(CDXLVideoContext *c, AVFrame *frame)
{
    AVCodecContext *avctx = c->avctx;
    uint32_t new_palette[16], r, g, b;
    uint8_t *ptr, *out, index, op;
    int x, y;

    ptr = c->new_video;
    out = frame->data[0];

    import_palette(c, new_palette);
    import_format(c, avctx->width, c->new_video);

    for (y = 0; y < avctx->height; y++) {
        r = new_palette[0] & 0xFF0000;
        g = new_palette[0] & 0xFF00;
        b = new_palette[0] & 0xFF;
        for (x = 0; x < avctx->width; x++) {
            index  = *ptr++;
            op     = index >> 4;
            index &= 15;
            switch (op) {
            case 0:
                r = new_palette[index] & 0xFF0000;
                g = new_palette[index] & 0xFF00;
                b = new_palette[index] & 0xFF;
                break;
            case 1:
                b = index * 0x11;
                break;
            case 2:
                r = index * 0x11 << 16;
                break;
            case 3:
                g = index * 0x11 << 8;
                break;
            }
            AV_WL24(out + x * 3, r | g | b);
        }
        out += frame->linesize[0];
    }
}
Example #3
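/* Targa (TGA) image encoder (apparently FFmpeg's targaenc.c): writes the
 * 18-byte header, an optional 256-entry BGR palette for PAL8 input, the pixel
 * data (RLE-compressed when the coder type allows it and the RLE output fits,
 * raw otherwise) and the 26-byte TGA v2 footer. */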
static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *p, int *got_packet)
{
    int bpp, picsize, datasize = -1, ret, i;
    uint8_t *out;

    if(avctx->width > 0xffff || avctx->height > 0xffff) {
        av_log(avctx, AV_LOG_ERROR, "image dimensions too large\n");
        return AVERROR(EINVAL);
    }
    picsize = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
    if ((ret = ff_alloc_packet2(avctx, pkt, picsize + 45)) < 0)
        return ret;

    /* zero out the header and only set applicable fields */
    memset(pkt->data, 0, 12);
    AV_WL16(pkt->data+12, avctx->width);
    AV_WL16(pkt->data+14, avctx->height);
    /* image descriptor byte: bit 5 sets the top-left origin, bits 0-3 give the alpha channel depth */
    pkt->data[17] = 0x20 | (avctx->pix_fmt == PIX_FMT_BGRA ? 8 : 0);

    out = pkt->data + 18;  /* skip past the header we write */

    avctx->bits_per_coded_sample = av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]);
    switch(avctx->pix_fmt) {
    case PIX_FMT_PAL8:
        pkt->data[1]  = 1;          /* palette present */
        pkt->data[2]  = TGA_PAL;    /* uncompressed palettised image */
        pkt->data[6]  = 1;          /* palette contains 256 entries */
        pkt->data[7]  = 24;         /* palette contains 24 bit entries */
        pkt->data[16] = 8;          /* bpp */
        for (i = 0; i < 256; i++)
            AV_WL24(pkt->data + 18 + 3 * i, *(uint32_t *)(p->data[1] + i * 4));
        out += 256 * 3;             /* skip past the palette we just output */
        break;
    case PIX_FMT_GRAY8:
        pkt->data[2]  = TGA_BW;     /* uncompressed grayscale image */
        avctx->bits_per_coded_sample = 0x28;
        pkt->data[16] = 8;          /* bpp */
        break;
    case PIX_FMT_RGB555LE:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        avctx->bits_per_coded_sample =
        pkt->data[16] = 16;         /* bpp */
        break;
    case PIX_FMT_BGR24:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        pkt->data[16] = 24;         /* bpp */
        break;
    case PIX_FMT_BGRA:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        pkt->data[16] = 32;         /* bpp */
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
    }
    bpp = pkt->data[16] >> 3;

    /* try RLE compression */
    if (avctx->coder_type != FF_CODER_TYPE_RAW)
        datasize = targa_encode_rle(out, picsize, p, bpp, avctx->width, avctx->height);

    /* if that worked well, mark the picture as RLE compressed */
    if(datasize >= 0)
        pkt->data[2] |= 8;

    /* if RLE didn't make it smaller, go back to no compression */
    else datasize = targa_encode_normal(out, p, bpp, avctx->width, avctx->height);

    out += datasize;

    /* The standard recommends including this section, even if we don't use
     * any of the features it affords. TODO: take advantage of the pixel
     * aspect ratio and encoder ID fields available? */
    memcpy(out, "\0\0\0\0\0\0\0\0TRUEVISION-XFILE.", 26);

    pkt->size   = out + 26 - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
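Example #4
/* Super2xSaI scaler (apparently FFmpeg's libavfilter/vf_super2xsai.c): doubles
 * the image in both dimensions.  For every source pixel it examines a 4x4
 * neighbourhood, kept in the sliding color[][] window, and picks or
 * interpolates the four corresponding output pixels at 2, 3 or 4 bytes per
 * pixel. */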
static void super2xsai(AVFilterContext *ctx,
                       uint8_t *src, int src_linesize,
                       uint8_t *dst, int dst_linesize,
                       int width, int height)
{
    Super2xSaIContext *sai = ctx->priv;
    unsigned int x, y;
    uint32_t color[4][4];
    unsigned char *src_line[4];
    const int bpp = sai->bpp;
    const uint32_t hi_pixel_mask = sai->hi_pixel_mask;
    const uint32_t lo_pixel_mask = sai->lo_pixel_mask;
    const uint32_t q_hi_pixel_mask = sai->q_hi_pixel_mask;
    const uint32_t q_lo_pixel_mask = sai->q_lo_pixel_mask;

    /* Point to the first 4 lines, first line is duplicated */
    src_line[0] = src;
    src_line[1] = src;
    src_line[2] = src + src_linesize*FFMIN(1, height-1);
    src_line[3] = src + src_linesize*FFMIN(2, height-1);

#define READ_COLOR4(dst, src_line, off) dst = *((const uint32_t *)src_line + off)
#define READ_COLOR3(dst, src_line, off) dst = AV_RL24 (src_line + 3*off)
#define READ_COLOR2(dst, src_line, off) dst = sai->is_be ? AV_RB16(src_line + 2 * off) : AV_RL16(src_line + 2 * off)
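    /* READ_COLOR4/3/2 fetch one source pixel at 32, 24 or 16 bits per pixel;
     * the 16-bit variant honours the source endianness via sai->is_be. */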

    for (y = 0; y < height; y++) {
        uint8_t *dst_line[2];

        dst_line[0] = dst + dst_linesize*2*y;
        dst_line[1] = dst + dst_linesize*(2*y+1);

        switch (bpp) {
        case 4:
            READ_COLOR4(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR4(color[0][2], src_line[0], 1); READ_COLOR4(color[0][3], src_line[0], 2);
            READ_COLOR4(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR4(color[1][2], src_line[1], 1); READ_COLOR4(color[1][3], src_line[1], 2);
            READ_COLOR4(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR4(color[2][2], src_line[2], 1); READ_COLOR4(color[2][3], src_line[2], 2);
            READ_COLOR4(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR4(color[3][2], src_line[3], 1); READ_COLOR4(color[3][3], src_line[3], 2);
            break;
        case 3:
            READ_COLOR3(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR3(color[0][2], src_line[0], 1); READ_COLOR3(color[0][3], src_line[0], 2);
            READ_COLOR3(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR3(color[1][2], src_line[1], 1); READ_COLOR3(color[1][3], src_line[1], 2);
            READ_COLOR3(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR3(color[2][2], src_line[2], 1); READ_COLOR3(color[2][3], src_line[2], 2);
            READ_COLOR3(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR3(color[3][2], src_line[3], 1); READ_COLOR3(color[3][3], src_line[3], 2);
            break;
        default:
            READ_COLOR2(color[0][0], src_line[0], 0); color[0][1] = color[0][0]; READ_COLOR2(color[0][2], src_line[0], 1); READ_COLOR2(color[0][3], src_line[0], 2);
            READ_COLOR2(color[1][0], src_line[1], 0); color[1][1] = color[1][0]; READ_COLOR2(color[1][2], src_line[1], 1); READ_COLOR2(color[1][3], src_line[1], 2);
            READ_COLOR2(color[2][0], src_line[2], 0); color[2][1] = color[2][0]; READ_COLOR2(color[2][2], src_line[2], 1); READ_COLOR2(color[2][3], src_line[2], 2);
            READ_COLOR2(color[3][0], src_line[3], 0); color[3][1] = color[3][0]; READ_COLOR2(color[3][2], src_line[3], 1); READ_COLOR2(color[3][3], src_line[3], 2);
        }

        for (x = 0; x < width; x++) {
            uint32_t product1a, product1b, product2a, product2b;

//---------------------------------------  B0 B1 B2 B3    0  1  2  3
//                                         4  5* 6  S2 -> 4  5* 6  7
//                                         1  2  3  S1    8  9 10 11
//                                         A0 A1 A2 A3   12 13 14 15
//--------------------------------------
            if (color[2][1] == color[1][2] && color[1][1] != color[2][2]) {
                product2b = color[2][1];
                product1b = product2b;
            } else if (color[1][1] == color[2][2] && color[2][1] != color[1][2]) {
                product2b = color[1][1];
                product1b = product2b;
            } else if (color[1][1] == color[2][2] && color[2][1] == color[1][2]) {
                int r = 0;

                r += GET_RESULT(color[1][2], color[1][1], color[1][0], color[3][1]);
                r += GET_RESULT(color[1][2], color[1][1], color[2][0], color[0][1]);
                r += GET_RESULT(color[1][2], color[1][1], color[3][2], color[2][3]);
                r += GET_RESULT(color[1][2], color[1][1], color[0][2], color[1][3]);

                if (r > 0)
                    product1b = color[1][2];
                else if (r < 0)
                    product1b = color[1][1];
                else
                    product1b = INTERPOLATE(color[1][1], color[1][2]);

                product2b = product1b;
            } else {
                if (color[1][2] == color[2][2] && color[2][2] == color[3][1] && color[2][1] != color[3][2] && color[2][2] != color[3][0])
                    product2b = Q_INTERPOLATE(color[2][2], color[2][2], color[2][2], color[2][1]);
                else if (color[1][1] == color[2][1] && color[2][1] == color[3][2] && color[3][1] != color[2][2] && color[2][1] != color[3][3])
                    product2b = Q_INTERPOLATE(color[2][1], color[2][1], color[2][1], color[2][2]);
                else
                    product2b = INTERPOLATE(color[2][1], color[2][2]);

                if (color[1][2] == color[2][2] && color[1][2] == color[0][1] && color[1][1] != color[0][2] && color[1][2] != color[0][0])
                    product1b = Q_INTERPOLATE(color[1][2], color[1][2], color[1][2], color[1][1]);
                else if (color[1][1] == color[2][1] && color[1][1] == color[0][2] && color[0][1] != color[1][2] && color[1][1] != color[0][3])
                    product1b = Q_INTERPOLATE(color[1][2], color[1][1], color[1][1], color[1][1]);
                else
                    product1b = INTERPOLATE(color[1][1], color[1][2]);
            }

            if (color[1][1] == color[2][2] && color[2][1] != color[1][2] && color[1][0] == color[1][1] && color[1][1] != color[3][2])
                product2a = INTERPOLATE(color[2][1], color[1][1]);
            else if (color[1][1] == color[2][0] && color[1][2] == color[1][1] && color[1][0] != color[2][1] && color[1][1] != color[3][0])
                product2a = INTERPOLATE(color[2][1], color[1][1]);
            else
                product2a = color[2][1];

            if (color[2][1] == color[1][2] && color[1][1] != color[2][2] && color[2][0] == color[2][1] && color[2][1] != color[0][2])
                product1a = INTERPOLATE(color[2][1], color[1][1]);
            else if (color[1][0] == color[2][1] && color[2][2] == color[2][1] && color[2][0] != color[1][1] && color[2][1] != color[0][0])
                product1a = INTERPOLATE(color[2][1], color[1][1]);
            else
                product1a = color[1][1];

            /* Set the calculated pixels */
            switch (bpp) {
            case 4:
                AV_WN32A(dst_line[0] + x * 8,     product1a);
                AV_WN32A(dst_line[0] + x * 8 + 4, product1b);
                AV_WN32A(dst_line[1] + x * 8,     product2a);
                AV_WN32A(dst_line[1] + x * 8 + 4, product2b);
                break;
            case 3:
                AV_WL24(dst_line[0] + x * 6,     product1a);
                AV_WL24(dst_line[0] + x * 6 + 3, product1b);
                AV_WL24(dst_line[1] + x * 6,     product2a);
                AV_WL24(dst_line[1] + x * 6 + 3, product2b);
                break;
            default: // bpp = 2
                if (sai->is_be) {
                    AV_WB32(dst_line[0] + x * 4, product1a | (product1b << 16));
                    AV_WB32(dst_line[1] + x * 4, product2a | (product2b << 16));
                } else {
                    AV_WL32(dst_line[0] + x * 4, product1a | (product1b << 16));
                    AV_WL32(dst_line[1] + x * 4, product2a | (product2b << 16));
                }
            }

            /* Move color matrix forward */
            color[0][0] = color[0][1]; color[0][1] = color[0][2]; color[0][2] = color[0][3];
            color[1][0] = color[1][1]; color[1][1] = color[1][2]; color[1][2] = color[1][3];
            color[2][0] = color[2][1]; color[2][1] = color[2][2]; color[2][2] = color[2][3];
            color[3][0] = color[3][1]; color[3][1] = color[3][2]; color[3][2] = color[3][3];

            if (x < width - 3) {
                x += 3;
                switch (bpp) {
                case 4:
                    READ_COLOR4(color[0][3], src_line[0], x);
                    READ_COLOR4(color[1][3], src_line[1], x);
                    READ_COLOR4(color[2][3], src_line[2], x);
                    READ_COLOR4(color[3][3], src_line[3], x);
                    break;
                case 3:
                    READ_COLOR3(color[0][3], src_line[0], x);
                    READ_COLOR3(color[1][3], src_line[1], x);
                    READ_COLOR3(color[2][3], src_line[2], x);
                    READ_COLOR3(color[3][3], src_line[3], x);
                    break;
                default:        /* case 2 */
                    READ_COLOR2(color[0][3], src_line[0], x);
                    READ_COLOR2(color[1][3], src_line[1], x);
                    READ_COLOR2(color[2][3], src_line[2], x);
                    READ_COLOR2(color[3][3], src_line[3], x);
                }
                x -= 3;
            }
        }

        /* We're done with one line, so we shift the source lines up */
        src_line[0] = src_line[1];
        src_line[1] = src_line[2];
        src_line[2] = src_line[3];

        /* Read next line */
        src_line[3] = src_line[2];
        if (y < height - 3)
            src_line[3] += src_linesize;
    } // y loop
}
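The averaging helpers used above (GET_RESULT, INTERPOLATE, Q_INTERPOLATE) are defined elsewhere in vf_super2xsai.c and rely on the per-format masks captured at the top of the function. Roughly, they have the following shape (a paraphrased sketch, not the verbatim source):

#define GET_RESULT(A, B, C, D) ((A != C || A != D) - (B != C || B != D))

#define INTERPOLATE(A, B) (((A & hi_pixel_mask) >> 1) + ((B & hi_pixel_mask) >> 1) + (A & B & lo_pixel_mask))

#define Q_INTERPOLATE(A, B, C, D) (((A & q_hi_pixel_mask) >> 2) +                          \
                                   ((B & q_hi_pixel_mask) >> 2) +                          \
                                   ((C & q_hi_pixel_mask) >> 2) +                          \
                                   ((D & q_hi_pixel_mask) >> 2) +                          \
                                   ((((A & q_lo_pixel_mask) + (B & q_lo_pixel_mask) +      \
                                      (C & q_lo_pixel_mask) + (D & q_lo_pixel_mask)) >> 2) \
                                    & q_lo_pixel_mask))

GET_RESULT votes on which of two candidate colors better matches the neighbourhood, INTERPOLATE averages two pixels and Q_INTERPOLATE averages four; the hi/lo masks keep the packed color channels from carrying into each other.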
Example #5
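/* FLIC 24 bpp frame decoder (apparently FFmpeg's flicvideo.c): iterates over
 * the chunks of one frame and handles the delta, byte-run (RLE), black, copy
 * and mini chunk types for true-color data, writing 3-byte pixels into the
 * reusable frame.  CHECK_PIXEL_PTR (defined elsewhere in the decoder) aborts
 * with AVERROR_INVALIDDATA when a write would run past pixel_limit. */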
static int flic_decode_frame_24BPP(AVCodecContext *avctx,
                                   void *data, int *got_frame,
                                   const uint8_t *buf, int buf_size)
{
    FlicDecodeContext *s = avctx->priv_data;

    GetByteContext g2;
    int pixel_ptr;
    unsigned char palette_idx1;

    unsigned int frame_size;
    int num_chunks;

    unsigned int chunk_size;
    int chunk_type;

    int i, j, ret;

    int lines;
    int compressed_lines;
    signed short line_packets;
    int y_ptr;
    int byte_run;
    int pixel_skip;
    int pixel_countdown;
    unsigned char *pixels;
    int pixel;
    unsigned int pixel_limit;

    bytestream2_init(&g2, buf, buf_size);

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    pixels = s->frame->data[0];
    pixel_limit = s->avctx->height * s->frame->linesize[0];

    frame_size = bytestream2_get_le32(&g2);
    bytestream2_skip(&g2, 2);  /* skip the magic number */
    num_chunks = bytestream2_get_le16(&g2);
    bytestream2_skip(&g2, 8);  /* skip padding */
    if (frame_size > buf_size)
        frame_size = buf_size;

    if (frame_size < 16)
        return AVERROR_INVALIDDATA;
    frame_size -= 16;

    /* iterate through the chunks */
    while ((frame_size > 0) && (num_chunks > 0) &&
            bytestream2_get_bytes_left(&g2) >= 4) {
        int stream_ptr_after_chunk;
        chunk_size = bytestream2_get_le32(&g2);
        if (chunk_size > frame_size) {
            av_log(avctx, AV_LOG_WARNING,
                   "Invalid chunk_size = %u > frame_size = %u\n", chunk_size, frame_size);
            chunk_size = frame_size;
        }
        stream_ptr_after_chunk = bytestream2_tell(&g2) - 4 + chunk_size;

        chunk_type = bytestream2_get_le16(&g2);


        switch (chunk_type) {
        case FLI_256_COLOR:
        case FLI_COLOR:
            /* For some reason, it seems that non-palettized flics do
             * include one of these chunks in their first frame.
             * Why I do not know, it seems rather extraneous. */
            ff_dlog(avctx,
                    "Unexpected Palette chunk %d in non-palettized FLC\n",
                    chunk_type);
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_DELTA:
        case FLI_DTA_LC:
            y_ptr = 0;
            compressed_lines = bytestream2_get_le16(&g2);
            while (compressed_lines > 0) {
                if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk)
                    break;
                if (y_ptr > pixel_limit)
                    return AVERROR_INVALIDDATA;
                line_packets = bytestream2_get_le16(&g2);
                if (line_packets < 0) {
                    line_packets = -line_packets;
                    if (line_packets > s->avctx->height)
                        return AVERROR_INVALIDDATA;
                    y_ptr += line_packets * s->frame->linesize[0];
                } else {
                    compressed_lines--;
                    pixel_ptr = y_ptr;
                    CHECK_PIXEL_PTR(0);
                    pixel_countdown = s->avctx->width;
                    for (i = 0; i < line_packets; i++) {
                        /* account for the skip bytes */
                        if (bytestream2_tell(&g2) + 2 > stream_ptr_after_chunk)
                            break;
                        pixel_skip = bytestream2_get_byte(&g2);
                        pixel_ptr += (pixel_skip*3); /* Pixel is 3 bytes wide */
                        pixel_countdown -= pixel_skip;
                        byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                        if (byte_run < 0) {
                            byte_run = -byte_run;
                            pixel    = bytestream2_get_le24(&g2);
                            CHECK_PIXEL_PTR(3 * byte_run);
                            for (j = 0; j < byte_run; j++, pixel_countdown -= 1) {
                                AV_WL24(&pixels[pixel_ptr], pixel);
                                pixel_ptr += 3;
                            }
                        } else {
                            if (bytestream2_tell(&g2) + 3 * byte_run > stream_ptr_after_chunk)
                                break;
                            CHECK_PIXEL_PTR(3 * byte_run); /* 3 bytes are read and written per pixel */
                            for (j = 0; j < byte_run; j++, pixel_countdown--) {
                                pixel = bytestream2_get_le24(&g2);
                                AV_WL24(&pixels[pixel_ptr], pixel);
                                pixel_ptr += 3;
                            }
                        }
                    }

                    y_ptr += s->frame->linesize[0];
                }
            }
            break;

        case FLI_LC:
            av_log(avctx, AV_LOG_ERROR, "Unexpected FLI_LC chunk in non-palettized FLC\n");
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        case FLI_BLACK:
            /* set the whole frame to 0x00 which is black for 24 bit mode. */
            memset(pixels, 0x00,
                   s->frame->linesize[0] * s->avctx->height);
            break;

        case FLI_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = (s->avctx->width * 3);

                while (pixel_countdown > 0) {
                    if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
                        break;
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        palette_idx1 = bytestream2_get_byte(&g2);
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) (linea%d)\n",
                                       pixel_countdown, lines);
                        }
                    } else {  /* copy bytes if byte_run < 0 */
                        byte_run = -byte_run;
                        if (bytestream2_tell(&g2) + byte_run > stream_ptr_after_chunk)
                            break;
                        CHECK_PIXEL_PTR(byte_run);
                        for (j = 0; j < byte_run; j++) {
                            palette_idx1 = bytestream2_get_byte(&g2);
                            pixels[pixel_ptr++] = palette_idx1;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d) at line %d\n",
                                       pixel_countdown, lines);
                        }
                    }
                }

                y_ptr += s->frame->linesize[0];
            }
            break;

        case FLI_DTA_BRUN:
            y_ptr = 0;
            for (lines = 0; lines < s->avctx->height; lines++) {
                pixel_ptr = y_ptr;
                /* disregard the line packets; instead, iterate through all
                 * pixels on a row */
                bytestream2_skip(&g2, 1);
                pixel_countdown = s->avctx->width; /* Width is in pixels, not bytes */

                while (pixel_countdown > 0) {
                    if (bytestream2_tell(&g2) + 1 > stream_ptr_after_chunk)
                        break;
                    byte_run = sign_extend(bytestream2_get_byte(&g2), 8);
                    if (byte_run > 0) {
                        pixel = bytestream2_get_le24(&g2);
                        CHECK_PIXEL_PTR(3 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            AV_WL24(pixels + pixel_ptr, pixel);
                            pixel_ptr += 3;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    } else {  /* copy pixels if byte_run < 0 */
                        byte_run = -byte_run;
                        if (bytestream2_tell(&g2) + 3 * byte_run > stream_ptr_after_chunk)
                            break;
                        CHECK_PIXEL_PTR(3 * byte_run);
                        for (j = 0; j < byte_run; j++) {
                            pixel = bytestream2_get_le24(&g2);
                            AV_WL24(pixels + pixel_ptr, pixel);
                            pixel_ptr  += 3;
                            pixel_countdown--;
                            if (pixel_countdown < 0)
                                av_log(avctx, AV_LOG_ERROR, "pixel_countdown < 0 (%d)\n",
                                       pixel_countdown);
                        }
                    }
                }

                y_ptr += s->frame->linesize[0];
            }
            break;

        case FLI_COPY:
        case FLI_DTA_COPY:
            /* copy the chunk (uncompressed frame) */
            if (chunk_size - 6 > (unsigned int)(FFALIGN(s->avctx->width, 2) * s->avctx->height)*3) {
                av_log(avctx, AV_LOG_ERROR, "In chunk FLI_COPY : source data (%d bytes) " \
                       "bigger than image, skipping chunk\n", chunk_size - 6);
                bytestream2_skip(&g2, chunk_size - 6);
            } else {
                for (y_ptr = 0; y_ptr < s->frame->linesize[0] * s->avctx->height;
                     y_ptr += s->frame->linesize[0]) {

                    pixel_countdown = s->avctx->width;
                    pixel_ptr = 0;
                    while (pixel_countdown > 0) {
                        pixel = bytestream2_get_le24(&g2);
                        AV_WL24(&pixels[y_ptr + pixel_ptr], pixel);
                        pixel_ptr += 3;
                        pixel_countdown--;
                    }
                    if (s->avctx->width & 1)
                        bytestream2_skip(&g2, 3);
                }
            }
            break;

        case FLI_MINI:
            /* some sort of a thumbnail? disregard this chunk... */
            bytestream2_skip(&g2, chunk_size - 6);
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unrecognized chunk type: %d\n", chunk_type);
            break;
        }

        frame_size -= chunk_size;
        num_chunks--;
    }

    /* by the end of the chunk, the stream ptr should equal the frame
     * size (minus 1, possibly); if it doesn't, issue a warning */
    if ((bytestream2_get_bytes_left(&g2) != 0) && (bytestream2_get_bytes_left(&g2) != 1))
        av_log(avctx, AV_LOG_ERROR, "Processed FLI chunk where chunk size = %d " \
               "and final chunk ptr = %d\n", buf_size, bytestream2_tell(&g2));

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}