Example no. 1
static int hap_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    HapContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    int ret, length;
    int blocks = avctx->coded_width * avctx->coded_height / (TEXTURE_BLOCK_W * TEXTURE_BLOCK_H);

    bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);

    /* Check for section header */
    length = parse_section_header(avctx);
    if (length < 0) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too small.\n");
        return length;
    }

    /* Prepare the texture buffer and decompress function */
    ret = setup_texture(avctx, length);
    if (ret < 0)
        return ret;

    /* Get the output frame ready to receive data */
    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    if (avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);

    /* Use the decompress function on the texture, one block per thread */
    avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, blocks);

    /* Frame is ready to be output */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
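A note on the threading pattern above: avctx->execute2() fans the work out over the frame-thread pool, one job per texture block, and its callback takes (avctx, arg, jobnr, threadnr). A minimal sketch of such a worker, assuming a hypothetical decode_one_block() helper (the real decompress_texture_thread() lives elsewhere in the Hap decoder):

static int decompress_texture_thread(AVCodecContext *avctx, void *arg,
                                     int block_nr, int thread_nr)
{
    HapContext *ctx = avctx->priv_data;
    AVFrame *frame  = arg;  /* tframe.f, passed as the arg of execute2() */

    /* Jobs are independent: each decodes one texture block and writes only
     * its own region of the frame, so no locking is needed. */
    return decode_one_block(ctx, frame, block_nr);  /* hypothetical helper */
}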
Example no. 2
static int init_image(TiffContext *s, ThreadFrame *frame)
{
    int i, ret;
    uint32_t *pal;

    switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) {
    case 11:
        if (!s->palette_is_set) {
            s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
            break;
        }
    case 21:
    case 41:
    case 81:
        s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
        break;
    case 243:
        if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
            if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
            } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
                return AVERROR_PATCHWELCOME;
            }
        } else
            s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
        break;
    case 161:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
        break;
    case 162:
        s->avctx->pix_fmt = AV_PIX_FMT_GRAY8A;
        break;
    case 324:
        s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
        break;
    case 483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE  : AV_PIX_FMT_RGB48BE;
        break;
    case 644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE  : AV_PIX_FMT_RGBA64BE;
        break;
    case 1243:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
        break;
    case 1324:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        break;
    case 1483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
        break;
    case 1644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
        break;
    default:
        av_log(s->avctx, AV_LOG_ERROR,
               "This format is not supported (bpp=%d, bppcount=%d)\n",
               s->bpp, s->bppcount);
        return AVERROR_INVALIDDATA;
    }
    if (s->width != s->avctx->width || s->height != s->avctx->height) {
        ret = ff_set_dimensions(s->avctx, s->width, s->height);
        if (ret < 0)
            return ret;
    }
    if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
        return ret;
    if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        if (s->palette_is_set) {
            memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
        } else {
            /* make default grayscale pal */
            pal = (uint32_t *) frame->f->data[1];
            for (i = 0; i < 1<<s->bpp; i++)
                pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
        }
    }
    return 0;
}
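The switch key above packs three TIFF parameters into a single integer as planar * 1000 + bpp * 10 + bppcount, so each case label can be read back apart (values taken from the cases in the listing):

/* key = planar * 1000 + bpp * 10 + bppcount, e.g.:                       */
/* case  243: planar=0, bpp=24, bppcount=3 -> RGB24 (or subsampled YCbCr) */
/* case  483: planar=0, bpp=48, bppcount=3 -> RGB48LE/BE                  */
/* case 1243: planar=1, bpp=24, bppcount=3 -> planar AV_PIX_FMT_GBRP      */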
Example no. 3
static int flac_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame     = data;
    ThreadFrame tframe = { .f = data };
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    FLACContext *s = avctx->priv_data;
    int bytes_read = 0;
    int ret;

    *got_frame_ptr = 0;

    if (s->max_framesize == 0) {
        s->max_framesize =
            ff_flac_get_max_frame_size(s->max_blocksize ? s->max_blocksize : FLAC_MAX_BLOCKSIZE,
                                       FLAC_MAX_CHANNELS, 32);
    }

    if (buf_size > 5 && !memcmp(buf, "\177FLAC", 5)) {
        av_log(s->avctx, AV_LOG_DEBUG, "skipping flac header packet 1\n");
        return buf_size;
    }

    if (buf_size > 0 && (*buf & 0x7F) == FLAC_METADATA_TYPE_VORBIS_COMMENT) {
        av_log(s->avctx, AV_LOG_DEBUG, "skipping vorbis comment\n");
        return buf_size;
    }

    /* check that there is at least the smallest decodable amount of data.
       this amount corresponds to the smallest valid FLAC frame possible.
       FF F8 69 02 00 00 9A 00 00 34 46 */
    if (buf_size < FLAC_MIN_FRAME_SIZE)
        return buf_size;

    /* check for inline header */
    if (AV_RB32(buf) == MKBETAG('f','L','a','C')) {
        if (!s->got_streaminfo && (ret = parse_streaminfo(s, buf, buf_size))) {
            av_log(s->avctx, AV_LOG_ERROR, "invalid header\n");
            return ret;
        }
        return get_metadata_size(buf, buf_size);
    }

    /* decode frame */
    if ((ret = init_get_bits8(&s->gb, buf, buf_size)) < 0)
        return ret;
    if ((ret = decode_frame(s)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "decode_frame() failed\n");
        return ret;
    }
    bytes_read = get_bits_count(&s->gb)/8;

    if ((s->avctx->err_recognition & (AV_EF_CRCCHECK|AV_EF_COMPLIANT)) &&
        av_crc(av_crc_get_table(AV_CRC_16_ANSI),
               0, buf, bytes_read)) {
        av_log(s->avctx, AV_LOG_ERROR, "CRC error at PTS %"PRId64"\n", avpkt->pts);
        if (s->avctx->err_recognition & AV_EF_EXPLODE)
            return AVERROR_INVALIDDATA;
    }

    /* get output buffer */
    frame->nb_samples = s->blocksize;
    if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
        return ret;

    s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded, s->channels,
                                   s->blocksize, s->sample_shift);

    if (bytes_read > buf_size) {
        av_log(s->avctx, AV_LOG_ERROR, "overread: %d\n", bytes_read - buf_size);
        return AVERROR_INVALIDDATA;
    }
    if (bytes_read < buf_size) {
        av_log(s->avctx, AV_LOG_DEBUG, "underread: %d orig size: %d\n",
               buf_size - bytes_read, buf_size);
    }

    *got_frame_ptr = 1;

    return bytes_read;
}
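The CRC gate above relies on a property of FLAC framing: the CRC-16 appended to each frame makes the CRC over the whole frame, checksum included, come out as zero, so a nonzero av_crc() result signals corruption. A self-contained sketch of the same check:

#include "libavutil/crc.h"

/* Returns 0 when the frame's trailing CRC-16 matches, nonzero otherwise;
 * mirrors the av_crc() call pattern used in flac_decode_frame() above. */
static int flac_frame_crc_mismatch(const uint8_t *frame, size_t size)
{
    const AVCRC *table = av_crc_get_table(AV_CRC_16_ANSI);
    return av_crc(table, 0, frame, size) != 0;
}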
Example no. 4
static int hap_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    HapContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    int ret, i;
    int tex_size;

    bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);

    /* Check for section header */
    ret = hap_parse_frame_header(avctx);
    if (ret < 0)
        return ret;

    /* Get the output frame ready to receive data */
    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    if (avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);

    /* Unpack the DXT texture */
    if (hap_can_use_tex_in_place(ctx)) {
        /* Only DXTC texture compression in a contiguous block */
        ctx->tex_data = ctx->gbc.buffer;
        tex_size = bytestream2_get_bytes_left(&ctx->gbc);
    } else {
        /* Perform the second-stage decompression */
        ret = av_reallocp(&ctx->tex_buf, ctx->tex_size);
        if (ret < 0)
            return ret;

        avctx->execute2(avctx, decompress_chunks_thread, NULL,
                        ctx->chunk_results, ctx->chunk_count);

        for (i = 0; i < ctx->chunk_count; i++) {
            if (ctx->chunk_results[i] < 0)
                return ctx->chunk_results[i];
        }

        ctx->tex_data = ctx->tex_buf;
        tex_size = ctx->tex_size;
    }

    if (tex_size < (avctx->coded_width  / TEXTURE_BLOCK_W)
                  *(avctx->coded_height / TEXTURE_BLOCK_H)
                  *ctx->tex_rat) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient data\n");
        return AVERROR_INVALIDDATA;
    }

    /* Use the decompress function on the texture, one block per thread */
    avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
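Unlike Example no. 1, the chunked call above hands ctx->chunk_results to execute2() as its ret argument; the threading framework stores each job's return value in that array, which is exactly what the loop after the call inspects. A sketch of a matching worker, assuming a hypothetical decompress_one_chunk() helper:

static int decompress_chunks_thread(AVCodecContext *avctx, void *arg,
                                    int chunk_nr, int thread_nr)
{
    HapContext *ctx = avctx->priv_data;
    /* The return value lands in ctx->chunk_results[chunk_nr] via the ret
     * array passed to execute2() by the caller above. */
    return decompress_one_chunk(ctx, chunk_nr);  /* hypothetical helper */
}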
Example no. 5
File: dxv.c Project: AVLeo/libav
static int dxv_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DXVContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    GetByteContext *gbc = &ctx->gbc;
    int (*decompress_tex)(AVCodecContext *avctx);
    const char *msgcomp, *msgtext;
    uint32_t tag;
    int version_major, version_minor = 0;
    int size = 0, old_type = 0;
    int ret;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    tag = bytestream2_get_le32(gbc);
    switch (tag) {
    case MKBETAG('D', 'X', 'T', '1'):
        decompress_tex = dxv_decompress_dxt1;
        ctx->tex_funct = ctx->texdsp.dxt1_block;
        ctx->tex_rat   = 8;
        ctx->tex_step  = 8;
        msgcomp = "DXTR1";
        msgtext = "DXT1";
        break;
    case MKBETAG('D', 'X', 'T', '5'):
        decompress_tex = dxv_decompress_dxt5;
        ctx->tex_funct = ctx->texdsp.dxt5_block;
        ctx->tex_rat   = 4;
        ctx->tex_step  = 16;
        msgcomp = "DXTR5";
        msgtext = "DXT5";
        break;
    case MKBETAG('Y', 'C', 'G', '6'):
    case MKBETAG('Y', 'G', '1', '0'):
        avpriv_report_missing_feature(avctx, "Tag 0x%08X", tag);
        return AVERROR_PATCHWELCOME;
    default:
        /* Old version does not have a real header, just size and type. */
        size = tag & 0x00FFFFFF;
        old_type = tag >> 24;
        version_major = (old_type & 0x0F) - 1;

        if (old_type & 0x80) {
            msgcomp = "RAW";
            decompress_tex = dxv_decompress_raw;
        } else {
            msgcomp = "LZF";
            decompress_tex = dxv_decompress_lzf;
        }

        if (old_type & 0x40) {
            msgtext = "DXT5";

            ctx->tex_funct = ctx->texdsp.dxt5_block;
            ctx->tex_step  = 16;
        } else if (old_type & 0x20 || version_major == 1) {
            msgtext = "DXT1";

            ctx->tex_funct = ctx->texdsp.dxt1_block;
            ctx->tex_step  = 8;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported header (0x%08X)\n.", tag);
            return AVERROR_INVALIDDATA;
        }
        ctx->tex_rat = 1;
        break;
    }

    /* New header is 12 bytes long. */
    if (!old_type) {
        version_major = bytestream2_get_byte(gbc) - 1;
        version_minor = bytestream2_get_byte(gbc);

        /* Encoder copies texture data when compression is not advantageous. */
        if (bytestream2_get_byte(gbc)) {
            msgcomp = "RAW";
            ctx->tex_rat = 1;
            decompress_tex = dxv_decompress_raw;
        }

        bytestream2_skip(gbc, 1); // unknown
        size = bytestream2_get_le32(gbc);
    }
    av_log(avctx, AV_LOG_DEBUG,
           "%s compression with %s texture (version %d.%d)\n",
           msgcomp, msgtext, version_major, version_minor);

    if (size != bytestream2_get_bytes_left(gbc)) {
        av_log(avctx, AV_LOG_ERROR,
               "Incomplete or invalid file (header %d, left %d).\n",
               size, bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    ctx->tex_size = avctx->coded_width * avctx->coded_height * 4 / ctx->tex_rat;
    ret = av_reallocp(&ctx->tex_data, ctx->tex_size);
    if (ret < 0)
        return ret;

    /* Decompress texture out of the intermediate compression. */
    ret = decompress_tex(avctx);
    if (ret < 0)
        return ret;

    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    /* Now decompress the texture with the standard functions. */
    avctx->execute2(avctx, decompress_texture_thread,
                    tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output. */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
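In the legacy-header path above, the 32-bit tag is the whole header: the low 24 bits carry the payload size and the top byte holds the type flags. A worked decode of a hypothetical tag value:

uint32_t tag      = 0xA2000800;            /* hypothetical legacy tag        */
int size          = tag & 0x00FFFFFF;      /* 0x000800 -> 2048 payload bytes */
int old_type      = tag >> 24;             /* 0xA2                           */
int version_major = (old_type & 0x0F) - 1; /* 2 - 1 = 1                      */
/* 0x80 set   -> RAW copy, no second-stage compression                       */
/* 0x40 clear -> not DXT5                                                    */
/* 0x20 set   -> DXT1 texture, tex_step 8                                    */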
Example no. 6
static int magy_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    MagicYUVContext *s = avctx->priv_data;
    ThreadFrame frame = { .f = data };
    AVFrame *p = data;
    GetByteContext gbyte;
    GetBitContext gbit;
    uint32_t first_offset, offset, next_offset, header_size, slice_width;
    int width, height, format, version, table_size;
    int ret, i, j;

    bytestream2_init(&gbyte, avpkt->data, avpkt->size);
    if (bytestream2_get_le32(&gbyte) != MKTAG('M', 'A', 'G', 'Y'))
        return AVERROR_INVALIDDATA;

    header_size = bytestream2_get_le32(&gbyte);
    if (header_size < 32 || header_size >= avpkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "header or packet too small %"PRIu32"\n", header_size);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_byte(&gbyte);
    if (version != 7) {
        avpriv_request_sample(avctx, "Version %d", version);
        return AVERROR_PATCHWELCOME;
    }

    s->hshift[1] =
    s->vshift[1] =
    s->hshift[2] =
    s->vshift[2] = 0;
    s->decorrelate = 0;
    s->max = 256;
    s->huff_build = huff_build;
    s->magy_decode_slice = magy_decode_slice;

    format = bytestream2_get_byte(&gbyte);
    switch (format) {
    case 0x65:
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        s->decorrelate = 1;
        break;
    case 0x66:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        s->decorrelate = 1;
        break;
    case 0x67:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        break;
    case 0x68:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        s->hshift[1] =
        s->hshift[2] = 1;
        break;
    case 0x69:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        break;
    case 0x6a:
        avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
        break;
    case 0x6b:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        break;
    case 0x6c:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        s->hshift[1] =
        s->hshift[2] = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    case 0x6d:
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        s->decorrelate = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    case 0x6e:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        s->decorrelate = 1;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    case 0x73:
        avctx->pix_fmt = AV_PIX_FMT_GRAY10;
        s->max = 1024;
        s->huff_build = huff_build10;
        s->magy_decode_slice = magy_decode_slice10;
        break;
    default:
        avpriv_request_sample(avctx, "Format 0x%X", format);
        return AVERROR_PATCHWELCOME;
    }
    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    bytestream2_skip(&gbyte, 2);
    s->interlaced = !!(bytestream2_get_byte(&gbyte) & 2);
    bytestream2_skip(&gbyte, 3);

    width  = bytestream2_get_le32(&gbyte);
    height = bytestream2_get_le32(&gbyte);
    ret = ff_set_dimensions(avctx, width, height);
    if (ret < 0)
        return ret;

    slice_width = bytestream2_get_le32(&gbyte);
    if (slice_width != avctx->coded_width) {
        avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
        return AVERROR_PATCHWELCOME;
    }
    s->slice_height = bytestream2_get_le32(&gbyte);
    if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid slice height: %d\n", s->slice_height);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&gbyte, 4);

    s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
    if (s->nb_slices > INT_MAX / sizeof(Slice)) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of slices: %d\n", s->nb_slices);
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < s->planes; i++) {
        av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
        if (!s->slices[i])
            return AVERROR(ENOMEM);

        offset = bytestream2_get_le32(&gbyte);
        if (offset >= avpkt->size - header_size)
            return AVERROR_INVALIDDATA;

        if (i == 0)
            first_offset = offset;

        for (j = 0; j < s->nb_slices - 1; j++) {
            s->slices[i][j].start = offset + header_size;

            next_offset = bytestream2_get_le32(&gbyte);
            if (next_offset <= offset || next_offset >= avpkt->size - header_size)
                return AVERROR_INVALIDDATA;

            s->slices[i][j].size = next_offset - offset;
            offset = next_offset;
        }

        s->slices[i][j].start = offset + header_size;
        s->slices[i][j].size  = avpkt->size - s->slices[i][j].start;
    }

    if (bytestream2_get_byte(&gbyte) != s->planes)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(&gbyte, s->nb_slices * s->planes);

    table_size = header_size + first_offset - bytestream2_tell(&gbyte);
    if (table_size < 2)
        return AVERROR_INVALIDDATA;

    ret = init_get_bits8(&gbit, avpkt->data + bytestream2_tell(&gbyte), table_size);
    if (ret < 0)
        return ret;

    ret = build_huffman(avctx, &gbit, s->max);
    if (ret < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    s->buf = avpkt->data;
    s->p = p;
    avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);

    if (avctx->pix_fmt == AV_PIX_FMT_GBRP   ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP  ||
        avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP10) {
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
    }

    *got_frame = 1;

    return avpkt->size;
}
Example no. 7
static int libopenjpeg_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    AVPacket *avpkt)
{
    uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    LibOpenJPEGContext *ctx = avctx->priv_data;
    AVFrame *picture = &ctx->image, *output = data;
    opj_dinfo_t *dec;
    opj_cio_t *stream;
    opj_image_t *image;
    int width, height, ret = -1;
    int pixel_size = 0;
    int ispacked = 0;

    *data_size = 0;

    // Check if input is a raw jpeg2k codestream or in jp2 wrapping
    if((AV_RB32(buf) == 12) &&
       (AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
       (AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
        dec = opj_create_decompress(CODEC_JP2);
    } else {
        // If the AVPacket contains a jp2c box, then skip to
        // the starting byte of the codestream.
        if (AV_RB32(buf + 4) == AV_RB32("jp2c"))
            buf += 8;
        dec = opj_create_decompress(CODEC_J2K);
    }

    if(!dec) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
        return -1;
    }
    opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL);

    ctx->dec_params.cp_limit_decoding = LIMIT_TO_MAIN_HEADER;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
    if(!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // Decode the header only
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if(!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        opj_destroy_decompress(dec);
        return -1;
    }
    width  = image->x1 - image->x0;
    height = image->y1 - image->y0;
    if(av_image_check_size(width, height, 0, avctx) < 0) {
        av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height);
        goto done;
    }
    avcodec_set_dimensions(avctx, width, height);

    if (avctx->pix_fmt != PIX_FMT_NONE) {
        if (!libopenjpeg_matches_pix_fmt(image, avctx->pix_fmt)) {
            avctx->pix_fmt = PIX_FMT_NONE;
        }
    }

    if (avctx->pix_fmt == PIX_FMT_NONE) {
        avctx->pix_fmt = libopenjpeg_guess_pix_fmt(image);
    }

    if (avctx->pix_fmt == PIX_FMT_NONE) {
        av_log(avctx, AV_LOG_ERROR, "Unable to determine pixel format\n");
        goto done;
    }

    if(picture->data[0])
        ff_thread_release_buffer(avctx, picture);

    if(ff_thread_get_buffer(avctx, picture) < 0){
        av_log(avctx, AV_LOG_ERROR, "ff_thread_get_buffer() failed\n");
        goto done;
    }

    ctx->dec_params.cp_limit_decoding = NO_LIMITATION;
    ctx->dec_params.cp_reduce = avctx->lowres;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
    if(!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        goto done;
    }

    opj_image_destroy(image);
    // Decode the codestream
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if(!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        goto done;
    }

    pixel_size = av_pix_fmt_descriptors[avctx->pix_fmt].comp[0].step_minus1 + 1;
    ispacked = libopenjpeg_ispacked(avctx->pix_fmt);

    switch (pixel_size) {
    case 1:
        if (ispacked) {
            libopenjpeg_copy_to_packed8(picture, image);
        } else {
            libopenjpeg_copyto8(picture, image);
        }
        break;
    case 2:
        if (ispacked) {
            libopenjpeg_copy_to_packed8(picture, image);
        } else {
            libopenjpeg_copyto16(picture, image);
        }
        break;
    case 3:
    case 4:
        if (ispacked) {
            libopenjpeg_copy_to_packed8(picture, image);
        }
        break;
    case 6:
    case 8:
        if (ispacked) {
            libopenjpeg_copy_to_packed16(picture, image);
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel size %d\n", pixel_size);
        goto done;
    }

    *output    = ctx->image;
    *data_size = sizeof(AVPicture);
    ret = buf_size;

done:
    opj_image_destroy(image);
    opj_destroy_decompress(dec);
    return ret;
}
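The wrapper check at the top of both libopenjpeg listings matches the standard 12-byte JP2 signature box. Spelled out for reference (these are the standard JP2 signature bytes, not values taken from this file):

/* JP2 signature box: box length 12, box type 'jP  ', magic 0x0D0A870A */
static const uint8_t jp2_signature[12] = {
    0x00, 0x00, 0x00, 0x0C,  /* AV_RB32(buf)     == 12            */
    0x6A, 0x50, 0x20, 0x20,  /* AV_RB32(buf + 4) == JP2_SIG_TYPE  */
    0x0D, 0x0A, 0x87, 0x0A,  /* AV_RB32(buf + 8) == JP2_SIG_VALUE */
};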
Example no. 8
static int init_image(TiffContext *s, ThreadFrame *frame)
{
    int ret;
    int create_gray_palette = 0;

    // make sure there is no aliasing in the following switch
    if (s->bpp >= 100 || s->bppcount >= 10) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Unsupported image parameters: bpp=%d, bppcount=%d\n",
               s->bpp, s->bppcount);
        return AVERROR_INVALIDDATA;
    }
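    /* Example of the aliasing this guard rules out: a non-planar image with
     * bpp=104, bppcount=3 would give 104*10 + 3 = 1043, the same key as a
     * planar image with bpp=4, bppcount=3 (1*1000 + 4*10 + 3 = 1043). */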

    switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) {
    case 11:
        if (!s->palette_is_set) {
            s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
            break;
        }
    case 21:
    case 41:
        s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
        if (!s->palette_is_set) {
            create_gray_palette = 1;
        }
        break;
    case 81:
        s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
        break;
    case 243:
        if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
            if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
            } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
                return AVERROR_PATCHWELCOME;
            }
        } else
            s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
        break;
    case 161:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
        break;
    case 162:
        s->avctx->pix_fmt = AV_PIX_FMT_YA8;
        break;
    case 322:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
        break;
    case 324:
        s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
        break;
    case 483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE  : AV_PIX_FMT_RGB48BE;
        break;
    case 644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE  : AV_PIX_FMT_RGBA64BE;
        break;
    case 1243:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
        break;
    case 1324:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        break;
    case 1483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
        break;
    case 1644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
        break;
    default:
        av_log(s->avctx, AV_LOG_ERROR,
               "This format is not supported (bpp=%d, bppcount=%d)\n",
               s->bpp, s->bppcount);
        return AVERROR_INVALIDDATA;
    }

    if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        if((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
           !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
           desc->nb_components < 3) {
            av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (s->width != s->avctx->width || s->height != s->avctx->height) {
        ret = ff_set_dimensions(s->avctx, s->width, s->height);
        if (ret < 0)
            return ret;
    }
    if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
        return ret;
    if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        if (!create_gray_palette)
            memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
        else {
            /* make default grayscale pal */
            int i;
            uint32_t *pal = (uint32_t *)frame->f->data[1];
            for (i = 0; i < 1<<s->bpp; i++)
                pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
        }
    }
    return 0;
}
Example no. 9
File: dxv.c Project: vriera/libav
static int dxv_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DXVContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    GetByteContext *gbc = &ctx->gbc;
    int (*decompress_tex)(AVCodecContext *avctx);
    uint32_t tag;
    int channels, size = 0, old_type = 0;
    int ret;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    tag = bytestream2_get_le32(gbc);
    switch (tag) {
    case MKBETAG('D', 'X', 'T', '1'):
        decompress_tex = dxv_decompress_dxt1;
        ctx->tex_funct = ctx->texdsp.dxt1_block;
        ctx->tex_rat   = 8;
        ctx->tex_step  = 8;
        av_log(avctx, AV_LOG_DEBUG, "DXTR1 compression and DXT1 texture ");
        break;
    case MKBETAG('D', 'X', 'T', '5'):
        decompress_tex = dxv_decompress_dxt5;
        ctx->tex_funct = ctx->texdsp.dxt5_block;
        ctx->tex_rat   = 4;
        ctx->tex_step  = 16;
        av_log(avctx, AV_LOG_DEBUG, "DXTR5 compression and DXT5 texture ");
        break;
    case MKBETAG('Y', 'C', 'G', '6'):
    case MKBETAG('Y', 'G', '1', '0'):
        avpriv_report_missing_feature(avctx, "Tag 0x%08X", tag);
        return AVERROR_PATCHWELCOME;
    default:
        /* Old version does not have a real header, just size and type. */
        size = tag & 0x00FFFFFF;
        old_type = tag >> 24;
        channels = old_type & 0x0F;
        if (old_type & 0x40) {
            av_log(avctx, AV_LOG_DEBUG, "LZF compression and DXT5 texture ");
            ctx->tex_funct = ctx->texdsp.dxt5_block;
            ctx->tex_step  = 16;
        } else if (old_type & 0x20) {
            av_log(avctx, AV_LOG_DEBUG, "LZF compression and DXT1 texture ");
            ctx->tex_funct = ctx->texdsp.dxt1_block;
            ctx->tex_step  = 8;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported header (0x%08X)\n.", tag);
            return AVERROR_INVALIDDATA;
        }
        decompress_tex = dxv_decompress_lzf;
        ctx->tex_rat = 1;
        break;
    }

    /* New header is 12 bytes long. */
    if (!old_type) {
        channels = bytestream2_get_byte(gbc);
        bytestream2_skip(gbc, 3); // unknown
        size = bytestream2_get_le32(gbc);
    }
    av_log(avctx, AV_LOG_DEBUG, "(%d channels)\n", channels);

    if (size != bytestream2_get_bytes_left(gbc)) {
        av_log(avctx, AV_LOG_ERROR, "Incomplete or invalid file (%u > %u)\n.",
               size, bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    ctx->tex_size = avctx->coded_width * avctx->coded_height * 4 / ctx->tex_rat;
    ret = av_reallocp(&ctx->tex_data, ctx->tex_size);
    if (ret < 0)
        return ret;

    /* Decompress texture out of the intermediate compression. */
    ret = decompress_tex(avctx);
    if (ret < 0)
        return ret;

    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    /* Now decompress the texture with the standard functions. */
    avctx->execute2(avctx, decompress_texture_thread,
                    tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output. */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
Example no. 10
static int libopenjpeg_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    AVPacket *avpkt)
{
    uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    LibOpenJPEGContext *ctx = avctx->priv_data;
    AVFrame *picture = &ctx->image, *output = data;
    opj_dinfo_t *dec;
    opj_cio_t *stream;
    opj_image_t *image;
    int width, height, has_alpha = 0, ret = -1;
    int x, y, index;
    uint8_t *img_ptr;
    int adjust[4];

    *data_size = 0;

    // Check if input is a raw jpeg2k codestream or in jp2 wrapping
    if((AV_RB32(buf) == 12) &&
            (AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
            (AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
        dec = opj_create_decompress(CODEC_JP2);
    } else {
        // If the AVPacket contains a jp2c box, then skip to
        // the starting byte of the codestream.
        if (AV_RB32(buf + 4) == AV_RB32("jp2c"))
            buf += 8;
        dec = opj_create_decompress(CODEC_J2K);
    }

    if(!dec) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
        return -1;
    }
    opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL);

    ctx->dec_params.cp_limit_decoding = LIMIT_TO_MAIN_HEADER;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
    if(!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // Decode the header only
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if(!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        opj_destroy_decompress(dec);
        return -1;
    }
    width  = image->x1 - image->x0;
    height = image->y1 - image->y0;
    if(av_image_check_size(width, height, 0, avctx) < 0) {
        av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height);
        goto done;
    }
    avcodec_set_dimensions(avctx, width, height);

    switch(image->numcomps)
    {
    case 1:
        avctx->pix_fmt = PIX_FMT_GRAY8;
        break;
    case 3:
        if(check_image_attributes(image)) {
            avctx->pix_fmt = PIX_FMT_RGB24;
        } else {
            avctx->pix_fmt = PIX_FMT_GRAY8;
            av_log(avctx, AV_LOG_ERROR, "Only first component will be used.\n");
        }
        break;
    case 4:
        has_alpha = 1;
        avctx->pix_fmt = PIX_FMT_RGBA;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "%d components unsupported.\n", image->numcomps);
        goto done;
    }

    if(picture->data[0])
        ff_thread_release_buffer(avctx, picture);

    if(ff_thread_get_buffer(avctx, picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_thread_get_buffer() failed\n");
        goto done;
    }

    ff_thread_finish_setup(avctx);

    ctx->dec_params.cp_limit_decoding = NO_LIMITATION;
    ctx->dec_params.cp_reduce = avctx->lowres;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
    if(!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        goto done;
    }

    opj_image_destroy(image);
    // Decode the codestream
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if(!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        goto done;
    }

    for(x = 0; x < image->numcomps; x++) {
        adjust[x] = FFMAX(image->comps[x].prec - 8, 0);
    }

    for(y = 0; y < avctx->height; y++) {
        index = y*avctx->width;
        img_ptr = picture->data[0] + y*picture->linesize[0];
        for(x = 0; x < avctx->width; x++, index++) {
            *img_ptr++ = image->comps[0].data[index] >> adjust[0];
            if(image->numcomps > 2 && check_image_attributes(image)) {
                *img_ptr++ = image->comps[1].data[index] >> adjust[1];
                *img_ptr++ = image->comps[2].data[index] >> adjust[2];
                if(has_alpha)
                    *img_ptr++ = image->comps[3].data[index] >> adjust[3];
            }
        }
    }

    *output    = ctx->image;
    *data_size = sizeof(AVPicture);
    ret = buf_size;

done:
    opj_image_destroy(image);
    opj_destroy_decompress(dec);
    return ret;
}
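The adjust[] step above rescales components stored at more than 8 bits of precision down to the 8-bit formats this path outputs. A worked instance with a hypothetical 12-bit component:

int prec   = 12;                  /* component precision from the codestream */
int adjust = FFMAX(prec - 8, 0);  /* 4                                       */
int sample = 0x0FFF;              /* full-scale 12-bit value                 */
int out    = sample >> adjust;    /* 0xFF: full-scale 8-bit value            */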