Example #1
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    while (1) {
        if (atomic_load(&p->state) == STATE_INPUT_READY) {
            pthread_mutex_lock(&p->mutex);
            while (atomic_load(&p->state) == STATE_INPUT_READY) {
                if (p->die) {
                    pthread_mutex_unlock(&p->mutex);
                    goto die;
                }
                pthread_cond_wait(&p->input_cond, &p->mutex);
            }
            pthread_mutex_unlock(&p->mutex);
        }

        if (!codec->update_thread_context && avctx->thread_safe_callbacks)
            ff_thread_finish_setup(avctx);

        pthread_mutex_lock(&p->mutex);
        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->internal->allocate_progress)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        if (atomic_load(&p->state) == STATE_SETTING_UP)
            ff_thread_finish_setup(avctx);

        atomic_store(&p->state, STATE_INPUT_READY);

        pthread_mutex_lock(&p->progress_mutex);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);

        pthread_mutex_unlock(&p->mutex);
    }
die:

    return NULL;
}
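The comment above describes a fallback: the worker thread releases the next thread itself when the decoder never calls ff_thread_finish_setup(). For contrast, here is a minimal sketch (not taken from any project on this page, function name hypothetical) of a decode callback that finishes setup explicitly as soon as its per-frame state is published, so decoding can overlap with the next frame's setup; the ff_thread_get_buffer() call assumes the ThreadFrame-based signature seen in Examples 7 and 9-11.

static int sketch_decode(AVCodecContext *avctx, void *data,
                         int *got_frame, AVPacket *avpkt)
{
    ThreadFrame tframe = { .f = data };
    int ret;

    /* parse headers and publish whatever per-frame state the next
     * frame thread will read ... */

    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;

    /* setup is complete: let the next frame thread start; everything
     * below may run concurrently with it */
    ff_thread_finish_setup(avctx);

    /* ... decode avpkt->data into tframe.f ... */

    *got_frame = 1;
    return avpkt->size;
}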
Example #2
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    FrameThreadContext *fctx = p->parent;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    pthread_mutex_lock(&p->mutex);
    while (1) {
        while (p->state == STATE_INPUT_READY && !fctx->die)
            pthread_cond_wait(&p->input_cond, &p->mutex);

        if (fctx->die) break;

        if (!codec->update_thread_context && THREAD_SAFE_CALLBACKS(avctx))
            ff_thread_finish_setup(avctx);

        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->internal->allocate_progress)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        if (p->state == STATE_SETTING_UP) ff_thread_finish_setup(avctx);

        pthread_mutex_lock(&p->progress_mutex);
#if 0 //BUFREF-FIXME
        for (i = 0; i < MAX_BUFFERS; i++)
            if (p->progress_used[i] && (p->got_frame || p->result<0 || avctx->codec_id != AV_CODEC_ID_H264)) {
                p->progress[i][0] = INT_MAX;
                p->progress[i][1] = INT_MAX;
            }
#endif
        p->state = STATE_INPUT_READY;

        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);
    }
    pthread_mutex_unlock(&p->mutex);

    return NULL;
}
Example #3
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f)
{
    PerThreadContext *p = avctx->thread_opaque;
    int *progress, err;

    f->owner = avctx;

    ff_init_buffer_info(avctx, f);

    if (!(avctx->active_thread_type&FF_THREAD_FRAME)) {
        f->thread_opaque = NULL;
        return avctx->get_buffer(avctx, f);
    }

    if (p->state != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || (!avctx->thread_safe_callbacks &&
                avctx->get_buffer != avcodec_default_get_buffer))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }

    pthread_mutex_lock(&p->parent->buffer_mutex);
    f->thread_opaque = progress = allocate_progress(p);

    if (!progress) {
        pthread_mutex_unlock(&p->parent->buffer_mutex);
        return -1;
    }

    progress[0] =
    progress[1] = -1;

    if (avctx->thread_safe_callbacks ||
        avctx->get_buffer == avcodec_default_get_buffer) {
        err = avctx->get_buffer(avctx, f);
    } else {
        p->requested_frame = f;
        p->state = STATE_GET_BUFFER;
        pthread_mutex_lock(&p->progress_mutex);
        pthread_cond_broadcast(&p->progress_cond);

        while (p->state != STATE_SETTING_UP)
            pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

        err = p->result;

        pthread_mutex_unlock(&p->progress_mutex);

        if (!avctx->codec->update_thread_context)
            ff_thread_finish_setup(avctx);
    }

    if (err) {
        free_progress(f);
        f->thread_opaque = NULL;
    }
    pthread_mutex_unlock(&p->parent->buffer_mutex);

    return err;
}
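The two progress entries initialized to -1 above are the per-field counters that frame threads later synchronize on. As a rough, assumed sketch of how they are consumed (illustrative only, not code from this page; signatures assumed to match the AVFrame-based thread API of the same era as this example):

static void sketch_report_rows(AVFrame *f)
{
    /* ... after decoding rows 0..31 of field 0 of f ... */
    ff_thread_report_progress(f, 31, 0);
}

static void sketch_wait_for_rows(AVFrame *ref)
{
    /* block until the referenced frame has reported at least row 31 */
    ff_thread_await_progress(ref, 31, 0);
    /* ... rows <= 31 of ref are now safe to read ... */
}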
Example #4
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    FrameThreadContext *fctx = p->parent;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    pthread_mutex_lock(&p->mutex);
    while (1) {
        while (p->state == STATE_INPUT_READY && !fctx->die)
            pthread_cond_wait(&p->input_cond, &p->mutex);

        if (fctx->die) break;

        if (!codec->update_thread_context && THREAD_SAFE_CALLBACKS(avctx))
            ff_thread_finish_setup(avctx);

        avcodec_get_frame_defaults(&p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, &p->frame, &p->got_frame, &p->avpkt);

        /* many decoders assign whole AVFrames, thus overwriting extended_data;
         * make sure it's set correctly */
        p->frame.extended_data = p->frame.data;

        if (p->state == STATE_SETTING_UP) ff_thread_finish_setup(avctx);

        pthread_mutex_lock(&p->progress_mutex);
#if 0 //BUFREF-FIXME
        for (i = 0; i < MAX_BUFFERS; i++)
            if (p->progress_used[i] && (p->got_frame || p->result<0 || avctx->codec_id != AV_CODEC_ID_H264)) {
                p->progress[i][0] = INT_MAX;
                p->progress[i][1] = INT_MAX;
            }
#endif
        p->state = STATE_INPUT_READY;

        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);
    }
    pthread_mutex_unlock(&p->mutex);

    return NULL;
}
Example #5
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    FrameThreadContext *fctx = p->parent;
    AVCodecContext *avctx = p->avctx;
    AVCodec *codec = avctx->codec;

    while (1) {
        int i;
        if (p->state == STATE_INPUT_READY && !fctx->die) {
            pthread_mutex_lock(&p->mutex);
            while (p->state == STATE_INPUT_READY && !fctx->die)
                pthread_cond_wait(&p->input_cond, &p->mutex);
            pthread_mutex_unlock(&p->mutex);
        }

        if (fctx->die) break;

        if (!codec->update_thread_context && (avctx->thread_safe_callbacks || avctx->get_buffer == avcodec_default_get_buffer))
            ff_thread_finish_setup(avctx);

        pthread_mutex_lock(&p->mutex);
        avcodec_get_frame_defaults(&p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, &p->frame, &p->got_frame, &p->avpkt);

        if (p->state == STATE_SETTING_UP) ff_thread_finish_setup(avctx);

        p->state = STATE_INPUT_READY;

        pthread_mutex_lock(&p->progress_mutex);
        for (i = 0; i < MAX_BUFFERS; i++)
            if (p->progress_used[i] && (p->got_frame || p->result<0 || avctx->codec_id != CODEC_ID_H264)) {
                p->progress[i][0] = INT_MAX;
                p->progress[i][1] = INT_MAX;
            }
        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);

        pthread_mutex_unlock(&p->mutex);
    }

    return NULL;
}
Example #6
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    FrameThreadContext *fctx = p->parent;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    while (1) {
        if (p->state == STATE_INPUT_READY && !fctx->die) {
            pthread_mutex_lock(&p->mutex);
            while (p->state == STATE_INPUT_READY && !fctx->die)
                pthread_cond_wait(&p->input_cond, &p->mutex);
            pthread_mutex_unlock(&p->mutex);
        }

        if (fctx->die) break;

        if (!codec->update_thread_context && avctx->thread_safe_callbacks)
            ff_thread_finish_setup(avctx);

        pthread_mutex_lock(&p->mutex);
        avcodec_get_frame_defaults(&p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, &p->frame, &p->got_frame, &p->avpkt);

        /* many decoders assign whole AVFrames, thus overwriting extended_data;
         * make sure it's set correctly */
        p->frame.extended_data = p->frame.data;

        if (p->state == STATE_SETTING_UP) ff_thread_finish_setup(avctx);

        p->state = STATE_INPUT_READY;

        pthread_mutex_lock(&p->progress_mutex);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);

        pthread_mutex_unlock(&p->mutex);
    }

    return NULL;
}
Example #7
static int hap_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    HapContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    int ret, length;
    int blocks = avctx->coded_width * avctx->coded_height / (TEXTURE_BLOCK_W * TEXTURE_BLOCK_H);

    bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);

    /* Check for section header */
    length = parse_section_header(avctx);
    if (length < 0) {
        av_log(avctx, AV_LOG_ERROR, "Frame is too small.\n");
        return length;
    }

    /* Prepare the texture buffer and decompress function */
    ret = setup_texture(avctx, length);
    if (ret < 0)
        return ret;

    /* Get the output frame ready to receive data */
    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    if (avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);

    /* Use the decompress function on the texture, one block per thread */
    avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, blocks);

    /* Frame is ready to be output */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
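Decoders such as hap_decode opt into this machinery through the frame-threading capability flag on their AVCodec entry. A schematic registration, with hypothetical names and the field list trimmed to the relevant members (older trees spell the flag CODEC_CAP_FRAME_THREADS):

AVCodec ff_sketch_decoder = {
    .name           = "sketch",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_NONE,        /* placeholder id */
    .decode         = sketch_decode,           /* hypothetical callback */
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};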
Example #8
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    pthread_mutex_lock(&p->mutex);
    while (1) {
        while (atomic_load(&p->state) == STATE_INPUT_READY && !p->die)
            pthread_cond_wait(&p->input_cond, &p->mutex);

        if (p->die) break;

        if (!codec->update_thread_context && THREAD_SAFE_CALLBACKS(avctx))
            ff_thread_finish_setup(avctx);

        /* If a decoder supports hwaccel, then it must call ff_get_format().
         * Since that call must happen before ff_thread_finish_setup(), the
         * decoder is required to implement update_thread_context() and call
         * ff_thread_finish_setup() manually. Therefore the above
         * ff_thread_finish_setup() call did not happen and hwaccel_serializing
         * cannot be true here. */
        av_assert0(!p->hwaccel_serializing);

        /* if the previous thread uses hwaccel then we take the lock to ensure
         * the threads don't run concurrently */
        if (avctx->hwaccel) {
            pthread_mutex_lock(&p->parent->hwaccel_mutex);
            p->hwaccel_serializing = 1;
        }

        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->internal->allocate_progress)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        if (atomic_load(&p->state) == STATE_SETTING_UP)
            ff_thread_finish_setup(avctx);

        if (p->hwaccel_serializing) {
            p->hwaccel_serializing = 0;
            pthread_mutex_unlock(&p->parent->hwaccel_mutex);
        }

        if (p->async_serializing) {
            p->async_serializing = 0;

            async_unlock(p->parent);
        }

        pthread_mutex_lock(&p->progress_mutex);

        atomic_store(&p->state, STATE_INPUT_READY);

        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);
    }
    pthread_mutex_unlock(&p->mutex);

    return NULL;
}
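The assertion in Example 8 leans on the rule spelled out in its comment: a hwaccel-capable decoder must provide update_thread_context() and call ff_thread_finish_setup() itself, after format negotiation. A condensed, assumed sketch of that decoder side (hypothetical names; ff_get_format() returns the negotiated pixel format or a negative error):

static int sketch_update_thread_context(AVCodecContext *dst,
                                        const AVCodecContext *src)
{
    /* copy the per-frame state that the next frame thread reads ... */
    return 0;
}

static int sketch_hwaccel_decode(AVCodecContext *avctx, void *data,
                                 int *got_frame, AVPacket *avpkt)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_VAAPI, AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
    };
    int ret;

    /* format negotiation (which may pick a hwaccel) has to happen
     * before the next frame thread is released */
    ret = ff_get_format(avctx, pix_fmts);
    if (ret < 0)
        return ret;
    avctx->pix_fmt = ret;

    ff_thread_finish_setup(avctx);

    /* ... decoding, possibly through avctx->hwaccel ... */
    *got_frame = 0;
    return avpkt->size;
}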
Example #9
static int hap_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    HapContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    int ret, i;
    int tex_size;

    bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);

    /* Check for section header */
    ret = hap_parse_frame_header(avctx);
    if (ret < 0)
        return ret;

    /* Get the output frame ready to receive data */
    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    if (avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);

    /* Unpack the DXT texture */
    if (hap_can_use_tex_in_place(ctx)) {
        /* Only DXTC texture compression in a contiguous block */
        ctx->tex_data = ctx->gbc.buffer;
        tex_size = bytestream2_get_bytes_left(&ctx->gbc);
    } else {
        /* Perform the second-stage decompression */
        ret = av_reallocp(&ctx->tex_buf, ctx->tex_size);
        if (ret < 0)
            return ret;

        avctx->execute2(avctx, decompress_chunks_thread, NULL,
                        ctx->chunk_results, ctx->chunk_count);

        for (i = 0; i < ctx->chunk_count; i++) {
            if (ctx->chunk_results[i] < 0)
                return ctx->chunk_results[i];
        }

        ctx->tex_data = ctx->tex_buf;
        tex_size = ctx->tex_size;
    }

    if (tex_size < (avctx->coded_width  / TEXTURE_BLOCK_W)
                  *(avctx->coded_height / TEXTURE_BLOCK_H)
                  *ctx->tex_rat) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient data\n");
        return AVERROR_INVALIDDATA;
    }

    /* Use the decompress function on the texture, one block per thread */
    avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
Example #10
File: dxv.c Project: AVLeo/libav
static int dxv_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DXVContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    GetByteContext *gbc = &ctx->gbc;
    int (*decompress_tex)(AVCodecContext *avctx);
    const char *msgcomp, *msgtext;
    uint32_t tag;
    int version_major, version_minor = 0;
    int size = 0, old_type = 0;
    int ret;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    tag = bytestream2_get_le32(gbc);
    switch (tag) {
    case MKBETAG('D', 'X', 'T', '1'):
        decompress_tex = dxv_decompress_dxt1;
        ctx->tex_funct = ctx->texdsp.dxt1_block;
        ctx->tex_rat   = 8;
        ctx->tex_step  = 8;
        msgcomp = "DXTR1";
        msgtext = "DXT1";
        break;
    case MKBETAG('D', 'X', 'T', '5'):
        decompress_tex = dxv_decompress_dxt5;
        ctx->tex_funct = ctx->texdsp.dxt5_block;
        ctx->tex_rat   = 4;
        ctx->tex_step  = 16;
        msgcomp = "DXTR5";
        msgtext = "DXT5";
        break;
    case MKBETAG('Y', 'C', 'G', '6'):
    case MKBETAG('Y', 'G', '1', '0'):
        avpriv_report_missing_feature(avctx, "Tag 0x%08X", tag);
        return AVERROR_PATCHWELCOME;
    default:
        /* Old version does not have a real header, just size and type. */
        size = tag & 0x00FFFFFF;
        old_type = tag >> 24;
        version_major = (old_type & 0x0F) - 1;

        if (old_type & 0x80) {
            msgcomp = "RAW";
            decompress_tex = dxv_decompress_raw;
        } else {
            msgcomp = "LZF";
            decompress_tex = dxv_decompress_lzf;
        }

        if (old_type & 0x40) {
            msgtext = "DXT5";

            ctx->tex_funct = ctx->texdsp.dxt5_block;
            ctx->tex_step  = 16;
        } else if (old_type & 0x20 || version_major == 1) {
            msgtext = "DXT1";

            ctx->tex_funct = ctx->texdsp.dxt1_block;
            ctx->tex_step  = 8;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported header (0x%08X)\n.", tag);
            return AVERROR_INVALIDDATA;
        }
        ctx->tex_rat = 1;
        break;
    }

    /* New header is 12 bytes long. */
    if (!old_type) {
        version_major = bytestream2_get_byte(gbc) - 1;
        version_minor = bytestream2_get_byte(gbc);

        /* Encoder copies texture data when compression is not advantageous. */
        if (bytestream2_get_byte(gbc)) {
            msgcomp = "RAW";
            ctx->tex_rat = 1;
            decompress_tex = dxv_decompress_raw;
        }

        bytestream2_skip(gbc, 1); // unknown
        size = bytestream2_get_le32(gbc);
    }
    av_log(avctx, AV_LOG_DEBUG,
           "%s compression with %s texture (version %d.%d)\n",
           msgcomp, msgtext, version_major, version_minor);

    if (size != bytestream2_get_bytes_left(gbc)) {
        av_log(avctx, AV_LOG_ERROR,
               "Incomplete or invalid file (header %d, left %d).\n",
               size, bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    ctx->tex_size = avctx->coded_width * avctx->coded_height * 4 / ctx->tex_rat;
    ret = av_reallocp(&ctx->tex_data, ctx->tex_size);
    if (ret < 0)
        return ret;

    /* Decompress texture out of the intermediate compression. */
    ret = decompress_tex(avctx);
    if (ret < 0)
        return ret;

    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    /* Now decompress the texture with the standard functions. */
    avctx->execute2(avctx, decompress_texture_thread,
                    tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output. */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
Example #11
File: dxv.c Project: vriera/libav
static int dxv_decode(AVCodecContext *avctx, void *data,
                      int *got_frame, AVPacket *avpkt)
{
    DXVContext *ctx = avctx->priv_data;
    ThreadFrame tframe;
    GetByteContext *gbc = &ctx->gbc;
    int (*decompress_tex)(AVCodecContext *avctx);
    uint32_t tag;
    int channels, size = 0, old_type = 0;
    int ret;

    bytestream2_init(gbc, avpkt->data, avpkt->size);

    tag = bytestream2_get_le32(gbc);
    switch (tag) {
    case MKBETAG('D', 'X', 'T', '1'):
        decompress_tex = dxv_decompress_dxt1;
        ctx->tex_funct = ctx->texdsp.dxt1_block;
        ctx->tex_rat   = 8;
        ctx->tex_step  = 8;
        av_log(avctx, AV_LOG_DEBUG, "DXTR1 compression and DXT1 texture ");
        break;
    case MKBETAG('D', 'X', 'T', '5'):
        decompress_tex = dxv_decompress_dxt5;
        ctx->tex_funct = ctx->texdsp.dxt5_block;
        ctx->tex_rat   = 4;
        ctx->tex_step  = 16;
        av_log(avctx, AV_LOG_DEBUG, "DXTR5 compression and DXT5 texture ");
        break;
    case MKBETAG('Y', 'C', 'G', '6'):
    case MKBETAG('Y', 'G', '1', '0'):
        avpriv_report_missing_feature(avctx, "Tag 0x%08X", tag);
        return AVERROR_PATCHWELCOME;
    default:
        /* Old version does not have a real header, just size and type. */
        size = tag & 0x00FFFFFF;
        old_type = tag >> 24;
        channels = old_type & 0x0F;
        if (old_type & 0x40) {
            av_log(avctx, AV_LOG_DEBUG, "LZF compression and DXT5 texture ");
            ctx->tex_funct = ctx->texdsp.dxt5_block;
            ctx->tex_step  = 16;
        } else if (old_type & 0x20) {
            av_log(avctx, AV_LOG_DEBUG, "LZF compression and DXT1 texture ");
            ctx->tex_funct = ctx->texdsp.dxt1_block;
            ctx->tex_step  = 8;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported header (0x%08X)\n.", tag);
            return AVERROR_INVALIDDATA;
        }
        decompress_tex = dxv_decompress_lzf;
        ctx->tex_rat = 1;
        break;
    }

    /* New header is 12 bytes long. */
    if (!old_type) {
        channels = bytestream2_get_byte(gbc);
        bytestream2_skip(gbc, 3); // unknown
        size = bytestream2_get_le32(gbc);
    }
    av_log(avctx, AV_LOG_DEBUG, "(%d channels)\n", channels);

    if (size != bytestream2_get_bytes_left(gbc)) {
        av_log(avctx, AV_LOG_ERROR, "Incomplete or invalid file (%u > %u)\n.",
               size, bytestream2_get_bytes_left(gbc));
        return AVERROR_INVALIDDATA;
    }

    ctx->tex_size = avctx->coded_width * avctx->coded_height * 4 / ctx->tex_rat;
    ret = av_reallocp(&ctx->tex_data, ctx->tex_size);
    if (ret < 0)
        return ret;

    /* Decompress texture out of the intermediate compression. */
    ret = decompress_tex(avctx);
    if (ret < 0)
        return ret;

    tframe.f = data;
    ret = ff_thread_get_buffer(avctx, &tframe, 0);
    if (ret < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    /* Now decompress the texture with the standard functions. */
    avctx->execute2(avctx, decompress_texture_thread,
                    tframe.f, NULL, ctx->slice_count);

    /* Frame is ready to be output. */
    tframe.f->pict_type = AV_PICTURE_TYPE_I;
    tframe.f->key_frame = 1;
    *got_frame = 1;

    return avpkt->size;
}
Example #12
static int libopenjpeg_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    AVPacket *avpkt)
{
    uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    LibOpenJPEGContext *ctx = avctx->priv_data;
    AVFrame *picture = &ctx->image, *output = data;
    opj_dinfo_t *dec;
    opj_cio_t *stream;
    opj_image_t *image;
    int width, height, has_alpha = 0, ret = -1;
    int x, y, index;
    uint8_t *img_ptr;
    int adjust[4];

    *data_size = 0;

    // Check if input is a raw jpeg2k codestream or in jp2 wrapping
    if((AV_RB32(buf) == 12) &&
            (AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
            (AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
        dec = opj_create_decompress(CODEC_JP2);
    } else {
        // If the AVPacket contains a jp2c box, then skip to
        // the starting byte of the codestream.
        if (AV_RB32(buf + 4) == AV_RB32("jp2c"))
            buf += 8;
        dec = opj_create_decompress(CODEC_J2K);
    }

    if(!dec) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
        return -1;
    }
    opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL);

    ctx->dec_params.cp_limit_decoding = LIMIT_TO_MAIN_HEADER;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
    if(!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // Decode the header only
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if(!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        opj_destroy_decompress(dec);
        return -1;
    }
    width  = image->x1 - image->x0;
    height = image->y1 - image->y0;
    if(av_image_check_size(width, height, 0, avctx) < 0) {
        av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height);
        goto done;
    }
    avcodec_set_dimensions(avctx, width, height);

    switch(image->numcomps)
    {
    case 1:
        avctx->pix_fmt = PIX_FMT_GRAY8;
        break;
    case 3:
        if(check_image_attributes(image)) {
            avctx->pix_fmt = PIX_FMT_RGB24;
        } else {
            avctx->pix_fmt = PIX_FMT_GRAY8;
            av_log(avctx, AV_LOG_ERROR, "Only first component will be used.\n");
        }
        break;
    case 4:
        has_alpha = 1;
        avctx->pix_fmt = PIX_FMT_RGBA;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "%d components unsupported.\n", image->numcomps);
        goto done;
    }

    if(picture->data[0])
        ff_thread_release_buffer(avctx, picture);

    if(ff_thread_get_buffer(avctx, picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_thread_get_buffer() failed\n");
        return -1;
    }

    ff_thread_finish_setup(avctx);

    ctx->dec_params.cp_limit_decoding = NO_LIMITATION;
    ctx->dec_params.cp_reduce = avctx->lowres;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
    if(!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // Decode the codestream
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);

    for(x = 0; x < image->numcomps; x++) {
        adjust[x] = FFMAX(image->comps[x].prec - 8, 0);
    }

    for(y = 0; y < avctx->height; y++) {
        index = y*avctx->width;
        img_ptr = picture->data[0] + y*picture->linesize[0];
        for(x = 0; x < avctx->width; x++, index++) {
            *img_ptr++ = image->comps[0].data[index] >> adjust[0];
            if(image->numcomps > 2 && check_image_attributes(image)) {
                *img_ptr++ = image->comps[1].data[index] >> adjust[1];
                *img_ptr++ = image->comps[2].data[index] >> adjust[2];
                if(has_alpha)
                    *img_ptr++ = image->comps[3].data[index] >> adjust[3];
            }
        }
    }