static void libschroedinger_handle_first_access_unit(AVCodecContext *avccontext)
{
    FfmpegSchroDecoderParams *p_schro_params = avccontext->priv_data;
    SchroDecoder *decoder = p_schro_params->decoder;

    p_schro_params->format = schro_decoder_get_video_format(decoder);

    /* Tell FFmpeg about sequence details. */
    if (avcodec_check_dimensions(avccontext, p_schro_params->format->width,
                                 p_schro_params->format->height) < 0) {
        av_log(avccontext, AV_LOG_ERROR, "invalid dimensions (%dx%d)\n",
               p_schro_params->format->width, p_schro_params->format->height);
        avccontext->height = avccontext->width = 0;
        return;
    }
    avccontext->height  = p_schro_params->format->height;
    avccontext->width   = p_schro_params->format->width;
    avccontext->pix_fmt =
                   GetFfmpegChromaFormat(p_schro_params->format->chroma_format);

    if (ff_get_schro_frame_format(p_schro_params->format->chroma_format,
                                  &p_schro_params->frame_format) == -1) {
        av_log(avccontext, AV_LOG_ERROR,
               "This codec currently only supports planar YUV 4:2:0, 4:2:2 "
               "and 4:4:4 formats.\n");
        return;
    }

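    /* the time base is the reciprocal of the source frame rate */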
    avccontext->time_base.den = p_schro_params->format->frame_rate_numerator;
    avccontext->time_base.num = p_schro_params->format->frame_rate_denominator;

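    /* allocate the decode picture once the output format is known */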
    if (!p_schro_params->dec_pic.data[0]) {
        avpicture_alloc(&p_schro_params->dec_pic,
                        avccontext->pix_fmt,
                        avccontext->width,
                        avccontext->height);
    }
}
Example #2
static int libdirac_decode_frame(AVCodecContext *avccontext,
                                 void *data, int *data_size,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;

    FfmpegDiracDecoderParams *p_dirac_params = avccontext->priv_data;
    AVPicture *picture = data;
    AVPicture pic;
    int pict_size;
    unsigned char *buffer[3];

    *data_size = 0;

    if (buf_size > 0) {
        /* feed the compressed input buffer to the decoder */
        dirac_buffer(p_dirac_params->p_decoder, buf, buf + buf_size);
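        /* parse code (buf[4]): bit 3 marks a picture, the low two bits give
           the reference count; pictures with references may be reordered */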
        if ((buf[4] & 0x08) == 0x08 && (buf[4] & 0x03))
            avccontext->has_b_frames = 1;
    }
    while (1) {
        /* parse data and process result */
        DecoderState state = dirac_parse(p_dirac_params->p_decoder);
        switch (state) {
        case STATE_BUFFER:
            return buf_size;

        case STATE_SEQUENCE:
        {
            /* tell FFmpeg about sequence details */
            dirac_sourceparams_t *src_params = &p_dirac_params->p_decoder->src_params;

            if (av_image_check_size(src_params->width, src_params->height,
                                    0, avccontext) < 0) {
                av_log(avccontext, AV_LOG_ERROR, "Invalid dimensions (%dx%d)\n",
                       src_params->width, src_params->height);
                avccontext->height = avccontext->width = 0;
                return -1;
            }

            avccontext->height = src_params->height;
            avccontext->width  = src_params->width;

            avccontext->pix_fmt = GetFfmpegChromaFormat(src_params->chroma);
            if (avccontext->pix_fmt == PIX_FMT_NONE) {
                av_log(avccontext, AV_LOG_ERROR,
                       "Dirac chroma format %d not supported currently\n",
                       src_params->chroma);
                return -1;
            }

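            /* time_base is the reciprocal of the source frame rate */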
            avccontext->time_base.den = src_params->frame_rate.numerator;
            avccontext->time_base.num = src_params->frame_rate.denominator;

            /* compute plane linesizes for the output picture (no data yet) */
            avpicture_fill(&pic, NULL, avccontext->pix_fmt,
                           avccontext->width, avccontext->height);

            pict_size = avpicture_get_size(avccontext->pix_fmt,
                                           avccontext->width,
                                           avccontext->height);

            /* allocate output buffer */
            if (!p_dirac_params->p_out_frame_buf)
                p_dirac_params->p_out_frame_buf = av_malloc(pict_size);
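            /* plane pointers into the single contiguous output buffer */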
            buffer[0] = p_dirac_params->p_out_frame_buf;
            buffer[1] = p_dirac_params->p_out_frame_buf +
                        pic.linesize[0] * avccontext->height;
            buffer[2] = buffer[1] +
                        pic.linesize[1] * src_params->chroma_height;

            /* tell Dirac about output destination */
            dirac_set_buf(p_dirac_params->p_decoder, buffer, NULL);
            break;
        }
        case STATE_SEQUENCE_END:
            break;

        case STATE_PICTURE_AVAIL:
            /* fill picture with current buffer data from Dirac */
            avpicture_fill(picture, p_dirac_params->p_out_frame_buf,
                           avccontext->pix_fmt,
                           avccontext->width, avccontext->height);
            *data_size = sizeof(AVPicture);
            return buf_size;

        case STATE_INVALID:
            return -1;

        default:
            break;
        }
    }

    return buf_size;
}