Example #1
static int get_buffer(AVCodecContext *avctx, AVFrame *pic)
{
	GstBuffer *out_buf;
	GstFlowReturn ret;
	struct obj *self = avctx->opaque;
	int width = avctx->width;
	int height = avctx->height;

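	/* Let the codec round the dimensions up to its alignment requirements */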
	avcodec_align_dimensions(avctx, &width, &height);

	pic->linesize[0] = width;
	pic->linesize[1] = width / 2;
	pic->linesize[2] = width / 2;

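	/* No padding needed: decode straight into a GStreamer buffer from the src pad (zero-copy) */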
	if (avctx->width == width && avctx->height == height) {
		ret = gst_pad_alloc_buffer_and_set_caps(self->srcpad, 0,
				width * height * 3 / 2,
				self->srcpad->caps, &out_buf);
		if (ret != GST_FLOW_OK)
			return -1;
		gst_buffer_ref(out_buf);
		pic->opaque = out_buf;

		pic->data[0] = out_buf->data;
		pic->data[1] = pic->data[0] + pic->linesize[0] * height;
		pic->data[2] = pic->data[1] + pic->linesize[1] * height / 2;
	} else {
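		/* Padding required: fall back to a private buffer allocated by libavutil */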
		int err = av_image_alloc(pic->base, pic->linesize, width, height,
				avctx->pix_fmt, 1);
		if (err < 0)
			return err;
		for (unsigned i = 0; i < 3; i++)
			pic->data[i] = pic->base[i];
	}

	pic->type = FF_BUFFER_TYPE_USER;

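	/* Propagate the packet timestamp so the decoded frame keeps its PTS */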
	if (avctx->pkt)
		pic->pkt_pts = avctx->pkt->pts;
	else
		pic->pkt_pts = AV_NOPTS_VALUE;

	return 0;
}
Example #2
static picture_t *ffmpeg_dr_GetFrameBuf(struct AVCodecContext *p_context)
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    decoder_sys_t *p_sys = p_dec->p_sys;

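    /* Ask the codec for the padded dimensions it requires */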
    int i_width = p_context->width;
    int i_height = p_context->height;
    avcodec_align_dimensions( p_context, &i_width, &i_height );

    picture_t *p_pic = NULL;
    if (GetVlcChroma(&p_dec->fmt_out.video, p_context->pix_fmt) != VLC_SUCCESS)
        goto no_dr;

    if (p_context->pix_fmt == PIX_FMT_PAL8)
        goto no_dr;

    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    p_pic = ffmpeg_NewPictBuf( p_dec, p_context );
    if( !p_pic )
        goto no_dr;

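    /* The picture from the video output must cover at least the aligned dimensions */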
    if( p_pic->p[0].i_pitch / p_pic->p[0].i_pixel_pitch < i_width ||
        p_pic->p[0].i_lines < i_height )
        goto no_dr;

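    /* Each plane must satisfy the codec's pitch and pointer alignment requirements */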
    for( int i = 0; i < p_pic->i_planes; i++ )
    {
        unsigned i_align;
        switch( p_sys->i_codec_id )
        {
        case AV_CODEC_ID_SVQ1:
        case AV_CODEC_ID_VP5:
        case AV_CODEC_ID_VP6:
        case AV_CODEC_ID_VP6F:
        case AV_CODEC_ID_VP6A:
            i_align = 16;
            break;
        default:
            i_align = i == 0 ? 16 : 8;
            break;
        }
        if( p_pic->p[i].i_pitch % i_align )
            goto no_dr;
        if( (intptr_t)p_pic->p[i].p_pixels % i_align )
            goto no_dr;
    }

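    /* For planar 4:2:2 the chroma pitches must be exactly half the luma pitch */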
    if( p_context->pix_fmt == PIX_FMT_YUV422P )
    {
        if( 2 * p_pic->p[1].i_pitch != p_pic->p[0].i_pitch ||
            2 * p_pic->p[2].i_pitch != p_pic->p[0].i_pitch )
            goto no_dr;
    }

    return p_pic;

no_dr:
    if (p_pic)
        decoder_DeletePicture( p_dec, p_pic );

    return NULL;
}
Example #3
/** @internal @This is called by avcodec when allocating a new frame
 * @param context current avcodec context
 * @param frame avframe handler entering avcodec black magic box
 */
static int upipe_avcdec_get_buffer(struct AVCodecContext *context, AVFrame *frame)
{
    struct upipe *upipe = context->opaque;
    struct upipe_avcdec *upipe_avcdec = upipe_avcdec_from_upipe(upipe);
    struct ubuf *ubuf_pic;
    int width_aligned, height_aligned, i;
    const struct upipe_av_plane *planes = NULL;
    size_t stride = 0;

    frame->opaque = uref_dup(upipe_avcdec->uref);

    uint64_t framenum = 0;
    uref_pic_get_number(frame->opaque, &framenum);

    upipe_dbg_va(upipe, "Allocating frame for %"PRIu64" (%p) - %dx%d",
                 framenum, frame->opaque, frame->width, frame->height);

    if (unlikely(!upipe_avcdec->pixfmt)) {
        upipe_avcdec->pixfmt = upipe_av_pixfmt_from_ubuf_mgr(upipe_avcdec->ubuf_mgr);
        if (unlikely(!upipe_avcdec->pixfmt)) {
            upipe_err_va(upipe, "frame format of ubuf manager not recognized");
            return -1;
        }
    }
    if (context->pix_fmt != *upipe_avcdec->pixfmt->pixfmt) {
        upipe_err_va(upipe, "frame format not compatible (%s != %s)",
                     av_get_pix_fmt_name(context->pix_fmt),
                     av_get_pix_fmt_name(*upipe_avcdec->pixfmt->pixfmt));
        return -1;
    }
    planes = upipe_avcdec->pixfmt->planes;

    /* direct rendering - allocate ubuf pic */
    if (upipe_avcdec->context->codec->capabilities & CODEC_CAP_DR1) {
        width_aligned = context->width;
        height_aligned = context->height;

        /* use avcodec width/height alignment, then resize pic */
        avcodec_align_dimensions(context, &width_aligned, &height_aligned);
        ubuf_pic = ubuf_pic_alloc(upipe_avcdec->ubuf_mgr, width_aligned, height_aligned);

        if (likely(ubuf_pic)) {
            ubuf_pic_resize(ubuf_pic, 0, 0, context->width, context->height);
            uref_attach_ubuf(frame->opaque, ubuf_pic);

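            /* Map each plane for writing and report its stride back to avcodec */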
            for (i=0; i < 4 && planes[i].chroma; i++) {
                ubuf_pic_plane_write(ubuf_pic, planes[i].chroma,
                        0, 0, -1, -1, &frame->data[i]);
                ubuf_pic_plane_size(ubuf_pic, planes[i].chroma, &stride,
                        NULL, NULL, NULL);
                frame->linesize[i] = stride;
            }

            frame->extended_data = frame->data;
            frame->type = FF_BUFFER_TYPE_USER;
            
            return 1; /* success */
        } else {
            upipe_dbg_va(upipe, "ubuf_pic_alloc(%d, %d) failed, fallback", width_aligned, height_aligned);
        }
    }

    /* default: DR failed or not available */
    return avcodec_default_get_buffer(context, frame);
}
Example #4
static int ffmpeg_GetFrameBuf( struct AVCodecContext *p_context,
                               AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;

    /* Set picture PTS */
    ffmpeg_SetFrameBufferPts( p_dec, p_ff_pic );

    /* */
    p_ff_pic->opaque = NULL;

    if( p_sys->p_va )
    {
#ifdef HAVE_AVCODEC_VA
        /* hwaccel_context is not present in old FFmpeg versions */
        if( vlc_va_Setup( p_sys->p_va,
                          &p_sys->p_context->hwaccel_context, &p_dec->fmt_out.video.i_chroma,
                          p_sys->p_context->width, p_sys->p_context->height ) )
        {
            msg_Err( p_dec, "vlc_va_Setup failed" );
            return -1;
        }
#else
        assert(0);
#endif

        /* */
        p_ff_pic->type = FF_BUFFER_TYPE_USER;
        /* FIXME: not clear what a good value is; taken from ffmpeg */
        p_ff_pic->age = 256*256*256*64;

        if( vlc_va_Get( p_sys->p_va, p_ff_pic ) )
        {
            msg_Err( p_dec, "VaGrabSurface failed" );
            return -1;
        }
        return 0;
    }
    else if( !p_sys->b_direct_rendering )
    {
        /* Not much to do in indirect rendering mode. */
        return avcodec_default_get_buffer( p_context, p_ff_pic );
    }

    /* Some codecs set pix_fmt only after the 1st frame has been decoded,
     * so we need to check for direct rendering again. */

    int i_width = p_sys->p_context->width;
    int i_height = p_sys->p_context->height;
    avcodec_align_dimensions( p_sys->p_context, &i_width, &i_height );

    if( GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) != VLC_SUCCESS ||
        p_context->pix_fmt == PIX_FMT_PAL8 )
        goto no_dr;

    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    /* Get a new picture */
    p_pic = ffmpeg_NewPictBuf( p_dec, p_sys->p_context );
    if( !p_pic )
        goto no_dr;
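    /* Check that the picture's geometry and alignment are compatible with direct rendering */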
    bool b_compatible = true;
    if( p_pic->p[0].i_pitch / p_pic->p[0].i_pixel_pitch < i_width ||
        p_pic->p[0].i_lines < i_height )
        b_compatible = false;
    for( int i = 0; i < p_pic->i_planes && b_compatible; i++ )
    {
        unsigned i_align;
        switch( p_sys->i_codec_id )
        {
        case CODEC_ID_SVQ1:
        case CODEC_ID_VP5:
        case CODEC_ID_VP6:
        case CODEC_ID_VP6F:
        case CODEC_ID_VP6A:
            i_align = 16;
            break;
        default:
            i_align = i == 0 ? 16 : 8;
            break;
        }
        if( p_pic->p[i].i_pitch % i_align )
            b_compatible = false;
        if( (intptr_t)p_pic->p[i].p_pixels % i_align )
            b_compatible = false;
    }
    if( p_context->pix_fmt == PIX_FMT_YUV422P && b_compatible )
    {
        if( 2 * p_pic->p[1].i_pitch != p_pic->p[0].i_pitch ||
            2 * p_pic->p[2].i_pitch != p_pic->p[0].i_pitch )
            b_compatible = false;
    }
    if( !b_compatible )
    {
        decoder_DeletePicture( p_dec, p_pic );
        goto no_dr;
    }

    if( p_sys->i_direct_rendering_used != 1 )
    {
        msg_Dbg( p_dec, "using direct rendering" );
        p_sys->i_direct_rendering_used = 1;
    }

    p_sys->p_context->draw_horiz_band = NULL;

    p_ff_pic->opaque = (void*)p_pic;
    p_ff_pic->type = FF_BUFFER_TYPE_USER;
    p_ff_pic->data[0] = p_pic->p[0].p_pixels;
    p_ff_pic->data[1] = p_pic->p[1].p_pixels;
    p_ff_pic->data[2] = p_pic->p[2].p_pixels;
    p_ff_pic->data[3] = NULL; /* alpha channel but I'm not sure */

    p_ff_pic->linesize[0] = p_pic->p[0].i_pitch;
    p_ff_pic->linesize[1] = p_pic->p[1].i_pitch;
    p_ff_pic->linesize[2] = p_pic->p[2].i_pitch;
    p_ff_pic->linesize[3] = 0;

    decoder_LinkPicture( p_dec, p_pic );

    /* FIXME: not clear what a good value is; taken from ffmpeg */
    p_ff_pic->age = 256*256*256*64;

    return 0;

no_dr:
    if( p_sys->i_direct_rendering_used != 0 )
    {
        msg_Warn( p_dec, "disabling direct rendering" );
        p_sys->i_direct_rendering_used = 0;
    }
    return avcodec_default_get_buffer( p_context, p_ff_pic );
}
Example #5
static int ffmpeg_GetFrameBuf( struct AVCodecContext *p_context,
                               AVFrame *p_ff_pic )
{
    decoder_t *p_dec = (decoder_t *)p_context->opaque;
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;

    /* Set picture PTS */
    ffmpeg_SetFrameBufferPts( p_dec, p_ff_pic );

    /* */
    p_ff_pic->opaque = NULL;

    if( p_sys->p_va )
    {
#ifdef HAVE_AVCODEC_VAAPI
        /* hwaccel_context is not present in old FFmpeg versions */
        if( VaSetup( p_sys->p_va,
                     &p_sys->p_context->hwaccel_context, &p_dec->fmt_out.video.i_chroma,
                     p_sys->p_context->width, p_sys->p_context->height ) )
        {
            msg_Err( p_dec, "VaSetup failed" );
            return -1;
        }
#else
        assert(0);
#endif

        /* */
        p_ff_pic->type = FF_BUFFER_TYPE_USER;
        /* FIXME: not clear what a good value is; taken from ffmpeg */
        p_ff_pic->age = 256*256*256*64;

        if( VaGrabSurface( p_sys->p_va, p_ff_pic ) )
        {
            msg_Err( p_dec, "VaGrabSurface failed" );
            return -1;
        }
        return 0;
    }
    else if( !p_sys->b_direct_rendering )
    {
        /* Not much to do in indirect rendering mode */
        return avcodec_default_get_buffer( p_context, p_ff_pic );
    }

    /* Some codecs set pix_fmt only after the 1st frame has been decoded,
     * so we need to check for direct rendering again. */

    int i_width = p_sys->p_context->width;
    int i_height = p_sys->p_context->height;
    avcodec_align_dimensions( p_sys->p_context, &i_width, &i_height );

    if( GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) != VLC_SUCCESS ||
        p_sys->p_context->width % 16 || p_sys->p_context->height % 16 ||
        /* We only pad picture up to 16 */
        PAD(p_sys->p_context->width,16) < i_width || PAD(p_sys->p_context->height,16) < i_height ||
        p_context->pix_fmt == PIX_FMT_PAL8 )
        return avcodec_default_get_buffer( p_context, p_ff_pic );

    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    /* Get a new picture */
    p_pic = ffmpeg_NewPictBuf( p_dec, p_sys->p_context );
    if( !p_pic )
        return avcodec_default_get_buffer( p_context, p_ff_pic );

    p_sys->p_context->draw_horiz_band = NULL;

    p_ff_pic->opaque = (void*)p_pic;
    p_ff_pic->type = FF_BUFFER_TYPE_USER;
    p_ff_pic->data[0] = p_pic->p[0].p_pixels;
    p_ff_pic->data[1] = p_pic->p[1].p_pixels;
    p_ff_pic->data[2] = p_pic->p[2].p_pixels;
    p_ff_pic->data[3] = NULL; /* alpha channel but I'm not sure */

    p_ff_pic->linesize[0] = p_pic->p[0].i_pitch;
    p_ff_pic->linesize[1] = p_pic->p[1].i_pitch;
    p_ff_pic->linesize[2] = p_pic->p[2].i_pitch;
    p_ff_pic->linesize[3] = 0;

    decoder_LinkPicture( p_dec, p_pic );

    /* FIXME: not clear what a good value is; taken from ffmpeg */
    p_ff_pic->age = 256*256*256*64;

    return 0;
}