Code Example #1
File: StreamReceiver.cpp  Project: a-roy/blobcast
void StreamReceiver::ReceiveFrame(uint8_t *data)
{
	AVPacket *pkt = av_packet_alloc();
	/* av_packet_alloc() already initializes the packet, so no av_init_packet() is needed. */
	if (av_read_frame(avfmt, pkt) < 0)
	{
		av_packet_free(&pkt); /* don't leak the packet on early return */
		return;
	}
	int got_picture;
	int ret = avcodec_decode_video2(avctx, avframe, &got_picture, pkt);
	av_packet_free(&pkt); /* av_packet_free() also unrefs, so no separate av_packet_unref() is needed */
	if (ret < 0 || got_picture == 0)
		return;

	int linesize_align[AV_NUM_DATA_POINTERS];
	avcodec_align_dimensions2(
			avctx, &avframe->width, &avframe->height, linesize_align);

	uint8_t *const dstSlice[] = { data };
	int dstStride[] = { width * 4 };
	if (data != nullptr)
		sws_scale(
				swctx,
				avframe->data, avframe->linesize,
				0, STREAM_HEIGHT,
				dstSlice, dstStride);
	av_frame_unref(avframe);
}
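Note that avcodec_decode_video2() is deprecated in current FFmpeg (and gone in FFmpeg 5.x) in favour of the send/receive API. A minimal sketch of the equivalent read-and-decode step, assuming the same avfmt, avctx, and avframe objects used above (receive_one_frame is a hypothetical name):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Returns 0 when avframe holds a decoded picture, a negative AVERROR otherwise. */
static int receive_one_frame(AVFormatContext *avfmt, AVCodecContext *avctx,
                             AVFrame *avframe)
{
    AVPacket *pkt = av_packet_alloc();
    int ret = av_read_frame(avfmt, pkt);
    if (ret >= 0)
        ret = avcodec_send_packet(avctx, pkt);
    av_packet_free(&pkt); /* the decoder keeps its own reference to the data */
    if (ret < 0)
        return ret;
    /* May return AVERROR(EAGAIN) when the decoder needs more input. */
    return avcodec_receive_frame(avctx, avframe);
}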
Code Example #2
File: video_output.c  Project: eric888a/L-SMASH-Works
func_get_buffer_t *setup_video_rendering
(
    lw_video_output_handler_t *lw_vohp,
    AVCodecContext            *ctx,
    VSVideoInfo               *vi,
    int                        width,
    int                        height
)
{
    vs_video_output_handler_t *vs_vohp = (vs_video_output_handler_t *)lw_vohp->private_handler;
    vs_vohp->direct_rendering &= vs_check_dr_available( ctx, ctx->pix_fmt );
    if( vs_vohp->variable_info )
    {
        vi->format = NULL;
        vi->width  = 0;
        vi->height = 0;
    }
    else
    {
        const VSAPI *vsapi = vs_vohp->vsapi;
        vi->format = vsapi->getFormatPreset( vs_vohp->vs_output_pixel_format, vs_vohp->core );
        vi->width  = width;
        vi->height = height;
        if( vs_vohp->direct_rendering )
        {
            /* Align output width and height for direct rendering. */
            int linesize_align[AV_NUM_DATA_POINTERS];
            enum AVPixelFormat input_pixel_format = ctx->pix_fmt;
            ctx->pix_fmt = lw_vohp->scaler.output_pixel_format;
            avcodec_align_dimensions2( ctx, &vi->width, &vi->height, linesize_align );
            ctx->pix_fmt = input_pixel_format;
        }
        vs_vohp->background_frame = vsapi->newVideoFrame( vi->format, vi->width, vi->height, NULL, vs_vohp->core );
        if( !vs_vohp->background_frame )
            return NULL;
        vs_vohp->make_black_background( vs_vohp->background_frame, vsapi );
    }
    lw_vohp->output_width  = vi->width;
    lw_vohp->output_height = vi->height;
    /* Set up custom get_buffer() for direct rendering if available. */
    if( vs_vohp->direct_rendering )
    {
        ctx->get_buffer2 = vs_video_get_buffer;
        ctx->opaque      = lw_vohp;
        ctx->flags      |= CODEC_FLAG_EMU_EDGE; /* required for direct rendering on older libavcodec; removed in newer versions */
    }
    return ctx->get_buffer2;
}
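The temporary pix_fmt swap above works because avcodec_align_dimensions2() computes its alignment requirements from ctx->pix_fmt. A small helper capturing that trick (hypothetical name, a sketch only):

#include <libavcodec/avcodec.h>

/* Align *width/*height for an output pixel format that differs from the
 * decoder's current one, as setup_video_rendering() does above. */
static void align_dimensions_for_format( AVCodecContext *ctx,
                                         enum AVPixelFormat out_fmt,
                                         int *width, int *height )
{
    int linesize_align[AV_NUM_DATA_POINTERS];
    enum AVPixelFormat saved = ctx->pix_fmt;
    ctx->pix_fmt = out_fmt;   /* avcodec_align_dimensions2() keys off ctx->pix_fmt */
    avcodec_align_dimensions2( ctx, width, height, linesize_align );
    ctx->pix_fmt = saved;
}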
Code Example #3
File: lavc.c  Project: gz818/obe-rt
int obe_get_buffer( AVCodecContext *codec, AVFrame *pic )
{
    int w = codec->width;
    int h = codec->height;
    int stride[AV_NUM_DATA_POINTERS]; /* avcodec_align_dimensions2() fills one entry per data pointer */

    avcodec_align_dimensions2( codec, &w, &h, stride );

    /* Only EDGE_EMU codecs are used
     * Allocate an extra line so that SIMD can modify the entire stride for every active line */
    if( av_image_alloc( pic->data, pic->linesize, w, h + 1, codec->pix_fmt, 32 ) < 0 )
        return -1;

    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    pic->pkt_pts = codec->pkt ? codec->pkt->pts : AV_NOPTS_VALUE;

    return 0;
}
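A buffer obtained from av_image_alloc() is a single allocation anchored at pic->data[0], so the matching release is one av_freep() call. A minimal sketch of the cleanup this frame eventually needs (assumed, since it is not shown in the obe-rt listing):

#include <libavutil/frame.h>
#include <libavutil/mem.h>

/* Release a frame whose planes were allocated with av_image_alloc(). */
static void release_user_buffer( AVFrame *pic )
{
    av_freep( &pic->data[0] ); /* one allocation backs all planes */
}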
Code Example #4
File: video_output.c  Project: myrsloik/L-SMASH-Works
static int vs_video_get_buffer
(
    AVCodecContext *ctx,
    AVFrame        *av_frame,
    int             flags
)
{
    av_frame->opaque = NULL;
    lw_video_output_handler_t *lw_vohp = (lw_video_output_handler_t *)ctx->opaque;
    vs_video_output_handler_t *vs_vohp = (vs_video_output_handler_t *)lw_vohp->private_handler;
    enum AVPixelFormat pix_fmt = av_frame->format;
    avoid_yuv_scale_conversion( &pix_fmt );
    av_frame->format = pix_fmt; /* Don't use AV_PIX_FMT_YUVJ*. */
    if( (!vs_vohp->variable_info && lw_vohp->scaler.output_pixel_format != pix_fmt)
     || !vs_check_dr_available( ctx, pix_fmt ) )
        return avcodec_default_get_buffer2( ctx, av_frame, flags );
    /* New VapourSynth video frame buffer. */
    vs_video_buffer_handler_t *vs_vbhp = malloc( sizeof(vs_video_buffer_handler_t) );
    if( !vs_vbhp )
    {
        av_frame_unref( av_frame );
        return AVERROR( ENOMEM );
    }
    av_frame->opaque = vs_vbhp;
    avcodec_align_dimensions2( ctx, &av_frame->width, &av_frame->height, av_frame->linesize );
    VSFrameRef *vs_frame_buffer = new_output_video_frame( vs_vohp, av_frame, NULL, NULL, 0,
                                                          vs_vohp->frame_ctx, vs_vohp->core, vs_vohp->vsapi );
    if( !vs_frame_buffer )
    {
        free( vs_vbhp );
        av_frame_unref( av_frame );
        return AVERROR( ENOMEM );
    }
    vs_vbhp->vs_frame_buffer = vs_frame_buffer;
    vs_vbhp->vsapi           = vs_vohp->vsapi;
    /* Create frame buffers for the decoder.
     * The callback vs_video_release_buffer_handler() is invoked once no reference to the video buffer handler remains.
     * The callback vs_video_unref_buffer_handler() decrements the reference counter by 1. */
    memset( av_frame->buf,      0, sizeof(av_frame->buf) );
    memset( av_frame->data,     0, sizeof(av_frame->data) );
    memset( av_frame->linesize, 0, sizeof(av_frame->linesize) );
    AVBufferRef *vs_buffer_handler = av_buffer_create( NULL, 0, vs_video_release_buffer_handler, vs_vbhp, 0 );
    if( !vs_buffer_handler )
    {
        vs_video_release_buffer_handler( vs_vbhp, NULL );
        av_frame_unref( av_frame );
        return AVERROR( ENOMEM );
    }
    vs_vohp->component_reorder = get_component_reorder( pix_fmt );
    for( int i = 0; i < 3; i++ )
        if( vs_create_plane_buffer( vs_vbhp, vs_buffer_handler, av_frame, i, vs_vohp->component_reorder[i] ) < 0 )
            goto fail;
    /* The local 'vs_buffer_handler' reference is no longer needed here; each plane buffer created above holds its own reference. */
    av_buffer_unref( &vs_buffer_handler );
    av_frame->nb_extended_buf = 0;
    av_frame->extended_data   = av_frame->data;
    return 0;
fail:
    av_frame_unref( av_frame );
    av_buffer_unref( &vs_buffer_handler );
    return AVERROR( ENOMEM );
}
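The pattern above hands planes that live inside a VapourSynth frame to libavcodec by wrapping them in AVBufferRefs with a custom release callback. A minimal sketch of that wrapping step, with my_handle_t, my_release() and wrap_plane() as hypothetical stand-ins (not thread-safe; the real code uses av_buffer_ref() on a zero-size handler buffer instead of a plain counter):

#include <stdlib.h>
#include <libavutil/buffer.h>

typedef struct { int refcount; /* plus a reference to the external frame */ } my_handle_t;

/* Invoked by libavcodec once no AVBufferRef to the wrapped plane remains. */
static void my_release( void *opaque, uint8_t *data )
{
    my_handle_t *handle = opaque;
    (void)data;                /* the memory is owned by the external frame */
    if( --handle->refcount == 0 )
        free( handle );        /* last plane released: free the handler */
}

/* Wrap one externally owned plane; returns NULL on failure. */
static AVBufferRef *wrap_plane( my_handle_t *handle, uint8_t *pixels, int size )
{
    AVBufferRef *buf = av_buffer_create( pixels, size, my_release, handle, 0 );
    if( buf )
        handle->refcount++;    /* one reference per wrapped plane */
    return buf;
}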
Code Example #5
File: video.c  Project: 839687571/vlc-2.2.1.32-2013
static picture_t *lavc_dr_GetFrame(struct AVCodecContext *ctx,
                                   AVFrame *frame, int flags)
{
    decoder_t *dec = (decoder_t *)ctx->opaque;
    decoder_sys_t *sys = dec->p_sys;

    if (GetVlcChroma(&dec->fmt_out.video, ctx->pix_fmt) != VLC_SUCCESS)
        return NULL;
    dec->fmt_out.i_codec = dec->fmt_out.video.i_chroma;
    if (ctx->pix_fmt == PIX_FMT_PAL8)
        return NULL;

    int width = frame->width;
    int height = frame->height;
    int aligns[AV_NUM_DATA_POINTERS];

    avcodec_align_dimensions2(ctx, &width, &height, aligns);

    picture_t *pic = ffmpeg_NewPictBuf(dec, ctx);
    if (pic == NULL)
        return NULL;

    /* Check that the picture is suitable for libavcodec */
    if (pic->p[0].i_pitch < width * pic->p[0].i_pixel_pitch)
    {
        if (sys->i_direct_rendering_used != 0)
            msg_Dbg(dec, "plane 0: pitch too small (%d/%d*%d)",
                    pic->p[0].i_pitch, width, pic->p[0].i_pixel_pitch);
        goto no_dr;
    }

    if (pic->p[0].i_lines < height)
    {
        if (sys->i_direct_rendering_used != 0)
            msg_Dbg(dec, "plane 0: lines too few (%d/%d)",
                    pic->p[0].i_lines, height);
        goto no_dr;
    }

    for (int i = 0; i < pic->i_planes; i++)
    {
        if (pic->p[i].i_pitch % aligns[i])
        {
            if (sys->i_direct_rendering_used != 0)
                msg_Dbg(dec, "plane %d: pitch not aligned (%d%%%d)",
                        i, pic->p[i].i_pitch, aligns[i]);
            goto no_dr;
        }
        if (((uintptr_t)pic->p[i].p_pixels) % aligns[i])
        {
            if (sys->i_direct_rendering_used != 0)
                msg_Warn(dec, "plane %d not aligned", i);
            goto no_dr;
        }
    }

    /* Allocate buffer references */
    for (int i = 0; i < pic->i_planes; i++)
    {
        lavc_pic_ref_t *ref = malloc(sizeof (*ref));
        if (ref == NULL)
            goto error;
        ref->decoder = dec;
        ref->picture = pic;
        decoder_LinkPicture(dec, pic);

        uint8_t *data = pic->p[i].p_pixels;
        int size = pic->p[i].i_pitch * pic->p[i].i_lines;

        frame->buf[i] = av_buffer_create(data, size, lavc_dr_ReleaseFrame,
                                         ref, 0);
        if (unlikely(frame->buf[i] == NULL))
        {
            lavc_dr_ReleaseFrame(ref, data);
            goto error;
        }
    }
    decoder_UnlinkPicture(dec, pic);
    (void) flags;
    return pic;
error:
    for (unsigned i = 0; frame->buf[i] != NULL; i++)
        av_buffer_unref(&frame->buf[i]);
no_dr:
    decoder_DeletePicture(dec, pic);
    return NULL;
}
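The per-plane checks above boil down to two conditions: the pitch must be a multiple of the alignment that avcodec_align_dimensions2() reported for that plane, and the base pointer must itself be aligned. A compact helper capturing both (hypothetical name, a sketch; the align > 0 guard is defensive and not in the VLC code):

#include <stdbool.h>
#include <stdint.h>

/* True if a plane with the given base pointer and pitch satisfies the
 * alignment reported by avcodec_align_dimensions2() for that plane. */
static bool plane_is_aligned(const uint8_t *pixels, int pitch, int align)
{
    return align > 0
        && pitch % align == 0
        && (uintptr_t)pixels % align == 0;
}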
Code Example #6
File: video.c  Project: 839687571/vlc-2.2.1.32-2013
/* Returns a new picture buffer */
static inline picture_t *ffmpeg_NewPictBuf( decoder_t *p_dec,
                                            AVCodecContext *p_context )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    int width = p_context->coded_width;
    int height = p_context->coded_height;

    if( p_sys->p_va == NULL )
    {
        int aligns[AV_NUM_DATA_POINTERS];

        avcodec_align_dimensions2(p_context, &width, &height, aligns);
    }

    if( width == 0 || height == 0 || width > 8192 || height > 8192 )
    {
        msg_Err( p_dec, "Invalid frame size %dx%d.", width, height );
        return NULL; /* invalid display size */
    }
    p_dec->fmt_out.video.i_width = width;
    p_dec->fmt_out.video.i_height = height;

    if( width != p_context->width || height != p_context->height )
    {
        p_dec->fmt_out.video.i_visible_width = p_context->width;
        p_dec->fmt_out.video.i_visible_height = p_context->height;
    }
    else
    {
        p_dec->fmt_out.video.i_visible_width = width;
        p_dec->fmt_out.video.i_visible_height = height;
    }

    if( !p_sys->p_va && GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) )
    {
        /* we are doomed, but not really, because most codecs set their pix_fmt
         * much later
         * FIXME does it make sense here ? */
        p_dec->fmt_out.video.i_chroma = VLC_CODEC_I420;
    }
    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    /* If an aspect-ratio was specified in the input format then force it */
    if( p_dec->fmt_in.video.i_sar_num > 0 && p_dec->fmt_in.video.i_sar_den > 0 )
    {
        p_dec->fmt_out.video.i_sar_num = p_dec->fmt_in.video.i_sar_num;
        p_dec->fmt_out.video.i_sar_den = p_dec->fmt_in.video.i_sar_den;
    }
    else
    {
        p_dec->fmt_out.video.i_sar_num = p_context->sample_aspect_ratio.num;
        p_dec->fmt_out.video.i_sar_den = p_context->sample_aspect_ratio.den;

        if( !p_dec->fmt_out.video.i_sar_num || !p_dec->fmt_out.video.i_sar_den )
        {
            p_dec->fmt_out.video.i_sar_num = 1;
            p_dec->fmt_out.video.i_sar_den = 1;
        }
    }

    if( p_dec->fmt_in.video.i_frame_rate > 0 &&
        p_dec->fmt_in.video.i_frame_rate_base > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate =
            p_dec->fmt_in.video.i_frame_rate;
        p_dec->fmt_out.video.i_frame_rate_base =
            p_dec->fmt_in.video.i_frame_rate_base;
    }
    else if( p_context->time_base.num > 0 && p_context->time_base.den > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate = p_context->time_base.den;
        p_dec->fmt_out.video.i_frame_rate_base = p_context->time_base.num * __MAX( p_context->ticks_per_frame, 1 );
    }

    return decoder_NewPicture( p_dec );
}
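As a worked example of the time_base fallback above: a 25 fps H.264 stream typically reports time_base = 1/50 with ticks_per_frame = 2, so the derived rate is 50 / (1 × 2) = 25 fps.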
Code Example #7
File: video.c  Project: bobwxb/vlc
static int lavc_dr_GetFrame(struct AVCodecContext *ctx, AVFrame *frame,
                            picture_t *pic)
{
    decoder_t *dec = (decoder_t *)ctx->opaque;
    decoder_sys_t *sys = dec->p_sys;

    if (ctx->pix_fmt == PIX_FMT_PAL8)
        goto error;

    int width = frame->width;
    int height = frame->height;
    int aligns[AV_NUM_DATA_POINTERS];

    avcodec_align_dimensions2(ctx, &width, &height, aligns);

    /* Check that the picture is suitable for libavcodec */
    assert(pic->p[0].i_pitch >= width * pic->p[0].i_pixel_pitch);
    assert(pic->p[0].i_lines >= height);

    for (int i = 0; i < pic->i_planes; i++)
    {
        if (pic->p[i].i_pitch % aligns[i])
        {
            if (!atomic_exchange(&sys->b_dr_failure, true))
                msg_Warn(dec, "plane %d: pitch not aligned (%d%%%d): %s",
                         i, pic->p[i].i_pitch, aligns[i],
                         "disabling direct rendering");
            goto error;
        }
        if (((uintptr_t)pic->p[i].p_pixels) % aligns[i])
        {
            if (!atomic_exchange(&sys->b_dr_failure, true))
                msg_Warn(dec, "plane %d not aligned: %s", i,
                         "disabling direct rendering");
            goto error;
        }
    }

    /* Allocate buffer references and initialize planes */
    assert(pic->i_planes < PICTURE_PLANE_MAX);
    static_assert(PICTURE_PLANE_MAX <= AV_NUM_DATA_POINTERS, "Oops!");

    for (int i = 0; i < pic->i_planes; i++)
    {
        uint8_t *data = pic->p[i].p_pixels;
        int size = pic->p[i].i_pitch * pic->p[i].i_lines;

        frame->data[i] = data;
        frame->linesize[i] = pic->p[i].i_pitch;
        frame->buf[i] = av_buffer_create(data, size, lavc_ReleaseFrame,
                                         pic, 0);
        if (unlikely(frame->buf[i] == NULL))
        {
            while (i > 0)
                av_buffer_unref(&frame->buf[--i]);
            goto error;
        }
        picture_Hold(pic);
    }

    frame->opaque = pic;
    /* The loop above held one reference to the picture for each plane. */
    picture_Release(pic);
    return 0;
error:
    picture_Release(pic);
    return -1;
}
Code Example #8
File: video.c  Project: bobwxb/vlc
/**
 * Sets the decoder output format.
 */
static int lavc_UpdateVideoFormat( decoder_t *p_dec, AVCodecContext *p_context,
                                   bool hwaccel )
{
    int width = p_context->coded_width;
    int height = p_context->coded_height;

    if( !hwaccel )
    {
        int aligns[AV_NUM_DATA_POINTERS];

        if (GetVlcChroma(&p_dec->fmt_out.video, p_context->pix_fmt))
            return -1;

        avcodec_align_dimensions2(p_context, &width, &height, aligns);
    }
    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    if( width == 0 || height == 0 || width > 8192 || height > 8192 )
    {
        msg_Err( p_dec, "Invalid frame size %dx%d.", width, height );
        return -1; /* invalid display size */
    }
    p_dec->fmt_out.video.i_width = width;
    p_dec->fmt_out.video.i_height = height;

    if( width != p_context->width || height != p_context->height )
    {
        p_dec->fmt_out.video.i_visible_width = p_context->width;
        p_dec->fmt_out.video.i_visible_height = p_context->height;
    }
    else
    {
        p_dec->fmt_out.video.i_visible_width = width;
        p_dec->fmt_out.video.i_visible_height = height;
    }

    /* If an aspect-ratio was specified in the input format then force it */
    if( p_dec->fmt_in.video.i_sar_num > 0 && p_dec->fmt_in.video.i_sar_den > 0 )
    {
        p_dec->fmt_out.video.i_sar_num = p_dec->fmt_in.video.i_sar_num;
        p_dec->fmt_out.video.i_sar_den = p_dec->fmt_in.video.i_sar_den;
    }
    else
    {
        p_dec->fmt_out.video.i_sar_num = p_context->sample_aspect_ratio.num;
        p_dec->fmt_out.video.i_sar_den = p_context->sample_aspect_ratio.den;

        if( !p_dec->fmt_out.video.i_sar_num || !p_dec->fmt_out.video.i_sar_den )
        {
            p_dec->fmt_out.video.i_sar_num = 1;
            p_dec->fmt_out.video.i_sar_den = 1;
        }
    }

    if( p_dec->fmt_in.video.i_frame_rate > 0 &&
        p_dec->fmt_in.video.i_frame_rate_base > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate =
            p_dec->fmt_in.video.i_frame_rate;
        p_dec->fmt_out.video.i_frame_rate_base =
            p_dec->fmt_in.video.i_frame_rate_base;
    }
    else if( p_context->time_base.num > 0 && p_context->time_base.den > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate = p_context->time_base.den;
        p_dec->fmt_out.video.i_frame_rate_base = p_context->time_base.num * __MAX( p_context->ticks_per_frame, 1 );
    }
    return decoder_UpdateVideoFormat( p_dec );
}
Code Example #9
File: decode.c  Project: Rodeo314/tim-libav
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int i, ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO: {
        uint8_t *data[4];
        int linesize[4];
        int size[4] = { 0 };
        int w = frame->width;
        int h = frame->height;
        int tmpsize, unaligned;

        if (pool->format == frame->format &&
            pool->width == frame->width && pool->height == frame->height)
            return 0;

        avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);

        do {
            // NOTE: do not align linesizes individually, this breaks e.g. assumptions
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);

            unaligned = 0;
            for (i = 0; i < 4; i++)
                unaligned |= linesize[i] % pool->stride_align[i];
        } while (unaligned);

        tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
                                         NULL, linesize);
        if (tmpsize < 0)
            return -1;

        for (i = 0; i < 3 && data[i + 1]; i++)
            size[i] = data[i + 1] - data[i];
        size[i] = tmpsize - (data[i] - data[0]);

        for (i = 0; i < 4; i++) {
            av_buffer_pool_uninit(&pool->pools[i]);
            pool->linesize[i] = linesize[i];
            if (size[i]) {
                pool->pools[i] = av_buffer_pool_init(size[i] + 16, NULL);
                if (!pool->pools[i]) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
        }
        pool->format = frame->format;
        pool->width  = frame->width;
        pool->height = frame->height;

        break;
        }
    case AVMEDIA_TYPE_AUDIO: {
        int ch     = av_get_channel_layout_nb_channels(frame->channel_layout);
        int planar = av_sample_fmt_is_planar(frame->format);
        int planes = planar ? ch : 1;

        if (pool->format == frame->format && pool->planes == planes &&
            pool->channels == ch && frame->nb_samples == pool->samples)
            return 0;

        av_buffer_pool_uninit(&pool->pools[0]);
        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
                                         frame->nb_samples, frame->format, 0);
        if (ret < 0)
            goto fail;

        pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
        if (!pool->pools[0]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pool->format     = frame->format;
        pool->planes     = planes;
        pool->channels   = ch;
        pool->samples = frame->nb_samples;
        break;
        }
    default: av_assert0(0);
    }
    return 0;
fail:
    for (i = 0; i < 4; i++)
        av_buffer_pool_uninit(&pool->pools[i]);
    pool->format = -1;
    pool->planes = pool->channels = pool->samples = 0;
    pool->width  = pool->height = 0;
    return ret;
}
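The retry bump w += w & ~(w - 1) above adds the lowest set bit of w, which clears that bit and carries into the next one, so each iteration makes w divisible by a larger power of two. A standalone demonstration (not part of the decoder):

#include <stdio.h>

int main(void)
{
    int w = 720;                      /* 720 = 0b1011010000; lowest set bit is 16 */
    printf("%d\n", w & ~(w - 1));     /* prints 16 */
    w += w & ~(w - 1);
    printf("%d\n", w);                /* prints 736, which is divisible by 32 */
    return 0;
}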
Code Example #10
static int as_video_get_buffer
(
    AVCodecContext *ctx,
    AVFrame        *av_frame,
    int             flags
)
{
    lw_video_output_handler_t *lw_vohp = (lw_video_output_handler_t *)ctx->opaque;
    as_video_output_handler_t *as_vohp = (as_video_output_handler_t *)lw_vohp->private_handler;
    lw_video_scaler_handler_t *vshp    = &lw_vohp->scaler;
    enum AVPixelFormat pix_fmt = ctx->pix_fmt;
    avoid_yuv_scale_conversion( &pix_fmt );
    av_frame->format = pix_fmt; /* Don't use AV_PIX_FMT_YUVJ*. */
    if( vshp->output_pixel_format != pix_fmt
     || !as_check_dr_available( ctx, pix_fmt, as_vohp->stacked_format ) )
        return avcodec_default_get_buffer2( ctx, av_frame, 0 );
    /* New AviSynth video frame buffer. */
    as_video_buffer_handler_t *as_vbhp = new as_video_buffer_handler_t;
    if( !as_vbhp )
    {
        av_frame_unref( av_frame );
        return AVERROR( ENOMEM );
    }
    av_frame->opaque = as_vbhp;
    as_vbhp->as_frame_buffer = as_vohp->env->NewVideoFrame( *as_vohp->vi, 32 );
    int aligned_width  = ctx->width << (as_vohp->bitdepth_minus_8 ? 1 : 0);
    int aligned_height = ctx->height;
    avcodec_align_dimensions2( ctx, &aligned_width, &aligned_height, av_frame->linesize );
    if( lw_vohp->output_width != aligned_width || lw_vohp->output_height != aligned_height )
        as_vohp->make_black_background( as_vbhp->as_frame_buffer, as_vohp->bitdepth_minus_8 );
    /* Create frame buffers for the decoder.
     * The callback as_video_release_buffer_handler() is invoked once no reference to the video buffer handler remains.
     * The callback as_video_unref_buffer_handler() decrements the reference counter by 1. */
    memset( av_frame->buf,      0, sizeof(av_frame->buf) );
    memset( av_frame->data,     0, sizeof(av_frame->data) );
    memset( av_frame->linesize, 0, sizeof(av_frame->linesize) );
    AVBufferRef *as_buffer_handler = av_buffer_create( NULL, 0, as_video_release_buffer_handler, as_vbhp, 0 );
    if( !as_buffer_handler )
    {
        delete as_vbhp;
        av_frame_unref( av_frame );
        return AVERROR( ENOMEM );
    }
#define CREATE_PLANE_BUFFER( PLANE, PLANE_ID )                                                      \
    do                                                                                              \
    {                                                                                               \
        AVBufferRef *as_buffer_ref = av_buffer_ref( as_buffer_handler );                            \
        if( !as_buffer_ref )                                                                        \
        {                                                                                           \
            av_buffer_unref( &as_buffer_handler );                                                  \
            goto fail;                                                                              \
        }                                                                                           \
        av_frame->linesize[PLANE] = as_vbhp->as_frame_buffer->GetPitch( PLANE_ID );                 \
        int as_plane_size = as_vbhp->as_frame_buffer->GetHeight( PLANE_ID )                         \
                          * av_frame->linesize[PLANE];                                              \
        av_frame->buf[PLANE] = av_buffer_create( as_vbhp->as_frame_buffer->GetWritePtr( PLANE_ID ), \
                                                 as_plane_size,                                     \
                                                 as_video_unref_buffer_handler,                     \
                                                 as_buffer_ref,                                     \
                                                 0 );                                               \
        if( !av_frame->buf[PLANE] )                                                                 \
            goto fail;                                                                              \
        av_frame->data[PLANE] = av_frame->buf[PLANE]->data;                                         \
    } while( 0 )
    if( as_vohp->vi->pixel_type & VideoInfo::CS_INTERLEAVED )
        CREATE_PLANE_BUFFER( 0, );
    else
        for( int i = 0; i < 3; i++ )
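            /* ... (the loop body and the remainder of this function are truncated in the original listing) */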
Code Example #11
int
FFmpegH264Decoder<LIBAV_VER>::AllocateYUV420PVideoBuffer(
  AVCodecContext* aCodecContext, AVFrame* aFrame)
{
  bool needAlign = aCodecContext->codec->capabilities & CODEC_CAP_DR1;
  int edgeWidth =  needAlign ? avcodec_get_edge_width() : 0;
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  // Make sure the decodeWidth is a multiple of 32, so a UV plane stride will be
  // a multiple of 16. FFmpeg uses SSE2 accelerated code to copy a frame line by
  // line.
  decodeWidth = (decodeWidth + 31) & ~31;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  if (needAlign) {
    // Align width and height to account for CODEC_FLAG_EMU_EDGE.
    int stride_align[AV_NUM_DATA_POINTERS];
    avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                              stride_align);
  }

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = static_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize + 64);
  // FFmpeg requires a 16/32 bytes-aligned buffer, align it on 64 to be safe
  buffer = reinterpret_cast<uint8_t*>((reinterpret_cast<uintptr_t>(buffer) + 63) & ~63);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  aFrame->opaque = static_cast<void*>(image.forget().take());

  return 0;
}
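The pointer fix-up above uses the standard power-of-two round-up idiom, (p + 63) & ~63. A tiny standalone illustration (a sketch, not part of the decoder):

#include <assert.h>
#include <stdint.h>

/* Round p up to the next multiple of align (align must be a power of two). */
static uintptr_t round_up(uintptr_t p, uintptr_t align)
{
    return (p + align - 1) & ~(align - 1);
}

int main(void)
{
    assert(round_up(100, 64) == 128);
    assert(round_up(128, 64) == 128); /* already-aligned values are unchanged */
    return 0;
}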
Code Example #12
int
FFmpegH264Decoder::AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                              AVFrame* aFrame)
{
  // Older versions of ffmpeg require that edges be allocated *around* the
  // actual image.
  int edgeWidth = avcodec_get_edge_width();
  int decodeWidth = aCodecContext->width + edgeWidth * 2;
  int decodeHeight = aCodecContext->height + edgeWidth * 2;

  // Align width and height to possibly speed up decode.
  int stride_align[AV_NUM_DATA_POINTERS];
  avcodec_align_dimensions2(aCodecContext, &decodeWidth, &decodeHeight,
                            stride_align);

  // Get strides for each plane.
  av_image_fill_linesizes(aFrame->linesize, aCodecContext->pix_fmt,
                          decodeWidth);

  // Let FFmpeg set up its YUV plane pointers and tell us how much memory we
  // need.
  // Note that we're passing |nullptr| here as the base address as we haven't
  // allocated our image yet. We will adjust |aFrame->data| below.
  size_t allocSize =
    av_image_fill_pointers(aFrame->data, aCodecContext->pix_fmt, decodeHeight,
                           nullptr /* base address */, aFrame->linesize);

  nsRefPtr<Image> image =
    mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
  PlanarYCbCrImage* ycbcr = reinterpret_cast<PlanarYCbCrImage*>(image.get());
  uint8_t* buffer = ycbcr->AllocateAndGetNewBuffer(allocSize);

  if (!buffer) {
    NS_WARNING("Failed to allocate buffer for FFmpeg video decoding");
    return -1;
  }

  // Now that we've allocated our image, we can add its address to the offsets
  // set by |av_image_fill_pointers| above. We also have to add |edgeWidth|
  // pixels of padding here.
  for (uint32_t i = 0; i < AV_NUM_DATA_POINTERS; i++) {
    // The C planes are half the resolution of the Y plane, so we need to halve
    // the edge width here.
    uint32_t planeEdgeWidth = edgeWidth / (i ? 2 : 1);

    // Add buffer offset, plus a horizontal bar |edgeWidth| pixels high at the
    // top of the frame, plus |edgeWidth| pixels from the left of the frame.
    aFrame->data[i] += reinterpret_cast<ptrdiff_t>(
      buffer + planeEdgeWidth * aFrame->linesize[i] + planeEdgeWidth);
  }

  // Unused, but needs to be non-zero to keep ffmpeg happy.
  aFrame->type = GECKO_FRAME_TYPE;

  aFrame->extended_data = aFrame->data;
  aFrame->width = aCodecContext->width;
  aFrame->height = aCodecContext->height;

  mozilla::layers::PlanarYCbCrData data;
  PlanarYCbCrDataFromAVFrame(data, aFrame);
  ycbcr->SetDataNoCopy(data);

  mCurrentImage.swap(image);

  return 0;
}