Example #1
File: subpicture.c  Project: CityFire/vlc
subpicture_region_t *subpicture_region_New( const video_format_t *p_fmt )
{
    subpicture_region_t *p_region = calloc( 1, sizeof(*p_region ) );
    if( !p_region )
        return NULL;

    p_region->fmt = *p_fmt;
    p_region->fmt.p_palette = NULL;
    if( p_fmt->i_chroma == VLC_CODEC_YUVP )
    {
        p_region->fmt.p_palette = calloc( 1, sizeof(*p_region->fmt.p_palette) );
        if( p_fmt->p_palette )
            *p_region->fmt.p_palette = *p_fmt->p_palette;
    }
    p_region->i_alpha = 0xff;

    if( p_fmt->i_chroma == VLC_CODEC_TEXT )
        return p_region;

    p_region->p_picture = picture_NewFromFormat( p_fmt );
    if( !p_region->p_picture )
    {
        free( p_region->fmt.p_palette );
        free( p_region );
        return NULL;
    }

    return p_region;
}
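For orientation, here is a minimal caller-side sketch of how such a region is typically created and disposed of (assuming the matching subpicture_region_Delete helper and the video_format_Init/Clean calls seen in the later examples; error handling trimmed):

/* Hypothetical caller: build a text region, use it, then release it. */
video_format_t fmt;
video_format_Init( &fmt, VLC_CODEC_TEXT );      /* text chroma: no picture is allocated */

subpicture_region_t *p_region = subpicture_region_New( &fmt );
if( p_region != NULL )
{
    /* ... fill p_region->psz_text, alignment, alpha ... */
    subpicture_region_Delete( p_region );       /* releases the palette and picture too */
}
video_format_Clean( &fmt );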
Example #2
static picture_t *video_new_buffer( vlc_object_t *p_this,
                                    decoder_owner_sys_t *p_sys,
                                    es_format_t *fmt_out )
{
    VLC_UNUSED(p_this);
    if( fmt_out->video.i_width != p_sys->video.i_width ||
        fmt_out->video.i_height != p_sys->video.i_height ||
        fmt_out->video.i_chroma != p_sys->video.i_chroma ||
        (int64_t)fmt_out->video.i_sar_num * p_sys->video.i_sar_den !=
        (int64_t)fmt_out->video.i_sar_den * p_sys->video.i_sar_num )
    {
        vlc_ureduce( &fmt_out->video.i_sar_num,
                     &fmt_out->video.i_sar_den,
                     fmt_out->video.i_sar_num,
                     fmt_out->video.i_sar_den, 0 );

        if( !fmt_out->video.i_visible_width ||
            !fmt_out->video.i_visible_height )
        {
            fmt_out->video.i_visible_width = fmt_out->video.i_width;
            fmt_out->video.i_visible_height = fmt_out->video.i_height;
        }

        fmt_out->video.i_chroma = fmt_out->i_codec;
        p_sys->video = fmt_out->video;
    }

    /* */
    fmt_out->video.i_chroma = fmt_out->i_codec;

    return picture_NewFromFormat( &fmt_out->video );
}
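Note that the format comparison above tests sample-aspect-ratio equality by cross-multiplying in 64-bit arithmetic instead of dividing, which avoids both rounding error and 32-bit overflow. The same test in isolation (a standalone sketch, not part of the VLC API):

#include <stdbool.h>
#include <stdint.h>

/* True when a/b and c/d denote the same ratio (denominators assumed non-zero). */
static bool sar_equal( unsigned a, unsigned b, unsigned c, unsigned d )
{
    return (int64_t)a * d == (int64_t)c * b;
}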
Example #3
File: x11.c  Project: maniacs-m/vlc
/**
 * Return a direct buffer
 */
static picture_pool_t *Pool (vout_display_t *vd, unsigned requested_count)
{
    vout_display_sys_t *sys = vd->sys;
    (void)requested_count;

    if (sys->pool)
        return sys->pool;

    vout_display_place_t place;

    vout_display_PlacePicture (&place, &vd->source, vd->cfg, false);

    /* */
    const uint32_t values[] = { place.x, place.y, place.width, place.height };
    xcb_configure_window (sys->conn, sys->window,
                          XCB_CONFIG_WINDOW_X | XCB_CONFIG_WINDOW_Y |
                          XCB_CONFIG_WINDOW_WIDTH | XCB_CONFIG_WINDOW_HEIGHT,
                          values);

    picture_t *pic = picture_NewFromFormat (&vd->fmt);
    if (!pic)
        return NULL;

    assert (pic->i_planes == 1);

    picture_resource_t res = {
        .p = {
            [0] = {
                .i_lines = pic->p->i_lines,
                .i_pitch = pic->p->i_pitch,
            },
        },
    };
    picture_Release (pic);

    unsigned count;
    picture_t *pic_array[MAX_PICTURES];
    const size_t size = res.p->i_pitch * res.p->i_lines;
    for (count = 0; count < MAX_PICTURES; count++)
    {
        xcb_shm_seg_t seg = (sys->seg_base != 0) ? (sys->seg_base + count) : 0;

        if (XCB_picture_Alloc (vd, &res, size, sys->conn, seg))
            break;
        pic_array[count] = XCB_picture_NewFromResource (&vd->fmt, &res,
                                                        sys->conn);
        if (unlikely(pic_array[count] == NULL))
            break;
    }
    xcb_flush (sys->conn);

    if (count == 0)
        return NULL;

    sys->pool = picture_pool_New (count, pic_array);
    if (unlikely(sys->pool == NULL))
        while (count > 0)
            picture_Release(pic_array[--count]);
    return sys->pool;
}
Example #4
/*****************************************************************************
 * Create
 *****************************************************************************/
static int Create( vlc_object_t *p_this )
{
    filter_t *p_filter = (filter_t *)p_this;

    /* Allocate structure */
    p_filter->p_sys = malloc( sizeof( filter_sys_t ) );
    if( p_filter->p_sys == NULL )
        return VLC_ENOMEM;

    p_filter->p_sys->p_tmp = picture_NewFromFormat( &p_filter->fmt_in.video );
    if( !p_filter->p_sys->p_tmp )
    {
        free( p_filter->p_sys );
        return VLC_ENOMEM;
    }
    p_filter->p_sys->b_first = true;

    p_filter->pf_video_filter = Filter;

    config_ChainParse( p_filter, FILTER_PREFIX, ppsz_filter_options,
                       p_filter->p_cfg );

    p_filter->p_sys->i_factor =
        var_CreateGetIntegerCommand( p_filter, FILTER_PREFIX "factor" );
    vlc_spin_init( &p_filter->p_sys->lock );
    var_AddCallback( p_filter, FILTER_PREFIX "factor",
                     MotionBlurCallback, p_filter->p_sys );


    return VLC_SUCCESS;
}
Example #5
subpicture_region_t *subpicture_region_New( const video_format_t *p_fmt )
{
    subpicture_region_t *p_region = (subpicture_region_t *)calloc( 1, sizeof(*p_region ) );			// sunqueen modify
    if( !p_region )
        return NULL;

    p_region->fmt = *p_fmt;
    p_region->fmt.p_palette = NULL;
    if( p_fmt->i_chroma == VLC_CODEC_YUVP )
    {
        p_region->fmt.p_palette = (video_palette_t *)calloc( 1, sizeof(*p_region->fmt.p_palette) );			// sunqueen modify
        if( p_fmt->p_palette )
            *p_region->fmt.p_palette = *p_fmt->p_palette;
    }
    p_region->i_alpha = 0xff;
    p_region->p_next = NULL;
    p_region->p_private = NULL;
    p_region->psz_text = NULL;
    p_region->p_style = NULL;
    p_region->p_picture = NULL;

    if( p_fmt->i_chroma == VLC_CODEC_TEXT )
        return p_region;

    p_region->p_picture = picture_NewFromFormat( p_fmt );
    if( !p_region->p_picture )
    {
        free( p_region->fmt.p_palette );
        free( p_region );
        return NULL;
    }

    return p_region;
}
Example #6
picture_pool_t *picture_pool_NewFromFormat(const video_format_t *fmt, int picture_count)
{
//    picture_t *picture[picture_count];
	picture_t **picture = (picture_t **)calloc(picture_count, sizeof(picture_t *));			// sunqueen modify

    for (int i = 0; i < picture_count; i++) {
        picture[i] = picture_NewFromFormat(fmt);
        if (!picture[i])
            goto error;
    }
    picture_pool_t *pool = picture_pool_New(picture_count, picture);
    if (!pool)
        goto error;

	free(picture);			// sunqueen add
    return pool;

error:
    for (int i = 0; i < picture_count; i++) {
        if (!picture[i])
            break;
        picture_Release(picture[i]);
    }
	free(picture);			// sunqueen add
    return NULL;
}
Example #7
File: motionblur.c  Project: 0xheart0/vlc
/*****************************************************************************
 * Create
 *****************************************************************************/
static int Create( vlc_object_t *p_this )
{
    filter_t *p_filter = (filter_t *)p_this;

    const vlc_chroma_description_t *p_chroma =
        vlc_fourcc_GetChromaDescription( p_filter->fmt_in.video.i_chroma );
    if( p_chroma == NULL || p_chroma->plane_count == 0 )
        return VLC_EGENERIC;

    /* Allocate structure */
    p_filter->p_sys = malloc( sizeof( filter_sys_t ) );
    if( p_filter->p_sys == NULL )
        return VLC_ENOMEM;

    p_filter->p_sys->p_tmp = picture_NewFromFormat( &p_filter->fmt_in.video );
    if( !p_filter->p_sys->p_tmp )
    {
        free( p_filter->p_sys );
        return VLC_ENOMEM;
    }
    p_filter->p_sys->b_first = true;

    p_filter->pf_video_filter = Filter;

    config_ChainParse( p_filter, FILTER_PREFIX, ppsz_filter_options,
                       p_filter->p_cfg );

    atomic_init( &p_filter->p_sys->i_factor,
             var_CreateGetIntegerCommand( p_filter, FILTER_PREFIX "factor" ) );
    var_AddCallback( p_filter, FILTER_PREFIX "factor",
                     MotionBlurCallback, p_filter->p_sys );


    return VLC_SUCCESS;
}
Example #8
static picture_t *Deinterlace(filter_t *filter, picture_t *src)
{
    filter_sys_t *sys = filter->p_sys;
    mtime_t last_pts = sys->last_pts;

    sys->last_pts = src->date;

    vlc_vdp_video_field_t *f1 = src->context;
    if (unlikely(f1 == NULL))
        return src;
    if (f1->structure != VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME)
        return src; /* cannot deinterlace twice */

#ifdef VOUT_CORE_GETS_A_CLUE
    picture_t *dst = filter_NewPicture(filter);
#else
    picture_t *dst = picture_NewFromFormat(&src->format);
#endif
    if (dst == NULL)
        return src; /* cannot deinterlace without copying fields */

    vlc_vdp_video_field_t *f2 = vlc_vdp_video_copy(f1); // shallow copy
    if (unlikely(f2 == NULL))
    {
        picture_Release(dst);
        return src;
    }

    picture_CopyProperties(dst, src);
    dst->context = f2;

    if (last_pts != VLC_TS_INVALID)
        dst->date = (3 * src->date - last_pts) / 2;
    else
    if (filter->fmt_in.video.i_frame_rate != 0)
        dst->date = src->date + ((filter->fmt_in.video.i_frame_rate_base
                            * CLOCK_FREQ) / filter->fmt_in.video.i_frame_rate);
    dst->b_top_field_first = !src->b_top_field_first;
    dst->i_nb_fields = 1;
    src->i_nb_fields = 1;

    assert(src->p_next == NULL);
    src->p_next = dst;

    if (src->b_progressive || src->b_top_field_first)
    {
        f1->structure = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD;
        f2->structure = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD;
    }
    else
    {
        f1->structure = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_BOTTOM_FIELD;
        f2->structure = VDP_VIDEO_MIXER_PICTURE_STRUCTURE_TOP_FIELD;
    }

    src->b_progressive = true;
    dst->b_progressive = true;
    return src;
}
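The timestamp assigned to the synthesized field above, (3 * src->date - last_pts) / 2, is the midpoint between the current frame and the extrapolated next one. A quick numeric check with illustrative values:

/* With last_pts = 0 and src->date = 40000 (40 ms, i.e. 25 fps input):
 *   dst->date = (3 * 40000 - 0) / 2 = 60000,
 * which is exactly half a frame interval after the source picture, as
 * expected for the second field emitted by a rate-doubling deinterlacer. */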
Example #9
/* Internal video allocator functions */
static picture_t *VideoBufferNew( filter_t *p_filter )
{
    const video_format_t *p_fmt = &p_filter->fmt_out.video;

    picture_t *p_picture = picture_NewFromFormat( p_fmt );
    if( !p_picture )
        msg_Err( p_filter, "Failed to allocate picture" );
    return p_picture;
}
Example #10
File: x11.c  Project: paa/vlc
/**
 * Return a direct buffer
 */
static picture_pool_t *Pool (vout_display_t *vd, unsigned requested_count)
{
    vout_display_sys_t *p_sys = vd->sys;
    (void)requested_count;

    if (!p_sys->pool)
    {
        vout_display_place_t place;

        vout_display_PlacePicture (&place, &vd->source, vd->cfg, false);

        /* */
        const uint32_t values[] = { place.x, place.y, place.width, place.height };
        xcb_configure_window (p_sys->conn, p_sys->window,
                              XCB_CONFIG_WINDOW_X | XCB_CONFIG_WINDOW_Y |
                              XCB_CONFIG_WINDOW_WIDTH | XCB_CONFIG_WINDOW_HEIGHT,
                              values);

        picture_t *pic = picture_NewFromFormat (&vd->fmt);
        if (!pic)
            return NULL;

        assert (pic->i_planes == 1);
        memset (p_sys->resource, 0, sizeof(p_sys->resource));

        unsigned count;
        picture_t *pic_array[MAX_PICTURES];
        for (count = 0; count < MAX_PICTURES; count++)
        {
            picture_resource_t *res = &p_sys->resource[count];

            res->p->i_lines = pic->p->i_lines;
            res->p->i_pitch = pic->p->i_pitch;
            if (PictureResourceAlloc (vd, res, res->p->i_pitch * res->p->i_lines,
                                      p_sys->conn, p_sys->shm))
                break;
            pic_array[count] = picture_NewFromResource (&vd->fmt, res);
            if (!pic_array[count])
            {
                PictureResourceFree (res, p_sys->conn);
                memset (res, 0, sizeof(*res));
                break;
            }
        }
        picture_Release (pic);

        if (count == 0)
            return NULL;

        p_sys->pool = picture_pool_New (count, pic_array);
        /* TODO release picture resources if NULL */
        xcb_flush (p_sys->conn);
    }

    return p_sys->pool;
}
Example #11
inline static picture_t *video_new_buffer_filter( filter_t *p_filter )
{
    if( video_update_format( VLC_OBJECT( p_filter ),
                             (decoder_owner_sys_t *)p_filter->owner.sys,
                             &p_filter->fmt_out ) ) {
        msg_Warn( p_filter, "can't get output picture" );
        return NULL;
    }
    return picture_NewFromFormat( &p_filter->fmt_out.video );
}
Example #12
File: picture.c  Project: Annovae/vlc
picture_t *picture_New( vlc_fourcc_t i_chroma, int i_width, int i_height, int i_sar_num, int i_sar_den )
{
    video_format_t fmt;

    memset( &fmt, 0, sizeof(fmt) );
    video_format_Setup( &fmt, i_chroma, i_width, i_height,
                        i_width, i_height, i_sar_num, i_sar_den );

    return picture_NewFromFormat( &fmt );
}
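A caller-side sketch of this convenience wrapper (the numbers are illustrative; picture_Release is the matching disposal call used throughout the other examples):

/* Allocate a 640x480 I420 picture with square pixels, then release it. */
picture_t *p_pic = picture_New( VLC_CODEC_I420, 640, 480, 1, 1 );
if( p_pic != NULL )
{
    /* ... write into p_pic->p[i].p_pixels, p_pic->p[i].i_pitch ... */
    picture_Release( p_pic );
}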
Example #13
/**
 * @brief Thread reading frames from the subprocess
 */
static void* outputThread(void* userData)
{
    filter_t*    intf = (filter_t*)userData;
    filter_sys_t* sys = intf->p_sys;

    msg_Info(intf, "outputThread: enter");

    bool gotHeader = false;

    while (true)
    {
        if (!gotHeader)
        {
            video_format_Init(&sys->outFormat, VLC_CODEC_I420);
            if (1 != readY4mHeader(intf, &sys->outFormat, sys->stdout))
                break;
            gotHeader = true;
        }

        picture_t* outPic = picture_NewFromFormat(&sys->outFormat);
        if (!outPic)
            break;
        if(1 != readY4mFrame(intf, outPic, sys->stdout))
        {
            picture_Release(outPic);
            break;
        }

        //msg_Info(intf, "outputThread: read picture");

        // fixme: deinterlace filter does this, not sure if we need to;
        // y4m header contains this information
        outPic->b_progressive = true;
        outPic->i_nb_fields = 2;

        picture_fifo_Push(sys->outputFifo, outPic);

        vlc_cond_signal(&sys->outputCond);
    }

    msg_Info(intf, "outputThread: exit");

    sys->threadExit = true;
    vlc_cond_signal(&sys->outputCond);

    return userData;
}
Example #14
/**
 * It updates a picture data/pitches.
 */
int CommonUpdatePicture(picture_t *picture, picture_t **fallback,
                        uint8_t *data, unsigned pitch)
{
    if (fallback) {
        if (*fallback == NULL) {
            *fallback = picture_NewFromFormat(&picture->format);
            if (*fallback == NULL)
                return VLC_EGENERIC;
        }
        for (int n = 0; n < picture->i_planes; n++) {
            const plane_t *src = &(*fallback)->p[n];
            plane_t       *dst = &picture->p[n];
            dst->p_pixels = src->p_pixels;
            dst->i_pitch  = src->i_pitch;
            dst->i_lines  = src->i_lines;
        }
        return VLC_SUCCESS;
    }
    /* fill in buffer info in first plane */
    picture->p->p_pixels = data;
    picture->p->i_pitch  = pitch;
    picture->p->i_lines  = picture->format.i_height;

    /*  Fill chroma planes for planar YUV */
    if (picture->format.i_chroma == VLC_CODEC_I420 ||
            picture->format.i_chroma == VLC_CODEC_J420 ||
            picture->format.i_chroma == VLC_CODEC_YV12) {

        for (int n = 1; n < picture->i_planes; n++) {
            const plane_t *o = &picture->p[n-1];
            plane_t *p = &picture->p[n];

            p->p_pixels = o->p_pixels + o->i_lines * o->i_pitch;
            p->i_pitch  = pitch / 2;
            p->i_lines  = picture->format.i_height / 2;
        }
        /* The dx/d3d buffer is always allocated as YV12 */
        if (vlc_fourcc_AreUVPlanesSwapped(picture->format.i_chroma, VLC_CODEC_YV12)) {
            uint8_t *p_tmp = picture->p[1].p_pixels;
            picture->p[1].p_pixels = picture->p[2].p_pixels;
            picture->p[2].p_pixels = p_tmp;
        }
    }
    return VLC_SUCCESS;
}
Example #15
File: subpicture.c  Project: 0xheart0/vlc
subpicture_region_t *subpicture_region_New( const video_format_t *p_fmt )
{
    subpicture_region_t *p_region = calloc( 1, sizeof(*p_region ) );
    if( !p_region )
        return NULL;

    if ( p_fmt->i_chroma == VLC_CODEC_YUVP )
    {
        video_format_Copy( &p_region->fmt, p_fmt );
        /* YUVP should have a palette */
        if( p_region->fmt.p_palette == NULL )
        {
            p_region->fmt.p_palette = calloc( 1, sizeof(*p_region->fmt.p_palette) );
            if( p_region->fmt.p_palette == NULL )
            {
                free( p_region );
                return NULL;
            }
        }
    }
    else
    {
        p_region->fmt = *p_fmt;
        p_region->fmt.p_palette = NULL;
    }

    p_region->i_alpha = 0xff;

    if( p_fmt->i_chroma == VLC_CODEC_TEXT )
        return p_region;

    p_region->p_picture = picture_NewFromFormat( p_fmt );
    if( !p_region->p_picture )
    {
        video_format_Clean( &p_region->fmt );
        free( p_region );
        return NULL;
    }

    return p_region;
}
Example #16
File: video.c  Project: iamnpc/myfaplayer
static picture_t *video_new_buffer_decoder( decoder_t *p_dec )
{
    sout_stream_sys_t *p_ssys = p_dec->p_owner->p_sys;
    if( p_ssys->i_threads >= 1 )
    {
        int i_first_pic = p_ssys->i_first_pic;

        if( p_ssys->i_first_pic != p_ssys->i_last_pic )
        {
            /* Encoder still has stuff to encode, wait to clear-up the list */
            while( p_ssys->i_first_pic == i_first_pic )
            {
#warning THERE IS DEFINITELY A BUG! LOCKING IS INSUFFICIENT!
                msleep( 10000 );
                barrier ();
            }
        }
    }

    p_dec->fmt_out.video.i_chroma = p_dec->fmt_out.i_codec;
    return picture_NewFromFormat( &p_dec->fmt_out.video );
}
Example #17
void vout_snapshot_Set(vout_snapshot_t *snap,
                       const video_format_t *fmt,
                       const picture_t *picture)
{
    if (!fmt)
        fmt = &picture->format;

    vlc_mutex_lock(&snap->lock);
    while (snap->request_count > 0) {
        picture_t *dup = picture_NewFromFormat(fmt);
        if (!dup)
            break;

        picture_Copy(dup, picture);

        dup->p_next = snap->picture;
        snap->picture = dup;
        snap->request_count--;
    }
    vlc_cond_broadcast(&snap->wait);
    vlc_mutex_unlock(&snap->lock);
}
Example #18
picture_pool_t *picture_pool_NewFromFormat(const video_format_t *fmt,
                                           unsigned count)
{
    picture_t *picture[count ? count : 1];
    unsigned i;

    for (i = 0; i < count; i++) {
        picture[i] = picture_NewFromFormat(fmt);
        if (picture[i] == NULL)
            goto error;
    }

    picture_pool_t *pool = picture_pool_New(count, picture);
    if (!pool)
        goto error;

    return pool;

error:
    while (i > 0)
        picture_Release(picture[--i]);
    return NULL;
}
Example #19
picture_pool_t *picture_pool_NewFromFormat(const video_format_t *fmt, int picture_count)
{
    picture_t *picture[picture_count];

    for (int i = 0; i < picture_count; i++) {
        picture[i] = picture_NewFromFormat(fmt);
        if (!picture[i])
            goto error;
    }
    picture_pool_t *pool = picture_pool_New(picture_count, picture);
    if (!pool)
        goto error;

    return pool;

error:
    for (int i = 0; i < picture_count; i++) {
        if (!picture[i])
            break;
        picture_Release(picture[i]);
    }
    return NULL;
}
Example #20
static void SnapshotRatio( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = (filter_sys_t *)p_filter->p_sys;

    if( !p_pic ) return;

    if( p_sys->i_frames % p_sys->i_ratio != 0 )
    {
        p_sys->i_frames++;
        return;
    }
    p_sys->i_frames++;

    if( p_sys->scene.p_pic )
        picture_Release( p_sys->scene.p_pic );

    if( (p_sys->i_width <= 0) && (p_sys->i_height > 0) )
    {
        p_sys->i_width = (p_pic->format.i_width * p_sys->i_height) / p_pic->format.i_height;
    }
    else if( (p_sys->i_height <= 0) && (p_sys->i_width > 0) )
    {
        p_sys->i_height = (p_pic->format.i_height * p_sys->i_width) / p_pic->format.i_width;
    }
    else if( (p_sys->i_width <= 0) && (p_sys->i_height <= 0) )
    {
        p_sys->i_width = p_pic->format.i_width;
        p_sys->i_height = p_pic->format.i_height;
    }

    p_sys->scene.p_pic = picture_NewFromFormat( &p_pic->format );
    if( p_sys->scene.p_pic )
    {
        picture_Copy( p_sys->scene.p_pic, p_pic );
        SavePicture( p_filter, p_sys->scene.p_pic );
    }
}
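When only one target dimension is configured, the branch above derives the other one from the source picture's proportions so the snapshot keeps its aspect ratio. A quick numeric check with illustrative values:

/* Source picture 1920x1080, requested height 360, width unset (<= 0):
 *   i_width = (1920 * 360) / 1080 = 640,
 * i.e. the saved snapshot preserves the 16:9 aspect ratio. */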
Example #21
File: video.c  Project: iamnpc/myfaplayer
static picture_t *transcode_video_filter_buffer_new( filter_t *p_filter )
{
    p_filter->fmt_out.video.i_chroma = p_filter->fmt_out.i_codec;
    return picture_NewFromFormat( &p_filter->fmt_out.video );
}
Example #22
static int Open(vlc_object_t *p_this)
{
    vout_display_t *vd = (vout_display_t*)p_this;

    video_format_t fmt = vd->fmt;

    if (fmt.i_chroma != VLC_CODEC_ANDROID_OPAQUE)
        return VLC_EGENERIC;

    /* Allocate structure */
    vout_display_sys_t *sys = (struct vout_display_sys_t*)calloc(1, sizeof(*sys));
    if (!sys)
        return VLC_ENOMEM;

    sys->p_library = LoadNativeWindowAPI(&sys->native_window);
    if (!sys->p_library)
    {
        free(sys);
        msg_Err(vd, "Could not initialize NativeWindow API.");
        return VLC_EGENERIC;
    }
    sys->fmt = fmt;
    video_format_t subpicture_format = sys->fmt;
    subpicture_format.i_chroma = VLC_CODEC_RGBA;
    /* Create a RGBA picture for rendering subtitles. */
    sys->subtitles_picture = picture_NewFromFormat(&subpicture_format);

    /* Export the subpicture capability of this vout. */
    vd->info.subpicture_chromas = subpicture_chromas;

    int i_pictures = POOL_SIZE;
    picture_t** pictures = calloc(sizeof(*pictures), i_pictures);
    if (!pictures)
        goto error;
    for (int i = 0; i < i_pictures; i++)
    {
        picture_sys_t *p_picsys = calloc(1, sizeof(*p_picsys));
        if (unlikely(p_picsys == NULL))
            goto error;

        picture_resource_t resource = { .p_sys = p_picsys };
        picture_t *picture = picture_NewFromResource(&fmt, &resource);
        if (!picture)
        {
            free(p_picsys);
            goto error;
        }
        pictures[i] = picture;
    }

    /* Wrap it into a picture pool */
    picture_pool_configuration_t pool_cfg;
    memset(&pool_cfg, 0, sizeof(pool_cfg));
    pool_cfg.picture_count = i_pictures;
    pool_cfg.picture       = pictures;
    pool_cfg.lock          = LockSurface;
    pool_cfg.unlock        = UnlockSurface;

    sys->pool = picture_pool_NewExtended(&pool_cfg);
    if (!sys->pool)
    {
        for (int i = 0; i < i_pictures; i++)
            picture_Release(pictures[i]);
        goto error;
    }

    /* Setup vout_display */
    vd->sys     = sys;
    vd->fmt     = fmt;
    vd->pool    = Pool;
    vd->display = Display;
    vd->control = Control;
    vd->prepare = NULL;
    vd->manage  = Manage;

    /* Fix initial state */
    vout_display_SendEventFullscreen(vd, false);

    return VLC_SUCCESS;

error:
    free(pictures);
    Close(p_this);
    return VLC_ENOMEM;
}
Example #23
File: image.c  Project: BossKing/vlc
static picture_t *video_new_buffer( decoder_t *p_dec )
{
    return picture_NewFromFormat( &p_dec->fmt_out.video );
}
Example #24
File: video.c  Project: AsamQi/vlc
static picture_t *video_new_buffer_decoder( decoder_t *p_dec )
{
    p_dec->fmt_out.video.i_chroma = p_dec->fmt_out.i_codec;
    return picture_NewFromFormat( &p_dec->fmt_out.video );
}
Example #25
File: video.c  Project: AsamQi/vlc
static picture_t *video_new_buffer_encoder( encoder_t *p_enc )
{
    p_enc->fmt_in.video.i_chroma = p_enc->fmt_in.i_codec;
    return picture_NewFromFormat( &p_enc->fmt_in.video );
}
Example #26
/**
 * @brief VLC filter callback
 * @return picture(s) containing the filtered frames
 */
static picture_t* y4m_filter(filter_t* intf, picture_t* srcPic)
{
    filter_sys_t* sys = intf->p_sys;

    //msg_Info(intf, ">>>> filter");

    if (!srcPic)
    {
        //msg_Info(intf, ">>> filter: NULL INPUT");
        return NULL;
    }

    // will be stored to sys->lastDate on return
    mtime_t currDate = srcPic->date;


    // if there was a problem with the subprocess then send back
    // the picture unmodified, at least we won't freeze up vlc
    if (sys->startFailed || sys->threadExit)
        goto ECHO_RETURN;


    // start subprocess and write the y4m header
    // fixme: this can go in open() if fmt_in matches the srcPic
    if (!sys->startFailed && !sys->childPid)
    {
        sys->startFailed = !startProcess(intf);
        if (sys->startFailed)
            goto ECHO_RETURN;

        if (0 >= writeY4mHeader(intf, srcPic, sys->stdin))
        {
            msg_Err(intf, "writeY4mHeader failed: errno=%d %s",
                errno, strerror(errno));

            stopProcess(intf);
            sys->startFailed = true;

            goto ECHO_RETURN;
        }
    }

    //
    // control the buffering level by monitoring the input/output fifos
    //
    // the input/output fifos are emptied/filled by input/output threads
    // in response to the subprocess reading/writing frames
    //
    // if the input fifo is empty, then probably the subprocess wants
    // more input, so we should buffer more input
    //
    // if the output fifo is empty, and the input fifo is not empty, then
    // probably the subprocess is about to write out a frame and we
    // can wait for it to arrive. in practice, most of the time there
    // is no waiting needed, unless the filter is too slow to keep up.
    //
    bool inputEmpty = true;
    bool outputEmpty = true;

    picture_t* tmp = picture_fifo_Peek(sys->inputFifo);
    if (tmp)
    {
        picture_Release(tmp);
        inputEmpty = false;
    }

    tmp = picture_fifo_Peek(sys->outputFifo);
    if (tmp)
    {
        picture_Release(tmp);
        outputEmpty = false;
    }

    // copy picture to input fifo, we can't use picture_Hold or else
    // the decoder or vout would run out of pictures in its pool
    picture_t* inPic = picture_NewFromFormat(&srcPic->format);
    if (!inPic)
        goto ECHO_RETURN;
    picture_Copy(inPic, srcPic);
    picture_fifo_Push(sys->inputFifo, inPic);

    // signal input thread to wake up and write some more data out
    vlc_mutex_lock(&sys->inputMutex);
    sys->bufferedIn++;
    vlc_cond_signal(&sys->inputCond);
    vlc_mutex_unlock(&sys->inputMutex);

    // if echo is enabled, we're done
    // todo: there should be a limiter on the input buffering in case
    // the subprocess can't keep up
    if (sys->echo)
        goto ECHO_RETURN;


    // keeps track of the number of buffered output pictures, assumes
    // an integer ratio
    // fixme: needs modification to support non-integer ratios
    // and ratios < 1
    sys->bufferedOut += sys->bufferRatio;

    // handle buffering
    if (outputEmpty && inputEmpty)
    {
        // we haven't supplied enough input, raise the minimum
        // level of buffer to keep and return
        sys->minBuffered += sys->bufferRatio;

        msg_Info(intf, "buffer more input: buffers:%d:%d:%d",
                sys->bufferedIn, sys->bufferedOut, sys->minBuffered);

        goto NULL_RETURN;
    }

    if (outputEmpty)
        waitForOutput(intf);

    // if we don't know what the frame interval is, make it 0 which
    // probably causes the next frames out to drop
    // note: this happens at least every time y4m_flush() is called
    // for example when seeking
    if (currDate <= sys->lastDate || sys->lastDate == 0)
    {
        //msg_Err(intf, "currDate <= lastDate");
        //goto ECHO_RETURN;
        sys->lastDate = currDate;
    }

    // reference to first and last picture we are returning
    picture_t* first = NULL;
    picture_t* last = NULL;


    picture_t* pic;
    while( (pic = picture_fifo_Pop(sys->outputFifo)) )
    {
        // do output setup when we see the first frame out from the filter,
        // it could have a different frame rate, chroma, size, etc than
        // the frame going in
        if (!sys->gotFirstOutput)
        {
            sys->gotFirstOutput = true;

            // get the in/out frame ratio by comparing frame rates
            float speed =
                    ((float)srcPic->format.i_frame_rate_base * (float)pic->format.i_frame_rate) /
                    ((float)srcPic->format.i_frame_rate      * (float)pic->format.i_frame_rate_base);

            if (speed < 1.0)
            {
                msg_Err(intf, "frame rate reduction isn't supported yet");
            }
            else
            if (speed > 1.0)
            {
                if (ceil(speed) != speed)
                    msg_Err(intf, "frame rate change must be integer ratio");

                sys->bufferRatio = speed;

                // initial ratio was 1.0, need to correct the number of buffered frames
                // now that we know what it is
                sys->bufferedOut *= sys->bufferRatio;
                sys->minBuffered *= sys->bufferRatio;
            }

            intf->fmt_out.video.i_frame_rate = pic->format.i_frame_rate;
            intf->fmt_out.video.i_frame_rate_base = pic->format.i_frame_rate_base;

            if (intf->fmt_out.video.i_chroma != pic->format.i_chroma)
                msg_Err(intf, "filter changed the chroma, expect corruption");

            // this can't be changed after open, crashes the GLX vout
            //intf->fmt_out.i_codec = pic->format.i_chroma;
            //intf->fmt_out.video.i_chroma = pic->format.i_chroma;

            msg_Info(intf, "first output: buffers=%d:%d", sys->bufferedOut, sys->minBuffered);
        }

        sys->numFrames++;
        sys->bufferedOut--;

        // it seems filter_NewPicture is required now. however,
        // sometimes it returns null in which case it seems like
        // the best thing to do is dump frames
        picture_t* copy = first == NULL ? srcPic : filter_NewPicture(intf);
        if (!copy)
        {
            picture_Release(pic);

            // throw away frames

            // vlc already prints warning for this
            //msg_Err(intf, "filter_NewPicture returns null");
            if (sys->bufferedOut < sys->minBuffered)
                break;
            else
                continue;
        }
        else
        {
            picture_CopyPixels(copy, pic);
            picture_Release(pic);
            pic = copy;
        }

        // the time per output frame interval is a fraction of the input frame time
        int frameTime = (currDate - sys->lastDate) / sys->bufferRatio;

        // the pts is whatever the current pts is minus any buffering
        // introduced by the filter
        pic->date = currDate - sys->bufferedOut*frameTime;

//        msg_Info(intf, "frame=%d buffered=%d:%d frameTime=%d ratio:%d:1 fin=%d:%d fout=%d:%d pts=%u",
//            sys->numFrames, sys->bufferedOut, sys->minBuffered,
//            frameTime, sys->bufferRatio,
//            srcPic->format.i_frame_rate, srcPic->format.i_frame_rate_base,
//            pic->format.i_frame_rate, pic->format.i_frame_rate_base,
//            (unsigned int)pic->date);

        if (last)
            last->p_next = pic;
        else
            first = pic;
        last = pic;


        // if we read too many frames on this iteration, on the next
        // one we might not have any frames available which would be
        // bad as vlc would think our intent was to drop frames
        //
        // if we stop reading before the output is completely empty,
        // there will always be some frames for the next iteration,
        // assuming the filter is fast enough to keep up
        if (sys->bufferedOut  < sys->minBuffered)
            break;

        // if there is still some input buffer left, but the fifo is
        // empty, wait for next frame to arrive. otherwise we can
        // build too much input buffering
        if (sys->bufferedIn > 1)
            waitForOutput(intf);
    }

    if (!first)
    {
        // the buffer checks should prevent from getting here, but
        // just in case prevent leaking the input picture
        picture_Release(srcPic);

        sys->minBuffered++;
    }

    sys->lastDate = currDate;
    return first;
ECHO_RETURN:
    sys->lastDate = currDate;
    //msg_Info(intf, "<<<< filter: ECHO");
    return srcPic;
NULL_RETURN:
    sys->lastDate = currDate;
    picture_Release(srcPic);
    //msg_Info(intf, "<<<< filter: NULL");
    return NULL;
}
Example #27
File: image.c  Project: tguillem/vlc
static picture_t *filter_new_picture( filter_t *p_filter )
{
    return picture_NewFromFormat( &p_filter->fmt_out.video );
}
Example #28
/* This is the filter function. See Open(). */
picture_t *Deinterlace( filter_t *p_filter, picture_t *p_pic )
{
    filter_sys_t *p_sys = p_filter->p_sys;
    picture_t *p_dst[DEINTERLACE_DST_SIZE];

    /* Request output picture */
    p_dst[0] = filter_NewPicture( p_filter );
    if( p_dst[0] == NULL )
    {
        picture_Release( p_pic );
        return NULL;
    }
    picture_CopyProperties( p_dst[0], p_pic );

    /* Any unused p_dst pointers must be NULL, because they are used to
       check how many output frames we have. */
    for( int i = 1; i < DEINTERLACE_DST_SIZE; ++i )
        p_dst[i] = NULL;

    /* Update the input frame history, if the currently active algorithm
       needs it. */
    if( p_sys->b_use_frame_history )
    {
        /* Duplicate the picture
         * TODO when the vout rework is finished, picture_Hold() might be enough
         * but be careful, the pitches must match */
        picture_t *p_dup = picture_NewFromFormat( &p_pic->format );
        if( p_dup )
            picture_Copy( p_dup, p_pic );

        /* Slide the history */
        if( p_sys->pp_history[0] )
            picture_Release( p_sys->pp_history[0] );
        for( int i = 1; i < HISTORY_SIZE; i++ )
            p_sys->pp_history[i-1] = p_sys->pp_history[i];
        p_sys->pp_history[HISTORY_SIZE-1] = p_dup;
    }

    /* Slide the metadata history. */
    for( int i = 1; i < METADATA_SIZE; i++ )
    {
        p_sys->meta.pi_date[i-1]            = p_sys->meta.pi_date[i];
        p_sys->meta.pi_nb_fields[i-1]       = p_sys->meta.pi_nb_fields[i];
        p_sys->meta.pb_top_field_first[i-1] = p_sys->meta.pb_top_field_first[i];
    }
    /* The last element corresponds to the current input frame. */
    p_sys->meta.pi_date[METADATA_SIZE-1]            = p_pic->date;
    p_sys->meta.pi_nb_fields[METADATA_SIZE-1]       = p_pic->i_nb_fields;
    p_sys->meta.pb_top_field_first[METADATA_SIZE-1] = p_pic->b_top_field_first;

    /* Remember the frame offset that we should use for this frame.
       The value in p_sys will be updated to reflect the correct value
       for the *next* frame when we call the renderer. */
    int i_frame_offset = p_sys->i_frame_offset;
    int i_meta_idx     = (METADATA_SIZE-1) - i_frame_offset;

    /* These correspond to the current *outgoing* frame. */
    bool b_top_field_first;
    int i_nb_fields;
    if( i_frame_offset != CUSTOM_PTS )
    {
        /* Pick the correct values from the history. */
        b_top_field_first = p_sys->meta.pb_top_field_first[i_meta_idx];
        i_nb_fields       = p_sys->meta.pi_nb_fields[i_meta_idx];
    }
    else
    {
        /* Framerate doublers must not request CUSTOM_PTS, as they need the
           original field timings, and need Deinterlace() to allocate the
           correct number of output frames. */
        assert( !p_sys->b_double_rate );

        /* NOTE: i_nb_fields is only used for framerate doublers, so it is
                 unused in this case. b_top_field_first is only passed to the
                 algorithm. We assume that algorithms that request CUSTOM_PTS
                 will, if necessary, extract the TFF/BFF information themselves.
        */
        b_top_field_first = p_pic->b_top_field_first; /* this is not guaranteed
                                                         to be meaningful */
        i_nb_fields       = p_pic->i_nb_fields;       /* unused */
    }

    /* For framerate doublers, determine field duration and allocate
       output frames. */
    mtime_t i_field_dur = 0;
    int i_double_rate_alloc_end = 0; /* One past last for allocated output
                                        frames in p_dst[]. Used only for
                                        framerate doublers. Will be inited
                                        below. Declared here because the
                                        PTS logic needs the result. */
    if( p_sys->b_double_rate )
    {
        /* Calculate one field duration. */
        int i = 0;
        int iend = METADATA_SIZE-1;
        /* Find oldest valid logged date.
           The current input frame doesn't count. */
        for( ; i < iend; i++ )
            if( p_sys->meta.pi_date[i] > VLC_TS_INVALID )
                break;
        if( i < iend )
        {
            /* Count how many fields the valid history entries
               (except the new frame) represent. */
            int i_fields_total = 0;
            for( int j = i ; j < iend; j++ )
                i_fields_total += p_sys->meta.pi_nb_fields[j];
            /* One field took this long. */
            i_field_dur = (p_pic->date - p_sys->meta.pi_date[i]) / i_fields_total;
        }
        /* Note that we default to field duration 0 if it could not be
           determined. This behaves the same as the old code - leaving the
           extra output frame dates the same as p_pic->date if the last cached
           date was not valid.
        */

        i_double_rate_alloc_end = i_nb_fields;
        if( i_nb_fields > DEINTERLACE_DST_SIZE )
        {
            /* Note that the effective buffer size depends also on the constant
               private_picture in vout_wrapper.c, since that determines the
               maximum number of output pictures filter_NewPicture() will
               successfully allocate for one input frame.
            */
            msg_Err( p_filter, "Framerate doubler: output buffer too small; "\
                               "fields = %d, buffer size = %d. Dropping the "\
                               "remaining fields.",
                               i_nb_fields, DEINTERLACE_DST_SIZE );
            i_double_rate_alloc_end = DEINTERLACE_DST_SIZE;
        }

        /* Allocate output frames. */
        for( int i = 1; i < i_double_rate_alloc_end ; ++i )
        {
            p_dst[i-1]->p_next =
            p_dst[i]           = filter_NewPicture( p_filter );
            if( p_dst[i] )
            {
                picture_CopyProperties( p_dst[i], p_pic );
            }
            else
            {
                msg_Err( p_filter, "Framerate doubler: could not allocate "\
                                   "output frame %d", i+1 );
                i_double_rate_alloc_end = i; /* Inform the PTS logic about the
                                                correct end position. */
                break; /* If this happens, the rest of the allocations
                          aren't likely to work, either... */
            }
        }
        /* Now we have allocated *up to* the correct number of frames;
           normally, exactly the correct number. Upon alloc failure,
           we may have succeeded in allocating *some* output frames,
           but fewer than were desired. In such a case, as many will
           be rendered as were successfully allocated.

           Note that now p_dst[i] != NULL
           for 0 <= i < i_double_rate_alloc_end. */
    }
    assert( p_sys->b_double_rate  ||  p_dst[1] == NULL );
    assert( i_nb_fields > 2  ||  p_dst[2] == NULL );

    /* Render */
    switch( p_sys->i_mode )
    {
        case DEINTERLACE_DISCARD:
            RenderDiscard( p_dst[0], p_pic, 0 );
            break;

        case DEINTERLACE_BOB:
            RenderBob( p_dst[0], p_pic, !b_top_field_first );
            if( p_dst[1] )
                RenderBob( p_dst[1], p_pic, b_top_field_first );
            if( p_dst[2] )
                RenderBob( p_dst[2], p_pic, !b_top_field_first );
            break;

        case DEINTERLACE_LINEAR:
            RenderLinear( p_filter, p_dst[0], p_pic, !b_top_field_first );
            if( p_dst[1] )
                RenderLinear( p_filter, p_dst[1], p_pic, b_top_field_first );
            if( p_dst[2] )
                RenderLinear( p_filter, p_dst[2], p_pic, !b_top_field_first );
            break;

        case DEINTERLACE_MEAN:
            RenderMean( p_filter, p_dst[0], p_pic );
            break;

        case DEINTERLACE_BLEND:
            RenderBlend( p_filter, p_dst[0], p_pic );
            break;

        case DEINTERLACE_X:
            RenderX( p_dst[0], p_pic );
            break;

        case DEINTERLACE_YADIF:
            if( RenderYadif( p_filter, p_dst[0], p_pic, 0, 0 ) )
                goto drop;
            break;

        case DEINTERLACE_YADIF2X:
            if( RenderYadif( p_filter, p_dst[0], p_pic, 0, !b_top_field_first ) )
                goto drop;
            if( p_dst[1] )
                RenderYadif( p_filter, p_dst[1], p_pic, 1, b_top_field_first );
            if( p_dst[2] )
                RenderYadif( p_filter, p_dst[2], p_pic, 2, !b_top_field_first );
            break;

        case DEINTERLACE_PHOSPHOR:
            if( RenderPhosphor( p_filter, p_dst[0], 0,
                                !b_top_field_first ) )
                goto drop;
            if( p_dst[1] )
                RenderPhosphor( p_filter, p_dst[1], 1,
                                b_top_field_first );
            if( p_dst[2] )
                RenderPhosphor( p_filter, p_dst[2], 2,
                                !b_top_field_first );
            break;

        case DEINTERLACE_IVTC:
            /* Note: RenderIVTC will automatically drop the duplicate frames
                     produced by IVTC. This is part of normal operation. */
            if( RenderIVTC( p_filter, p_dst[0] ) )
                goto drop;
            break;
    }

    /* Set output timestamps, if the algorithm didn't request CUSTOM_PTS
       for this frame. */
    assert( i_frame_offset <= METADATA_SIZE  ||  i_frame_offset == CUSTOM_PTS );
    if( i_frame_offset != CUSTOM_PTS )
    {
        mtime_t i_base_pts = p_sys->meta.pi_date[i_meta_idx];

        /* Note: in the usual case (i_frame_offset = 0  and
                 b_double_rate = false), this effectively does nothing.
                 This is needed to correct the timestamp
                 when i_frame_offset > 0. */
        p_dst[0]->date = i_base_pts;

        if( p_sys->b_double_rate )
        {
            /* Processing all actually allocated output frames. */
            for( int i = 1; i < i_double_rate_alloc_end; ++i )
            {
                /* XXX it's not really good especially for the first picture, but
                 * I don't think that delaying by one frame is worth it */
                if( i_base_pts > VLC_TS_INVALID )
                    p_dst[i]->date = i_base_pts + i * i_field_dur;
                else
                    p_dst[i]->date = VLC_TS_INVALID;
            }
        }
    }

    for( int i = 0; i < DEINTERLACE_DST_SIZE; ++i )
    {
        if( p_dst[i] )
        {
            p_dst[i]->b_progressive = true;
            p_dst[i]->i_nb_fields = 2;
        }
    }

    picture_Release( p_pic );
    return p_dst[0];

drop:
    picture_Release( p_dst[0] );
    for( int i = 1; i < DEINTERLACE_DST_SIZE; ++i )
    {
        if( p_dst[i] )
            picture_Release( p_dst[i] );
    }
    picture_Release( p_pic );
    return NULL;
}
Example #29
File: win32text.c  Project: paa/vlc
/*****************************************************************************
 * Render: place string in picture
 *****************************************************************************
 * This function merges the previously rendered win32text glyphs into a picture
 *****************************************************************************/
static int Render( filter_t *p_filter, subpicture_region_t *p_region,
                   uint8_t *p_bitmap, int i_width, int i_height )
{
    uint8_t *p_dst;
    video_format_t fmt;
    int i, i_pitch;
    bool b_outline = true;

    /* Create a new subpicture region */
    memset( &fmt, 0, sizeof(video_format_t) );
    fmt.i_chroma = VLC_CODEC_YUVP;
    fmt.i_width = fmt.i_visible_width = i_width + (b_outline ? 4 : 0);
    fmt.i_height = fmt.i_visible_height = i_height + (b_outline ? 4 : 0);
    fmt.i_x_offset = fmt.i_y_offset = 0;

    /* Build palette */
    fmt.p_palette = calloc( 1, sizeof(*fmt.p_palette) );
    if( !fmt.p_palette )
        return VLC_EGENERIC;
    fmt.p_palette->i_entries = 16;
    for( i = 0; i < fmt.p_palette->i_entries; i++ )
    {
        fmt.p_palette->palette[i][0] = pi_gamma[i];
        fmt.p_palette->palette[i][1] = 128;
        fmt.p_palette->palette[i][2] = 128;
        fmt.p_palette->palette[i][3] = pi_gamma[i];
    }

    p_region->p_picture = picture_NewFromFormat( &fmt );
    if( !p_region->p_picture )
    {
        free( fmt.p_palette );
        return VLC_EGENERIC;
    }
    p_region->fmt = fmt;

    p_dst = p_region->p_picture->Y_PIXELS;
    i_pitch = p_region->p_picture->Y_PITCH;

    if( b_outline )
    {
        memset( p_dst, 0, i_pitch * fmt.i_height );
        p_dst += p_region->p_picture->Y_PITCH * 2 + 2;
    }

    for( i = 0; i < i_height; i++ )
    {
        memcpy( p_dst, p_bitmap, i_width );
        p_bitmap += (i_width+3) & ~3;
        p_dst += i_pitch;
    }

    /* Outlining (find something better than nearest neighbour filtering ?) */
    if( b_outline )
    {
        uint8_t *p_top = p_dst; /* Use 1st line as a cache */
        uint8_t left, current;
        int x, y;

        p_dst = p_region->p_picture->Y_PIXELS;

        for( y = 1; y < (int)fmt.i_height - 1; y++ )
        {
            memcpy( p_top, p_dst, fmt.i_width );
            p_dst += i_pitch;
            left = 0;

            for( x = 1; x < (int)fmt.i_width - 1; x++ )
            {
                current = p_dst[x];
                p_dst[x] = ( 4 * (int)p_dst[x] + left + p_top[x] + p_dst[x+1] +
                             p_dst[x + i_pitch]) / 8;
                left = current;
            }
        }
        memset( p_top, 0, fmt.i_width );
    }

    return VLC_SUCCESS;
}