Example #1
void show_pix_fmts(void)
{
    enum PixelFormat pix_fmt;

    printf(
        "Pixel formats:\n"
        "I.... = Supported Input  format for conversion\n"
        ".O... = Supported Output format for conversion\n"
        "..H.. = Hardware accelerated format\n"
        "...P. = Paletted format\n"
        "....B = Bitstream format\n"
        "FLAGS NAME            NB_COMPONENTS BITS_PER_PIXEL\n"
        "-----\n");

#if !CONFIG_SWSCALE
#   define sws_isSupportedInput(x)  0
#   define sws_isSupportedOutput(x) 0
#endif

    for (pix_fmt = 0; pix_fmt < PIX_FMT_NB; pix_fmt++) {
        const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[pix_fmt];
        printf("%c%c%c%c%c %-16s       %d            %2d\n",
               sws_isSupportedInput (pix_fmt)      ? 'I' : '.',
               sws_isSupportedOutput(pix_fmt)      ? 'O' : '.',
               pix_desc->flags & PIX_FMT_HWACCEL   ? 'H' : '.',
               pix_desc->flags & PIX_FMT_PAL       ? 'P' : '.',
               pix_desc->flags & PIX_FMT_BITSTREAM ? 'B' : '.',
               pix_desc->name,
               pix_desc->nb_components,
               av_get_bits_per_pixel(pix_desc));
    }
}
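Example #1 targets the old libavutil API: the global av_pix_fmt_descriptors[] table, enum PixelFormat and PIX_FMT_NB were all removed in later FFmpeg releases. Below is a minimal sketch (not taken from any of the projects quoted here) of the same listing against a current FFmpeg, iterating descriptors with av_pix_fmt_desc_next(); the function name show_pix_fmts_new is invented for illustration.

#include <stdio.h>
#include <libavutil/pixdesc.h>
#include <libswscale/swscale.h>

static void show_pix_fmts_new(void)
{
    const AVPixFmtDescriptor *desc = NULL;

    printf("FLAGS NAME            NB_COMPONENTS BITS_PER_PIXEL\n");
    /* av_pix_fmt_desc_next(NULL) returns the first descriptor,
     * then each call advances to the next known pixel format. */
    while ((desc = av_pix_fmt_desc_next(desc))) {
        enum AVPixelFormat pix_fmt = av_pix_fmt_desc_get_id(desc);
        printf("%c%c%c%c%c %-16s %d %2d\n",
               sws_isSupportedInput(pix_fmt)           ? 'I' : '.',
               sws_isSupportedOutput(pix_fmt)          ? 'O' : '.',
               desc->flags & AV_PIX_FMT_FLAG_HWACCEL   ? 'H' : '.',
               desc->flags & AV_PIX_FMT_FLAG_PAL       ? 'P' : '.',
               desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ? 'B' : '.',
               desc->name,
               desc->nb_components,
               av_get_bits_per_pixel(desc));
    }
}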
Example #2
bool ImageConverterFF::check() const
{
    if (!ImageConverter::check())
        return false;
    DPTR_D(const ImageConverterFF);
    if (sws_isSupportedInput((AVPixelFormat)d.fmt_in) <= 0) {
        qWarning("Input pixel format not supported (%s)", av_get_pix_fmt_name((AVPixelFormat)d.fmt_in));
        return false;
    }
    if (sws_isSupportedOutput((AVPixelFormat)d.fmt_out) <= 0) {
        qWarning("Output pixel format not supported (%s)", av_get_pix_fmt_name((AVPixelFormat)d.fmt_out));
        return false;
    }
    return true;
}
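The examples here share one pattern: probe the source format with sws_isSupportedInput() and the destination with sws_isSupportedOutput(), and only then set up a conversion. A minimal self-contained sketch of that pattern (the helper name get_checked_sws is invented for illustration, same-size conversion assumed):

#include <libswscale/swscale.h>

static struct SwsContext *get_checked_sws(int w, int h,
                                          enum AVPixelFormat src_fmt,
                                          enum AVPixelFormat dst_fmt)
{
    /* Both checks return a positive value when the format is handled. */
    if (sws_isSupportedInput(src_fmt) <= 0 || sws_isSupportedOutput(dst_fmt) <= 0)
        return NULL;  /* let the caller decide how to report the failure */

    return sws_getContext(w, h, src_fmt,
                          w, h, dst_fmt,
                          SWS_BILINEAR, NULL, NULL, NULL);
}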
Example #3
static int query_format(struct vf_instance *vf, unsigned int fmt)
{
    if (!IMGFMT_IS_HWACCEL(fmt) && imgfmt2pixfmt(fmt) != AV_PIX_FMT_NONE) {
        if (sws_isSupportedInput(imgfmt2pixfmt(fmt)) < 1)
            return 0;
        unsigned int best = find_best_out(vf, fmt);
        int flags;
        if (!best)
            return 0;            // no matching out-fmt
        flags = vf_next_query_format(vf, best);
        if (!(flags & (VFCAP_CSP_SUPPORTED | VFCAP_CSP_SUPPORTED_BY_HW)))
            return 0;
        if (fmt != best)
            flags &= ~VFCAP_CSP_SUPPORTED_BY_HW;
        return flags;
    }
    return 0;   // no matching in-fmt
}
Example #4
int SWScale::Convert(const uint8_t* in_buffer, const size_t in_buffer_size, uint8_t* out_buffer, const size_t out_buffer_size, enum PixelFormat in_pf, enum PixelFormat out_pf, unsigned int width, unsigned int height) {
	/* Parameter checking */
	if(in_buffer == NULL || out_buffer == NULL) {
		Error("NULL Input or output buffer");
		return -1;
	}
	if(in_pf == 0 || out_pf == 0) {
		Error("Invalid input or output pixel formats");
		return -2;
	}
	if(!width || !height) {
		Error("Invalid width or height");
		return -3;
	}

	/* Warn if the input or output pixelformat is not supported */
	if(!sws_isSupportedInput(in_pf)) {
		Warning("swscale does not support the input format: %c%c%c%c",(in_pf)&0xff,((in_pf)&0xff),((in_pf>>16)&0xff),((in_pf>>24)&0xff));
	}
Example #5
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if !LIBAVFORMAT_VERSION_CHECK(53, 2, 0, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    // Handle options
    AVDictionary *opts = 0;
    StringVector opVect = split(Options(), ",");
    
    // Set transport method as specified by method field, rtpUni is default
    if ( Method() == "rtpMulti" )
        opVect.push_back("rtsp_transport=udp_multicast");
    else if ( Method() == "rtpRtsp" )
        opVect.push_back("rtsp_transport=tcp");
    else if ( Method() == "rtpRtspHttp" )
        opVect.push_back("rtsp_transport=http");

    Debug(2, "Number of Options: %d", opVect.size());
    for (size_t i = 0; i < opVect.size(); i++)
    {
        StringVector parts = split(opVect[i], "=");
        if (parts.size() > 1) {
            parts[0] = trimSpaces(parts[0]);
            parts[1] = trimSpaces(parts[1]);
            if ( av_dict_set(&opts, parts[0].c_str(), parts[1].c_str(), 0) == 0 ) {
                Debug(2, "set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str());
            }
            else
            {
                Warning( "Error trying to set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str() );
            }
        }
        else
        {
            Warning( "Unable to parse ffmpeg option %d '%s', expecting key=value", i, opVect[i].c_str() );
        }
    }

    Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    AVDictionaryEntry *e;
    if ((e = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX)) != NULL) {
        Warning( "Option %s not recognized by ffmpeg", e->key);
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    // Locate stream info from avformat_open_input
#if !LIBAVFORMAT_VERSION_CHECK(53, 6, 0, 6, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );

    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if (LIBAVCODEC_VERSION_CHECK(52, 64, 0, 64, 0) || LIBAVUTIL_VERSION_CHECK(50, 14, 0, 14, 0))
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if !LIBAVFORMAT_VERSION_CHECK(53, 8, 0, 8, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
    mRawFrame = av_frame_alloc();
#else
    mRawFrame = avcodec_alloc_frame();
#endif

    // Allocate space for the converted video frame
#if LIBAVCODEC_VERSION_CHECK(55, 28, 1, 45, 101)
    mFrame = av_frame_alloc();
#else
    mFrame = avcodec_alloc_frame();
#endif

    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }
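A side note on the error message above: mCodecContext->pix_fmt is an enum AVPixelFormat value, not a fourcc, so unpacking it as four characters mostly prints unprintable bytes. A clearer report, in the spirit of Example #2, could look like this (sketch only, not the project's actual code):

    const char *pf_name = av_get_pix_fmt_name(mCodecContext->pix_fmt);
    Fatal("swscale does not support the codec format: %s", pf_name ? pf_name : "(unknown)");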
Example #6
static int query_format(struct vf_instance *vf, unsigned int fmt)
{
    if (IMGFMT_IS_HWACCEL(fmt) || sws_isSupportedInput(imgfmt2pixfmt(fmt)) < 1)
        return 0;
    return !!find_best_out(vf, fmt);
}
Example #7
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, NULL ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }
Example #8
int FfmpegCamera::PrimeCapture()
{
    Info( "Priming capture from %s", mPath.c_str() );
    


    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    // Handle options
    AVDictionary *opts = 0;
    StringVector opVect = split(Options(), ",");
    
    // Set transport method as specified by method field, rtpUni is default
    if ( Method() == "rtpMulti" )
        opVect.push_back("rtsp_transport=udp_multicast");
    else if ( Method() == "rtpRtsp" )
        opVect.push_back("rtsp_transport=tcp");
    else if ( Method() == "rtpRtspHttp" )
        opVect.push_back("rtsp_transport=http");

    Debug(2, "Number of Options: %d", opVect.size());
    for (size_t i = 0; i < opVect.size(); i++)
    {
        StringVector parts = split(opVect[i], "=");
        if (parts.size() > 1) {
            parts[0] = trimSpaces(parts[0]);
            parts[1] = trimSpaces(parts[1]);
            if ( av_dict_set(&opts, parts[0].c_str(), parts[1].c_str(), 0) == 0 ) {
                Debug(2, "set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str());
            }
            else
            {
                Warning( "Error trying to set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str() );
            }
        }
    }

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) !=0 )
#endif
        Fatal( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );

    // Locate stream info from input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
	if(mRawFrame == NULL || mFrame == NULL)
		Fatal( "Unable to allocate frame for %s", mPath.c_str() );
	
	int pSize = avpicture_get_size( imagePixFormat, width, height );
	if( (unsigned int)pSize != imagesize) {
		Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
	}
	
#if HAVE_LIBSWSCALE
	if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
		Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
	}
Example #9
File: vo_x11.c Project: chyiz/mpv
static bool resize(struct vo *vo)
{
    struct priv *p = vo->priv;
    struct vo_x11_state *x11 = vo->x11;

    for (int i = 0; i < 2; i++)
        freeMyXImage(p, i);

    vo_get_src_dst_rects(vo, &p->src, &p->dst, &p->osd);

    p->src_w = p->src.x1 - p->src.x0;
    p->src_h = p->src.y1 - p->src.y0;
    p->dst_w = p->dst.x1 - p->dst.x0;
    p->dst_h = p->dst.y1 - p->dst.y0;

    // p->osd contains the parameters assuming OSD rendering in window
    // coordinates, but OSD can only be rendered in the intersection
    // between window and video rectangle (i.e. not into panscan borders).
    p->osd.w = p->dst_w;
    p->osd.h = p->dst_h;
    p->osd.mt = MPMIN(0, p->osd.mt);
    p->osd.mb = MPMIN(0, p->osd.mb);
    p->osd.mr = MPMIN(0, p->osd.mr);
    p->osd.ml = MPMIN(0, p->osd.ml);

    mp_input_set_mouse_transform(vo->input_ctx, &p->dst, NULL);

    p->image_width = (p->dst_w + 7) & (~7);
    p->image_height = p->dst_h;

    for (int i = 0; i < 2; i++) {
        if (!getMyXImage(p, i))
            return false;
    }

    const struct fmt_entry *fmte = mp_to_x_fmt;
    while (fmte->mpfmt) {
        if (fmte->depth == p->myximage[0]->bits_per_pixel &&
            fmte->byte_order == p->myximage[0]->byte_order &&
            fmte->red_mask == p->myximage[0]->red_mask &&
            fmte->green_mask == p->myximage[0]->green_mask &&
            fmte->blue_mask == p->myximage[0]->blue_mask)
            break;
        fmte++;
    }
    if (!fmte->mpfmt) {
        MP_ERR(vo, "X server image format not supported, use another VO.\n");
        return false;
    }

    mp_sws_set_from_cmdline(p->sws, vo->opts->sws_opts);
    p->sws->dst = (struct mp_image_params) {
        .imgfmt = fmte->mpfmt,
        .w = p->dst_w,
        .h = p->dst_h,
        .p_w = 1,
        .p_h = 1,
    };
    mp_image_params_guess_csp(&p->sws->dst);

    if (mp_sws_reinit(p->sws) < 0)
        return false;

    XFillRectangle(x11->display, x11->window, p->gc, 0, 0, vo->dwidth, vo->dheight);

    vo->want_redraw = true;
    return true;
}

static void Display_Image(struct priv *p, XImage *myximage)
{
    struct vo *vo = p->vo;

    XImage *x_image = p->myximage[p->current_buf];

#if HAVE_SHM && HAVE_XEXT
    if (p->Shmem_Flag) {
        XShmPutImage(vo->x11->display, vo->x11->window, p->gc, x_image,
                     0, 0, p->dst.x0, p->dst.y0, p->dst_w, p->dst_h,
                     True);
        vo->x11->ShmCompletionWaitCount++;
    } else
#endif
    {
        XPutImage(vo->x11->display, vo->x11->window, p->gc, x_image,
                  0, 0, p->dst.x0, p->dst.y0, p->dst_w, p->dst_h);
    }
}

static struct mp_image get_x_buffer(struct priv *p, int buf_index)
{
    struct mp_image img = {0};
    mp_image_set_params(&img, &p->sws->dst);

    img.planes[0] = p->myximage[buf_index]->data;
    img.stride[0] =
        p->image_width * ((p->myximage[buf_index]->bits_per_pixel + 7) / 8);

    return img;
}

static void wait_for_completion(struct vo *vo, int max_outstanding)
{
#if HAVE_SHM && HAVE_XEXT
    struct priv *ctx = vo->priv;
    struct vo_x11_state *x11 = vo->x11;
    if (ctx->Shmem_Flag) {
        while (x11->ShmCompletionWaitCount > max_outstanding) {
            if (!ctx->Shm_Warned_Slow) {
                MP_WARN(vo, "can't keep up! Waiting"
                            " for XShm completion events...\n");
                ctx->Shm_Warned_Slow = 1;
            }
            mp_sleep_us(1000);
            vo_x11_check_events(vo);
        }
    }
#endif
}

static void flip_page(struct vo *vo)
{
    struct priv *p = vo->priv;
    Display_Image(p, p->myximage[p->current_buf]);
    p->current_buf = (p->current_buf + 1) % 2;
}

// Note: REDRAW_FRAME can call this with NULL.
static void draw_image(struct vo *vo, mp_image_t *mpi)
{
    struct priv *p = vo->priv;

    wait_for_completion(vo, 1);

    struct mp_image img = get_x_buffer(p, p->current_buf);

    if (mpi) {
        struct mp_image src = *mpi;
        struct mp_rect src_rc = p->src;
        src_rc.x0 = MP_ALIGN_DOWN(src_rc.x0, src.fmt.align_x);
        src_rc.y0 = MP_ALIGN_DOWN(src_rc.y0, src.fmt.align_y);
        mp_image_crop_rc(&src, src_rc);

        mp_sws_scale(p->sws, &img, &src);
    } else {
        mp_image_clear(&img, 0, 0, img.w, img.h);
    }

    osd_draw_on_image(vo->osd, p->osd, mpi ? mpi->pts : 0, 0, &img);

    if (mpi != p->original_image) {
        talloc_free(p->original_image);
        p->original_image = mpi;
    }
}

static int query_format(struct vo *vo, int format)
{
    if (sws_isSupportedInput(imgfmt2pixfmt(format)))
        return 1;
    return 0;
}

static void uninit(struct vo *vo)
{
    struct priv *p = vo->priv;
    if (p->myximage[0])
        freeMyXImage(p, 0);
    if (p->myximage[1])
        freeMyXImage(p, 1);
    if (p->gc)
        XFreeGC(vo->x11->display, p->gc);

    talloc_free(p->original_image);

    vo_x11_uninit(vo);
}

static int preinit(struct vo *vo)
{
    struct priv *p = vo->priv;
    p->vo = vo;
    p->sws = mp_sws_alloc(vo);

    if (!vo_x11_init(vo))
        goto error;
    struct vo_x11_state *x11 = vo->x11;

    XWindowAttributes attribs;
    XGetWindowAttributes(x11->display, x11->rootwin, &attribs);
    p->depth = attribs.depth;

    if (!XMatchVisualInfo(x11->display, x11->screen, p->depth,
                          TrueColor, &p->vinfo))
        goto error;

    MP_VERBOSE(vo, "selected visual: %d\n", (int)p->vinfo.visualid);

    if (!vo_x11_create_vo_window(vo, &p->vinfo, "x11"))
        goto error;

    p->gc = XCreateGC(x11->display, x11->window, 0, NULL);
    MP_WARN(vo, "Warning: this legacy VO has bad performance. Consider fixing "
                "your graphics drivers, or not forcing the x11 VO.\n");
    return 0;

error:
    uninit(vo);
    return -1;
}

static int control(struct vo *vo, uint32_t request, void *data)
{
    struct priv *p = vo->priv;
    switch (request) {
    case VOCTRL_SET_PANSCAN:
        if (vo->config_ok)
            resize(vo);
        return VO_TRUE;
    case VOCTRL_REDRAW_FRAME:
        draw_image(vo, p->original_image);
        return true;
    }

    int events = 0;
    int r = vo_x11_control(vo, &events, request, data);
    if (vo->config_ok && (events & (VO_EVENT_EXPOSE | VO_EVENT_RESIZE)))
        resize(vo);
    vo_event(vo, events);
    return r;
}

const struct vo_driver video_out_x11 = {
    .description = "X11 (slow, old crap)",
    .name = "x11",
    .priv_size = sizeof(struct priv),
    .preinit = preinit,
    .query_format = query_format,
    .reconfig = reconfig,
    .control = control,
    .draw_image = draw_image,
    .flip_page = flip_page,
    .wakeup = vo_x11_wakeup,
    .wait_events = vo_x11_wait_events,
    .uninit = uninit,
};
Example #10
int FfmpegCamera::OpenFfmpeg() {

    Debug ( 2, "OpenFfmpeg called." );

    mOpenStart = time(NULL);
    mIsOpening = true;

    // Open the input, not necessarily a file
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_open_input_file" );
    if ( av_open_input_file( &mFormatContext, mPath.c_str(), NULL, 0, NULL ) !=0 )
#else
    // Handle options
    AVDictionary *opts = 0;
    StringVector opVect = split(Options(), ",");
    
    // Set transport method as specified by method field, rtpUni is default
    if ( Method() == "rtpMulti" )
        opVect.push_back("rtsp_transport=udp_multicast");
    else if ( Method() == "rtpRtsp" )
        opVect.push_back("rtsp_transport=tcp");
    else if ( Method() == "rtpRtspHttp" )
        opVect.push_back("rtsp_transport=http");

    Debug(2, "Number of Options: %d", opVect.size());
    for (size_t i = 0; i < opVect.size(); i++)
    {
        StringVector parts = split(opVect[i], "=");
        if (parts.size() > 1) {
            parts[0] = trimSpaces(parts[0]);
            parts[1] = trimSpaces(parts[1]);
            if ( av_dict_set(&opts, parts[0].c_str(), parts[1].c_str(), 0) == 0 ) {
                Debug(2, "set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str());
            }
            else
            {
                Warning( "Error trying to set option %d '%s' to '%s'", i, parts[0].c_str(), parts[1].c_str() );
            }
        }
    }

    Debug ( 1, "Calling avformat_open_input" );

    mFormatContext = avformat_alloc_context( );
    mFormatContext->interrupt_callback.callback = FfmpegInterruptCallback;
    mFormatContext->interrupt_callback.opaque = this;

    if ( avformat_open_input( &mFormatContext, mPath.c_str(), NULL, &opts ) !=0 )
#endif
    {
        mIsOpening = false;
        Error( "Unable to open input %s due to: %s", mPath.c_str(), strerror(errno) );
        return -1;
    }

    mIsOpening = false;
    Debug ( 1, "Opened input" );

    Info( "Stream open %s", mPath.c_str() );
    startTime=av_gettime();//FIXME here or after find_Stream_info
    
    //FIXME can speed up initial analysis but need sensible parameters...
    //mFormatContext->probesize = 32;
    //mFormatContext->max_analyze_duration = 32;
    // Locate stream info from avformat_open_input
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 4, 0)
    Debug ( 1, "Calling av_find_stream_info" );
    if ( av_find_stream_info( mFormatContext ) < 0 )
#else
    Debug ( 1, "Calling avformat_find_stream_info" );
    if ( avformat_find_stream_info( mFormatContext, 0 ) < 0 )
#endif
        Fatal( "Unable to find stream info from %s due to: %s", mPath.c_str(), strerror(errno) );
    
    Info( "Find stream info complete %s", mPath.c_str() );
    Debug ( 1, "Got stream info" );

    // Find first video stream present
    mVideoStreamId = -1;
    for (unsigned int i=0; i < mFormatContext->nb_streams; i++ )
    {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
        if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
        if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
		{
			mVideoStreamId = i;
			break;
		}
        if(mAudioStreamId == -1) //FIXME best way to copy all other streams?
        {
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
		    if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
#else
		    if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO )
#endif
		    {
                mAudioStreamId = i;
		    }
        }
    }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream in %s", mPath.c_str() );

    Debug ( 1, "Found video stream" );

    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Try and get the codec from the codec context
    if ( (mCodec = avcodec_find_decoder( mCodecContext->codec_id )) == NULL )
        Fatal( "Can't find codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Found decoder" );

    // Open the codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    Debug ( 1, "Calling avcodec_open" );
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    Debug ( 1, "Calling avcodec_open2" );
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Fatal( "Unable to open codec for video stream from %s", mPath.c_str() );

    Debug ( 1, "Opened codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
    if(mRawFrame == NULL || mFrame == NULL)
        Fatal( "Unable to allocate frame for %s", mPath.c_str() );

    Debug ( 1, "Allocated frames" );
    
    int pSize = avpicture_get_size( imagePixFormat, width, height );
    if( (unsigned int)pSize != imagesize) {
        Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
    }

    Debug ( 1, "Validated imagesize" );
    
#if HAVE_LIBSWSCALE
    Debug ( 1, "Calling sws_isSupportedInput" );
    if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
        Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
    }
Example #11
int RemoteCameraRtsp::PrimeCapture()
{
    Debug( 2, "Waiting for sources" );
    for ( int i = 0; i < 100 && !rtspThread->hasSources(); i++ )
    {
        usleep( 100000 );
    }
    if ( !rtspThread->hasSources() )
        Fatal( "No RTSP sources" );

    Debug( 2, "Got sources" );

    mFormatContext = rtspThread->getFormatContext();

    // Find first video stream present
    mVideoStreamId = -1;
    
    for ( unsigned int i = 0; i < mFormatContext->nb_streams; i++ )
#if LIBAVUTIL_VERSION_INT >= AV_VERSION_INT(51,2,1)
	if ( mFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO )
#else
	if ( mFormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
#endif
        {
            mVideoStreamId = i;
            break;
        }
    if ( mVideoStreamId == -1 )
        Fatal( "Unable to locate video stream" );

    // Get a pointer to the codec context for the video stream
    mCodecContext = mFormatContext->streams[mVideoStreamId]->codec;

    // Find the decoder for the video stream
    mCodec = avcodec_find_decoder( mCodecContext->codec_id );
    if ( mCodec == NULL )
        Panic( "Unable to locate codec %d decoder", mCodecContext->codec_id );

    // Open codec
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53, 7, 0)
    if ( avcodec_open( mCodecContext, mCodec ) < 0 )
#else
    if ( avcodec_open2( mCodecContext, mCodec, 0 ) < 0 )
#endif
        Panic( "Can't open codec" );

    // Allocate space for the native video frame
    mRawFrame = avcodec_alloc_frame();

    // Allocate space for the converted video frame
    mFrame = avcodec_alloc_frame();
    
	if(mRawFrame == NULL || mFrame == NULL)
		Fatal( "Unable to allocate frame(s)");
	
	int pSize = avpicture_get_size( imagePixFormat, width, height );
	if( (unsigned int)pSize != imagesize) {
		Fatal("Image size mismatch. Required: %d Available: %d",pSize,imagesize);
	}
	
#if HAVE_LIBSWSCALE
	if(!sws_isSupportedInput(mCodecContext->pix_fmt)) {
		Fatal("swscale does not support the codec format: %c%c%c%c",(mCodecContext->pix_fmt)&0xff,((mCodecContext->pix_fmt>>8)&0xff),((mCodecContext->pix_fmt>>16)&0xff),((mCodecContext->pix_fmt>>24)&0xff));
	}
Example #12
static void modeset_destroy_fb(int fd, struct modeset_buf *buf)
{
    if (buf->map) {
        munmap(buf->map, buf->size);
    }
    if (buf->fb) {
        drmModeRmFB(fd, buf->fb);
    }
    if (buf->handle) {
        struct drm_mode_destroy_dumb dreq = {
            .handle = buf->handle,
        };
        drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);
    }
}

static int modeset_create_fb(struct vo *vo, int fd, struct modeset_buf *buf)
{
    int ret = 0;

    buf->handle = 0;

    // create dumb buffer
    struct drm_mode_create_dumb creq = {
        .width = buf->width,
        .height = buf->height,
        .bpp = 32,
    };
    ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
    if (ret < 0) {
        MP_ERR(vo, "Cannot create dumb buffer: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }
    buf->stride = creq.pitch;
    buf->size = creq.size;
    buf->handle = creq.handle;

    // create framebuffer object for the dumb-buffer
    ret = drmModeAddFB(fd, buf->width, buf->height, 24, 32, buf->stride,
                       buf->handle, &buf->fb);
    if (ret) {
        MP_ERR(vo, "Cannot create framebuffer: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }

    // prepare buffer for memory mapping
    struct drm_mode_map_dumb mreq = {
        .handle = buf->handle,
    };
    ret = drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
    if (ret) {
        MP_ERR(vo, "Cannot map dumb buffer: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }

    // perform actual memory mapping
    buf->map = mmap(0, buf->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, mreq.offset);
    if (buf->map == MAP_FAILED) {
        MP_ERR(vo, "Cannot map dumb buffer: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }

    memset(buf->map, 0, buf->size);

end:
    if (ret == 0) {
        return 0;
    }

    modeset_destroy_fb(fd, buf);
    return ret;
}

static int modeset_find_crtc(struct vo *vo, int fd, drmModeRes *res,
                             drmModeConnector *conn, struct modeset_dev *dev)
{
    for (unsigned int i = 0; i < conn->count_encoders; ++i) {
        drmModeEncoder *enc = drmModeGetEncoder(fd, conn->encoders[i]);
        if (!enc) {
            MP_WARN(vo, "Cannot retrieve encoder %u:%u: %s\n",
                    i, conn->encoders[i], mp_strerror(errno));
            continue;
        }

        // iterate all global CRTCs
        for (unsigned int j = 0; j < res->count_crtcs; ++j) {
            // check whether this CRTC works with the encoder
            if (!(enc->possible_crtcs & (1 << j)))
                continue;

            dev->enc = enc;
            dev->crtc = enc->crtc_id;
            return 0;
        }

        drmModeFreeEncoder(enc);
    }

    MP_ERR(vo, "Connector %u has no suitable CRTC\n", conn->connector_id);
    return -ENOENT;
}

static bool is_connector_valid(struct vo *vo, int conn_id,
                               drmModeConnector *conn, bool silent)
{
    if (!conn) {
        if (!silent) {
            MP_ERR(vo, "Cannot get connector %d: %s\n", conn_id,
                   mp_strerror(errno));
        }
        return false;
    }

    if (conn->connection != DRM_MODE_CONNECTED) {
        if (!silent) {
            MP_ERR(vo, "Connector %d is disconnected\n", conn_id);
        }
        return false;
    }

    if (conn->count_modes == 0) {
        if (!silent) {
            MP_ERR(vo, "Connector %d has no valid modes\n", conn_id);
        }
        return false;
    }

    return true;
}

static int modeset_prepare_dev(struct vo *vo, int fd, int conn_id,
                               struct modeset_dev **out)
{
    struct modeset_dev *dev = NULL;
    drmModeConnector *conn = NULL;

    int ret = 0;
    *out = NULL;

    drmModeRes *res = drmModeGetResources(fd);
    if (!res) {
        MP_ERR(vo, "Cannot retrieve DRM resources: %s\n", mp_strerror(errno));
        ret = -errno;
        goto end;
    }

    if (conn_id == -1) {
        // get the first connected connector
        for (int i = 0; i < res->count_connectors; i++) {
            conn = drmModeGetConnector(fd, res->connectors[i]);
            if (is_connector_valid(vo, i, conn, true)) {
                conn_id = i;
                break;
            }
            if (conn) {
                drmModeFreeConnector(conn);
                conn = NULL;
            }
        }
        if (conn_id == -1) {
            MP_ERR(vo, "No connected connectors found\n");
            ret = -ENODEV;
            goto end;
        }
    }

    if (conn_id < 0 || conn_id >= res->count_connectors) {
        MP_ERR(vo, "Bad connector ID. Max valid connector ID = %u\n",
               res->count_connectors);
        ret = -ENODEV;
        goto end;
    }

    conn = drmModeGetConnector(fd, res->connectors[conn_id]);
    if (!is_connector_valid(vo, conn_id, conn, false)) {
        ret = -ENODEV;
        goto end;
    }

    dev = talloc_zero(vo->priv, struct modeset_dev);
    dev->conn = conn->connector_id;
    dev->front_buf = 0;
    dev->mode = conn->modes[0];
    dev->bufs[0].width = conn->modes[0].hdisplay;
    dev->bufs[0].height = conn->modes[0].vdisplay;
    dev->bufs[1].width = conn->modes[0].hdisplay;
    dev->bufs[1].height = conn->modes[0].vdisplay;

    MP_INFO(vo, "Connector using mode %ux%u\n",
            dev->bufs[0].width, dev->bufs[0].height);

    ret = modeset_find_crtc(vo, fd, res, conn, dev);
    if (ret) {
        MP_ERR(vo, "Connector %d has no valid CRTC\n", conn_id);
        goto end;
    }

    for (unsigned int i = 0; i < BUF_COUNT; i++) {
        ret = modeset_create_fb(vo, fd, &dev->bufs[i]);
        if (ret) {
            MP_ERR(vo, "Cannot create framebuffer for connector %d\n",
                   conn_id);
            for (unsigned int j = 0; j < i; j++) {
                modeset_destroy_fb(fd, &dev->bufs[j]);
            }
            goto end;
        }
    }

end:
    if (conn) {
        drmModeFreeConnector(conn);
        conn = NULL;
    }
    if (res) {
        drmModeFreeResources(res);
        res = NULL;
    }
    if (ret == 0) {
        *out = dev;
    } else {
        talloc_free(dev);
    }
    return ret;
}

static void modeset_page_flipped(int fd, unsigned int frame, unsigned int sec,
                                 unsigned int usec, void *data)
{
    struct priv *p = data;
    p->pflip_happening = false;
}



static int setup_vo_crtc(struct vo *vo)
{
    struct priv *p = vo->priv;
    if (p->active)
        return 0;
    p->old_crtc = drmModeGetCrtc(p->fd, p->dev->crtc);
    int ret = drmModeSetCrtc(p->fd, p->dev->crtc,
                          p->dev->bufs[p->dev->front_buf + BUF_COUNT - 1].fb,
                          0, 0, &p->dev->conn, 1, &p->dev->mode);
    p->active = true;
    return ret;
}

static void release_vo_crtc(struct vo *vo)
{
    struct priv *p = vo->priv;

    if (!p->active)
        return;
    p->active = false;

    // wait for current page flip
    while (p->pflip_happening) {
        int ret = drmHandleEvent(p->fd, &p->ev);
        if (ret) {
            MP_ERR(vo, "drmHandleEvent failed: %i\n", ret);
            break;
        }
    }

    if (p->old_crtc) {
        drmModeSetCrtc(p->fd,
                       p->old_crtc->crtc_id,
                       p->old_crtc->buffer_id,
                       p->old_crtc->x,
                       p->old_crtc->y,
                       &p->dev->conn,
                       1,
                       &p->dev->mode);
        drmModeFreeCrtc(p->old_crtc);
        p->old_crtc = NULL;
    }
}

static void release_vt(void *data)
{
    struct vo *vo = data;
    release_vo_crtc(vo);
    if (USE_MASTER) {
        //this function enables support for switching to x, weston etc.
        //however, for whatever reason, it can be called only by root users.
        //until things change, this is commented.
        struct priv *p = vo->priv;
        if (drmDropMaster(p->fd)) {
            MP_WARN(vo, "Failed to drop DRM master: %s\n", mp_strerror(errno));
        }
    }
}

static void acquire_vt(void *data)
{
    struct vo *vo = data;
    if (USE_MASTER) {
        struct priv *p = vo->priv;
        if (drmSetMaster(p->fd)) {
            MP_WARN(vo, "Failed to acquire DRM master: %s\n", mp_strerror(errno));
        }
    }

    setup_vo_crtc(vo);
}



static int wait_events(struct vo *vo, int64_t until_time_us)
{
    struct priv *p = vo->priv;
    int64_t wait_us = until_time_us - mp_time_us();
    int timeout_ms = MPCLAMP((wait_us + 500) / 1000, 0, 10000);
    vt_switcher_poll(&p->vt_switcher, timeout_ms);
    return 0;
}

static void wakeup(struct vo *vo)
{
    struct priv *p = vo->priv;
    vt_switcher_interrupt_poll(&p->vt_switcher);
}

static int reconfig(struct vo *vo, struct mp_image_params *params, int flags)
{
    struct priv *p = vo->priv;

    vo->dwidth = p->device_w;
    vo->dheight = p->device_h;
    vo_get_src_dst_rects(vo, &p->src, &p->dst, &p->osd);

    int32_t w = p->dst.x1 - p->dst.x0;
    int32_t h = p->dst.y1 - p->dst.y0;

    // p->osd contains the parameters assuming OSD rendering in window
    // coordinates, but OSD can only be rendered in the intersection
    // between window and video rectangle (i.e. not into panscan borders).
    p->osd.w = w;
    p->osd.h = h;
    p->osd.mt = MPMIN(0, p->osd.mt);
    p->osd.mb = MPMIN(0, p->osd.mb);
    p->osd.mr = MPMIN(0, p->osd.mr);
    p->osd.ml = MPMIN(0, p->osd.ml);

    p->x = (p->device_w - w) >> 1;
    p->y = (p->device_h - h) >> 1;

    mp_sws_set_from_cmdline(p->sws, vo->opts->sws_opts);
    p->sws->src = *params;
    p->sws->dst = (struct mp_image_params) {
        .imgfmt = IMGFMT_BGR0,
        .w = w,
        .h = h,
        .d_w = w,
        .d_h = h,
    };

    talloc_free(p->cur_frame);
    p->cur_frame = mp_image_alloc(IMGFMT_BGR0, p->device_w, p->device_h);
    mp_image_params_guess_csp(&p->sws->dst);
    mp_image_set_params(p->cur_frame, &p->sws->dst);

    struct modeset_buf *buf = p->dev->bufs;
    memset(buf[0].map, 0, buf[0].size);
    memset(buf[1].map, 0, buf[1].size);

    if (mp_sws_reinit(p->sws) < 0)
        return -1;

    vo->want_redraw = true;
    return 0;
}

static void draw_image(struct vo *vo, mp_image_t *mpi)
{
    struct priv *p = vo->priv;

    if (p->active) {
        struct mp_image src = *mpi;
        struct mp_rect src_rc = p->src;
        src_rc.x0 = MP_ALIGN_DOWN(src_rc.x0, mpi->fmt.align_x);
        src_rc.y0 = MP_ALIGN_DOWN(src_rc.y0, mpi->fmt.align_y);
        mp_image_crop_rc(&src, src_rc);
        mp_sws_scale(p->sws, p->cur_frame, &src);
        osd_draw_on_image(vo->osd, p->osd, src.pts, 0, p->cur_frame);

        struct modeset_buf *front_buf = &p->dev->bufs[p->dev->front_buf];
        int32_t shift = (p->device_w * p->y + p->x) * 4;
        memcpy_pic(front_buf->map + shift,
                   p->cur_frame->planes[0],
                   (p->dst.x1 - p->dst.x0) * 4,
                   p->dst.y1 - p->dst.y0,
                   p->device_w * 4,
                   p->cur_frame->stride[0]);
    }

    if (mpi != p->last_input) {
        talloc_free(p->last_input);
        p->last_input = mpi;
    }
}

static void flip_page(struct vo *vo)
{
    struct priv *p = vo->priv;
    if (!p->active || p->pflip_happening)
        return;

    int ret = drmModePageFlip(p->fd, p->dev->crtc,
                              p->dev->bufs[p->dev->front_buf].fb,
                              DRM_MODE_PAGE_FLIP_EVENT, p);
    if (ret) {
        MP_WARN(vo, "Cannot flip page for connector\n");
    } else {
        p->dev->front_buf++;
        p->dev->front_buf %= BUF_COUNT;
        p->pflip_happening = true;
    }

    // poll page flip finish event
    const int timeout_ms = 3000;
    struct pollfd fds[1] = {
        { .events = POLLIN, .fd = p->fd },
    };
    poll(fds, 1, timeout_ms);
    if (fds[0].revents & POLLIN) {
        ret = drmHandleEvent(p->fd, &p->ev);
        if (ret != 0) {
            MP_ERR(vo, "drmHandleEvent failed: %i\n", ret);
            return;
        }
    }
}

static void uninit(struct vo *vo)
{
    struct priv *p = vo->priv;

    if (p->dev) {
        release_vo_crtc(vo);

        modeset_destroy_fb(p->fd, &p->dev->bufs[1]);
        modeset_destroy_fb(p->fd, &p->dev->bufs[0]);
        drmModeFreeEncoder(p->dev->enc);
    }

    vt_switcher_destroy(&p->vt_switcher);
    talloc_free(p->last_input);
    talloc_free(p->cur_frame);
    talloc_free(p->dev);
    close(p->fd);
}

static int preinit(struct vo *vo)
{
    struct priv *p = vo->priv;
    p->sws = mp_sws_alloc(vo);
    p->fd = -1;
    p->ev.version = DRM_EVENT_CONTEXT_VERSION;
    p->ev.page_flip_handler = modeset_page_flipped;

    if (vt_switcher_init(&p->vt_switcher, vo->log))
        goto err;

    vt_switcher_acquire(&p->vt_switcher, acquire_vt, vo);
    vt_switcher_release(&p->vt_switcher, release_vt, vo);

    if (modeset_open(vo, &p->fd, p->device_path))
        goto err;

    if (modeset_prepare_dev(vo, p->fd, p->connector_id, &p->dev))
        goto err;

    assert(p->dev);
    p->device_w = p->dev->bufs[0].width;
    p->device_h = p->dev->bufs[0].height;

    if (setup_vo_crtc(vo)) {
        MP_ERR(vo, "Cannot set CRTC for connector %u: %s\n", p->connector_id,
               mp_strerror(errno));
        goto err;
    }

    return 0;

err:
    uninit(vo);
    return -1;
}

static int query_format(struct vo *vo, int format)
{
    return sws_isSupportedInput(imgfmt2pixfmt(format));
}

static int control(struct vo *vo, uint32_t request, void *data)
{
    struct priv *p = vo->priv;
    switch (request) {
    case VOCTRL_SCREENSHOT_WIN:
        *(struct mp_image**)data = mp_image_new_copy(p->cur_frame);
        return VO_TRUE;
    case VOCTRL_REDRAW_FRAME:
        draw_image(vo, p->last_input);
        return VO_TRUE;
    case VOCTRL_GET_PANSCAN:
        return VO_TRUE;
    case VOCTRL_SET_PANSCAN:
        if (vo->config_ok)
            reconfig(vo, vo->params, 0);
        return VO_TRUE;
    }
    return VO_NOTIMPL;
}

#define OPT_BASE_STRUCT struct priv

const struct vo_driver video_out_drm = {
    .name = "drm",
    .description = "Direct Rendering Manager",
    .preinit = preinit,
    .query_format = query_format,
    .reconfig = reconfig,
    .control = control,
    .draw_image = draw_image,
    .flip_page = flip_page,
    .uninit = uninit,
    .wait_events = wait_events,
    .wakeup = wakeup,
    .priv_size = sizeof(struct priv),
    .options = (const struct m_option[]) {
        OPT_STRING("devpath", device_path, 0),
        OPT_INT("connector", connector_id, 0),
        {0},
    },
    .priv_defaults = &(const struct priv) {