Example no. 1
0
static void udl_fbdev_destroy(struct drm_device *dev,
			      struct udl_fbdev *ufbdev)
{
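	/* Release the fbdev emulation first, then the framebuffer and its GEM backing object. */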
	drm_fb_helper_unregister_fbi(&ufbdev->helper);
	drm_fb_helper_release_fbi(&ufbdev->helper);
	drm_fb_helper_fini(&ufbdev->helper);
	drm_framebuffer_unregister_private(&ufbdev->ufb.base);
	drm_framebuffer_cleanup(&ufbdev->ufb.base);
	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
}
Example no. 2
0
static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
                                    struct virtio_gpu_fbdev *vgfbdev)
{
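    /* Release the fbdev emulation and clean up the framebuffer; only the pointer to the backing object is cleared here. */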
    struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;

    drm_fb_helper_unregister_fbi(&vgfbdev->helper);
    drm_fb_helper_release_fbi(&vgfbdev->helper);

    if (vgfb->obj)
        vgfb->obj = NULL;
    drm_fb_helper_fini(&vgfbdev->helper);
    drm_framebuffer_cleanup(&vgfb->base);

    return 0;
}
Example no. 3
0
static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
{
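	/* Release the fbdev emulation, destroy the pinned backing object, free the shadow buffer and clean up the framebuffer. */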
	struct qxl_framebuffer *qfb = &qfbdev->qfb;

	drm_fb_helper_unregister_fbi(&qfbdev->helper);
	drm_fb_helper_release_fbi(&qfbdev->helper);

	if (qfb->obj) {
		qxlfb_destroy_pinned_object(qfb->obj);
		qfb->obj = NULL;
	}
	drm_fb_helper_fini(&qfbdev->helper);
	vfree(qfbdev->shadow);
	drm_framebuffer_cleanup(&qfb->base);

	return 0;
}
Example no. 4
0
void armada_fbdev_fini(struct drm_device *dev)
{
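	/* Tear down the fb helper and destroy its framebuffer, if fbdev emulation was set up. */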
	struct armada_private *priv = dev->dev_private;
	struct drm_fb_helper *fbh = priv->fbdev;

	if (fbh) {
		drm_fb_helper_unregister_fbi(fbh);
		drm_fb_helper_release_fbi(fbh);

		drm_fb_helper_fini(fbh);

		if (fbh->fb)
			fbh->fb->funcs->destroy(fbh->fb);

		priv->fbdev = NULL;
	}
}
Example no. 5
0
static int bochs_fbdev_destroy(struct bochs_device *bochs)
{
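	/* Release the fbdev emulation, drop the reference on the backing GEM object, then unregister and clean up the framebuffer. */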
	struct bochs_framebuffer *gfb = &bochs->fb.gfb;

	DRM_DEBUG_DRIVER("\n");

	drm_fb_helper_unregister_fbi(&bochs->fb.helper);
	drm_fb_helper_release_fbi(&bochs->fb.helper);

	if (gfb->obj) {
		drm_gem_object_unreference_unlocked(gfb->obj);
		gfb->obj = NULL;
	}

	drm_fb_helper_fini(&bochs->fb.helper);
	drm_framebuffer_unregister_private(&gfb->base);
	drm_framebuffer_cleanup(&gfb->base);

	return 0;
}
Example no. 6
0
int armada_fbdev_init(struct drm_device *dev)
{
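	/* Bring up fbdev emulation: allocate and prepare a helper, initialise it, add all connectors and set an initial 32bpp configuration. */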
	struct armada_private *priv = dev->dev_private;
	struct drm_fb_helper *fbh;
	int ret;

	fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
	if (!fbh)
		return -ENOMEM;

	priv->fbdev = fbh;

	drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);

	ret = drm_fb_helper_init(dev, fbh, 1, 1);
	if (ret) {
		DRM_ERROR("failed to initialize drm fb helper\n");
		goto err_fb_helper;
	}

	ret = drm_fb_helper_single_add_all_connectors(fbh);
	if (ret) {
		DRM_ERROR("failed to add fb connectors\n");
		goto err_fb_setup;
	}

	ret = drm_fb_helper_initial_config(fbh, 32);
	if (ret) {
		DRM_ERROR("failed to set initial config\n");
		goto err_fb_setup;
	}

	return 0;
 err_fb_setup:
	drm_fb_helper_release_fbi(fbh);
	drm_fb_helper_fini(fbh);
 err_fb_helper:
	priv->fbdev = NULL;
	return ret;
}
Example no. 7
0
static int qxlfb_create(struct qxl_fbdev *qfbdev,
			struct drm_fb_helper_surface_size *sizes)
{
	struct qxl_device *qdev = qfbdev->qdev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qbo = NULL;
	int ret;
	int size;
	int bpp = sizes->surface_bpp;
	int depth = sizes->surface_depth;
	void *shadow;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
	if (ret < 0)
		return ret;

	qbo = gem_to_qxl_bo(gobj);
	QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
		 mode_cmd.height, mode_cmd.pitches[0]);

	shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
	/* TODO: what's the usual response to memory allocation errors? */
	BUG_ON(!shadow);
	QXL_INFO(qdev,
	"surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
		 qxl_bo_gpu_offset(qbo),
		 qxl_bo_mmap_offset(qbo),
		 qbo->kptr,
		 shadow);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out_unref;
	}

	info->par = qfbdev;

	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
			     &qxlfb_fb_funcs);

	fb = &qfbdev->qfb.base;

	/* setup helper with fb data */
	qfbdev->helper.fb = fb;

	qfbdev->shadow = shadow;
	strcpy(info->fix.id, "qxldrmfb");

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);

	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT;
	info->fbops = &qxlfb_ops;

	/*
	 * TODO: using gobj->size in various places in this function. Not sure
	 * what the difference between the different sizes is.
	 */
	info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
	info->fix.smem_len = gobj->size;
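	/* fbcon renders into the system-memory shadow; deferred I/O (qxl_defio) flushes the dirty pages out to the qxl surface. */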
	info->screen_base = qfbdev->shadow;
	info->screen_size = gobj->size;

	drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = qdev->vram_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out_destroy_fbi;
	}

	info->fbdefio = &qxl_defio;
	fb_deferred_io_init(info);

	qdev->fbdev_info = info;
	qdev->fbdev_qfb = &qfbdev->qfb;
	DRM_INFO("fb mappable at 0x%lX, size %lu\n",  info->fix.smem_start, (unsigned long)info->screen_size);
	DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height);
	return 0;

out_destroy_fbi:
	drm_fb_helper_release_fbi(&qfbdev->helper);
out_unref:
	if (qbo) {
		ret = qxl_bo_reserve(qbo, false);
		if (likely(ret == 0)) {
			qxl_bo_kunmap(qbo);
			qxl_bo_unpin(qbo);
			qxl_bo_unreserve(qbo);
		}
	}
	if (fb && ret) {
		drm_gem_object_unreference_unlocked(gobj);
		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}
Example no. 8
0
static int udlfb_create(struct drm_fb_helper *helper,
			struct drm_fb_helper_surface_size *sizes)
{
	struct udl_fbdev *ufbdev =
		container_of(helper, struct udl_fbdev, helper);
	struct drm_device *dev = ufbdev->helper.dev;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct udl_gem_object *obj;
	uint32_t size;
	int ret = 0;

	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (!obj)
		goto out;

	ret = udl_gem_vmap(obj);
	if (ret) {
		DRM_ERROR("failed to vmap fb\n");
		goto out_gfree;
	}

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out_gfree;
	}
	info->par = ufbdev;

	ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
	if (ret)
		goto out_destroy_fbi;

	fb = &ufbdev->ufb.base;

	ufbdev->helper.fb = fb;

	strcpy(info->fix.id, "udldrmfb");
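
	/* fbcon writes land in the CPU vmapping of the GEM object; the fb_ops handlers push damaged regions out to the device. */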

	info->screen_base = ufbdev->ufb.obj->vmapping;
	info->fix.smem_len = size;
	info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &udlfb_ops;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);

	DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
		      fb->width, fb->height,
		      ufbdev->ufb.obj->vmapping);

	return ret;
out_destroy_fbi:
	drm_fb_helper_release_fbi(helper);
out_gfree:
	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
out:
	return ret;
}
Example no. 9
0
static int bochsfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct bochs_device *bochs =
		container_of(helper, struct bochs_device, fb.helper);
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct bochs_bo *bo = NULL;
	int size, ret;

	if (sizes->surface_bpp != 32)
		return -EINVAL;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	/* alloc, pin & map bo */
	ret = bochsfb_create_object(bochs, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
		return ret;
	}

	bo = gem_to_bochs_bo(gobj);

	ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
	if (ret)
		return ret;

	ret = bochs_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
	if (ret) {
		DRM_ERROR("failed to pin fbcon\n");
		ttm_bo_unreserve(&bo->bo);
		return ret;
	}

	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages,
			  &bo->kmap);
	if (ret) {
		DRM_ERROR("failed to kmap fbcon\n");
		ttm_bo_unreserve(&bo->bo);
		return ret;
	}

	ttm_bo_unreserve(&bo->bo);

	/* init fb device */
	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info))
		return PTR_ERR(info);

	info->par = &bochs->fb.helper;

	ret = bochs_framebuffer_init(bochs->dev, &bochs->fb.gfb, &mode_cmd, gobj);
	if (ret) {
		drm_fb_helper_release_fbi(helper);
		return ret;
	}

	bochs->fb.size = size;

	/* setup helper */
	fb = &bochs->fb.gfb.base;
	bochs->fb.helper.fb = fb;

	strcpy(info->fix.id, "bochsdrmfb");

	info->flags = FBINFO_DEFAULT;
	info->fbops = &bochsfb_ops;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width,
			       sizes->fb_height);

	info->screen_base = bo->kmap.virtual;
	info->screen_size = size;
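
	/* Drop the BO's mmap offset so the fbcon buffer can no longer be mapped from userspace. */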

	drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
	info->fix.smem_start = 0;
	info->fix.smem_len = size;

	return 0;
}
Example no. 10
0
static int virtio_gpufb_create(struct drm_fb_helper *helper,
                               struct drm_fb_helper_surface_size *sizes)
{
    struct virtio_gpu_fbdev *vfbdev =
        container_of(helper, struct virtio_gpu_fbdev, helper);
    struct drm_device *dev = helper->dev;
    struct virtio_gpu_device *vgdev = dev->dev_private;
    struct fb_info *info;
    struct drm_framebuffer *fb;
    struct drm_mode_fb_cmd2 mode_cmd = {};
    struct virtio_gpu_object *obj;
    uint32_t resid, format, size;
    int ret;

    mode_cmd.width = sizes->surface_width;
    mode_cmd.height = sizes->surface_height;
    mode_cmd.pitches[0] = mode_cmd.width * 4;
    mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);
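
    /* Map the DRM fourcc onto the matching virtio-gpu format; the virtio formats are defined in fixed byte order, so the mapping depends on host endianness. */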

    switch (mode_cmd.pixel_format) {
#ifdef __BIG_ENDIAN
    case DRM_FORMAT_XRGB8888:
        format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
        break;
    case DRM_FORMAT_ARGB8888:
        format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
        break;
    case DRM_FORMAT_BGRX8888:
        format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
        break;
    case DRM_FORMAT_BGRA8888:
        format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
        break;
    case DRM_FORMAT_RGBX8888:
        format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
        break;
    case DRM_FORMAT_RGBA8888:
        format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
        break;
    case DRM_FORMAT_XBGR8888:
        format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
        break;
    case DRM_FORMAT_ABGR8888:
        format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
        break;
#else
    case DRM_FORMAT_XRGB8888:
        format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
        break;
    case DRM_FORMAT_ARGB8888:
        format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
        break;
    case DRM_FORMAT_BGRX8888:
        format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
        break;
    case DRM_FORMAT_BGRA8888:
        format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
        break;
    case DRM_FORMAT_RGBX8888:
        format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
        break;
    case DRM_FORMAT_RGBA8888:
        format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
        break;
    case DRM_FORMAT_XBGR8888:
        format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
        break;
    case DRM_FORMAT_ABGR8888:
        format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
        break;
#endif
    default:
        DRM_ERROR("failed to find virtio gpu format for %d\n",
                  mode_cmd.pixel_format);
        return -EINVAL;
    }

    size = mode_cmd.pitches[0] * mode_cmd.height;
    obj = virtio_gpu_alloc_object(dev, size, false, true);
    if (IS_ERR(obj))
        return PTR_ERR(obj);

    virtio_gpu_resource_id_get(vgdev, &resid);
    virtio_gpu_cmd_create_resource(vgdev, resid, format,
                                   mode_cmd.width, mode_cmd.height);

    ret = virtio_gpu_vmap_fb(vgdev, obj);
    if (ret) {
        DRM_ERROR("failed to vmap fb %d\n", ret);
        goto err_obj_vmap;
    }

    /* attach the object to the resource */
    ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
    if (ret)
        goto err_obj_attach;

    info = drm_fb_helper_alloc_fbi(helper);
    if (IS_ERR(info)) {
        ret = PTR_ERR(info);
        goto err_fb_alloc;
    }

    info->par = helper;

    ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
                                      &mode_cmd, &obj->gem_base);
    if (ret)
        goto err_fb_init;

    fb = &vfbdev->vgfb.base;

    vfbdev->helper.fb = fb;

    strcpy(info->fix.id, "virtiodrmfb");
    info->flags = FBINFO_DEFAULT;
    info->fbops = &virtio_gpufb_ops;
    info->pixmap.flags = FB_PIXMAP_SYSTEM;

    info->screen_base = obj->vmap;
    info->screen_size = obj->gem_base.size;
    drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
    drm_fb_helper_fill_var(info, &vfbdev->helper,
                           sizes->fb_width, sizes->fb_height);

    info->fix.mmio_start = 0;
    info->fix.mmio_len = 0;
    return 0;

err_fb_init:
    drm_fb_helper_release_fbi(helper);
err_fb_alloc:
    virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
err_obj_attach:
err_obj_vmap:
    virtio_gpu_gem_free_object(&obj->gem_base);
    return ret;
}
Example no. 11
0
static int intelfb_create(struct drm_fb_helper *helper,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct intel_fbdev *ifbdev =
		container_of(helper, struct intel_fbdev, helper);
	struct intel_framebuffer *intel_fb = ifbdev->fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct i915_vma *vma;
	bool prealloc = false;
	void __iomem *vaddr;
	int ret;
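
	/* If the framebuffer inherited from the BIOS is too small for the requested fbcon size, drop it and allocate a new one. */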

	if (intel_fb &&
	    (sizes->fb_width > intel_fb->base.width ||
	     sizes->fb_height > intel_fb->base.height)) {
		DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
			      " releasing it\n",
			      intel_fb->base.width, intel_fb->base.height,
			      sizes->fb_width, sizes->fb_height);
		drm_framebuffer_unreference(&intel_fb->base);
		intel_fb = ifbdev->fb = NULL;
	}
	if (!intel_fb || WARN_ON(!intel_fb->obj)) {
		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
		ret = intelfb_alloc(helper, sizes);
		if (ret)
			return ret;
		intel_fb = ifbdev->fb;
	} else {
		DRM_DEBUG_KMS("re-using BIOS fb\n");
		prealloc = true;
		sizes->fb_width = intel_fb->base.width;
		sizes->fb_height = intel_fb->base.height;
	}

	mutex_lock(&dev->struct_mutex);

	/* Pin the GGTT vma for our access via info->screen_base.
	 * This also validates that any existing fb inherited from the
	 * BIOS is suitable for our own access.
	 */
	vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out_unlock;
	}

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		DRM_ERROR("Failed to allocate fb_info\n");
		ret = PTR_ERR(info);
		goto out_unpin;
	}

	info->par = helper;

	fb = &ifbdev->fb->base;

	ifbdev->helper.fb = fb;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
	info->apertures->ranges[0].size = ggtt->mappable_end;

	info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
	info->fix.smem_len = vma->node.size;

	vaddr = i915_vma_pin_iomap(vma);
	if (IS_ERR(vaddr)) {
		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
		ret = PTR_ERR(vaddr);
		goto out_destroy_fbi;
	}
	info->screen_base = vaddr;
	info->screen_size = vma->node.size;

	/* This driver doesn't need a VT switch to restore the mode on resume */
	info->skip_vt_switch = true;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	/* If the object is shmemfs backed, it will have given us zeroed pages.
	 * If the object is stolen however, it will be full of whatever
	 * garbage was left in there.
	 */
	if (intel_fb->obj->stolen && !prealloc)
		memset_io(info->screen_base, 0, info->screen_size);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
		      fb->width, fb->height, i915_ggtt_offset(vma));
	ifbdev->vma = vma;

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(pdev, info);
	return 0;

out_destroy_fbi:
	drm_fb_helper_release_fbi(helper);
out_unpin:
	intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}