Example #1
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}
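For reference, ttm_bo_kunmap tears down a kernel virtual mapping created by ttm_bo_kmap. The round trip below is a minimal illustrative sketch, not driver code: the helper name example_touch_bo is hypothetical, and it assumes the caller has already reserved the bo and that the mapping is ordinary system memory.

static int example_touch_bo(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	/* Map every page of the (already reserved) buffer object. */
	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	memset(virtual, 0, PAGE_SIZE);	/* if is_iomem were set, memset_io() would be needed */

	ttm_bo_kunmap(&map);	/* every successful ttm_bo_kmap() needs exactly one kunmap */
	return 0;
}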
Example #2
static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct nouveau_bo *nvbo = dma_buf->priv;
	struct drm_device *dev = nvbo->gem->dev;

	mutex_lock(&dev->struct_mutex);
	nvbo->vmapping_count--;
	if (nvbo->vmapping_count == 0) {
		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
	}
	mutex_unlock(&dev->struct_mutex);
}
Example #3
static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;

	mutex_lock(&dev->struct_mutex);
	bo->vmapping_count--;
	if (bo->vmapping_count == 0) {
		ttm_bo_kunmap(&bo->dma_buf_vmap);
	}
	mutex_unlock(&dev->struct_mutex);
}
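Examples #2 and #3 are the release half of a reference-counted vmap: the first prime vmap call creates the kernel mapping, each later call only increments vmapping_count, so only the last vunmap reaches ttm_bo_kunmap. Below is a sketch of the matching radeon map side, modeled on the prime code of the same era; field names such as tbo and dma_buf_vmap follow that code and may differ between kernel versions.

static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	if (bo->vmapping_count) {
		/* Mapping already exists; just take another reference. */
		bo->vmapping_count++;
		goto out_unlock;
	}

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ERR_PTR(ret);
	}
	bo->vmapping_count = 1;
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return bo->dma_buf_vmap.virtual;
}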
Example #4
static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct radeon_bo *bo = dma_buf->priv;
	struct drm_device *dev = bo->rdev->ddev;

	rw_enter_write(&dev->struct_rwlock);
	bo->vmapping_count--;
	if (bo->vmapping_count == 0) {
		ttm_bo_kunmap(&bo->dma_buf_vmap);
	}
	rw_exit_write(&dev->struct_rwlock);
}
Example #5
int vsp_deinit(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct vsp_private *vsp_priv = dev_priv->vsp_private;

	VSP_DEBUG("free VSP firmware/context buffer\n");

	if (vsp_priv->cmd_queue) {
		ttm_bo_kunmap(&vsp_priv->cmd_kmap);
		vsp_priv->cmd_queue = NULL;
	}

	if (vsp_priv->ack_queue) {
		ttm_bo_kunmap(&vsp_priv->ack_kmap);
		vsp_priv->ack_queue = NULL;
	}
	if (vsp_priv->setting) {
		ttm_bo_kunmap(&vsp_priv->setting_kmap);
		vsp_priv->setting = NULL;
	}

	if (vsp_priv->ack_queue_bo)
		ttm_bo_unref(&vsp_priv->ack_queue_bo);
	if (vsp_priv->cmd_queue_bo)
		ttm_bo_unref(&vsp_priv->cmd_queue_bo);
	if (vsp_priv->setting_bo)
		ttm_bo_unref(&vsp_priv->setting_bo);

	device_remove_file(&dev->pdev->dev, &dev_attr_vsp_pmstate);
	sysfs_put(vsp_priv->sysfs_pmstate);

	VSP_DEBUG("free VSP private structure\n");
	kfree(dev_priv->vsp_private);

	return 0;
}
Example #6
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}
Example #7
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}
Example #8
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);

	return 0;
}
Example #9
void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
    if (nvbo)
        ttm_bo_kunmap(&nvbo->kmap);
}
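The unmap above can be unconditional because ttm_bo_kunmap returns early for a kmap object that was never filled in. Its counterpart, nouveau_bo_map, takes the bo reservation around the kmap; a sketch after the nouveau code of the same vintage, where ttm_bo_reserve still took five arguments:

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	/* Map the whole bo; the result lands in nvbo->kmap for later use. */
	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}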
Example #10
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
Example #11
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	ttm_bo_kunmap(&bo->dma_buf_vmap);
}
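Both vunmap helpers rely on dma_buf_vmap having been filled in by the corresponding vmap hook. For comparison, the amdgpu map side of roughly the same era looks like this (a sketch; exact field names may vary by kernel version):

void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
			  &bo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return bo->dma_buf_vmap.virtual;
}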
Example #12
static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
                            uint32_t handle, uint32_t width, uint32_t height,
                            int32_t hot_x, int32_t hot_y)
{
    struct vbox_private *vbox = crtc->dev->dev_private;
    struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
    struct drm_gem_object *obj;
    struct vbox_bo *bo;
    int ret, rc;
    struct ttm_bo_kmap_obj uobj_map;
    u8 *src;
    u8 *dst = NULL;
    size_t cbData, cbMask;
    bool src_isiomem;

    if (!handle) {
        /* Hide cursor. */
        VBoxHGSMIUpdatePointerShape(&vbox->Ctx, 0, 0, 0, 0, 0, NULL, 0);
        return 0;
    }
    if (   width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT
        || width == 0 || hot_x > width || height == 0 || hot_y > height)
        return -EINVAL;

    obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
    if (obj)
    {
        bo = gem_to_vbox_bo(obj);
        ret = vbox_bo_reserve(bo, false);
        if (!ret)
        {
            /* The mask must be calculated based on the alpha channel, one bit
             * per ARGB word, and must be 32-bit padded. */
            cbMask  = ((width + 7) / 8 * height + 3) & ~3;
            cbData = width * height * 4 + cbMask;
            dst = kmalloc(cbData, GFP_KERNEL);
            if (dst)
            {
                ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
                if (!ret)
                {
                    src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
                    if (!src_isiomem)
                    {
                        uint32_t fFlags =   VBOX_MOUSE_POINTER_VISIBLE
                                          | VBOX_MOUSE_POINTER_SHAPE
                                          | VBOX_MOUSE_POINTER_ALPHA;
                        copy_cursor_image(src, dst, width, height, cbMask);
                        rc = VBoxHGSMIUpdatePointerShape(&vbox->Ctx, fFlags,
                                                         hot_x, hot_y, width,
                                                         height, dst, cbData);
                        ret = RTErrConvertToErrno(rc);
                    }
                    else
                        DRM_ERROR("src cursor bo should be in main memory\n");
                    ttm_bo_kunmap(&uobj_map);
                }
                kfree(dst);
            }
            else
                ret = -ENOMEM;
            vbox_bo_unreserve(bo);
        }
        drm_gem_object_unreference_unlocked(obj);
    }
    else
    {
        DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
        ret = -ENOENT;
    }
    return ret;
}
Example #13
void vbox_cursor_fini(struct drm_device *dev)
{
    struct vbox_private *vbox = dev->dev_private;
    ttm_bo_kunmap(&vbox->cache_kmap);
    drm_gem_object_unreference_unlocked(vbox->cursor_cache);
}
Example #14
int vsp_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct vsp_private *vsp_priv;
	bool is_iomem;
	int ret;
	unsigned int context_size;

	VSP_DEBUG("init vsp private data structure\n");
	vsp_priv = kzalloc(sizeof(*vsp_priv), GFP_KERNEL);
	if (vsp_priv == NULL)
		return -ENOMEM;

	/* get device --> drm_device --> drm_psb_private --> vsp_priv
	 * for psb_vsp_pmstate_show: vsp_pmpolicy
	 * if not pci_set_drvdata, can't get drm_device from device
	 */
	/* pci_set_drvdata(dev->pdev, dev); */
	if (device_create_file(&dev->pdev->dev,
			       &dev_attr_vsp_pmstate))
		DRM_ERROR("TOPAZ: could not create sysfs file\n");

	vsp_priv->sysfs_pmstate = sysfs_get_dirent(
		dev->pdev->dev.kobj.sd, NULL,
		"vsp_pmstate");

	vsp_priv->vsp_cmd_num = 0;
	vsp_priv->fw_loaded = VSP_FW_NONE;
	vsp_priv->current_sequence = 0;
	vsp_priv->vsp_state = VSP_STATE_DOWN;
	vsp_priv->dev = dev;
	vsp_priv->coded_buf = NULL;

	vsp_priv->context_num = 0;
	atomic_set(&dev_priv->vsp_mmu_invaldc, 0);

	dev_priv->vsp_private = vsp_priv;

	vsp_priv->cmd_queue_sz = VSP_CMD_QUEUE_SIZE *
		sizeof(struct vss_command_t);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->cmd_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, 0, NULL, &vsp_priv->cmd_queue_bo);
#else
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->cmd_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, NULL, &vsp_priv->cmd_queue_bo);
#endif
	if (ret != 0) {
		DRM_ERROR("VSP: failed to allocate VSP cmd queue\n");
		goto out_clean;
	}

	vsp_priv->ack_queue_sz = VSP_ACK_QUEUE_SIZE *
		sizeof(struct vss_response_t);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->ack_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, 0, NULL, &vsp_priv->ack_queue_bo);
#else
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->ack_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, NULL, &vsp_priv->ack_queue_bo);
#endif
	if (ret != 0) {
		DRM_ERROR("VSP: failed to allocate VSP cmd ack queue\n");
		goto out_clean;
	}

	/* Create setting buffer */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_buffer_object_create(bdev,
				       sizeof(struct vsp_settings_t),
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, 0, NULL, &vsp_priv->setting_bo);
#else
	ret = ttm_buffer_object_create(bdev,
				       sizeof(struct vsp_settings_t),
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, NULL, &vsp_priv->setting_bo);
#endif
	if (ret != 0) {
		DRM_ERROR("VSP: failed to allocate VSP setting buffer\n");
		goto out_clean;
	}

	/* map cmd queue */
	ret = ttm_bo_kmap(vsp_priv->cmd_queue_bo, 0,
			  vsp_priv->cmd_queue_bo->num_pages,
			  &vsp_priv->cmd_kmap);
	if (ret) {
		DRM_ERROR("ttm_bo_kmap failed: %d\n", ret);
		ttm_bo_unref(&vsp_priv->cmd_queue_bo);
		goto out_clean;
	}

	vsp_priv->cmd_queue = ttm_kmap_obj_virtual(&vsp_priv->cmd_kmap,
						   &is_iomem);


	/* map ack queue */
	ret = ttm_bo_kmap(vsp_priv->ack_queue_bo, 0,
			  vsp_priv->ack_queue_bo->num_pages,
			  &vsp_priv->ack_kmap);
	if (ret) {
		DRM_ERROR("ttm_bo_kmap failed: %d\n", ret);
		ttm_bo_unref(&vsp_priv->ack_queue_bo);
		goto out_clean;
	}

	vsp_priv->ack_queue = ttm_kmap_obj_virtual(&vsp_priv->ack_kmap,
						   &is_iomem);

	/* map vsp setting */
	ret = ttm_bo_kmap(vsp_priv->setting_bo, 0,
			  vsp_priv->setting_bo->num_pages,
			  &vsp_priv->setting_kmap);
	if (ret) {
		DRM_ERROR("ttm_bo_kmap setting_bo failed: %d\n", ret);
		ttm_bo_unref(&vsp_priv->setting_bo);
		goto out_clean;
	}
	vsp_priv->setting = ttm_kmap_obj_virtual(&vsp_priv->setting_kmap,
						 &is_iomem);

	vsp_priv->vp8_filp[0] = NULL;
	vsp_priv->vp8_filp[1] = NULL;
	vsp_priv->context_vp8_num = 0;

	vsp_priv->vp8_cmd_num = 0;

	spin_lock_init(&vsp_priv->lock);
	mutex_init(&vsp_priv->vsp_mutex);

	INIT_DELAYED_WORK(&vsp_priv->vsp_suspend_wq,
			&psb_powerdown_vsp);
	INIT_DELAYED_WORK(&vsp_priv->vsp_irq_wq,
			&vsp_irq_task);

	return 0;
out_clean:
	vsp_deinit(dev);
	return ret;
}
Example #15
static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
			       struct drm_fb_helper_surface_size *sizes)
{
	struct hibmc_fbdev *hi_fbdev =
		container_of(helper, struct hibmc_fbdev, helper);
	struct hibmc_drm_private *priv = helper->dev->dev_private;
	struct fb_info *info;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	int ret = 0;
	int ret1;
	size_t size;
	unsigned int bytes_per_pixel;
	struct hibmc_bo *bo = NULL;

	DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n",
			 sizes->surface_width, sizes->surface_height,
			 sizes->surface_bpp);

	bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * bytes_per_pixel;
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height);

	ret = hibmcfb_create_object(priv, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon backing object: %d\n", ret);
		return -ENOMEM;
	}

	bo = gem_to_hibmc_bo(gobj);

	ret = ttm_bo_reserve(&bo->bo, true, false, NULL);
	if (ret) {
		DRM_ERROR("failed to reserve ttm_bo: %d\n", ret);
		goto out_unref_gem;
	}

	ret = hibmc_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
	if (ret) {
		DRM_ERROR("failed to pin fbcon: %d\n", ret);
		goto out_unreserve_ttm_bo;
	}

	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
	if (ret) {
		DRM_ERROR("failed to kmap fbcon: %d\n", ret);
		goto out_unpin_bo;
	}
	ttm_bo_unreserve(&bo->bo);

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		DRM_ERROR("failed to allocate fbi: %d\n", ret);
		goto out_release_fbi;
	}

	info->par = hi_fbdev;

	hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
	if (IS_ERR(hi_fbdev->fb)) {
		ret = PTR_ERR(hi_fbdev->fb);
		hi_fbdev->fb = NULL;
		DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
		goto out_release_fbi;
	}

	priv->fbdev->size = size;
	hi_fbdev->helper.fb = &hi_fbdev->fb->fb;

	strcpy(info->fix.id, "hibmcdrmfb");

	info->fbops = &hibmc_drm_fb_ops;

	drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
			       hi_fbdev->fb->fb.format->depth);
	drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	info->screen_base = bo->kmap.virtual;
	info->screen_size = size;

	info->fix.smem_start = bo->bo.mem.bus.offset + bo->bo.mem.bus.base;
	info->fix.smem_len = size;
	return 0;

out_release_fbi:
	ret1 = ttm_bo_reserve(&bo->bo, true, false, NULL);
	if (ret1) {
		DRM_ERROR("failed to rsv ttm_bo when release fbi: %d\n", ret1);
		goto out_unref_gem;
	}
	ttm_bo_kunmap(&bo->kmap);
out_unpin_bo:
	hibmc_bo_unpin(bo);
out_unreserve_ttm_bo:
	ttm_bo_unreserve(&bo->bo);
out_unref_gem:
	drm_gem_object_put_unlocked(gobj);

	return ret;
}