Example No. 1
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
Example No. 2
static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}
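The dtor above takes a bare void * because it is a device-pager callback, not a TTM API. A hedged sketch of how it is plausibly wired up in the FreeBSD/DragonFly TTM port: it would be the cdev_pg_dtor hook of the pager ops table passed to cdev_pager_allocate() (see ttm_bo_mmap_single in Example No. 13, which transfers its bo reference to the pager handle). The ctor/fault member names here are assumptions from the BSD pager interface, not shown in this collection.

/* Assumed wiring; ttm_bo_vm_ctor/ttm_bo_vm_fault are hypothetical here. */
static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor,	/* drops the ref taken at mmap time */
	.cdev_pg_fault = ttm_bo_vm_fault,
};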
Example No. 3
static void bochs_bo_unref(struct bochs_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	*bo = NULL;
}
Example No. 4
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}
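Examples No. 3, No. 4 and No. 6 (and No. 10, which adds locking) are instances of one wrapper pattern. Note that ttm_bo_unref() of this era unconditionally NULLs the pointer it is given, so radeon's "if (tbo == NULL)" guard always fires, and bochs's unconditional "*bo = NULL" behaves identically. A minimal sketch, assuming a hypothetical wrapper type struct my_bo:

struct my_bo {
	struct ttm_buffer_object bo;	/* embedded TTM object */
	/* ... driver-private fields ... */
};

static void my_bo_unref(struct my_bo **pbo)
{
	struct ttm_buffer_object *tbo;

	if (*pbo == NULL)
		return;

	tbo = &(*pbo)->bo;
	ttm_bo_unref(&tbo);	/* drops a reference and sets tbo to NULL */
	*pbo = NULL;		/* so the caller's pointer can be cleared unconditionally */
}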
Example No. 5
static void psb_unreference_buffers(struct psb_context *context)
{
	struct ttm_validate_buffer *entry, *next;
	struct psb_validate_buffer *vbuf;
	struct list_head *list = &context->validate_list;

	list_for_each_entry_safe(entry, next, list, head) {
		vbuf =
			container_of(entry, struct psb_validate_buffer, base);
		list_del(&entry->head);
		ttm_bo_unref(&entry->bo);
	}
}
Example No. 6
void mgag200_bo_unref(struct mgag200_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->bo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;

}
Example No. 7
void qxl_gem_object_free(struct drm_gem_object *gobj)
{
	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
	struct qxl_device *qdev;
	struct ttm_buffer_object *tbo;

	qdev = (struct qxl_device *)gobj->dev->dev_private;

	qxl_surface_evict(qdev, qobj, false);

	tbo = &qobj->tbo;
	ttm_bo_unref(&tbo);
}
Example No. 8
/**
 * Free the backing store.
 */
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
				 struct vmw_screen_object_unit *sou)
{
	struct ttm_buffer_object *bo;

	if (unlikely(sou->buffer == NULL))
		return;

	bo = &sou->buffer->base;
	ttm_bo_unref(&bo);
	sou->buffer = NULL;
	sou->buffer_size = 0;
}
Example No. 9
int vsp_deinit(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct vsp_private *vsp_priv = dev_priv->vsp_private;

	VSP_DEBUG("free VSP firmware/context buffer\n");

	if (vsp_priv->cmd_queue) {
		ttm_bo_kunmap(&vsp_priv->cmd_kmap);
		vsp_priv->cmd_queue = NULL;
	}

	if (vsp_priv->ack_queue) {
		ttm_bo_kunmap(&vsp_priv->ack_kmap);
		vsp_priv->ack_queue = NULL;
	}
	if (vsp_priv->setting) {
		ttm_bo_kunmap(&vsp_priv->setting_kmap);
		vsp_priv->setting = NULL;
	}

	if (vsp_priv->ack_queue_bo)
		ttm_bo_unref(&vsp_priv->ack_queue_bo);
	if (vsp_priv->cmd_queue_bo)
		ttm_bo_unref(&vsp_priv->cmd_queue_bo);
	if (vsp_priv->setting_bo)
		ttm_bo_unref(&vsp_priv->setting_bo);

	device_remove_file(&dev->pdev->dev, &dev_attr_vsp_pmstate);
	sysfs_put(vsp_priv->sysfs_pmstate);

	VSP_DEBUG("free VSP private structure\n");
	kfree(dev_priv->vsp_private);

	return 0;
}
Example No. 10
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	mutex_lock(&rdev->vram_mutex);
	ttm_bo_unref(&tbo);
	mutex_unlock(&rdev->vram_mutex);
	if (tbo == NULL)
		*bo = NULL;
}
Example No. 11
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);
}
Example No. 12
static void ttm_bo_user_release(struct ttm_base_object **p_base)
{
	struct ttm_bo_user_object *user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	user_bo = container_of(base, struct ttm_bo_user_object, base);
	bo = &user_bo->bo;
	ttm_bo_unref(&bo);
}
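This release hook recovers the wrapper with container_of() and drops the bo reference owned by the base object. A hedged sketch of the counterpart setup, condensed from Examples No. 17/18 below; my_export_bo is a hypothetical helper name:

static int my_export_bo(struct ttm_object_file *tfile,
			struct ttm_bo_user_object *user_bo,
			uint32_t flags)
{
	struct ttm_buffer_object *tmp;
	int ret;

	/* This extra reference is the one ttm_bo_user_release() drops. */
	tmp = ttm_bo_reference(&user_bo->bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		ttm_bo_unref(&tmp);	/* init failed: drop it ourselves */
	return ret;
}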
Example No. 13
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	*obj_res = NULL;

	lockmgr(&bdev->vm_lock, LK_EXCLUSIVE);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		kref_get(&bo->kref);
	lockmgr(&bdev->vm_lock, LK_RELEASE);

	if (unlikely(bo == NULL)) {
		kprintf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);

	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
Example No. 14
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}
Example No. 15
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;

#ifndef __NetBSD__		/* XXX drm prime */
	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);
#endif

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
#ifdef __NetBSD__
	/* XXX Whattakludge!  */
	gem->gemo_shm_uao = NULL;
#else
	gem->filp = NULL;
#endif
	ttm_bo_unref(&bo);
}
Example No. 16
int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_setstatus_arg *arg = data;
	struct ttm_pl_setstatus_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_bo_device *bdev;
	struct ttm_placement placement = default_placement;
	uint32_t flags[2];
	int ret;

	bo = ttm_buffer_object_lookup(tfile, req->handle);
	if (unlikely(bo == NULL)) {
		printk(KERN_ERR
		       "Could not find buffer object for setstatus.\n");
		return -EINVAL;
	}

	bdev = bo->bdev;

	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err1;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_wait_cpu(bo, false);
	if (unlikely(ret != 0))
		goto out_err2;
#endif

	flags[0] = req->set_placement;
	flags[1] = req->clr_placement;

	placement.num_placement = 2;
	placement.placement = flags;

	/* spin_lock(&bo->lock); */ /* Already get reserve lock */

	ret = psb_ttm_bo_check_placement(bo, &placement);
	if (unlikely(ret != 0))
		goto out_err2;

	placement.num_placement = 1;
	flags[0] = (req->set_placement | bo->mem.placement) & ~req->clr_placement;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_validate(bo, &placement, true, false, false);
#else
	ret = ttm_bo_validate(bo, &placement, true, false);
#endif
	if (unlikely(ret != 0))
		goto out_err2;

	ttm_pl_fill_rep(bo, rep);
out_err2:
	/* spin_unlock(&bo->lock); */
	ttm_bo_unreserve(bo);
out_err1:
	ttm_read_unlock(lock);
out_err0:
	ttm_bo_unref(&bo);
	return ret;
}
Example No. 17
int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
			   struct ttm_bo_device *bdev,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_ub_arg *arg = data;
	struct ttm_pl_create_ub_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
	size_t acc_size =
		ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
		sizeof(struct ttm_buffer_object));
#endif
	if (req->user_address & ~PAGE_MASK) {
		printk(KERN_ERR "User pointer buffer need page alignment\n");
		return -EFAULT;
	}

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}
	bo = &user_bo->bo;

	placement.num_placement = 1;
	placement.placement = &flags;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))

/*  For kernel 3.0, use the desired type. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_user

#else
/*  TTM_HACK_WORKAROUND_ttm_bo_type_user -- Hack for porting,
    as ttm_bo_type_user is no longer implemented.
    This will not result in working code.
    FIXME - to be removed. */

#warning warning: ttm_bo_type_user no longer supported

/*  For kernel 3.3+, use the wrong type, which will compile but not work. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_kernel

#endif

#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0))
		/* Handle frame buffer allocated in user space, Convert
		  user space virtual address into pages list */
		unsigned int page_nr = 0;
		struct vm_area_struct *vma = NULL;
		struct sg_table *sg = NULL;
		unsigned long num_pages = 0;
		struct page **pages = 0;

		num_pages = (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
		if (unlikely(pages == NULL)) {
			printk(KERN_ERR "kzalloc pages failed\n");
			return -ENOMEM;
		}

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, req->user_address);
		if (unlikely(vma == NULL)) {
			up_read(&current->mm->mmap_sem);
			kfree(pages);
			printk(KERN_ERR "find_vma failed\n");
			return -EFAULT;
		}
		unsigned long before_flags = vma->vm_flags;
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			vma->vm_flags = vma->vm_flags & ((~VM_IO) & (~VM_PFNMAP));
		page_nr = get_user_pages(current, current->mm,
					 req->user_address,
					 (int)(num_pages), 1, 0, pages,
					 NULL);
		vma->vm_flags = before_flags;
		up_read(&current->mm->mmap_sem);

		/* can be written by caller, not forced */
		if (unlikely(page_nr < num_pages)) {
			kfree(pages);
			pages = 0;
			printk(KERN_ERR "get_user_pages err.\n");
			return -ENOMEM;
		}
		sg = drm_prime_pages_to_sg(pages, num_pages);
		if (unlikely(sg == NULL)) {
			kfree(pages);
			printk(KERN_ERR "drm_prime_pages_to_sg err.\n");
			return -ENOMEM;
		}
		kfree(pages);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  TTM_HACK_WORKAROUND_ttm_bo_type_user,
			  &placement,
			  req->page_alignment,
			  req->user_address,
			  true,
			  NULL,
			  acc_size,
			  NULL,
			  &ttm_bo_user_destroy);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  ttm_bo_type_sg,
			  &placement,
			  req->page_alignment,
			  req->user_address,
			  true,
			  NULL,
			  acc_size,
			  sg,
			  &ttm_ub_bo_user_destroy);
#else
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  ttm_bo_type_sg,
			  &placement,
			  req->page_alignment,
			  true,
			  NULL,
			  acc_size,
			  sg,
			  &ttm_ub_bo_user_destroy);
#endif

	/*
	 * Note that the ttm_buffer_object_init function
	 * would've called the destroy function on failure!!
	 */
	ttm_read_unlock(lock);
	if (unlikely(ret != 0))
		goto out;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);
out:
	return 0;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
}
Example No. 18
int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
			struct ttm_bo_device *bdev,
			struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_arg *arg = data;
	struct ttm_pl_create_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
	size_t acc_size =
		ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
		sizeof(struct ttm_buffer_object));
#endif
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	bo = &user_bo->bo;
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}

	placement.num_placement = 1;
	placement.placement = &flags;

	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |=  TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_device, &placement,
			  req->page_alignment, 0, true,
			  NULL, acc_size, NULL, &ttm_bo_user_destroy);
#else
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_device, &placement,
			  req->page_alignment, true,
			  NULL, acc_size, NULL, &ttm_bo_user_destroy);
#endif
	ttm_read_unlock(lock);
	/*
	 * Note that the ttm_buffer_object_init function
	 * would've called the destroy function on failure!!
	 */

	if (unlikely(ret != 0))
		goto out;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);
out:
	return 0;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
}
Example No. 19
int vsp_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct vsp_private *vsp_priv;
	bool is_iomem;
	int ret;
	unsigned int context_size;

	VSP_DEBUG("init vsp private data structure\n");
	vsp_priv = kmalloc(sizeof(struct vsp_private), GFP_KERNEL);
	if (vsp_priv == NULL)
		return -1;

	memset(vsp_priv, 0, sizeof(*vsp_priv));

	/* get device --> drm_device --> drm_psb_private --> vsp_priv
	 * for psb_vsp_pmstate_show: vsp_pmpolicy
	 * if not pci_set_drvdata, can't get drm_device from device
	 */
	/* pci_set_drvdata(dev->pdev, dev); */
	if (device_create_file(&dev->pdev->dev,
			       &dev_attr_vsp_pmstate))
		DRM_ERROR("TOPAZ: could not create sysfs file\n");

	vsp_priv->sysfs_pmstate = sysfs_get_dirent(
		dev->pdev->dev.kobj.sd, NULL,
		"vsp_pmstate");

	vsp_priv->vsp_cmd_num = 0;
	vsp_priv->fw_loaded = VSP_FW_NONE;
	vsp_priv->current_sequence = 0;
	vsp_priv->vsp_state = VSP_STATE_DOWN;
	vsp_priv->dev = dev;
	vsp_priv->coded_buf = NULL;

	vsp_priv->context_num = 0;
	atomic_set(&dev_priv->vsp_mmu_invaldc, 0);

	dev_priv->vsp_private = vsp_priv;

	vsp_priv->cmd_queue_sz = VSP_CMD_QUEUE_SIZE *
		sizeof(struct vss_command_t);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->cmd_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, 0, NULL, &vsp_priv->cmd_queue_bo);
#else
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->cmd_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, NULL, &vsp_priv->cmd_queue_bo);
#endif
	if (ret != 0) {
		DRM_ERROR("VSP: failed to allocate VSP cmd queue\n");
		goto out_clean;
	}

	vsp_priv->ack_queue_sz = VSP_ACK_QUEUE_SIZE *
		sizeof(struct vss_response_t);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->ack_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, 0, NULL, &vsp_priv->ack_queue_bo);
#else
	ret = ttm_buffer_object_create(bdev,
				       vsp_priv->ack_queue_sz,
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, NULL, &vsp_priv->ack_queue_bo);
#endif
	if (ret != 0) {
		DRM_ERROR("VSP: failed to allocate VSP cmd ack queue\n");
		goto out_clean;
	}

	/* Create setting buffer */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret =  ttm_buffer_object_create(bdev,
				       sizeof(struct vsp_settings_t),
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, 0, NULL, &vsp_priv->setting_bo);
#else
	ret =  ttm_buffer_object_create(bdev,
				       sizeof(struct vsp_settings_t),
				       ttm_bo_type_kernel,
				       DRM_PSB_FLAG_MEM_MMU |
				       TTM_PL_FLAG_NO_EVICT,
				       0, 0, NULL, &vsp_priv->setting_bo);
#endif
	if (ret != 0) {
		DRM_ERROR("VSP: failed to allocate VSP setting buffer\n");
		goto out_clean;
	}

	/* map cmd queue */
	ret = ttm_bo_kmap(vsp_priv->cmd_queue_bo, 0,
			  vsp_priv->cmd_queue_bo->num_pages,
			  &vsp_priv->cmd_kmap);
	if (ret) {
		DRM_ERROR("drm_bo_kmap failed: %d\n", ret);
		ttm_bo_unref(&vsp_priv->cmd_queue_bo);
		ttm_bo_kunmap(&vsp_priv->cmd_kmap);
		goto out_clean;
	}

	vsp_priv->cmd_queue = ttm_kmap_obj_virtual(&vsp_priv->cmd_kmap,
						   &is_iomem);


	/* map ack queue */
	ret = ttm_bo_kmap(vsp_priv->ack_queue_bo, 0,
			  vsp_priv->ack_queue_bo->num_pages,
			  &vsp_priv->ack_kmap);
	if (ret) {
		DRM_ERROR("drm_bo_kmap failed: %d\n", ret);
		ttm_bo_unref(&vsp_priv->ack_queue_bo);
		ttm_bo_kunmap(&vsp_priv->ack_kmap);
		goto out_clean;
	}

	vsp_priv->ack_queue = ttm_kmap_obj_virtual(&vsp_priv->ack_kmap,
						   &is_iomem);

	/* map vsp setting */
	ret = ttm_bo_kmap(vsp_priv->setting_bo, 0,
			  vsp_priv->setting_bo->num_pages,
			  &vsp_priv->setting_kmap);
	if (ret) {
		DRM_ERROR("drm_bo_kmap setting_bo failed: %d\n", ret);
		ttm_bo_unref(&vsp_priv->setting_bo);
		ttm_bo_kunmap(&vsp_priv->setting_kmap);
		goto out_clean;
	}
	vsp_priv->setting = ttm_kmap_obj_virtual(&vsp_priv->setting_kmap,
						 &is_iomem);

	vsp_priv->vp8_filp[0] = NULL;
	vsp_priv->vp8_filp[1] = NULL;
	vsp_priv->context_vp8_num = 0;

	vsp_priv->vp8_cmd_num = 0;

	spin_lock_init(&vsp_priv->lock);
	mutex_init(&vsp_priv->vsp_mutex);

	INIT_DELAYED_WORK(&vsp_priv->vsp_suspend_wq,
			&psb_powerdown_vsp);
	INIT_DELAYED_WORK(&vsp_priv->vsp_irq_wq,
			&vsp_irq_task);

	return 0;
out_clean:
	vsp_deinit(dev);
	return -1;
}
Example No. 20
int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		unsigned long buffer_start,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;

	size += buffer_start & ~PAGE_MASK;
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->buffer_start = buffer_start & PAGE_MASK;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
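A hedged usage sketch against the signature above (the pre-3.8 TTM variant with buffer_start): allocate the object, let ttm_bo_init() give the caller the initial reference, and rely on the destroy callback firing when ttm_bo_unref() drops the last reference, as in the examples throughout this collection. All "my_" names are hypothetical, and acc_size is picked loosely here; real drivers compute it (cf. ttm_bo_acc_size in Examples No. 17/18).

static void my_bo_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);			/* matches the kzalloc() below */
}

static int my_bo_create(struct ttm_bo_device *bdev,
			struct ttm_placement *placement,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;

	/* On failure ttm_bo_init() has already called my_bo_destroy(). */
	ret = ttm_bo_init(bdev, bo, PAGE_SIZE, ttm_bo_type_kernel,
			  placement, 0 /* page_alignment */,
			  0 /* buffer_start */, false /* interruptible */,
			  NULL /* persistent_swap_storage */,
			  sizeof(*bo) /* acc_size, loosely */,
			  my_bo_destroy);
	if (ret != 0)
		return ret;

	*p_bo = bo;	/* released later via ttm_bo_unref(p_bo) */
	return 0;
}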