Ejemplo n.º 1
0
/*
 * drm_sman_takedown - tear down a simple memory manager instance.
 * @sman: manager previously set up by drm_sman_init().
 *
 * Removes both lookup hash tables, then releases the per-manager array.
 * drm_free() takes the allocation size explicitly, so unlike kfree() it
 * is guarded against a NULL pointer here — presumably to tolerate a
 * manager whose init failed before the array was allocated (TODO confirm
 * against drm_sman_init's failure paths).
 */
void drm_sman_takedown(struct drm_sman * sman)
{
	drm_ht_remove(&sman->user_hash_tab);
	drm_ht_remove(&sman->owner_hash_tab);
	if (sman->mm)
		drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
		    DRM_MEM_MM);
}
Ejemplo n.º 2
0
/**
 * drm_gem_init - initialize per-device GEM state
 * @dev: DRM device to set up
 *
 * Initializes the object-name lock/idr and allocates the mmap-offset
 * manager (hash table + address-space manager).
 *
 * Fix vs. original: @dev->mm_private is assigned only once @mm is fully
 * initialized. The original published the pointer before the hash table
 * and offset manager were created, so a failure in either step freed
 * @mm while dev->mm_private still pointed at it — a dangling pointer
 * for any later teardown path.
 *
 * Returns: 0 on success, -ENOMEM on allocation or initialization failure.
 */
int
drm_gem_init(struct drm_device *dev)
{
    struct drm_gem_mm *mm;

    spin_lock_init(&dev->object_name_lock);
    idr_init(&dev->object_name_idr);

    mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
    if (!mm) {
        DRM_ERROR("out of memory\n");
        return -ENOMEM;
    }

    /* Order 12 is the hash-table size order used for the offset lookup. */
    if (drm_ht_create(&mm->offset_hash, 12)) {
        kfree(mm);
        return -ENOMEM;
    }

    if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
                    DRM_FILE_PAGE_OFFSET_SIZE)) {
        drm_ht_remove(&mm->offset_hash);
        kfree(mm);
        return -ENOMEM;
    }

    /* Publish only after every step above has succeeded. */
    dev->mm_private = mm;

    return 0;
}
/*
 * drm_sman_init - set up a simple memory manager instance.
 * @sman:         caller-allocated manager to initialize
 * @num_managers: number of sub-manager slots to allocate
 * @user_order:   size order of the user hash table
 * @owner_order:  size order of the owner hash table
 *
 * Returns 0 on success or a negative errno from the failing step; on
 * failure everything allocated here is released again before returning.
 */
int
drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
	      unsigned int user_order, unsigned int owner_order)
{
	int ret;

	sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
	if (!sman->mm)
		return -ENOMEM;

	sman->num_managers = num_managers;
	INIT_LIST_HEAD(&sman->owner_items);

	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
	if (ret)
		goto err_free_mm;

	ret = drm_ht_create(&sman->user_hash_tab, user_order);
	if (ret)
		goto err_remove_owner;

	return 0;

err_remove_owner:
	drm_ht_remove(&sman->owner_hash_tab);
err_free_mm:
	kfree(sman->mm);
	return ret;
}
Ejemplo n.º 4
0
/*
 * drm_sman_init - set up a simple memory manager instance (legacy API).
 * @sman:         caller-allocated manager to initialize
 * @num_managers: number of sub-manager slots to allocate
 * @user_order:   size order of the user hash table
 * @owner_order:  size order of the owner hash table
 *
 * Allocates the manager array and creates both lookup hash tables.
 * On any failure the steps already completed are undone and a negative
 * errno is returned; on success returns 0.
 *
 * Fix vs. original: dropped the (drm_sman_mm_t *) cast on drm_calloc() —
 * the allocator returns void *, which converts implicitly in C, and the
 * cast can only hide a missing prototype.
 */
int
drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
	      unsigned int user_order, unsigned int owner_order)
{
	int ret = 0;

	sman->mm = drm_calloc(num_managers, sizeof(*sman->mm), DRM_MEM_MM);
	if (!sman->mm) {
		ret = -ENOMEM;
		goto out;
	}
	sman->num_managers = num_managers;
	INIT_LIST_HEAD(&sman->owner_items);
	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
	if (ret)
		goto out1;
	ret = drm_ht_create(&sman->user_hash_tab, user_order);
	if (!ret)
		goto out;	/* success: skip the unwind below */

	/* user_hash_tab creation failed — undo owner_hash_tab. */
	drm_ht_remove(&sman->owner_hash_tab);
out1:
	/* drm_free() needs the original allocation size. */
	drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
out:
	return ret;
}
Ejemplo n.º 5
0
/*
 * drm_gem_destroy - release the per-device GEM mmap-offset state.
 * @dev: DRM device whose GEM state is being torn down
 *
 * Tears down the offset manager and its lookup hash, frees the
 * container, and clears dev->mm_private so the stale pointer cannot
 * be reused.
 */
void
drm_gem_destroy(struct drm_device *dev)
{
    struct drm_gem_mm *gem_mm = dev->mm_private;

    drm_mm_takedown(&gem_mm->offset_manager);
    drm_ht_remove(&gem_mm->offset_hash);
    kfree(gem_mm);
    dev->mm_private = NULL;
}
Ejemplo n.º 6
0
/*
 * drm_gem_destroy - release per-device GEM state (BSD variant).
 * @dev: DRM device whose GEM state is being torn down
 *
 * Clears dev->mm_private first, then dismantles the offset hash, the
 * unr index allocator, and the container itself, and finally the
 * object-name table. free() here is the DRM/BSD two-argument variant
 * taking a memory-type tag, not libc free().
 */
void
drm_gem_destroy(struct drm_device *dev)
{
    struct drm_gem_mm *mm = dev->mm_private;

    dev->mm_private = NULL;
    drm_ht_remove(&mm->offset_hash);
    delete_unrhdr(mm->idxunr);
    free(mm, DRM_MEM_DRIVER);
    drm_gem_names_fini(&dev->object_names);
}
Ejemplo n.º 7
0
/*
 * vmw_driver_unload - tear down the vmwgfx device.
 * @dev: DRM device being unloaded
 *
 * Releases resources roughly in reverse order of driver load. The
 * sequence is order-sensitive: do not reorder calls without checking
 * the corresponding load path. Always returns 0.
 */
static int vmw_driver_unload(struct drm_device *dev)
{
    struct vmw_private *dev_priv = vmw_priv(dev);
    enum vmw_res_type i;

    /* Stop receiving power-management events before dismantling state. */
    unregister_pm_notifier(&dev_priv->pm_nb);

    /* The resource hash table is only created conditionally at load. */
    if (dev_priv->ctx.res_ht_initialized)
        drm_ht_remove(&dev_priv->ctx.res_ht);
    vfree(dev_priv->ctx.cmd_bounce);
    if (dev_priv->enable_fb) {
        /* Shut down the emulated fbdev before disabling SVGA mode. */
        vmw_fb_off(dev_priv);
        vmw_fb_close(dev_priv);
        vmw_fifo_resource_dec(dev_priv);
        vmw_svga_disable(dev_priv);
    }

    vmw_kms_close(dev_priv);
    vmw_overlay_close(dev_priv);

    /* Drain the TTM memory pools; return values intentionally ignored. */
    if (dev_priv->has_gmr)
        (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
    (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

    vmw_release_device_early(dev_priv);
    if (dev_priv->has_mob)
        (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
    (void) ttm_bo_device_release(&dev_priv->bdev);
    vmw_release_device_late(dev_priv);
    vmw_fence_manager_takedown(dev_priv->fman);
    /* IRQs were only installed when the device supports masking. */
    if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
        drm_irq_uninstall(dev_priv->dev);
    /* Stealth mode claimed only BAR 2; otherwise all regions were taken. */
    if (dev_priv->stealth)
        pci_release_region(dev->pdev, 2);
    else
        pci_release_regions(dev->pdev);

    ttm_object_device_release(&dev_priv->tdev);
    iounmap(dev_priv->mmio_virt);
    arch_phys_wc_del(dev_priv->mmio_mtrr);
    if (dev_priv->ctx.staged_bindings)
        vmw_binding_state_free(dev_priv->ctx.staged_bindings);
    vmw_ttm_global_release(dev_priv);

    /* Destroy every per-resource-type idr before freeing the container. */
    for (i = vmw_res_context; i < vmw_res_max; ++i)
        idr_destroy(&dev_priv->res_idr[i]);

    kfree(dev_priv);

    return 0;
}
Ejemplo n.º 8
0
/*
 * drm_gem_destroy - release per-device GEM state (NetBSD-aware variant).
 * @dev: DRM device whose GEM state is being torn down
 *
 * Tears down the mmap-offset manager and hash, frees the container and
 * clears dev->mm_private, then destroys the object-name idr. The lock
 * destruction is NetBSD-only because Linux spinlocks need no explicit
 * teardown.
 */
void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_mm_takedown(&mm->offset_manager);
	drm_ht_remove(&mm->offset_hash);
	kfree(mm);
	dev->mm_private = NULL;

	idr_destroy(&dev->object_name_idr);
#ifdef __NetBSD__
	spin_lock_destroy(&dev->object_name_lock);
#endif
}
Ejemplo n.º 9
0
/*
 * drm_dev_release - final release of a DRM device (kref callback).
 * @ref: embedded reference count; the last drm_dev_unref() lands here.
 *
 * Undoes drm_dev_init() in reverse order: GEM state (if the driver uses
 * GEM), legacy context bitmap, map hash, anonymous inode, the three
 * minors, and finally the master mutex and the device allocation itself.
 */
static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);

	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
	drm_fs_inode_free(dev->anon_inode);

	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);

	mutex_destroy(&dev->master_mutex);
	kfree(dev->unique);
	kfree(dev);
}
Ejemplo n.º 10
0
/*
 * drm_sman_takedown - tear down a simple memory manager instance.
 * @sman: manager previously set up by drm_sman_init().
 *
 * Removes both lookup hash tables and frees the per-manager array.
 * kfree(NULL) is a no-op, so no guard is needed for a partially
 * initialized manager.
 */
void drm_sman_takedown(struct drm_sman * sman)
{
	drm_ht_remove(&sman->user_hash_tab);
	drm_ht_remove(&sman->owner_hash_tab);
	kfree(sman->mm);
}
Ejemplo n.º 11
0
/**
 * drm_dev_init - Initialise new DRM device
 * @dev: DRM device
 * @driver: DRM driver
 * @parent: Parent device object
 *
 * Initialize a new DRM device. No device registration is done.
 * Call drm_dev_register() to advertise the device to user space and register it
 * with other core subsystems. This should be done last in the device
 * initialization sequence to make sure userspace can't access an inconsistent
 * state.
 *
 * The initial ref-count of the object is 1. Use drm_dev_ref() and
 * drm_dev_unref() to take and drop further ref-counts.
 *
 * Note that for purely virtual devices @parent can be NULL.
 *
 * Drivers that do not want to allocate their own device struct
 * embedding struct &drm_device can call drm_dev_alloc() instead.
 *
 * RETURNS:
 * 0 on success, or error code on failure.
 */
int drm_dev_init(struct drm_device *dev,
		 struct drm_driver *driver,
		 struct device *parent)
{
	int ret;

	kref_init(&dev->ref);
	dev->dev = parent;
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->ctxlist);
	INIT_LIST_HEAD(&dev->vmalist);
	INIT_LIST_HEAD(&dev->maplist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->buf_lock);
	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->ctxlist_mutex);
	mutex_init(&dev->master_mutex);

	dev->anon_inode = drm_fs_inode_new();
	if (IS_ERR(dev->anon_inode)) {
		ret = PTR_ERR(dev->anon_inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err_free;
	}

	/* Control and render minors exist only for the matching features;
	 * the primary minor is unconditional. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
		if (ret)
			goto err_minors;
	}

	ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	/* Order 12 is the size order for the legacy map hash table. */
	ret = drm_ht_create(&dev->map_hash, 12);
	if (ret)
		goto err_minors;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err_ctxbitmap;
		}
	}

	/* Use the parent device name as DRM device unique identifier, but fall
	 * back to the driver name for virtual devices like vgem. */
	ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
	if (ret)
		goto err_setunique;

	return 0;

	/* Error unwind: each label undoes the steps completed before the
	 * failing one, in reverse order of initialization. */
err_setunique:
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_destroy(dev);
err_ctxbitmap:
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_ht_remove(&dev->map_hash);
err_minors:
	/* drm_minor_free() tolerates minors that were never allocated. */
	drm_minor_free(dev, DRM_MINOR_PRIMARY);
	drm_minor_free(dev, DRM_MINOR_RENDER);
	drm_minor_free(dev, DRM_MINOR_CONTROL);
	drm_fs_inode_free(dev->anon_inode);
err_free:
	mutex_destroy(&dev->master_mutex);
	return ret;
}