Example #1
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
	if (ret) {
		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
		return ret;
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, minor, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_cleanup(minor);
	return ret;
}
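
Here idr_replace() is the publish step of a reserve/publish/unpublish lifecycle: an earlier allocation step (not shown) stored NULL under minor->index, so lookups fail until the device is fully registered, and unregistration swaps NULL back in (see Example #7 below). A minimal sketch of the idiom, assuming a kernel-module context; my_idr, my_lock and struct my_obj are hypothetical, only the idr_*() and locking calls are the real kernel API:

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

struct my_obj {
	int id;
};

/* Reserve an index but store NULL: lookups fail until we publish. */
static int my_obj_reserve(struct my_obj *obj)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, NULL, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	if (id < 0)
		return id;
	obj->id = id;
	return 0;
}

/* Atomically swap NULL for the live pointer; lookups now succeed. */
static void my_obj_publish(struct my_obj *obj)
{
	spin_lock(&my_lock);
	idr_replace(&my_idr, obj, obj->id);
	spin_unlock(&my_lock);
}

/* Put NULL back so lookups fail again while teardown proceeds. */
static void my_obj_unpublish(struct my_obj *obj)
{
	spin_lock(&my_lock);
	idr_replace(&my_idr, NULL, obj->id);
	spin_unlock(&my_lock);
}
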
Example #2
void drm_mode_object_register(struct drm_device *dev,
			      struct drm_mode_object *obj)
{
	mutex_lock(&dev->mode_config.idr_mutex);
	idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
	mutex_unlock(&dev->mode_config.idr_mutex);
}
Example #3
/*
 * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
 *
 * Signal and consume a fence earlier attached to a vGEM handle using
 * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
 *
 * All fences must be signaled within 10s of attachment, otherwise they
 * automatically expire (and vgem_fence_signal_ioctl returns -ETIMEDOUT).
 *
 * Signaling a fence indicates to all consumers of the dma-buf that the
 * client has completed the operation associated with the fence, and that the
 * buffer is then ready for consumption.
 *
 * If the fence does not exist (or has already been signaled by the client),
 * vgem_fence_signal_ioctl returns -ENOENT.
 */
int vgem_fence_signal_ioctl(struct drm_device *dev,
			    void *data,
			    struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;
	struct drm_vgem_fence_signal *arg = data;
	struct dma_fence *fence;
	int ret = 0;

	if (arg->flags)
		return -EINVAL;

	mutex_lock(&vfile->fence_mutex);
	fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
	mutex_unlock(&vfile->fence_mutex);
	if (!fence)
		return -ENOENT;
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (dma_fence_is_signaled(fence))
		ret = -ETIMEDOUT;

	dma_fence_signal(fence);
	dma_fence_put(fence);
	return ret;
}
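
The trick above is that idr_replace() returns the previous entry: swapping in NULL under fence_mutex both invalidates the slot and hands ownership of the old fence to exactly one caller, so concurrent signal ioctls cannot double-put it. A minimal sketch of that consume idiom in isolation; the helper name is hypothetical and struct vgem_file is borrowed from the example:

#include <linux/dma-fence.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/mutex.h>

/* Detach and return the fence stored under @id; NULL if the slot was
 * already consumed or @id was never allocated (ERR_PTR(-ENOENT)). */
static struct dma_fence *vgem_fence_consume(struct vgem_file *vfile, u32 id)
{
	struct dma_fence *fence;

	mutex_lock(&vfile->fence_mutex);
	fence = idr_replace(&vfile->fence_idr, NULL, id);
	mutex_unlock(&vfile->fence_mutex);

	return IS_ERR_OR_NULL(fence) ? NULL : fence;
}
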
Example #4
/**
 * Get a secondary minor number.
 *
 * \param dev device data structure
 * \param minor structure to hold the assigned minor
 * \return negative number on failure.
 *
 * Search for an empty entry, initialize it to the given parameters, and
 * create the proc init entry via proc_init(). This routine assigns
 * minor numbers to secondary heads of multi-headed cards.
 */
static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
{
	struct drm_minor *new_minor;
	int ret;
	int minor_id;

	DRM_DEBUG("\n");

	minor_id = drm_minor_get_id(dev, type);
	if (minor_id < 0)
		return minor_id;

	new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
	if (!new_minor) {
		ret = -ENOMEM;
		goto err_idr;
	}

	new_minor->type = type;
	new_minor->device = MKDEV(DRM_MAJOR, minor_id);
	new_minor->dev = dev;
	new_minor->index = minor_id;

	idr_replace(&drm_minors_idr, new_minor, minor_id);

	if (type == DRM_MINOR_LEGACY) {
		ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
		if (ret) {
			DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
			goto err_mem;
		}
	} else
		new_minor->dev_root = NULL;

	ret = drm_sysfs_device_add(new_minor);
	if (ret) {
		printk(KERN_ERR
		       "DRM: Error sysfs_device_add.\n");
		goto err_g2;
	}
	*minor = new_minor;

	DRM_DEBUG("new minor assigned %d\n", minor_id);
	return 0;

err_g2:
	if (new_minor->type == DRM_MINOR_LEGACY)
		drm_proc_cleanup(new_minor, drm_proc_root);
err_mem:
	kfree(new_minor);
err_idr:
	idr_remove(&drm_minors_idr, minor_id);
	*minor = NULL;
	return ret;
}
Example #5
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf,
			 struct ttm_mem_reg *new_mem)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	ret = qxl_release_reserve_list(release, true);
	if (ret)
		return ret;

	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	if (new_mem) {
		int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
		struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);

		/* TODO - need to hold one of the locks to read tbo.offset */
		cmd->u.surface_create.data = slot->high_bits;

		cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
	} else
		cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this surface bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
	qxl_release_fence_buffer_objects(release);

	surf->hw_surf_alloc = true;
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
Example #6
/*
 * SETCLIENTID just did a callback update with the callback ident in
 * "drop," but server trunking discovery claims "drop" and "keep" are
 * actually the same server.  Swap the callback IDs so that "keep"
 * will continue to use the callback ident the server now knows about,
 * and so that "keep"'s original callback ident is destroyed when
 * "drop" is freed.
 */
static void nfs4_swap_callback_idents(struct nfs_client *keep,
				      struct nfs_client *drop)
{
	struct nfs_net *nn = net_generic(keep->cl_net, nfs_net_id);
	unsigned int save = keep->cl_cb_ident;

	if (keep->cl_cb_ident == drop->cl_cb_ident)
		return;

	dprintk("%s: keeping callback ident %u and dropping ident %u\n",
		__func__, keep->cl_cb_ident, drop->cl_cb_ident);

	spin_lock(&nn->nfs_client_lock);

	idr_replace(&nn->cb_ident_idr, keep, drop->cl_cb_ident);
	keep->cl_cb_ident = drop->cl_cb_ident;

	idr_replace(&nn->cb_ident_idr, drop, save);
	drop->cl_cb_ident = save;

	spin_unlock(&nn->nfs_client_lock);
}
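
Distilled, the swap is just two idr_replace() calls under the lock that protects the idr; both ids already have live entries, so neither call can fail. A hedged generic helper (hypothetical name, caller holds the idr's lock):

#include <linux/idr.h>

/* Exchange the entries stored under @id_a and @id_b. */
static void my_idr_swap(struct idr *idr, void *a, unsigned long id_a,
			void *b, unsigned long id_b)
{
	idr_replace(idr, a, id_b);	/* @a is now found under @id_b */
	idr_replace(idr, b, id_a);	/* @b is now found under @id_a */
}
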
Example #7
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	spin_lock_irqsave(&drm_minor_lock, flags);
	idr_replace(&drm_minors_idr, NULL, minor->index);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_cleanup(minor);
}
Example #8
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_fence_releaseable(qdev, release);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_unreserve(qdev, release);

	return 0;
}
Example #9
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
Example #10
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

 bad4:
	bioset_free(md->bs);
 bad_no_bioset:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_cleanup_queue(md->queue);
 bad1_free_minor:
	free_minor(minor);
 bad1:
	module_put(THIS_MODULE);
 bad0:
	kfree(md);
	return NULL;
}
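
Examples #9 and #10 are the two halves of a sentinel idiom: the minor was claimed earlier with the MINOR_ALLOCED sentinel (hence the BUG_ON), alloc_dev() publishes the real pointer over it, and dm_put() swaps the sentinel back in before freeing. A minimal sketch with hypothetical names; MY_CLAIMED plays the role of MINOR_ALLOCED:

#include <linux/bug.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

#define MY_CLAIMED ((void *)-1)	/* sentinel, like dm's MINOR_ALLOCED */

static DEFINE_IDR(dev_idr);
static DEFINE_SPINLOCK(dev_lock);

/* Claim @id with the sentinel so nobody else can allocate it. */
static int my_claim(int id)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_lock);
	r = idr_alloc(&dev_idr, MY_CLAIMED, id, id + 1, GFP_NOWAIT);
	spin_unlock(&dev_lock);
	idr_preload_end();
	return r < 0 ? r : 0;
}

/* Publish the real object; the slot must still hold the sentinel. */
static void my_publish(void *obj, int id)
{
	void *old;

	spin_lock(&dev_lock);
	old = idr_replace(&dev_idr, obj, id);
	spin_unlock(&dev_lock);
	BUG_ON(old != MY_CLAIMED);
}
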
Example #11
File: user.c Project: teg/bus1
/**
 * bus1_user_acquire_by_uid() - get a user object for a uid in the given domain
 * @domain:		domain of the user
 * @uid:		uid of the user
 *
 * Find and return the user object for the uid if it exists, otherwise create
 * it first. The caller is responsible for releasing their reference (and all
 * derived references) before the parent domain is deactivated!
 *
 * Return: A user object for the given uid, ERR_PTR on failure.
 */
struct bus1_user *
bus1_user_acquire_by_uid(struct bus1_domain *domain, kuid_t uid)
{
	struct bus1_user *user, *old_user, *new_user;
	int r = 0;

	WARN_ON(!uid_valid(uid));

	lockdep_assert_held(&domain->active);

	/* try to get the user without taking a lock */
	user = bus1_user_get(domain->info, uid);
	if (user)
		return user;

	/* didn't exist, allocate a new one */
	new_user = bus1_user_new(domain->info, uid);
	if (IS_ERR(new_user))
		return new_user;

	/*
	 * Allocate the smallest possible internal id for this user; used in
	 * arrays for accounting user quota in receiver pools.
	 */
	r = ida_simple_get(&domain->info->user_ida, 0, 0, GFP_KERNEL);
	if (r < 0)
		goto exit;

	new_user->id = r;

	mutex_lock(&domain->info->lock);
	/*
	 * Someone else might have raced us outside the lock, so check if the
	 * user still does not exist.
	 */
	old_user = idr_find(&domain->info->user_idr, __kuid_val(uid));
	if (likely(!old_user)) {
		/* user does not exist, link the newly created one */
		r = idr_alloc(&domain->info->user_idr, new_user,
			      __kuid_val(uid), __kuid_val(uid) + 1, GFP_KERNEL);
		if (r < 0)
			goto exit;
	} else {
		/* another allocation raced us, try re-using that one */
		if (likely(kref_get_unless_zero(&old_user->ref))) {
			user = old_user;
			goto exit;
		} else {
			/* the other one is getting destroyed, replace it */
			idr_replace(&domain->info->user_idr, new_user,
				    __kuid_val(uid));
			old_user->uid = INVALID_UID; /* mark old as removed */
		}
	}

	user = new_user;
	new_user = NULL;

exit:
	mutex_unlock(&domain->info->lock);
	bus1_user_release(new_user);
	if (r < 0)
		return ERR_PTR(r);
	return user;
}
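
The interesting branch is the last one: a racing object is still linked in the idr but its refcount has already dropped to zero, so kref_get_unless_zero() fails and idr_replace() installs the new object in place rather than waiting for the dying one to unlink itself. A compressed sketch of the lookup-or-create dance, with hypothetical names and the bus1-specific accounting dropped:

#include <linux/err.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_user {
	struct kref ref;
};

static struct my_user *my_user_get_or_create(struct idr *idr,
					     struct mutex *lock, int key)
{
	struct my_user *new, *old;
	int r;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);
	kref_init(&new->ref);

	mutex_lock(lock);
	old = idr_find(idr, key);
	if (!old) {
		/* nobody raced us: link the new object */
		r = idr_alloc(idr, new, key, key + 1, GFP_KERNEL);
		if (r < 0) {
			mutex_unlock(lock);
			kfree(new);
			return ERR_PTR(r);
		}
	} else if (kref_get_unless_zero(&old->ref)) {
		/* a live racer beat us: reuse it */
		mutex_unlock(lock);
		kfree(new);
		return old;
	} else {
		/*
		 * Racer is mid-teardown: replace it in place. The real
		 * code must also tell the dying object not to remove
		 * @key on its way out (bus1 marks old->uid invalid).
		 */
		idr_replace(idr, new, key);
	}
	mutex_unlock(lock);
	return new;
}
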
Example #12
int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_update_draw *update = data;
	unsigned long irqflags;
	struct drm_clip_rect *rects;
	struct drm_drawable_info *info;
	int err;

	info = idr_find(&dev->drw_idr, update->handle);
	if (!info) {
		info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
		if (!info)
			return -ENOMEM;
		if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
			DRM_ERROR("No such drawable %d\n", update->handle);
			drm_free(info, sizeof(*info), DRM_MEM_BUFS);
			return -EINVAL;
		}
	}

	switch (update->type) {
	case DRM_DRAWABLE_CLIPRECTS:
		if (update->num != info->num_rects) {
			rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
					  DRM_MEM_BUFS);
		} else
			rects = info->rects;

		if (update->num && !rects) {
			DRM_ERROR("Failed to allocate cliprect memory\n");
			err = -ENOMEM;
			goto error;
		}

		if (update->num && DRM_COPY_FROM_USER(rects,
						      (struct drm_clip_rect __user *)
						      (unsigned long)update->data,
						      update->num *
						      sizeof(*rects))) {
			DRM_ERROR("Failed to copy cliprects from userspace\n");
			err = -EFAULT;
			goto error;
		}

		spin_lock_irqsave(&dev->drw_lock, irqflags);

		if (rects != info->rects) {
			drm_free(info->rects, info->num_rects *
				 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
		}

		info->rects = rects;
		info->num_rects = update->num;

		spin_unlock_irqrestore(&dev->drw_lock, irqflags);

		DRM_DEBUG("Updated %d cliprects for drawable %d\n",
			  info->num_rects, update->handle);
		break;
	default:
		DRM_ERROR("Invalid update type %d\n", update->type);
		return -EINVAL;
	}

	return 0;

error:
	if (rects != info->rects)
		drm_free(rects, update->num * sizeof(struct drm_clip_rect),
			 DRM_MEM_BUFS);

	return err;
}
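
Unlike most uses above, this one checks idr_replace()'s return value: when the id has no allocated entry the call returns an ERR_PTR (-ENOENT in current kernels), so IS_ERR() distinguishes "replaced" from "no such handle". The same check in isolation, as a hypothetical helper; the caller holds whatever lock protects the idr:

#include <linux/err.h>
#include <linux/idr.h>

/* Install @new under @id, failing if @id was never allocated. */
static int my_install(struct idr *idr, void *new, unsigned long id)
{
	void *old = idr_replace(idr, new, id);

	if (IS_ERR(old))
		return PTR_ERR(old);	/* typically -ENOENT */
	return 0;
}
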
Example #13
/*
 * create_hdev - create habanalabs device instance
 *
 * @dev: will hold the pointer to the new habanalabs device structure
 * @pdev: pointer to the pci device
 * @asic_type: in case of simulator device, which device is it
 * @minor: in case of simulator device, the minor of the device
 *
 * Allocate memory for habanalabs device and initialize basic fields
 * Identify the ASIC type
 * Allocate ID (minor) for the device (only for real devices)
 */
int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
		enum hl_asic_type asic_type, int minor)
{
	struct hl_device *hdev;
	int rc;

	*dev = NULL;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->major = hl_major;
	hdev->reset_on_lockup = reset_on_lockup;

	/* Parameters for bring-up - set them to defaults */
	hdev->mmu_enable = 1;
	hdev->cpu_enable = 1;
	hdev->reset_pcilink = 0;
	hdev->cpu_queues_enable = 1;
	hdev->fw_loading = 1;
	hdev->pldm = 0;
	hdev->heartbeat = 1;

	/* If CPU is disabled, no point in loading FW */
	if (!hdev->cpu_enable)
		hdev->fw_loading = 0;

	/* If we don't load FW, no need to initialize CPU queues */
	if (!hdev->fw_loading)
		hdev->cpu_queues_enable = 0;

	/* If CPU queues not enabled, no way to do heartbeat */
	if (!hdev->cpu_queues_enable)
		hdev->heartbeat = 0;

	if (timeout_locked)
		hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
	else
		hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

	hdev->disabled = true;
	hdev->pdev = pdev; /* can be NULL in case of simulator device */

	if (pdev) {
		hdev->asic_type = get_asic_type(pdev->device);
		if (hdev->asic_type == ASIC_INVALID) {
			dev_err(&pdev->dev, "Unsupported ASIC\n");
			rc = -ENODEV;
			goto free_hdev;
		}
	} else {
		hdev->asic_type = asic_type;
	}

	/* Set default DMA mask to 32 bits */
	hdev->dma_mask = 32;

	mutex_lock(&hl_devs_idr_lock);

	if (minor == -1) {
		rc = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
				GFP_KERNEL);
	} else {
		void *old_idr = idr_replace(&hl_devs_idr, hdev, minor);

		if (IS_ERR_VALUE(old_idr)) {
			rc = PTR_ERR(old_idr);
			pr_err("Error %d when trying to replace minor %d\n",
				rc, minor);
			mutex_unlock(&hl_devs_idr_lock);
			goto free_hdev;
		}
		rc = minor;
	}

	mutex_unlock(&hl_devs_idr_lock);

	if (rc < 0) {
		if (rc == -ENOSPC) {
			pr_err("too many devices in the system\n");
			rc = -EBUSY;
		}
		goto free_hdev;
	}

	hdev->id = rc;

	*dev = hdev;

	return 0;

free_hdev:
	kfree(hdev);
	return rc;
}
Example #14
int drm_get_platform_dev(struct platform_device *platdev,
			 struct drm_driver *driver)
{
	struct drm_device *dev;
	int ret;

	DRM_DEBUG("\n");

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->platformdev = platdev;
	dev->dev = &platdev->dev;

	mutex_lock(&drm_global_mutex);

	ret = drm_fill_in_dev(dev, NULL, driver);

	if (ret) {
		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
		goto err_g1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev_set_drvdata(&platdev->dev, dev);
		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
		if (ret)
			goto err_g1;
	}

	ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
	if (ret)
		goto err_g2;

	if (dev->driver->load) {
		ret = dev->driver->load(dev, 0);
		if (ret)
			goto err_g3;
	}

	/* setup the grouping for the legacy output */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
				&dev->primary->mode_group);
		if (ret)
			goto err_g3;
	}

	list_add_tail(&dev->driver_item, &driver->device_list);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		idr_replace(&drm_minors_idr, dev->control, dev->control->index);
	idr_replace(&drm_minors_idr, dev->primary, dev->primary->index);

	mutex_unlock(&drm_global_mutex);

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 driver->name, driver->major, driver->minor, driver->patchlevel,
		 driver->date, dev->primary->index);

	return 0;

err_g3:
	drm_put_minor(&dev->primary);
err_g2:
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_put_minor(&dev->control);
err_g1:
	kfree(dev);
	mutex_unlock(&drm_global_mutex);
	return ret;
}