Example 1
int devpts_new_index(struct pts_fs_info *fsi)
{
	int index;
	int ida_ret;

retry:
	/*
	 * Legacy IDA step 1: preallocate outside the lock.  With
	 * GFP_KERNEL this may sleep; a zero return means no memory.
	 */
	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&allocated_ptys_lock);
	if (pty_count >= (pty_limit -
			  (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
		mutex_unlock(&allocated_ptys_lock);
		return -ENOSPC;
	}

	/*
	 * Legacy IDA step 2: take an id under the lock.  -EAGAIN means
	 * the preallocated node was already consumed (e.g. by a
	 * concurrent allocation), so go back and preallocate again.
	 */
	ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
	if (ida_ret < 0) {
		mutex_unlock(&allocated_ptys_lock);
		if (ida_ret == -EAGAIN)
			goto retry;
		return -EIO;
	}

	if (index >= fsi->mount_opts.max) {
		/* Step 3: roll back an id beyond the mount's max. */
		ida_remove(&fsi->allocated_ptys, index);
		mutex_unlock(&allocated_ptys_lock);
		return -ENOSPC;
	}
	pty_count++;
	mutex_unlock(&allocated_ptys_lock);
	return index;
}
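
This is the canonical form of the legacy three-step IDA protocol: ida_pre_get() preallocates memory outside the lock, ida_get_new() hands out an id under the lock, and ida_remove() rolls back an id that turned out to be over the mount's max. On kernels that carry the reworked allocator (roughly v4.20 onward; an assumption about the target tree), the whole preallocate/retry dance collapses into one internally locked call. A minimal sketch of Example 1 on that API, keeping the original pty_count bookkeeping:

int devpts_new_index(struct pts_fs_info *fsi)
{
	int index;

	mutex_lock(&allocated_ptys_lock);
	if (pty_count >= (pty_limit -
			  (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
		mutex_unlock(&allocated_ptys_lock);
		return -ENOSPC;
	}

	/*
	 * ida_alloc_max() preallocates, retries, and range-checks
	 * internally; its upper bound is inclusive, hence max - 1.
	 * GFP_KERNEL may sleep, which is legal under a mutex.
	 */
	index = ida_alloc_max(&fsi->allocated_ptys,
			      fsi->mount_opts.max - 1, GFP_KERNEL);
	if (index < 0) {
		mutex_unlock(&allocated_ptys_lock);
		return index;	/* -ENOMEM or -ENOSPC */
	}

	pty_count++;
	mutex_unlock(&allocated_ptys_lock);
	return index;
}

One behavioral difference worth noting: ida_alloc_max() reports -ENOSPC itself when the range is exhausted, so the legacy code's blanket mapping of unexpected IDA errors to -EIO disappears.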
Example 2
int devpts_new_index(struct inode *ptmx_inode)
{
	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
	struct pts_fs_info *fsi = DEVPTS_SB(sb);
	int index;
	int ida_ret;

retry:
	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&allocated_ptys_lock);
	if (pty_count >= pty_limit -
			(fsi->mount_opts.newinstance ? pty_reserve : 0)) {
		mutex_unlock(&allocated_ptys_lock);
		return -ENOSPC;
	}

	ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
	if (ida_ret < 0) {
		mutex_unlock(&allocated_ptys_lock);
		if (ida_ret == -EAGAIN)
			goto retry;
		return -EIO;
	}

	if (index >= fsi->mount_opts.max) {
		ida_remove(&fsi->allocated_ptys, index);
		mutex_unlock(&allocated_ptys_lock);
		return -ENOSPC;
	}
	pty_count++;
	mutex_unlock(&allocated_ptys_lock);
	return index;
}
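
Functionally the same as Example 1; the differences are that fsi is derived from the ptmx inode rather than passed in, and the reserve is keyed on the newinstance mount option: here newinstance mounts are held back from the last pty_reserve ptys, whereas in Example 1 it is mounts without the reserve flag that are held back.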
Example 3
int devpts_new_index(struct inode *ptmx_inode)
{
	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
	struct pts_fs_info *fsi = DEVPTS_SB(sb);
	int index;
	int ida_ret;

retry:
	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&allocated_ptys_lock);
	ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
	if (ida_ret < 0) {
		mutex_unlock(&allocated_ptys_lock);
		if (ida_ret == -EAGAIN)
			goto retry;
		return -EIO;
	}

	if (index >= pty_limit) {
		ida_remove(&fsi->allocated_ptys, index);
		mutex_unlock(&allocated_ptys_lock);
		return -EIO;
	}
	mutex_unlock(&allocated_ptys_lock);
	return index;
}
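
A leaner variant: there is no pty_count accounting and no per-mount max; an index at or above the global pty_limit is rejected, and, unlike the later versions, the rejection surfaces as -EIO rather than -ENOSPC.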
Example 4
static int vas_assign_window_id(struct ida *ida)
{
	int rc, winid;

	do {
		rc = ida_pre_get(ida, GFP_KERNEL);
		if (!rc)
			return -EAGAIN;

		spin_lock(&vas_ida_lock);
		rc = ida_get_new(ida, &winid);
		spin_unlock(&vas_ida_lock);
	} while (rc == -EAGAIN);

	if (rc)
		return rc;

	if (winid > VAS_WINDOWS_PER_CHIP) {
		pr_err("Too many (%d) open windows\n", winid);
		vas_release_window_id(ida, winid);
		return -EAGAIN;
	}

	return winid;
}
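
The same protocol shaped by spinlock rules: ida_pre_get() with GFP_KERNEL may sleep, so it must run outside vas_ida_lock, and only the non-blocking ida_get_new() is done under the lock, which is why the preallocation sits inside the do/while rather than before it. Note also that the bound check is winid > VAS_WINDOWS_PER_CHIP, so the id VAS_WINDOWS_PER_CHIP itself is accepted. A hedged sketch on the modern API, preserving that inclusive bound (ida_alloc_max() takes an inclusive maximum and is internally synchronized, so the external spinlock is no longer needed for the allocation itself):

static int vas_assign_window_id(struct ida *ida)
{
	/* Inclusive max mirrors the original's "winid > ..." rejection. */
	int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP, GFP_KERNEL);

	if (winid == -ENOSPC) {
		pr_err("Too many open windows\n");
		return -EAGAIN;	/* keep the original's overflow errno */
	}

	return winid;
}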
Example 5
int devpts_new_index(struct inode *ptmx_inode)
{
	int index;
	int ida_ret;

retry:
	if (!ida_pre_get(&allocated_ptys, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&allocated_ptys_lock);
	ida_ret = ida_get_new(&allocated_ptys, &index);
	if (ida_ret < 0) {
		mutex_unlock(&allocated_ptys_lock);
		if (ida_ret == -EAGAIN)
			goto retry;
		return -EIO;
	}

	if (index >= pty_limit) {
		ida_remove(&allocated_ptys, index);
		mutex_unlock(&allocated_ptys_lock);
		return -EIO;
	}
	mutex_unlock(&allocated_ptys_lock);
	return index;
}
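
The oldest devpts variant shown here: a single system-wide allocated_ptys IDA instead of one per mount, with the same -EIO mapping as Example 3. Everything per-instance in Examples 1-2 (the fsi lookup, the reserve logic, mount_opts.max) is absent.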
Example 6
/**
 * iommu_group_alloc - Allocate a new group
 * @name: Optional name to associate with group, visible in sysfs
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	mutex_lock(&iommu_group_mutex);

again:
	if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
		kfree(group);
		mutex_unlock(&iommu_group_mutex);
		return ERR_PTR(-ENOMEM);
	}

	if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
		goto again;

	mutex_unlock(&iommu_group_mutex);

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		mutex_lock(&iommu_group_mutex);
		ida_remove(&iommu_group_ida, group->id);
		mutex_unlock(&iommu_group_mutex);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
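
Here the pattern is embedded in a constructor, which adds two obligations: the -ENOMEM path must free the half-built group, and a later failure (kobject_init_and_add()) must roll the id back with ida_remove() under the same mutex. Note that the retry loop checks only for -EAGAIN; any other ida_get_new() failure would go unnoticed by this control flow. On the modern API the mutex and the again: label around the allocation both disappear, since ida_alloc() locks and retries internally. A sketch of just that step, not the driver's actual conversion:

	group->id = ida_alloc(&iommu_group_ida, GFP_KERNEL);
	if (group->id < 0) {
		kfree(group);
		return ERR_PTR(group->id);
	}

	/* ... and on the kobject error path: */
	ida_free(&iommu_group_ida, group->id);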
Example 7
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
				  struct ttm_buffer_object *bo,
				  struct ttm_placement *placement,
				  struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;
	int ret = 0;
	int id;

	mem->mm_node = NULL;

	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += bo->num_pages;
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
			goto out_err_locked;
	}

	do {
		spin_unlock(&gman->lock);
		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
			ret = -ENOMEM;
			goto out_err;
		}
		spin_lock(&gman->lock);

		ret = ida_get_new(&gman->gmr_ida, &id);
		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
			ida_remove(&gman->gmr_ida, id);
			ret = 0;
			goto out_err_locked;
		}
	} while (ret == -EAGAIN);

	if (likely(ret == 0)) {
		mem->mm_node = gman;
		mem->start = id;
		mem->num_pages = bo->num_pages;
	} else
		goto out_err_locked;

	spin_unlock(&gman->lock);
	return 0;

out_err:
	spin_lock(&gman->lock);
out_err_locked:
	gman->used_gmr_pages -= bo->num_pages;
	spin_unlock(&gman->lock);
	return ret;
}
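
The most involved instance: gman->lock is a spinlock, so the code must drop it around the sleeping ida_pre_get() and retake it for ida_get_new(), and the over-max rollback reuses the quota unwind via out_err_locked. Note the TTM convention in play: returning 0 with mem->mm_node left NULL tells the caller "no space, try eviction", which is why the over-max path deliberately resets ret to 0. On the modern API the whole loop would reduce to a single ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL) call made with the lock dropped — a sketch-level observation under the same v4.20+ assumption as above, not the driver's actual conversion.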