Example #1
int devpts_new_index(struct pts_fs_info *fsi)
{
	int index;
	int ida_ret;

retry:
	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&allocated_ptys_lock);
	if (pty_count >= (pty_limit -
			  (fsi->mount_opts.reserve ? 0 : pty_reserve))) {
		mutex_unlock(&allocated_ptys_lock);
		return -ENOSPC;
	}

	ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
	if (ida_ret < 0) {
		mutex_unlock(&allocated_ptys_lock);
		if (ida_ret == -EAGAIN)
			goto retry;
		return -EIO;
	}

	if (index >= fsi->mount_opts.max) {
		ida_remove(&fsi->allocated_ptys, index);
		mutex_unlock(&allocated_ptys_lock);
		return -ENOSPC;
	}
	pty_count++;
	mutex_unlock(&allocated_ptys_lock);
	return index;
}
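
All of the allocators in this collection follow the same three-step dance from the old Linux IDA API: ida_pre_get() preallocates memory outside the critical section, ida_get_new() or ida_get_new_above() hands out the lowest free id under a lock, and ida_remove() rolls the id back if it falls outside the allowed range. A minimal sketch of that pattern, using a hypothetical my_ida, my_lock and my_alloc_id() rather than any of the real structures shown here:

static DEFINE_MUTEX(my_lock);
static DEFINE_IDA(my_ida);

/* Allocate the lowest free id below 'limit'; returns the id or -errno. */
static int my_alloc_id(int limit)
{
	int id, err;

retry:
	/* Preallocate outside the lock; GFP_KERNEL may sleep. */
	if (!ida_pre_get(&my_ida, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&my_lock);
	err = ida_get_new(&my_ida, &id);
	mutex_unlock(&my_lock);

	/* Another thread may have consumed the preallocation. */
	if (err == -EAGAIN)
		goto retry;
	if (err)
		return err;

	if (id >= limit) {
		/* Over the limit: roll the allocation back. */
		mutex_lock(&my_lock);
		ida_remove(&my_ida, id);
		mutex_unlock(&my_lock);
		return -ENOSPC;
	}
	return id;
}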
Example #2
int devpts_new_index(struct inode *ptmx_inode)
{
	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
	struct pts_fs_info *fsi = DEVPTS_SB(sb);
	int index;
	int ida_ret;

retry:
	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&allocated_ptys_lock);
	ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
	if (ida_ret < 0) {
		mutex_unlock(&allocated_ptys_lock);
		if (ida_ret == -EAGAIN)
			goto retry;
		return -EIO;
	}

	if (index >= pty_limit) {
		ida_remove(&fsi->allocated_ptys, index);
		mutex_unlock(&allocated_ptys_lock);
		return -EIO;
	}
	mutex_unlock(&allocated_ptys_lock);
	return index;
}
Example #3
int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_USER_CONTEXT) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
Example #4
void devpts_kill_index(struct pts_fs_info *fsi, int idx)
{
	mutex_lock(&allocated_ptys_lock);
	ida_remove(&fsi->allocated_ptys, idx);
	pty_count--;
	mutex_unlock(&allocated_ptys_lock);
}
Example #5
int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	int ret, id;
	unsigned int max;

	MPASS((int)start >= 0);
	MPASS((int)end >= 0);

	if (end == 0)
		max = 0x80000000;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}
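
The function above is the FreeBSD linuxkpi take on ida_simple_get(), which hides the pre_get/retry loop from the caller entirely. A round-trip using it might look like the following sketch (my_ida is a hypothetical, DEFINE_IDA-initialized ida; ida_simple_remove() is the matching release call):

	int id;

	/* Ask for an id in [0, 100); a negative return is an errno. */
	id = ida_simple_get(&my_ida, 0, 100, GFP_KERNEL);
	if (id < 0)
		return (id);	/* -ENOMEM or -ENOSPC */

	/* ... use the id ... */

	ida_simple_remove(&my_ida, id);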
Example #6
int devpts_new_index(struct inode *ptmx_inode)
{
	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
	struct pts_fs_info *fsi = DEVPTS_SB(sb);
	int index;
	int ida_ret;

retry:
	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&allocated_ptys_lock);
	if (pty_count >= pty_limit -
			(fsi->mount_opts.newinstance ? pty_reserve : 0)) {
		mutex_unlock(&allocated_ptys_lock);
		return -ENOSPC;
	}

	ida_ret = ida_get_new(&fsi->allocated_ptys, &index);
	if (ida_ret < 0) {
		mutex_unlock(&allocated_ptys_lock);
		if (ida_ret == -EAGAIN)
			goto retry;
		return -EIO;
	}

	if (index >= fsi->mount_opts.max) {
		ida_remove(&fsi->allocated_ptys, index);
		mutex_unlock(&allocated_ptys_lock);
		return -ENOSPC;
	}
	pty_count++;
	mutex_unlock(&allocated_ptys_lock);
	return index;
}
Example #7
int devpts_new_index(struct inode *ptmx_inode)
{
	int index;
	int ida_ret;

retry:
	if (!ida_pre_get(&allocated_ptys, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&allocated_ptys_lock);
	ida_ret = ida_get_new(&allocated_ptys, &index);
	if (ida_ret < 0) {
		mutex_unlock(&allocated_ptys_lock);
		if (ida_ret == -EAGAIN)
			goto retry;
		return -EIO;
	}

	if (index >= pty_limit) {
		ida_remove(&allocated_ptys, index);
		mutex_unlock(&allocated_ptys_lock);
		return -EIO;
	}
	mutex_unlock(&allocated_ptys_lock);
	return index;
}
Example #8
static int new_cop_pid(struct ida *ida, int min_id, int max_id,
		       spinlock_t *lock)
{
	int index;
	int err;

again:
	if (!ida_pre_get(ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(lock);
	err = ida_get_new_above(ida, min_id, &index);
	spin_unlock(lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(lock);
		ida_remove(ida, index);
		spin_unlock(lock);
		return -ENOMEM;
	}

	return index;
}
Example #9
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}
Example #10
/**
 * compat_ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by compat_ida_simple_get.
 */
void compat_ida_simple_remove(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	spin_lock_irqsave(&compat_simple_ida_lock, flags);
	ida_remove(ida, id);
	spin_unlock_irqrestore(&compat_simple_ida_lock, flags);
}
Example #11
void devpts_kill_index(struct inode *ptmx_inode, int idx)
{
	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
	struct pts_fs_info *fsi = DEVPTS_SB(sb);

	mutex_lock(&allocated_ptys_lock);
	ida_remove(&fsi->allocated_ptys, idx);
	mutex_unlock(&allocated_ptys_lock);
}
Example #12
/**
 * iommu_group_alloc - Allocate a new group
 * @name: Optional name to associate with group, visible in sysfs
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	mutex_lock(&iommu_group_mutex);

again:
	if (unlikely(!ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
		kfree(group);
		mutex_unlock(&iommu_group_mutex);
		return ERR_PTR(-ENOMEM);
	}

	ret = ida_get_new(&iommu_group_ida, &group->id);
	if (ret == -EAGAIN)
		goto again;
	if (ret) {
		/* e.g. -ENOSPC: group->id was never set, so bail out */
		kfree(group);
		mutex_unlock(&iommu_group_mutex);
		return ERR_PTR(ret);
	}

	mutex_unlock(&iommu_group_mutex);

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		mutex_lock(&iommu_group_mutex);
		ida_remove(&iommu_group_ida, group->id);
		mutex_unlock(&iommu_group_mutex);
		kfree(group);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj); /* triggers .release & free */
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */
	kobject_put(&group->kobj);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
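
Because iommu_group_alloc() reports failure through ERR_PTR() rather than NULL, callers check the result with IS_ERR(). A minimal sketch of the calling convention described in the comment above:

	struct iommu_group *group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* ... add devices to the group ... */

	iommu_group_put(group);	/* drop the initial reference */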
Example #13
static void destroy_contexts(mm_context_t *ctx)
{
	int index, context_id;

	spin_lock(&mmu_context_lock);
	for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
		context_id = ctx->extended_id[index];
		if (context_id)
			ida_remove(&mmu_context_ida, context_id);
	}
	spin_unlock(&mmu_context_lock);
}
Example #14
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
				  struct ttm_buffer_object *bo,
				  struct ttm_placement *placement,
				  struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;
	int ret = 0;
	int id;

	mem->mm_node = NULL;

	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += bo->num_pages;
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
			goto out_err_locked;
	}

	do {
		spin_unlock(&gman->lock);
		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
			ret = -ENOMEM;
			goto out_err;
		}
		spin_lock(&gman->lock);

		ret = ida_get_new(&gman->gmr_ida, &id);
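		/*
		 * TTM convention: leaving mem->mm_node NULL and returning 0
		 * signals "no space" to the caller, which may then evict.
		 */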
		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
			ida_remove(&gman->gmr_ida, id);
			ret = 0;
			goto out_err_locked;
		}
	} while (ret == -EAGAIN);

	if (likely(ret == 0)) {
		mem->mm_node = gman;
		mem->start = id;
		mem->num_pages = bo->num_pages;
	} else
		goto out_err_locked;

	spin_unlock(&gman->lock);
	return 0;

out_err:
	spin_lock(&gman->lock);
out_err_locked:
	gman->used_gmr_pages -= bo->num_pages;
	spin_unlock(&gman->lock);
	return ret;
}
Example #15
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
				   struct ttm_mem_reg *mem)
{
	struct vmwgfx_gmrid_man *gman =
		(struct vmwgfx_gmrid_man *)man->priv;

	if (mem->mm_node) {
		spin_lock(&gman->lock);
		ida_remove(&gman->gmr_ida, mem->start);
		gman->used_gmr_pages -= mem->num_pages;
		spin_unlock(&gman->lock);
		mem->mm_node = NULL;
	}
}
Example #16
static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	mutex_lock(&iommu_group_mutex);
	ida_remove(&iommu_group_ida, group->id);
	mutex_unlock(&iommu_group_mutex);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}
Example #17
/**
 * Stop using a coprocessor.
 * @acop: mask of coprocessor to be stopped.
 * @mm: The mm the coprocessor is associated with.
 */
void drop_cop(unsigned long acop, struct mm_struct *mm)
{
	int free_pid = COP_PID_NONE;

	if (!cpu_has_feature(CPU_FTR_ICSWX))
		return;

	if (WARN_ON_ONCE(!mm))
		return;

	/* We need to make sure mm_users doesn't change */
	down_read(&mm->mmap_sem);
	spin_lock(mm->context.cop_lockp);

	mm->context.acop &= ~acop;

	if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) {
		free_pid = mm->context.cop_pid;
		mm->context.cop_pid = COP_PID_NONE;
	}

	sync_cop(mm);

	/*
	 * If this is a threaded process then there might be other threads
	 * running. We need to send an IPI to force them to pick up any
	 * change in PID and ACOP.
	 */
	if (atomic_read(&mm->mm_users) > 1)
		smp_call_function(sync_cop, mm, 1);

	if (free_pid != COP_PID_NONE) {
		spin_lock(&mmu_context_acop_lock);
		ida_remove(&cop_ida, free_pid);
		spin_unlock(&mmu_context_acop_lock);
	}

	spin_unlock(mm->context.cop_lockp);
	up_read(&mm->mmap_sem);
}
Example #18
/**
 * compat_ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000, or 0 for no upper bound)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use compat_ida_simple_remove() to get rid of an id.
 */
int compat_ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&compat_simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&compat_simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}
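
One detail worth calling out in this compat version: passing end == 0 removes the upper bound (max becomes 0x80000000), so, assuming a hypothetical DEFINE_IDA(my_ida), both of these calls are valid:

	/* Any free id at all. */
	int any_id = compat_ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);

	/* Lowest free id in [10, 20). */
	int ranged = compat_ida_simple_get(&my_ida, 10, 20, GFP_KERNEL);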
Example #19
void __destroy_context(int context_id)
{
	spin_lock(&mmu_context_lock);
	ida_remove(&mmu_context_ida, context_id);
	spin_unlock(&mmu_context_lock);
}
Example #20
void devpts_kill_index(struct inode *ptmx_inode, int idx)
{
	mutex_lock(&allocated_ptys_lock);
	ida_remove(&allocated_ptys, idx);
	mutex_unlock(&allocated_ptys_lock);
}
Example #21
static void vas_release_window_id(struct ida *ida, int winid)
{
	spin_lock(&vas_ida_lock);
	ida_remove(ida, winid);
	spin_unlock(&vas_ida_lock);
}
Example #22
File: dir.c Project: cilynx/dd-wrt
static void sysfs_free_ino(ino_t ino)
{
	spin_lock(&sysfs_ino_lock);
	ida_remove(&sysfs_ino_ida, ino);
	spin_unlock(&sysfs_ino_lock);
}
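
For anyone porting these examples to a current kernel: the ida_pre_get()/ida_get_new()/ida_remove() trio shown throughout was later replaced by ida_alloc(), ida_alloc_range() and ida_free(), which handle preallocation and the -EAGAIN retry internally. The whole retry loop from the earlier examples then collapses to a sketch like this (my_ida and limit are hypothetical, as above):

	/* Note: [0, limit - 1] is an inclusive range here. */
	int id = ida_alloc_range(&my_ida, 0, limit - 1, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */

	/* ... use the id ... */

	ida_free(&my_ida, id);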