/*
 * Allocate a context id in the range [min_id, max_id] using the legacy
 * ida_pre_get()/ida_get_new_above() retry pattern; an id above max_id is
 * released again and reported as -ENOMEM.
 */
static int alloc_context_id(int min_id, int max_id)
{
	int index, err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, min_id, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}

int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = ida_get_new_above(&mmu_context_ida, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_USER_CONTEXT) {
		spin_lock(&mmu_context_lock);
		ida_remove(&mmu_context_ida, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	return index;
}

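/*
 * Minimal sketch, not taken from the original sources: __init_new_context()
 * repeats the retry loop of alloc_context_id() above, so if both helpers
 * live in the same file it could be reduced to a thin wrapper that
 * allocates user context ids from 1 up to MAX_USER_CONTEXT.
 */
int __init_new_context(void)
{
	return alloc_context_id(1, MAX_USER_CONTEXT);
}
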
int
ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
    gfp_t flags)
{
	int ret, id;
	unsigned int max;

	MPASS((int)start >= 0);
	MPASS((int)end >= 0);

	if (end == 0)
		max = 0x80000000;
	else {
		MPASS(end > start);
		max = end - 1;
	}
again:
	if (!ida_pre_get(ida, flags))
		return (-ENOMEM);

	if ((ret = ida_get_new_above(ida, start, &id)) == 0) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	if (__predict_false(ret == -EAGAIN))
		goto again;

	return (ret);
}

static int new_cop_pid(struct ida *ida, int min_id, int max_id,
		       spinlock_t *lock)
{
	int index;
	int err;

again:
	if (!ida_pre_get(ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(lock);
	err = ida_get_new_above(ida, min_id, &index);
	spin_unlock(lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > max_id) {
		spin_lock(lock);
		ida_remove(ida, index);
		spin_unlock(lock);
		return -ENOMEM;
	}

	return index;
}

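/*
 * Hypothetical caller sketch; cop_ida, cop_lock, get_cop_pid() and the
 * COP_PID_MIN/COP_PID_MAX values are illustrative, not taken from the
 * original sources.  Because new_cop_pid() takes the ida, the id range
 * and the protecting lock as parameters, the same retry loop can serve
 * any id space the caller owns.
 */
#define COP_PID_MIN	2	/* illustrative range */
#define COP_PID_MAX	0xffff

static DEFINE_IDA(cop_ida);
static DEFINE_SPINLOCK(cop_lock);

static int get_cop_pid(void)
{
	return new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX, &cop_lock);
}
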
/*
 * Reserve a specific context id.  ida_get_new_above() returns the lowest
 * free id at or above @id, so if @id is already in use the result differs
 * and the WARN() below fires.
 */
void hash__reserve_context_id(int id)
{
	int rc, result = 0;

	do {
		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
			break;

		spin_lock(&mmu_context_lock);
		rc = ida_get_new_above(&mmu_context_ida, id, &result);
		spin_unlock(&mmu_context_lock);
	} while (rc == -EAGAIN);

	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
}

/*
 * Allocate a sysfs inode number (starting at 2).  Unlike the helpers
 * above, the allocation is attempted first and ida_pre_get() only refills
 * the ida after -EAGAIN; *pino is meaningful only when 0 is returned.
 */
static int sysfs_alloc_ino(ino_t *pino)
{
	int ino, rc;

retry:
	spin_lock(&sysfs_ino_lock);
	rc = ida_get_new_above(&sysfs_ino_ida, 2, &ino);
	spin_unlock(&sysfs_ino_lock);

	if (rc == -EAGAIN) {
		if (ida_pre_get(&sysfs_ino_ida, GFP_KERNEL))
			goto retry;
		rc = -ENOMEM;
	}

	*pino = ino;
	return rc;
}

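/*
 * Sketch of the matching release path (an assumption, not taken from the
 * original sources): an inode number handed out by sysfs_alloc_ino()
 * would be returned by removing it from the same ida under the same lock.
 */
static void sysfs_free_ino(ino_t ino)
{
	spin_lock(&sysfs_ino_lock);
	ida_remove(&sysfs_ino_ida, ino);
	spin_unlock(&sysfs_ino_lock);
}
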
/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000 or 0)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int compat_ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
			  gfp_t gfp_mask)
{
	int ret, id;
	unsigned int max;
	unsigned long flags;

	BUG_ON((int)start < 0);
	BUG_ON((int)end < 0);

	if (end == 0)
		max = 0x80000000;
	else {
		BUG_ON(end < start);
		max = end - 1;
	}

again:
	if (!ida_pre_get(ida, gfp_mask))
		return -ENOMEM;

	spin_lock_irqsave(&compat_simple_ida_lock, flags);
	ret = ida_get_new_above(ida, start, &id);
	if (!ret) {
		if (id > max) {
			ida_remove(ida, id);
			ret = -ENOSPC;
		} else {
			ret = id;
		}
	}
	spin_unlock_irqrestore(&compat_simple_ida_lock, flags);

	if (unlikely(ret == -EAGAIN))
		goto again;

	return ret;
}

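/*
 * Usage sketch for the ida_simple_get()-style helpers above; my_ida,
 * my_range_example() and the range 0..255 are illustrative.  An id that
 * was obtained successfully is released with ida_simple_remove(), as the
 * comment block above suggests.
 */
static DEFINE_IDA(my_ida);

static int my_range_example(void)
{
	int id = compat_ida_simple_get(&my_ida, 0, 256, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */

	/* ... use id ... */

	ida_simple_remove(&my_ida, id);
	return 0;
}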