/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @gfp: gfp flags
 *
 * Returns a tag - an integer in the range [0..nr_tags) (passed to
 * tag_pool_init()), or otherwise -ENOSPC on allocation failure.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
 * __GFP_WAIT, of course).
 *
 * @gfp indicates whether or not to wait until a free id is available (it's not
 * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
 * however long it takes until another thread frees an id (same semantics as a
 * mempool).
 *
 * Will not fail if passed __GFP_WAIT.
 *
 * NOTE(review): the wait below is TASK_UNINTERRUPTIBLE, so a __GFP_WAIT
 * caller blocked here cannot be interrupted by signals — confirm that is
 * acceptable for all callers.
 */
int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
{
	DEFINE_WAIT(wait);
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	int tag;

	/*
	 * Irqs stay disabled while touching the per-cpu cache — presumably
	 * because the freelist can also be reached from interrupt context
	 * (consistent with the "safe from interrupt context" claim above).
	 */
	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	/* Fastpath: grab a tag from this cpu's cache without the global lock. */
	tag = alloc_local_tag(pool, tags);
	if (likely(tag >= 0)) {
		local_irq_restore(flags);
		return tag;
	}

	while (1) {
		spin_lock(&pool->lock);

		/*
		 * prepare_to_wait() must come before steal_tags(), in case
		 * percpu_ida_free() on another cpu flips a bit in
		 * cpus_have_tags
		 *
		 * global lock held and irqs disabled, don't need percpu lock
		 */
		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

		/* Refill the local cache from the global pool... */
		if (!tags->nr_free)
			alloc_global_tags(pool, tags);
		/* ...and failing that, try stealing tags cached by another cpu. */
		if (!tags->nr_free)
			steal_tags(pool, tags);

		if (tags->nr_free) {
			tag = tags->freelist[--tags->nr_free];
			/*
			 * Advertise this cpu in cpus_have_tags only if tags
			 * remain after taking ours, so stealers don't target
			 * an empty cache.
			 */
			if (tags->nr_free)
				cpumask_set_cpu(smp_processor_id(),
						&pool->cpus_have_tags);
		}

		spin_unlock(&pool->lock);
		local_irq_restore(flags);

		/*
		 * Done if we got a tag, or if the caller declined to wait —
		 * in the latter case tag still holds the negative value from
		 * alloc_local_tag() above.
		 */
		if (tag >= 0 || !(gfp & __GFP_WAIT))
			break;

		schedule();

		/* We may have migrated while asleep; reload this cpu's cache. */
		local_irq_save(flags);
		tags = this_cpu_ptr(pool->tag_cpu);
	}

	finish_wait(&pool->wait, &wait);
	return tag;
}
/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @state: task state for prepare_to_wait
 *
 * Returns a tag - an integer in the range [0..nr_tags) (passed to
 * tag_pool_init()), -ENOSPC on allocation failure, or -ERESTARTSYS if an
 * interruptible wait was cut short by a pending signal.
 *
 * Safe to be called from interrupt context (assuming it isn't passed
 * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
 *
 * @state indicates whether or not to wait until a free id is available: with
 * TASK_RUNNING the function returns -ENOSPC immediately when no id is free,
 * while a sleeping state (TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE) means
 * we may sleep however long it takes until another thread frees an id (same
 * semantics as a mempool).
 *
 * Will not fail if passed TASK_UNINTERRUPTIBLE.
 */
int percpu_ida_alloc(struct percpu_ida *pool, int state)
{
	DEFINE_WAIT(wait);
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	int tag = -ENOSPC;

	/* Fastpath: take a tag from this cpu's cache under its per-cpu lock. */
	tags = raw_cpu_ptr(pool->tag_cpu);
	spin_lock_irqsave(&tags->lock, flags);
	if (likely(tags->nr_free)) {
		tag = tags->freelist[--tags->nr_free];
		spin_unlock_irqrestore(&tags->lock, flags);
		return tag;
	}
	spin_unlock_irqrestore(&tags->lock, flags);

	while (1) {
		spin_lock_irqsave(&pool->lock, flags);
		tags = this_cpu_ptr(pool->tag_cpu);

		/*
		 * prepare_to_wait() must come before steal_tags(), in case
		 * percpu_ida_free() on another cpu flips a bit in
		 * cpus_have_tags
		 *
		 * global lock held and irqs disabled, don't need percpu lock
		 */
		if (state != TASK_RUNNING)
			prepare_to_wait(&pool->wait, &wait, state);

		/* Refill from the global pool... */
		if (!tags->nr_free)
			alloc_global_tags(pool, tags);
		/* ...and failing that, try stealing tags cached by another cpu. */
		if (!tags->nr_free)
			steal_tags(pool, tags);

		if (tags->nr_free) {
			tag = tags->freelist[--tags->nr_free];
			/*
			 * Advertise this cpu in cpus_have_tags only if tags
			 * remain after taking ours, so stealers don't target
			 * an empty cache.
			 */
			if (tags->nr_free)
				cpumask_set_cpu(smp_processor_id(),
						&pool->cpus_have_tags);
		}

		spin_unlock_irqrestore(&pool->lock, flags);

		/* Done if we got a tag or the caller declined to sleep. */
		if (tag >= 0 || state == TASK_RUNNING)
			break;

		/* An interruptible wait bails out when a signal is pending. */
		if (signal_pending_state(state, current)) {
			tag = -ERESTARTSYS;
			break;
		}

		schedule();
	}

	if (state != TASK_RUNNING)
		finish_wait(&pool->wait, &wait);

	return tag;
}