void lg_global_lock(struct lglock *lg) { int i; preempt_disable(); lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); for_each_possible_cpu(i) { arch_spinlock_t *lock; lock = per_cpu_ptr(lg->lock, i); arch_spin_lock(lock); } }
/**
 * bus1_active_drain() - drain active references
 * @active:	object to drain
 * @waitq:	wait-queue linked to @active
 *
 * This waits for all active-references on @active to be dropped. It uses the
 * passed wait-queue to sleep. It must be the same wait-queue that is used when
 * calling bus1_active_release().
 *
 * The caller must guarantee that bus1_active_deactivate() was called before.
 *
 * This function can be safely called in parallel on multiple CPUs.
 *
 * Semantically (and also enforced by lockdep), this call behaves like a
 * down_write(), followed by an up_write(), on this active object.
 */
void bus1_active_drain(struct bus1_active *active, wait_queue_head_t *waitq)
{
	/* drain only makes sense on an already-deactivated object */
	if (BUS1_WARN_ON(!bus1_active_is_deactivated(active)))
		return;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * We pretend this is a down_write_interruptible() and all but
	 * the release-context get interrupted. This is required, as we
	 * cannot call lock_acquired() on multiple threads without
	 * synchronization. Hence, only the release-context will do
	 * this, all others just release the lock.
	 */
	lock_acquire_exclusive(&active->dep_map,	/* lock */
			       0,			/* subclass */
			       0,			/* try-lock */
			       NULL,			/* nest underneath */
			       _RET_IP_);		/* IP */
	/* references still outstanding: report contention to lockdep */
	if (atomic_read(&active->count) > BUS1_ACTIVE_BIAS)
		lock_contended(&active->dep_map, _RET_IP_);
#endif

	/* wait until all active references were dropped */
	wait_event(*waitq, atomic_read(&active->count) <= BUS1_ACTIVE_BIAS);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Pretend that no-one got the lock, but everyone got interrupted
	 * instead. That is, they released the lock without ever actually
	 * getting it locked.
	 */
	lock_release(&active->dep_map,	/* lock */
		     1,			/* nested (no-op) */
		     _RET_IP_);		/* instruction pointer */
#endif
}
/**
 * bus1_active_cleanup() - cleanup drained object
 * @active:	object to release
 * @waitq:	wait-queue linked to @active, or NULL
 * @cleanup:	cleanup callback, or NULL
 * @userdata:	userdata for callback
 *
 * This performs the final object cleanup. The caller must guarantee that the
 * object is drained, by calling bus1_active_drain().
 *
 * This function invokes the passed cleanup callback on the object. However, it
 * guarantees that this is done exactly once. If there're multiple parallel
 * callers, this will pick one randomly and make all others wait until it is
 * done. If you call this after it was already cleaned up, this is a no-op
 * and only serves as barrier.
 *
 * If @waitq is NULL, the wait is skipped and the call returns immediately. In
 * this case, another thread has entered before, but there is no guarantee that
 * they finished executing the cleanup callback, yet.
 *
 * If @waitq is non-NULL, this call behaves like a down_write(), followed by an
 * up_write(), just like bus1_active_drain(). If @waitq is NULL, this rather
 * behaves like a down_write_trylock(), optionally followed by an up_write().
 *
 * Return: True if this is the thread that released it, false otherwise.
 */
bool bus1_active_cleanup(struct bus1_active *active,
			 wait_queue_head_t *waitq,
			 void (*cleanup) (struct bus1_active *, void *),
			 void *userdata)
{
	int v;

	/* cleanup requires a fully drained object */
	if (BUS1_WARN_ON(!bus1_active_is_drained(active)))
		return false;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * We pretend this is a down_write_interruptible() and all but
	 * the release-context get interrupted. This is required, as we
	 * cannot call lock_acquired() on multiple threads without
	 * synchronization. Hence, only the release-context will do
	 * this, all others just release the lock.
	 */
	lock_acquire_exclusive(&active->dep_map,	/* lock */
			       0,			/* subclass */
			       !waitq,			/* try-lock */
			       NULL,			/* nest underneath */
			       _RET_IP_);		/* IP */
#endif

	/*
	 * Mark object as RELEASE. Try the RELEASE_DIRECT state first
	 * (presumably set by an earlier direct deactivation — confirm
	 * against bus1_active_deactivate()), then fall back to claiming
	 * the object from the plain drained BIAS state.
	 */
	v = atomic_cmpxchg(&active->count,
			   BUS1_ACTIVE_RELEASE_DIRECT, BUS1_ACTIVE_RELEASE);
	if (v != BUS1_ACTIVE_RELEASE_DIRECT)
		v = atomic_cmpxchg(&active->count,
				   BUS1_ACTIVE_BIAS, BUS1_ACTIVE_RELEASE);

	/*
	 * If this is the thread that marked the object as RELEASE, we
	 * perform the actual release. Otherwise, we wait until the
	 * release is done and the object is marked as DONE.
	 */
	if (v == BUS1_ACTIVE_BIAS || v == BUS1_ACTIVE_RELEASE_DIRECT) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
		/* we're the release-context and acquired the lock */
		lock_acquired(&active->dep_map, _RET_IP_);
#endif

		if (cleanup)
			cleanup(active, userdata);

		/* mark as DONE and wake all threads waiting in the else-branch */
		atomic_set(&active->count, BUS1_ACTIVE_DONE);
		if (waitq)
			wake_up_all(waitq);
	} else if (waitq) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
		/* we're contended against the release context */
		lock_contended(&active->dep_map, _RET_IP_);
#endif

		/* wait until the release-context marked the object as DONE */
		wait_event(*waitq,
			   atomic_read(&active->count) == BUS1_ACTIVE_DONE);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * No-one but the release-context acquired the lock. However,
	 * that does not matter as we simply treat this as
	 * 'interrupted'. Everyone releases the lock, but only one
	 * caller really got it.
	 */
	lock_release(&active->dep_map,	/* lock */
		     1,			/* nested (no-op) */
		     _RET_IP_);		/* instruction pointer */
#endif

	/* true if we released it */
	return v == BUS1_ACTIVE_BIAS || v == BUS1_ACTIVE_RELEASE_DIRECT;
}