Example #1
/**
 *	kernfs_deactivate - deactivate kernfs_node
 *	@kn: kernfs_node to deactivate
 *
 *	Deny new active references and drain existing ones.
 */
static void kernfs_deactivate(struct kernfs_node *kn)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int v;

	BUG_ON(!(kn->flags & KERNFS_REMOVED));

	if (!(kernfs_type(kn) & KERNFS_ACTIVE_REF))
		return;

	kn->u.completion = (void *)&wait;

	if (kn->flags & KERNFS_LOCKDEP)
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
	/* atomic_add_return() is a mb(), put_active() will always see
	 * the updated kn->u.completion.
	 */
	v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active);

	if (v != KN_DEACTIVATED_BIAS) {
		if (kn->flags & KERNFS_LOCKDEP)
			lock_contended(&kn->dep_map, _RET_IP_);
		wait_for_completion(&wait);
	}

	if (kn->flags & KERNFS_LOCKDEP) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, 1, _RET_IP_);
	}
}
/**
 *	sysfs_deactivate - deactivate sysfs_dirent
 *	@sd: sysfs_dirent to deactivate
 *
 *	Deny new active references and drain existing ones.
 */
static void sysfs_deactivate(struct sysfs_dirent *sd)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int v;

	BUG_ON(!(sd->s_flags & SYSFS_FLAG_REMOVED));

	if (!(sysfs_type(sd) & SYSFS_ACTIVE_REF))
		return;

	sd->u.completion = (void *)&wait;

	rwsem_acquire(&sd->dep_map, 0, 0, _RET_IP_);
	/* atomic_add_return() is a mb(), put_active() will always see
	 * the updated sd->u.completion.
	 */
	v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active);

	if (v != SD_DEACTIVATED_BIAS) {
		lock_contended(&sd->dep_map, _RET_IP_);
		wait_for_completion(&wait);
	}

	lock_acquired(&sd->dep_map, _RET_IP_);
	rwsem_release(&sd->dep_map, 1, _RET_IP_);
}
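
kernfs_deactivate() and its sysfs predecessor above implement the same idea: push the active counter far negative so new references fail, then let the thread that drops the last remaining active reference fire the on-stack completion. The following userspace sketch shows that counter/completion dance with C11 atomics and a pthread condition variable standing in for the completion; every name in it (node_get_active() and friends, DEACTIVATED_BIAS) is invented for illustration and is not part of the kernfs or sysfs API.

/*
 * Illustrative userspace analogue of the deactivation pattern above; names
 * and constants are invented, error handling is omitted.
 *
 * Initialize with:
 *   struct node n = { 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define DEACTIVATED_BIAS (-1000000)	/* plays the role of KN_DEACTIVATED_BIAS */

struct node {
	atomic_int active;		/* #refs, or BIAS + #refs once deactivated */
	pthread_mutex_t lock;		/* protects the condition below */
	pthread_cond_t drained;		/* stands in for the on-stack completion */
};

/* Take an active reference; fails once the node has been deactivated. */
static bool node_get_active(struct node *n)
{
	int v = atomic_load(&n->active);

	while (v >= 0)
		if (atomic_compare_exchange_weak(&n->active, &v, v + 1))
			return true;
	return false;			/* counter is negative: node is going away */
}

/* Drop an active reference; the last one out wakes the draining thread. */
static void node_put_active(struct node *n)
{
	if (atomic_fetch_sub(&n->active, 1) - 1 != DEACTIVATED_BIAS)
		return;

	pthread_mutex_lock(&n->lock);
	pthread_cond_signal(&n->drained);
	pthread_mutex_unlock(&n->lock);
}

/* Deny new active references and drain existing ones. */
static void node_deactivate(struct node *n)
{
	/* like atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active) */
	if (atomic_fetch_add(&n->active, DEACTIVATED_BIAS) == 0)
		return;			/* nobody held an active reference */

	pthread_mutex_lock(&n->lock);
	while (atomic_load(&n->active) != DEACTIVATED_BIAS)
		pthread_cond_wait(&n->drained, &n->lock);
	pthread_mutex_unlock(&n->lock);
}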
Example #3
/**
 * bus1_active_drain() - drain active references
 * @active:	object to drain
 * @waitq:	wait-queue linked to @active
 *
 * This waits for all active-references on @active to be dropped. It uses the
 * passed wait-queue to sleep. It must be the same wait-queue that is used when
 * calling bus1_active_release().
 *
 * The caller must guarantee that bus1_active_deactivate() was called before.
 *
 * This function can be safely called in parallel on multiple CPUs.
 *
 * Semantically (and also enforced by lockdep), this call behaves like a
 * down_write(), followed by an up_write(), on this active object.
 */
void bus1_active_drain(struct bus1_active *active, wait_queue_head_t *waitq)
{
	if (BUS1_WARN_ON(!bus1_active_is_deactivated(active)))
		return;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * We pretend this is a down_write_interruptible() and all but
	 * the release-context get interrupted. This is required, as we
	 * cannot call lock_acquired() on multiple threads without
	 * synchronization. Hence, only the release-context will do
	 * this, all others just release the lock.
	 */
	lock_acquire_exclusive(&active->dep_map,	/* lock */
			       0,			/* subclass */
			       0,			/* try-lock */
			       NULL,			/* nest underneath */
			       _RET_IP_);		/* IP */
	if (atomic_read(&active->count) > BUS1_ACTIVE_BIAS)
		lock_contended(&active->dep_map, _RET_IP_);
#endif

	/* wait until all active references were dropped */
	wait_event(*waitq, atomic_read(&active->count) <= BUS1_ACTIVE_BIAS);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Pretend that no-one got the lock, but everyone got interrupted
	 * instead. That is, they released the lock without ever actually
	 * getting it locked.
	 */
	lock_release(&active->dep_map,		/* lock */
		     1,				/* nested (no-op) */
		     _RET_IP_);			/* instruction pointer */
#endif
}
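
The comment above only mentions bus1_active_release(); it is not part of this excerpt, but for the drain to terminate, the release side presumably has to decrement the counter and wake the wait-queue once only the deactivation bias is left. The sketch below is a reconstruction under that assumption, not the actual bus1 code, and it ignores the lockdep annotations a real release path would carry.

/*
 * Hypothetical release side matching bus1_active_drain() above (reconstructed,
 * not taken from the bus1 tree): drop one active reference and wake the
 * drainers once only BUS1_ACTIVE_BIAS remains.
 */
static void example_active_release(struct bus1_active *active,
				   wait_queue_head_t *waitq)
{
	if (atomic_dec_return(&active->count) == BUS1_ACTIVE_BIAS)
		wake_up_all(waitq);
}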
Example #4
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 *
	 * The mutex spinners are queued up using MCS lock so that only one
	 * spinner can compete for the mutex. However, if mutex spinning isn't
	 * going to happen, there is no point in going through the lock/unlock
	 * overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, we will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		   (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}
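
The slowpath relies on a three-state convention for lock->count: 1 means unlocked, 0 means locked with no waiters, and a negative value means locked with possible waiters, which is why every waiter xchg()s the count to -1 before sleeping and re-checks it on wakeup. The userspace toy below isolates just that convention on top of the raw futex syscall; it is only an illustration (no optimistic spinning, no lockdep, no error handling), and toy_mutex is an invented name, not a kernel interface.

#define _GNU_SOURCE
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

/* 1 = unlocked, 0 = locked/no waiters, -1 = locked/possible waiters */
struct toy_mutex {
	atomic_int count;		/* initialize to 1 */
};

static void toy_futex(atomic_int *addr, int op, int val)
{
	syscall(SYS_futex, addr, op, val, NULL, NULL, 0);
}

static void toy_mutex_lock(struct toy_mutex *m)
{
	int one = 1;

	/* fast path: 1 -> 0 takes an uncontended lock */
	if (atomic_compare_exchange_strong(&m->count, &one, 0))
		return;

	/*
	 * Slow path: force the count to -1 so the holder knows it must wake
	 * us on unlock, then sleep as long as the count still reads -1.
	 * This is the same xchg-to--1 trick the kernel slowpath above uses.
	 */
	while (atomic_exchange(&m->count, -1) != 1)
		toy_futex(&m->count, FUTEX_WAIT, -1);
}

static void toy_mutex_unlock(struct toy_mutex *m)
{
	/* 0 -> 1: nobody waited; -1 -> 1: wake one waiter to retry its xchg */
	if (atomic_exchange(&m->count, 1) == -1)
		toy_futex(&m->count, FUTEX_WAKE, 1);
}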
Example #5
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
               unsigned long ip)
{
    struct task_struct *task = current;
    struct mutex_waiter waiter;
    unsigned int old_val;
    unsigned long flags;

    spin_lock_mutex(&lock->wait_lock, flags);

    debug_mutex_lock_common(lock, &waiter);
    mutex_acquire(&lock->dep_map, subclass, 0, ip);
    debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

    /* add waiting tasks to the end of the waitqueue (FIFO): */
    list_add_tail(&waiter.list, &lock->wait_list);
    waiter.task = task;

    old_val = atomic_xchg(&lock->count, -1);
    if (old_val == 1)
        goto done;

    lock_contended(&lock->dep_map, ip);

    for (;;) {
        /*
         * Let's try to take the lock again - this is needed even if
         * we get here for the first time (shortly after failing to
         * acquire the lock), to make sure that we get a wakeup once
         * it's unlocked. Later on, if we sleep, this is the
         * operation that gives us the lock. We xchg it to -1, so
         * that when we release the lock, we properly wake up the
         * other waiters:
         */
        old_val = atomic_xchg(&lock->count, -1);
        if (old_val == 1)
            break;

        /*
         * got a signal? (This code gets eliminated in the
         * TASK_UNINTERRUPTIBLE case.)
         */
        if (unlikely((state == TASK_INTERRUPTIBLE &&
                    signal_pending(task)) ||
                  (state == TASK_KILLABLE &&
                    fatal_signal_pending(task)))) {
            mutex_remove_waiter(lock, &waiter,
                        task_thread_info(task));
            mutex_release(&lock->dep_map, 1, ip);
            spin_unlock_mutex(&lock->wait_lock, flags);

            debug_mutex_free_waiter(&waiter);
            return -EINTR;
        }
        __set_task_state(task, state);

        /* didn't get the lock, go to sleep: */
        spin_unlock_mutex(&lock->wait_lock, flags);
        schedule();
        spin_lock_mutex(&lock->wait_lock, flags);
    }

done:
    lock_acquired(&lock->dep_map);
    /* got the lock - rejoice! */
    mutex_remove_waiter(lock, &waiter, task_thread_info(task));
    debug_mutex_set_owner(lock, task_thread_info(task));

    /* set it to 0 if there are no waiters left: */
    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    spin_unlock_mutex(&lock->wait_lock, flags);

    debug_mutex_free_waiter(&waiter);

    return 0;
}

void WmlibFastMutex::lock() {
    WmlibFastMutex::FastMutexInternal* m_internal =
        (WmlibFastMutex::FastMutexInternal*)(&_futex);
    // Fast path: atomically set the locked flag; the acquire ordering pairs
    // with the release expected in the corresponding unlock().
    if (m_internal->locked.exchange(1, std::memory_order_acquire)) {
        // Already held by someone else: fall back to the contended slow path.
        (void)lock_contended();
    }
}
Example #7
/**
 * bus1_active_cleanup() - cleanup drained object
 * @active:	object to release
 * @waitq:	wait-queue linked to @active, or NULL
 * @cleanup:	cleanup callback, or NULL
 * @userdata:	userdata for callback
 *
 * This performs the final object cleanup. The caller must guarantee that the
 * object is drained, by calling bus1_active_drain().
 *
 * This function invokes the passed cleanup callback on the object. However, it
 * guarantees that this is done exactly once. If there are multiple parallel
 * callers, this will pick one randomly and make all others wait until it is
 * done. If you call this after it was already cleaned up, this is a no-op
 * and only serves as a barrier.
 *
 * If @waitq is NULL, the wait is skipped and the call returns immediately. In
 * this case, another thread has entered before, but there is no guarantee that
 * it has finished executing the cleanup callback yet.
 *
 * If @waitq is non-NULL, this call behaves like a down_write(), followed by an
 * up_write(), just like bus1_active_drain(). If @waitq is NULL, this rather
 * behaves like a down_write_trylock(), optionally followed by an up_write().
 *
 * Return: True if this is the thread that released it, false otherwise.
 */
bool bus1_active_cleanup(struct bus1_active *active,
			 wait_queue_head_t *waitq,
			 void (*cleanup) (struct bus1_active *, void *),
			 void *userdata)
{
	int v;

	if (BUS1_WARN_ON(!bus1_active_is_drained(active)))
		return false;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * We pretend this is a down_write_interruptible() and all but
	 * the release-context get interrupted. This is required, as we
	 * cannot call lock_acquired() on multiple threads without
	 * synchronization. Hence, only the release-context will do
	 * this, all others just release the lock.
	 */
	lock_acquire_exclusive(&active->dep_map,/* lock */
			       0,		/* subclass */
			       !waitq,		/* try-lock */
			       NULL,		/* nest underneath */
			       _RET_IP_);	/* IP */
#endif

	/* mark object as RELEASE */
	v = atomic_cmpxchg(&active->count,
			   BUS1_ACTIVE_RELEASE_DIRECT, BUS1_ACTIVE_RELEASE);
	if (v != BUS1_ACTIVE_RELEASE_DIRECT)
		v = atomic_cmpxchg(&active->count,
				   BUS1_ACTIVE_BIAS, BUS1_ACTIVE_RELEASE);

	/*
	 * If this is the thread that marked the object as RELEASE, we
	 * perform the actual release. Otherwise, we wait until the
	 * release is done and the object is marked as DONE.
	 */
	if (v == BUS1_ACTIVE_BIAS || v == BUS1_ACTIVE_RELEASE_DIRECT) {

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		/* we're the release-context and acquired the lock */
		lock_acquired(&active->dep_map, _RET_IP_);
#endif

		if (cleanup)
			cleanup(active, userdata);

		/* mark as DONE */
		atomic_set(&active->count, BUS1_ACTIVE_DONE);
		if (waitq)
			wake_up_all(waitq);
	} else if (waitq) {

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		/* we're contended against the release context */
		lock_contended(&active->dep_map, _RET_IP_);
#endif

		/* wait until object is marked DONE */
		wait_event(*waitq,
			   atomic_read(&active->count) == BUS1_ACTIVE_DONE);
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * No-one but the release-context acquired the lock. However,
	 * that does not matter as we simply treat this as
	 * 'interrupted'. Everyone releases the lock, but only one
	 * caller really got it.
	 */
	lock_release(&active->dep_map,	/* lock */
		     1,			/* nested (no-op) */
		     _RET_IP_);		/* instruction pointer */
#endif

	/* true if we released it */
	return v == BUS1_ACTIVE_BIAS || v == BUS1_ACTIVE_RELEASE_DIRECT;
}
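
Taken together with bus1_active_drain() above, the intended teardown order is deactivate, then drain, then cleanup. The sketch below shows an assumed usage pattern for an object embedding a struct bus1_active; it is not code from the bus1 tree, the signature of bus1_active_deactivate() is guessed from the comments, and the unconditional kfree() is only safe here because a single teardown caller is assumed.

/*
 * Assumed usage pattern (not from the bus1 tree): one thread tears down an
 * object embedding a struct bus1_active in deactivate/drain/cleanup order.
 */
struct foo {
	struct bus1_active active;
	wait_queue_head_t waitq;
	/* ... payload ... */
};

static void foo_cleanup(struct bus1_active *active, void *userdata)
{
	struct foo *foo = container_of(active, struct foo, active);

	/*
	 * Release payload resources here, but do not free the memory:
	 * bus1_active_cleanup() still writes BUS1_ACTIVE_DONE to @active
	 * and wakes the wait-queue after this callback returns.
	 */
	(void)foo;
}

static void foo_destroy(struct foo *foo)
{
	bus1_active_deactivate(&foo->active);	/* deny new active references */
	bus1_active_drain(&foo->active, &foo->waitq);
	bus1_active_cleanup(&foo->active, &foo->waitq, foo_cleanup, NULL);
	kfree(foo);				/* single teardown caller assumed */
}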