Code example #1
/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
               int line)
{
    GIANT_DECLARE;
#ifdef ADAPTIVE_SX
    volatile struct thread *owner;
    u_int i, spintries = 0;
#endif
    uintptr_t x;
#ifdef LOCK_PROFILING
    uint64_t waittime = 0;
    int contested = 0;
#endif
    int error = 0;
#ifdef KDTRACE_HOOKS
    uint64_t spin_cnt = 0;
    uint64_t sleep_cnt = 0;
    int64_t sleep_time = 0;
#endif

    if (SCHEDULER_STOPPED())
        return (0);

    /* If we already hold an exclusive lock, then recurse. */
    if (sx_xlocked(sx)) {
        KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
                ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                 sx->lock_object.lo_name, file, line));
        sx->sx_recurse++;
        atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
            CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
        return (0);
    }

    if (LOCK_LOG_TEST(&sx->lock_object, 0))
        CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
             sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

    while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
#ifdef KDTRACE_HOOKS
        spin_cnt++;
#endif
        lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
                                        &waittime);
#ifdef ADAPTIVE_SX
        /*
         * If the lock is write locked and the owner is
         * running on another CPU, spin until the owner stops
         * running or the state of the lock changes.
         */
        x = sx->sx_lock;
        if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
            if ((x & SX_LOCK_SHARED) == 0) {
                x = SX_OWNER(x);
                owner = (struct thread *)x;
                if (TD_IS_RUNNING(owner)) {
                    if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR3(KTR_LOCK,
                             "%s: spinning on %p held by %p",
                             __func__, sx, owner);
                    GIANT_SAVE();
                    while (SX_OWNER(sx->sx_lock) == x &&
                            TD_IS_RUNNING(owner)) {
                        cpu_spinwait();
#ifdef KDTRACE_HOOKS
                        spin_cnt++;
#endif
                    }
                    continue;
                }
            } else if (SX_SHARERS(x) && spintries < asx_retries) {
                GIANT_SAVE();
                spintries++;
                for (i = 0; i < asx_loops; i++) {
                    if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR4(KTR_LOCK,
                             "%s: shared spinning on %p with %u and %u",
                             __func__, sx, spintries, i);
                    x = sx->sx_lock;
                    if ((x & SX_LOCK_SHARED) == 0 ||
                            SX_SHARERS(x) == 0)
                        break;
                    cpu_spinwait();
#ifdef KDTRACE_HOOKS
                    spin_cnt++;
#endif
                }
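                /*
                 * Leaving the loop early means the lock state changed
                 * while we were spinning, so retry the acquisition
                 * immediately; otherwise fall through and block below.
                 */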
                if (i != asx_loops)
                    continue;
            }
        }
#endif

        sleepq_lock(&sx->lock_object);
        x = sx->sx_lock;

        /*
         * If the lock was released while spinning on the
         * sleep queue chain lock, try again.
         */
        if (x == SX_LOCK_UNLOCKED) {
            sleepq_release(&sx->lock_object);
            continue;
        }

#ifdef ADAPTIVE_SX
        /*
         * The current lock owner might have started executing
         * on another CPU (or the lock could have changed
         * owners) while we were waiting on the sleep queue
         * chain lock.  If so, drop the sleep queue lock and try
         * again.
         */
        if (!(x & SX_LOCK_SHARED) &&
                (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
            owner = (struct thread *)SX_OWNER(x);
            if (TD_IS_RUNNING(owner)) {
                sleepq_release(&sx->lock_object);
                continue;
            }
        }
#endif

        /*
         * If an exclusive lock was released with both shared
         * and exclusive waiters and a shared waiter hasn't
         * woken up and acquired the lock yet, sx_lock will be
         * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
         * If we see that value, try to acquire it once.  Note
         * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
         * as there are other exclusive waiters still.  If we
         * fail, restart the loop.
         */
        if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
            if (atomic_cmpset_acq_ptr(&sx->sx_lock,
                                      SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
                                      tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
                sleepq_release(&sx->lock_object);
                CTR2(KTR_LOCK, "%s: %p claimed by new writer",
                     __func__, sx);
                break;
            }
            sleepq_release(&sx->lock_object);
            continue;
        }

        /*
         * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we fail,
         * then loop back and retry.
         */
        if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
            if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                                   x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                sleepq_release(&sx->lock_object);
                continue;
            }
            if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
                     __func__, sx);
        }

        /*
         * Since we have been unable to acquire the exclusive
         * lock and the exclusive waiters flag is set, we have
         * to sleep.
         */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
            CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                 __func__, sx);

#ifdef KDTRACE_HOOKS
        sleep_time -= lockstat_nsecs();
#endif
        GIANT_SAVE();
        sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                   SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
                                SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
        if (!(opts & SX_INTERRUPTIBLE))
            sleepq_wait(&sx->lock_object, 0);
        else
            error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
        sleep_time += lockstat_nsecs();
        sleep_cnt++;
#endif
        if (error) {
            if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK,
                     "%s: interruptible sleep by %p suspended by signal",
                     __func__, sx);
            break;
        }
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
            CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                 __func__, sx);
    }

    GIANT_RESTORE();
    if (!error)
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_XLOCK_ACQUIRE, sx,
                                             contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
    if (sleep_time)
        LOCKSTAT_RECORD1(LS_SX_XLOCK_BLOCK, sx, sleep_time);
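    /*
     * spin_cnt is bumped on every pass through the acquisition loop,
     * while sleep_cnt only counts passes that slept, so the difference
     * approximates the iterations spent spinning.
     */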
    if (spin_cnt > sleep_cnt)
        LOCKSTAT_RECORD1(LS_SX_XLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
#endif
    return (error);
}
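
The hard-case path above is only one half of the picture: callers reach it through the sx(9) fast-path macros. Below is a minimal caller-side sketch, assuming the standard sx(9) KPI (sx_init/sx_xlock/sx_xunlock); the lock name, the example_* identifiers, and the protected counter are illustrative only, not part of the listing above.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx example_sx;
static int example_counter;

static void
example_init(void)
{

	sx_init(&example_sx, "example sx");
}

static void
example_update(void)
{

	/*
	 * sx_xlock() attempts the uncontested fast path first; only if
	 * that compare-and-set fails does it fall into the
	 * _sx_xlock_hard() routine shown above.
	 */
	sx_xlock(&example_sx);
	example_counter++;
	sx_xunlock(&example_sx);
}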
Code example #2
/*
 * This function represents the so-called 'hard case' for sx_slock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t x;
	int error = 0;
#ifdef KDTRACE_HOOKS
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
		x = sx->sx_lock;

		/*
		 * If no other thread has an exclusive lock then try to bump up
		 * the count of sharers.  Since we have to preserve the state
		 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
		 * shared lock, loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
			    x + SX_ONE_SHARER)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    sx, (void *)x,
					    (void *)(x + SX_ONE_SHARER));
				break;
			}
			continue;
		}
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				GIANT_SAVE();
				while (SX_OWNER(sx->sx_lock) == x &&
				    TD_IS_RUNNING(owner)) {
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
					cpu_spinwait();
				}
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = sx->sx_lock;

		/*
		 * The lock could have been released while we spun.
		 * In this case loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}

#ifdef ADAPTIVE_SX
		/*
		 * The owner might have started running on another CPU
		 * (or the lock could have changed owners) while we were
		 * waiting on the sleep queue chain lock.  If so, drop
		 * the sleep queue lock and loop back to retry.
		 */
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = (struct thread *)SX_OWNER(x);
			if (TD_IS_RUNNING(owner)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
		}
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it, drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_cmpset_ptr(&sx->sx_lock, x,
			    x | SX_LOCK_SHARED_WAITERS)) {
				sleepq_release(&sx->lock_object);
				continue;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs();
#endif
		GIANT_SAVE();
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs();
		sleep_cnt++;
#endif
		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
	if (error == 0)
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_SX_SLOCK_ACQUIRE, sx,
		    contested, waittime, file, line);
#ifdef KDTRACE_HOOKS
	if (sleep_time)
		LOCKSTAT_RECORD1(LS_SX_SLOCK_BLOCK, sx, sleep_time);
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD1(LS_SX_SLOCK_SPIN, sx, (spin_cnt - sleep_cnt));
#endif
	GIANT_RESTORE();
	return (error);
}
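
As with the exclusive path, the shared hard case is reached through the sx(9) fast-path macros. The sketch below, again assuming the standard sx(9) KPI (sx_init/sx_slock_sig/sx_sunlock), shows how a caller would take the shared lock interruptibly so that the SX_INTERRUPTIBLE/sleepq_wait_sig() handling above comes into play; the data_* identifiers are illustrative only.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx data_sx;
static int shared_data;

static void
reader_init(void)
{

	sx_init(&data_sx, "example data");
}

static int
reader_get(int *out)
{
	int error;

	/*
	 * sx_slock_sig() passes SX_INTERRUPTIBLE down to the hard case
	 * above, so the sleep in sleepq_wait_sig() can be aborted by a
	 * signal; the resulting error is returned to the caller.
	 */
	error = sx_slock_sig(&data_sx);
	if (error != 0)
		return (error);
	*out = shared_data;
	sx_sunlock(&data_sx);
	return (0);
}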