Example #1
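/*
 * Debug wrapper for the read-lock entry point: sanity-checks recursion
 * on a non-recursive lock, idle-thread and destroyed-lock use, and
 * WITNESS lock ordering before deferring to _rm_rlock() and logging
 * the outcome.
 */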
int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}
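For context, here is a minimal usage sketch (the lock and counter names are hypothetical): the rm_rlock() and rm_runlock() macros resolve to the debug entry point above when lock debugging is compiled in, and the caller supplies the rm_priotracker, which must stay valid for the whole read section.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock stats_lock;	/* hypothetical */
static int stats_counter;		/* hypothetical */

static int
stats_read(void)
{
	struct rm_priotracker tracker;	/* must live across the read section */
	int v;

	rm_rlock(&stats_lock, &tracker);
	v = stats_counter;
	rm_runlock(&stats_lock, &tracker);
	return (v);
}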
Example #2
/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
void
_rm_assert(const struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
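A short sketch of how these assertions are typically consumed, reusing the hypothetical stats_lock from the sketch above (rm_assert() is only compiled in under INVARIANTS):

static void
stats_update_locked(int delta)
{

	/* Panics via _rm_assert() unless the write lock is held. */
	rm_assert(&stats_lock, RA_WLOCKED);
	stats_counter += delta;
}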
Example #3
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow a reader to acquire the lock even while a writer is
	 * blocked, provided the lock is recursive and the reader already
	 * holds it.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

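	/*
	 * Slow path: no fast-path grant was possible.  Leave the
	 * critical section and serialize against the writer via the
	 * lock's backing mutex or sx lock below.
	 */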
	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			THREAD_SLEEPING_OK();
			sx_xlock(&rm->rm_lock_sx);
			THREAD_NO_SLEEPING();
		} else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}
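The LO_RECURSABLE and LO_SLEEPABLE branches above are selected when the lock is initialized; a minimal sketch, assuming a hypothetical cfg_lock, of how those flags are requested:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

static struct rmlock cfg_lock;	/* hypothetical */

static void
cfg_init(void)
{

	/*
	 * RM_RECURSE sets LO_RECURSABLE, enabling the read-recursion
	 * path above; RM_SLEEPABLE would instead back the lock with
	 * an sx lock, selecting the LO_SLEEPABLE branches.
	 */
	rm_init_flags(&cfg_lock, "cfg", RM_RECURSE);
}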