/**
 * @brief Signal condition variable, ie, wake up anyone waiting.
 * @param mutex mutex that holds the condition variable
 * @param cvi   which condition variable to signal
 * @param all   false: wake a single thread<br>
 *              true: wake all threads
 *
 * Called with the associated mutex UNLOCKED; correctness relies on the
 * mutex->waiters counter and the barriers below.
 */
void
Mutex_CondSig(Mutex *mutex,
	      uint32 cvi,
	      _Bool all)
{
	uint32 waiters;

	ASSERT(cvi < MUTEX_CVAR_MAX);

	/* Fast path: nobody registered as a waiter means no wake needed. */
	waiters = ATOMIC_GETO(mutex->waiters);
	if (waiters != 0) {
		/* Cleanup the effects of Mutex_UnlPoll() but only when it is
		 * SMP safe, considering that atomic and wakeup operations
		 * should also do memory barriers accordingly. This is
		 * mandatory otherwise rare SMP races are even possible,
		 * since Mutex_CondSig is called with the associated mutex
		 * unlocked, and that does not prevent from select() to run
		 * parallel!
		 */
		wait_queue_head_t *wq =
			(wait_queue_head_t *)mutex->cvarWaitQs[cvi];

		/*
		 * NOTE(review): the ">=" test detects that SOME poll flag is
		 * set (any cvi), while the clear below is specific to this
		 * cvi -- confirm this asymmetry is intentional.
		 */
		if ((waiters >= POLL_IN_PROGRESS_FLAG) &&
		    !waitqueue_active(wq))
			ATOMIC_ANDO(mutex->waiters,
				    ~(POLL_IN_PROGRESS_FLAG << cvi));

		/* Barrier: the flag clear must be visible before the wake. */
		DMB();

		if (all)
			WAKEUPALL(mutex->cvarWaitQs[cvi]);
		else
			WAKEUPONE(mutex->cvarWaitQs[cvi]);
	}
}
/**
 * @brief Unlock the mutex.  Also does a data barrier before unlocking so any
 *        modifications made before the lock gets released will be completed
 *        before the lock is released.
 * @param mutex as passed to Mutex_Lock()
 * @param mode  as passed to Mutex_Lock()
 * @param line the line number of the code that called this function
 */
void
Mutex_UnlockLine(Mutex *mutex,
		 MutexMode mode,
		 int line)
{
	Mutex_State newState, oldState;

	/* Ensure protected-section stores complete before the release. */
	DMB();
	do {
		/*
		 * Compute the released state: subtract our mode count and
		 * preserve the blocked-thread count.
		 */
		oldState.state = ATOMIC_GETO(mutex->state);
		newState.mode  = oldState.mode - mode;
		newState.blck  = oldState.blck;
		/* Debug aid: record caller line (rewritten on CAS retry). */
		mutex->lineUnl = line;

		/* A release must match a prior acquisition of this mode. */
		ASSERT(oldState.mode >= mode);
	} while (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state));

	/*
	 * If another thread was blocked, then wake it up.
	 * Releasing a shared hold wakes a single waiter; releasing an
	 * exclusive hold wakes them all so shared waiters may pile in.
	 */
	if (oldState.blck) {
		if (mode == MutexModeSH)
			WAKEUPONE(mutex->lockWaitQ);
		else
			WAKEUPALL(mutex->lockWaitQ);
	}
}
/**
 * @brief Unlock the mutex and sleep.  Also does a data barrier before
 *        unlocking so any modifications made before the lock gets released
 *        will be completed before the lock is released.
 * @param mutex as passed to Mutex_Lock()
 * @param mode  as passed to Mutex_Lock()
 * @param cvi   which condition variable to sleep on
 * @param test  sleep only if null or pointed atomic value mismatches mask
 * @param mask  bitfield to check test against before sleeping
 * @param file the file of the caller code
 * @param line the line number of the caller code
 * @return rc = 0: successfully waited<br>
 *            < 0: error waiting
 */
int
Mutex_UnlSleepTestLine(Mutex *mutex,
		       MutexMode mode,
		       uint32 cvi,
		       AtmUInt32 *test,
		       uint32 mask,
		       const char *file,
		       int line)
{
	DEFINE_WAIT(waiter);

	/* Debug check: verify sleeping is legal at this call site. */
	MutexCheckSleep(file, line);

	ASSERT(cvi < MUTEX_CVAR_MAX);

	/*
	 * Tell anyone who might try to wake us that they need to actually call
	 * WAKEUP***().
	 */
	ATOMIC_ADDV(mutex->waiters, 1);

	/*
	 * Be sure to catch any wake that comes along just after we unlock the
	 * mutex but before we call schedule().
	 */
	prepare_to_wait_exclusive((wait_queue_head_t *)mutex->cvarWaitQs[cvi],
				  &waiter,
				  TASK_INTERRUPTIBLE);

	/*
	 * Release the mutex, someone can wake us up now.
	 * They will see mutex->waiters non-zero so will actually do the wake.
	 */
	Mutex_Unlock(mutex, mode);

	/*
	 * Wait to be woken or interrupted.
	 * Skip the sleep entirely when the caller-supplied condition is
	 * already satisfied (some bit of *test under mask is set).
	 */
	if (test == NULL || (ATOMIC_GETO(*test) & mask) == 0)
		schedule();
	finish_wait((wait_queue_head_t *)mutex->cvarWaitQs[cvi], &waiter);

	/*
	 * Done waiting, don't need a wake any more.
	 */
	ATOMIC_SUBV(mutex->waiters, 1);

	/*
	 * If interrupted, return error status.
	 */
	if (signal_pending(current))
		return -ERESTARTSYS;

	/*
	 * Wait completed, return success status.
	 */
	return 0;
}
/* (stray extraction-artifact text removed) */
/**
 * @brief Decrement a socket's reference count; on the final (1 -> 0)
 *        release, tear the socket down and drop one reference on the
 *        shared page containing it.
 * @param mksck socket whose reference count to decrement
 */
void
Mksck_DecRefc(Mksck *mksck)
{
	uint32 oldRefc;

	/* Ensure prior accesses to the socket complete before the drop. */
	DMB();
	do {
		/*
		 * Holding the last reference: the 1 -> 0 transition must be
		 * done under the page mutex so nobody can look the socket up
		 * (and re-reference it) while we tear it down.
		 */
		while ((oldRefc = ATOMIC_GETO(mksck->refCount)) == 1) {
			MksckPage *mksckPage = Mksck_ToSharedPage(mksck);


			/* Uninterruptible: retry the lock until it succeeds. */
			while (Mutex_Lock(&mksckPage->mutex, MutexModeEX) < 0)
				;

			/* Recheck under the lock; the count may have changed. */
			if (ATOMIC_SETIF(mksck->refCount, 0, 1)) {
#if 0
				KNOWN_BUG(MVP-1349);
				PRINTK("Mksck_DecRefc: %08X " \
				       "shutDown %u, foundEmpty %u, " \
				       "foundFull %u, blocked %u\n",
				       mksck->addr.addr, mksck->shutDown,
				       mksck->foundEmpty, mksck->foundFull,
				       ATOMIC_GETO(mksck->mutex.blocked));
#endif

				/* A dying socket must no longer have a peer. */
				ASSERT(mksck->peer == 0);

				Mutex_Unlock(&mksckPage->mutex, MutexModeEX);
				/* Socket is gone: drop its hold on the page. */
				MksckPage_DecRefc(mksckPage);
				return;
			}

			/* Lost the race; release the lock and re-evaluate. */
			Mutex_Unlock(&mksckPage->mutex, MutexModeEX);
		}

		/* Decrementing from zero would indicate a refcount bug. */
		 ASSERT(oldRefc != 0);
	} while (!ATOMIC_SETIF(mksck->refCount, oldRefc - 1, oldRefc));
}
/* (stray extraction-artifact text removed) */
/**
 * @brief Find the live socket in a shared page that matches an address.
 * @param mksckPage shared page to search
 * @param addr      address to look for (vmId must match the page's)
 * @return matching socket, or NULL if no live socket has that address
 */
Mksck *
MksckPage_GetFromAddr(MksckPage *mksckPage,
		      Mksck_Address addr)
{
	uint32 idx;

	ASSERT(addr.vmId == mksckPage->vmId);

	/*
	 * Scan every allocated socket slot; a slot is live only while its
	 * reference count is non-zero.
	 */
	for (idx = 0; idx < mksckPage->numAllocSocks; idx++) {
		Mksck *candidate = &mksckPage->sockets[idx];

		if (ATOMIC_GETO(candidate->refCount) != 0 &&
		    candidate->addr.addr == addr.addr)
			return candidate;
	}

	return NULL;
}
/**
 * @brief Lock the mutex.  Also does a data barrier after locking so the
 *        locking is complete before any shared data is accessed.
 * @param[in,out] mutex which mutex to lock
 * @param         mode  mutex lock mode
 * @param file the file of the caller code
 * @param line the line number of the code that called this function
 * @return rc = 0: mutex now locked by caller<br>
 *             < 0: interrupted
 */
int
Mutex_LockLine(Mutex *mutex,
	       MutexMode mode,
	       const char *file,
	       int line)
{
	Mutex_State newState, oldState;

	/* Debug check: verify sleeping is legal at this call site. */
	MutexCheckSleep(file, line);

	/*
	 * If uncontended, just set new lock state and return success status.
	 * If contended, mark state saying there is a waiting thread to wake.
	 */
	do {
lock_start:
		/*
		 * Get current state and calculate what new state would be.
		 * New state adds 1 for shared and 0xFFFF for exclusive.
		 * If the 16 bit field overflows, there is contention.
		 */
		oldState.state = ATOMIC_GETO(mutex->state);
		newState.mode  = oldState.mode + mode;
		newState.blck  = oldState.blck;

		/*
		 * So we are saying there is no contention if new state
		 * indicates no overflow.
		 *
		 * On fairness: The test here allows a new-comer thread to grab
		 * the lock even if there is a blocked thread. For example 2
		 * threads repeatedly obtaining shared access can starve a third
		 * wishing to obtain an exclusive lock. Currently this is only a
		 * hypothetical situation as mksck use exclusive lock only and
		 * the code never has more than 2 threads using the same mutex.
		 */
		if ((uint32)newState.mode >= (uint32)mode) {
			/* CAS lost a race: re-read and retry the fast path. */
			if (!ATOMIC_SETIF(mutex->state, newState.state,
					  oldState.state))
				goto lock_start;

			/* Barrier: lock held before any shared-data access. */
			DMB();
			/* Debug aid: record acquire site, clear release site. */
			mutex->line    = line;
			mutex->lineUnl = -1;
			return 0;
		}

		/*
		 * There is contention, so increment the number of blocking
		 * threads.
		 */
		newState.mode = oldState.mode;
		newState.blck = oldState.blck + 1;
	} while (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state));

	/*
	 * Statistics...
	 */
	ATOMIC_ADDV(mutex->blocked, 1);

	/*
	 * Mutex is contended, state has been updated to say there is a blocking
	 * thread.
	 *
	 * So now we block till someone wakes us up.
	 */
	 do {
		DEFINE_WAIT(waiter);

		/*
		 * This will make sure we catch any wakes done after we check
		 * the lock state again.
		 */
		prepare_to_wait((wait_queue_head_t *)mutex->lockWaitQ,
				&waiter,
				TASK_INTERRUPTIBLE);

		/*
		 * Now that we will catch wakes, check the lock state again.
		 * If now uncontended, mark it locked, abandon the wait and
		 * return success.
		 */

set_new_state:
		/*
		 * Same as the original check for contention above, except
		 * that we must decrement the number of waiting threads by one
		 * if we are successful in locking the mutex.
		 */
		oldState.state = ATOMIC_GETO(mutex->state);
		newState.mode  = oldState.mode + mode;
		newState.blck  = oldState.blck - 1;
		/* We are blocked, so the blocked count must be non-zero. */
		ASSERT(oldState.blck);

		if ((uint32)newState.mode >= (uint32)mode) {
			if (!ATOMIC_SETIF(mutex->state,
					  newState.state, oldState.state))
				goto set_new_state;

			/*
			 * No longer contended and we were able to lock it.
			 */
			finish_wait((wait_queue_head_t *)mutex->lockWaitQ,
				    &waiter);
			/* Barrier: lock held before any shared-data access. */
			DMB();
			mutex->line    = line;
			mutex->lineUnl = -1;
			return 0;
		}

		/*
		 * Wait for a wake that happens any time after prepare_to_wait()
		 * returned.  The 10s timeout only flags a suspected soft
		 * lockup; the loop retries afterwards regardless.
		 */
		WARN(!schedule_timeout(10*HZ),
		     "Mutex_Lock: soft lockup - stuck for 10s!\n");
		finish_wait((wait_queue_head_t *)mutex->lockWaitQ, &waiter);
	} while (!signal_pending(current));

	/*
	 * A signal interrupted the wait.
	 * We aren't waiting anymore, decrement the number of waiting threads.
	 */
	do {
		oldState.state = ATOMIC_GETO(mutex->state);
		newState.mode  = oldState.mode;
		newState.blck  = oldState.blck - 1;

		ASSERT(oldState.blck);
	} while (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state));

	return -ERESTARTSYS;
}