/**
 * @brief Signal a condition variable, i.e., wake up anyone waiting on it.
 * @param mutex mutex that holds the condition variable
 * @param cvi   which condition variable to signal
 * @param all   false: wake a single thread<br>
 *              true: wake all threads
 */
void
Mutex_CondSig(Mutex *mutex,
	      uint32 cvi,
	      _Bool all)
{
	uint32 waiters;

	ASSERT(cvi < MUTEX_CVAR_MAX);

	waiters = ATOMIC_GETO(mutex->waiters);
	if (waiters != 0) {
		/* Clean up the effects of Mutex_UnlPoll(), but only when it
		 * is SMP safe to do so; the atomic and wakeup operations are
		 * relied upon to issue the appropriate memory barriers. This
		 * is mandatory: Mutex_CondSig() is called with the associated
		 * mutex unlocked, which does not prevent select() from
		 * running in parallel, so rare SMP races are otherwise
		 * possible.
		 */
		wait_queue_head_t *wq =
			(wait_queue_head_t *)mutex->cvarWaitQs[cvi];

		if ((waiters >= POLL_IN_PROGRESS_FLAG) &&
		    !waitqueue_active(wq))
			ATOMIC_ANDO(mutex->waiters,
				    ~(POLL_IN_PROGRESS_FLAG << cvi));

		DMB();

		if (all)
			WAKEUPALL(mutex->cvarWaitQs[cvi]);
		else
			WAKEUPONE(mutex->cvarWaitQs[cvi]);
	}
}
/**
 * @brief Unlock the mutex and prepare to sleep on a kernel polling table
 *        given as anonymous parameters for poll_wait
 * @param mutex as passed to Mutex_Lock()
 * @param mode  as passed to Mutex_Lock()
 * @param cvi   which condition variable to sleep on
 * @param filp  which file to poll_wait upon
 * @param wait  which poll_table to poll_wait upon
 */
void
Mutex_UnlPoll(Mutex *mutex,
	      MutexMode mode,
	      uint32 cvi,
	      void *filp,
	      void *wait)
{
	ASSERT(cvi < MUTEX_CVAR_MAX);

	/* poll_wait() is done with the mutex locked so that any wake arriving
	 * after we unlock the mutex but before the kernel polling tables are
	 * used is deferred rather than lost. Note that the kernel probably
	 * avoids an exclusive wait in that case, and also increments the
	 * usage count of the file given in filp.
	 */
	poll_wait(filp, (wait_queue_head_t *)mutex->cvarWaitQs[cvi], wait);

	/*
	 * Tell anyone who might try to wake us that they need to actually
	 * call WAKEUP***(). This puts us in a "noisy" mode, since there is
	 * no guarantee that we will really sleep, nor that we are the thread
	 * sleeping on that socket or condition. This is done using
	 * POLL_IN_PROGRESS_FLAG; unfortunately it has to be a per-cvi flag,
	 * in case we poll independently on different cvis.
	 */
	DMB();
	ATOMIC_ORO(mutex->waiters, (POLL_IN_PROGRESS_FLAG << cvi));

	/*
	 * Release the mutex, someone can wake us up now.
	 * They will see mutex->waiters non-zero so will actually do the wake.
	 */
	Mutex_Unlock(mutex, mode);
}
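/*
 * A minimal sketch (hypothetical driver code, not from this module) of how
 * Mutex_UnlPoll() pairs with Mutex_CondSig(). ExampleDev, ExamplePoll,
 * ExampleProduce and EXAMPLE_CVI are assumed names; the POLLIN-style mask
 * value is illustrative only.
 */
#define EXAMPLE_CVI 0			/* assumed to be < MUTEX_CVAR_MAX */

typedef struct {
	Mutex mutex;
	_Bool dataReady;
} ExampleDev;

static unsigned int
ExamplePoll(ExampleDev *dev, void *filp, void *wait)
{
	unsigned int mask = 0;

	while (Mutex_Lock(&dev->mutex, MutexModeSH) < 0)
		;
	if (dev->dataReady)
		mask |= 1;		/* e.g. POLLIN */

	/* Registers on the cvi wait queue and unlocks in one step, so a
	 * wake between the check above and the sleep cannot be lost. */
	Mutex_UnlPoll(&dev->mutex, MutexModeSH, EXAMPLE_CVI, filp, wait);
	return mask;
}

static void
ExampleProduce(ExampleDev *dev)
{
	while (Mutex_Lock(&dev->mutex, MutexModeEX) < 0)
		;
	dev->dataReady = 1;
	Mutex_Unlock(&dev->mutex, MutexModeEX);

	/* Signaled with the mutex unlocked, as Mutex_CondSig() expects;
	 * true wakes all waiters. */
	Mutex_CondSig(&dev->mutex, EXAMPLE_CVI, 1);
}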
/**
 * @brief Unlock the mutex.  Also issues a data barrier before unlocking so
 *        that modifications made while holding the lock complete before the
 *        lock is released.
 * @param mutex as passed to Mutex_Lock()
 * @param mode  as passed to Mutex_Lock()
 * @param line the line number of the code that called this function
 */
void
Mutex_UnlockLine(Mutex *mutex,
		 MutexMode mode,
		 int line)
{
	Mutex_State newState, oldState;

	DMB();
	do {
		oldState.state = ATOMIC_GETO(mutex->state);
		newState.mode  = oldState.mode - mode;
		newState.blck  = oldState.blck;
		mutex->lineUnl = line;

		ASSERT(oldState.mode >= mode);
	} while (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state));

	/*
	 * If another thread was blocked, then wake it up.
	 */
	if (oldState.blck) {
		if (mode == MutexModeSH)
			WAKEUPONE(mutex->lockWaitQ);
		else
			WAKEUPALL(mutex->lockWaitQ);
	}
}
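/*
 * The lock and unlock fast paths rely on Mutex_State packing two 16-bit
 * counters into one 32-bit word so both can be updated by a single
 * compare-and-set. A sketch of the presumed declaration (the real header may
 * differ; MutexModeSH and MutexModeEX are presumably 1 and 0xFFFF, so adding
 * the mode wraps the 16-bit counter exactly when the lock is contended):
 */
typedef union {
	struct {			/* anonymous struct: C11/GNU C */
		uint16 mode;		/* +1 per shared holder; 0xFFFF when
					 * held exclusively (-1 modulo 2^16) */
		uint16 blck;		/* threads blocked on lockWaitQ */
	};
	uint32 state;			/* both fields, for ATOMIC_SETIF() */
} Mutex_State;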
/**
 * @brief  HAL ADC interrupt service routine
 * @param  None
 * @retval None
 */
void halAdcIsr(void)
{
  uint8_t i;
  uint8_t conversion = adcPendingConversion; /* fix '__IO' warning; costs no flash */

  /* make sure data is ready and the desired conversion is valid */
  if ( (ADC->ISR & ADC_IER_DMABFIE)
        && (conversion < NUM_ADC_USERS) ) {
    adcReadings[conversion] = adcData;
    adcReadingValid |= BIT(conversion); /* mark the reading as valid */
    /* setup the next conversion if any */
    if (adcPendingRequests) {
      for (i = 0; i < NUM_ADC_USERS; i++) {
        if (BIT(i) & adcPendingRequests) {
          adcPendingConversion = i;     /* set pending conversion */
          adcPendingRequests ^= BIT(i); /* clear request: conversion is starting */
          ADC->CR = adcConfig[i];       /* start this user's conversion */
          break; /* conversion started, so we're done here (only one at a time) */
        }
      }
    } else {                                /* no conversion to do */
      ADC->CR = 0;                          /* disable adc */
      adcPendingConversion = NUM_ADC_USERS; /* nothing pending, so go "idle" */
    }
  }
  ADC->ISR = 0xFFFF; /* clear all ADC interrupt flags */

  /* data memory barrier: make the ISR's effects visible before returning */
#if (defined (__ICCARM__) || defined (__GNUC__))
  asm("DMB");
#elif defined __CC_ARM
  DMB();
#else
  #error "Inline assembler syntax expected"
#endif
}
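/*
 * A hedged sketch of the request side that this ISR serves. halAdcRequest(),
 * INTERRUPTS_OFF() and INTERRUPTS_ON() are assumed names; the shared
 * variables match those used in halAdcIsr() above.
 */
void halAdcRequest(uint8_t user)
{
  /* mask interrupts so the ISR cannot run mid-update */
  INTERRUPTS_OFF();
  adcReadingValid &= ~BIT(user);        /* previous reading is now stale  */
  if (adcPendingConversion < NUM_ADC_USERS) {
    adcPendingRequests |= BIT(user);    /* ADC busy: let the ISR start it */
  } else {
    adcPendingConversion = user;        /* ADC idle: start it now         */
    ADC->CR = adcConfig[user];
  }
  INTERRUPTS_ON();
}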
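/**
 * @brief Decrement the socket's reference count. When the count drops to
 *        zero, the enclosing shared page's reference count is dropped as
 *        well; the page mutex is held around the final decrement.
 * @param mksck which socket to release
 */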
void
Mksck_DecRefc(Mksck *mksck)
{
	uint32 oldRefc;

	DMB();
	do {
		while ((oldRefc = ATOMIC_GETO(mksck->refCount)) == 1) {
			MksckPage *mksckPage = Mksck_ToSharedPage(mksck);

			while (Mutex_Lock(&mksckPage->mutex, MutexModeEX) < 0)
				;

			if (ATOMIC_SETIF(mksck->refCount, 0, 1)) {
#if 0
				KNOWN_BUG(MVP-1349);
				PRINTK("Mksck_DecRefc: %08X " \
				       "shutDown %u, foundEmpty %u, " \
				       "foundFull %u, blocked %u\n",
				       mksck->addr.addr, mksck->shutDown,
				       mksck->foundEmpty, mksck->foundFull,
				       ATOMIC_GETO(mksck->mutex.blocked));
#endif

				ASSERT(mksck->peer == 0);

				Mutex_Unlock(&mksckPage->mutex, MutexModeEX);
				MksckPage_DecRefc(mksckPage);
				return;
			}

			Mutex_Unlock(&mksckPage->mutex, MutexModeEX);
		}

		ASSERT(oldRefc != 0);
	} while (!ATOMIC_SETIF(mksck->refCount, oldRefc - 1, oldRefc));
}
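/*
 * For symmetry, the increment side of this reference count is presumably a
 * plain atomic add; a minimal sketch (the real Mksck_IncRefc may differ):
 */
static inline void
Mksck_IncRefc(Mksck *mksck)
{
	ASSERT(ATOMIC_GETO(mksck->refCount) != 0); /* caller holds a ref */
	ATOMIC_ADDV(mksck->refCount, 1);
}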
/**
 * @brief Lock the mutex.  Also issues a data barrier after locking so the
 *        lock acquisition completes before any shared data is accessed.
 * @param[in,out] mutex which mutex to lock
 * @param         mode  mutex lock mode
 * @param file the file of the caller code
 * @param line the line number of the code that called this function
 * @return rc = 0: mutex now locked by caller<br>
 *             < 0: interrupted
 */
int
Mutex_LockLine(Mutex *mutex,
	       MutexMode mode,
	       const char *file,
	       int line)
{
	Mutex_State newState, oldState;

	MutexCheckSleep(file, line);

	/*
	 * If uncontended, just set new lock state and return success status.
	 * If contended, mark state saying there is a waiting thread to wake.
	 */
	do {
lock_start:
		/*
		 * Get current state and calculate what new state would be.
		 * New state adds 1 for shared and 0xFFFF for exclusive.
		 * If the 16 bit field overflows, there is contention.
		 */
		oldState.state = ATOMIC_GETO(mutex->state);
		newState.mode  = oldState.mode + mode;
		newState.blck  = oldState.blck;

		/*
		 * There is no contention if the new state shows no overflow.
		 *
		 * On fairness: the test here allows a newcomer thread to grab
		 * the lock even when there is a blocked thread. For example,
		 * two threads repeatedly obtaining shared access can starve a
		 * third wishing to obtain an exclusive lock. Currently this is
		 * only a hypothetical situation, as mksck uses exclusive locks
		 * only and the code never has more than 2 threads using the
		 * same mutex.
		 */
		if ((uint32)newState.mode >= (uint32)mode) {
			if (!ATOMIC_SETIF(mutex->state, newState.state,
					  oldState.state))
				goto lock_start;

			DMB();
			mutex->line    = line;
			mutex->lineUnl = -1;
			return 0;
		}

		/*
		 * There is contention, so increment the number of blocking
		 * threads.
		 */
		newState.mode = oldState.mode;
		newState.blck = oldState.blck + 1;
	} while (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state));

	/*
	 * Statistics...
	 */
	ATOMIC_ADDV(mutex->blocked, 1);

	/*
	 * Mutex is contended, state has been updated to say there is a blocking
	 * thread.
	 *
	 * So now we block till someone wakes us up.
	 */
	do {
		DEFINE_WAIT(waiter);

		/*
		 * This will make sure we catch any wakes done after we check
		 * the lock state again.
		 */
		prepare_to_wait((wait_queue_head_t *)mutex->lockWaitQ,
				&waiter,
				TASK_INTERRUPTIBLE);

		/*
		 * Now that we will catch wakes, check the lock state again.
		 * If now uncontended, mark it locked, abandon the wait and
		 * return success.
		 */

set_new_state:
		/*
		 * Same as the original check for contention above, except
		 * that we must decrement the number of waiting threads by one
		 * if we are successful in locking the mutex.
		 */
		oldState.state = ATOMIC_GETO(mutex->state);
		newState.mode  = oldState.mode + mode;
		newState.blck  = oldState.blck - 1;
		ASSERT(oldState.blck);

		if ((uint32)newState.mode >= (uint32)mode) {
			if (!ATOMIC_SETIF(mutex->state,
					  newState.state, oldState.state))
				goto set_new_state;

			/*
			 * No longer contended and we were able to lock it.
			 */
			finish_wait((wait_queue_head_t *)mutex->lockWaitQ,
				    &waiter);
			DMB();
			mutex->line    = line;
			mutex->lineUnl = -1;
			return 0;
		}

		/*
		 * Wait for a wake that happens any time after prepare_to_wait()
		 * returned.
		 */
		WARN(!schedule_timeout(10*HZ),
		     "Mutex_Lock: soft lockup - stuck for 10s!\n");
		finish_wait((wait_queue_head_t *)mutex->lockWaitQ, &waiter);
	} while (!signal_pending(current));

	/*
	 * We aren't waiting anymore, decrement the number of waiting threads.
	 */
	do {
		oldState.state = ATOMIC_GETO(mutex->state);
		newState.mode  = oldState.mode;
		newState.blck  = oldState.blck - 1;

		ASSERT(oldState.blck);
	} while (!ATOMIC_SETIF(mutex->state, newState.state, oldState.state));

	return -ERESTARTSYS;
}
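/*
 * Given the file/line parameters above, the Mutex_Lock()/Mutex_Unlock()
 * calls seen elsewhere in this listing are presumably thin wrappers that
 * record the call site; a sketch under that assumption:
 */
#define Mutex_Lock(mutex, mode) \
	Mutex_LockLine((mutex), (mode), __FILE__, __LINE__)

#define Mutex_Unlock(mutex, mode) \
	Mutex_UnlockLine((mutex), (mode), __LINE__)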