/* * Mfs_Lock */ int32_t Mfs_Lock(uint32_t Ch) { Mfs_INFO *p_info; uint32_t flag; uint32_t dummy; int32_t exc; int32_t ret; p_info = &MfsInfo[Ch]; flag = __LDREXW(&(p_info->CritSec)); if (flag == 0) { exc = __STREXW(1, &(p_info->CritSec)); if (exc != 0) { do { dummy = __LDREXW(&(p_info->CritSec)); if (dummy == 0) { /* Compiler warning */ /* Do nothing */ } exc = __STREXW(1, &(p_info->CritSec)); } while (exc != 0); } ret = SUCCESS; } else { ret = ERROR; } return ret; }
/* Atomically fetch the word at *addr and reset it to zero.
 * The LDREX/STREX pair is retried until the store succeeds without
 * interference; the value observed by the successful pass is returned. */
inline uint32_t safe_read_and_reset(uint32_t *addr)
{
    for (;;) {
        uint32_t observed = __LDREXW(addr);
        if (__STREXW(0x00, addr) == 0) {
            return observed;
        }
    }
}
/*
 * Allocate one fixed-size chunk from a skub pool.
 *
 * The pool carries a bitmask in which a SET bit marks a FREE chunk;
 * a bit is claimed with an LDREX/STREX read-modify-write so allocation
 * is safe against interrupts and other contexts.
 *
 * pool    : pool descriptor (bitmask, chunk size sz, capacity max, backing store)
 * returns : pointer to the claimed chunk, or NULL when the pool is exhausted.
 *
 * Bug fix: `~(1 << idx)` shifts a signed 1, which is undefined behavior
 * when idx == 31; use the unsigned literal `1U` instead.
 */
static void *skub_alloc_from_pool(const struct skub_pool_info *pool)
{
    int i, nbitfields = (pool->max + 31) / 32;

    /* Search for a free block */
    for (i = 0; i < nbitfields; i++) {
        while (1) {
            uint32_t bf = __LDREXW(&pool->bitmask[i]);

            /* If there are no bits free, try the next word */
            if (!bf)
                break;

            /* There is a free bit here: claim the lowest one. */
            int idx = __builtin_ctz(bf);
            bf &= ~(1U << idx);   /* 1U: left-shifting signed 1 by 31 is UB */

            /* Attempt to write.  If this fails, try again. */
            if (__STREXW(bf, &pool->bitmask[i]))
                continue;

            /* Success! */
            int chunknum = 32 * i + idx;
            return pool->pool + chunknum * pool->sz;
        }
    }

    /* No free blocks. */
    return NULL;
}
/* Atomically subtract delta from *valuePtr and return the new value. */
uint32_t core_util_atomic_decr_u32(uint32_t *valuePtr, uint32_t delta)
{
    volatile uint32_t *target = (volatile uint32_t *)valuePtr;
    uint32_t result;

    /* Retry until the exclusive store lands without interference. */
    do {
        result = __LDREXW(target) - delta;
    } while (__STREXW(result, target) != 0);

    return result;
}
/* Atomically add delta to *valuePtr and return the updated value. */
uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
{
    uint32_t updated;

    for (;;) {
        updated = __LDREXW(valuePtr) + delta;
        if (__STREXW(updated, valuePtr) == 0) {
            break;   /* exclusive store succeeded */
        }
    }
    return updated;
}
/*
 * Push an event record into the global event ring buffer.
 *
 * evr    : event identifier
 * reason : reason code stored alongside the event
 *
 * The head index evrHead is advanced atomically with LDREX/STREX so
 * concurrent callers each reserve a distinct slot; the index wraps via
 * evrBuffMASK (presumably buffer size is a power of two -- TODO confirm).
 *
 * NOTE(review): the new head is published BEFORE the record is written
 * into evrRingBuff[i].  A reader consuming up to evrHead could observe
 * a slot whose contents are not yet stored -- confirm readers tolerate
 * this (e.g. run at lower priority than all writers).
 */
void evrPush(uint16_t evr, uint16_t reason)
{
  /* Timestamped record; millis() sampled before the slot is claimed. */
  evr_t e = { millis(), evr, reason };
  int i;
  do
    i = __LDREXW(&evrHead);                         /* snapshot head */
  while (__STREXW( (i+1) & evrBuffMASK,&evrHead));  /* claim slot i */
  evrRingBuff[ i ] = e;                             /* fill after publish */
}
/*
 * Lock
 *
 * Single attempt to take the byte-wide lock at *tbl (0 = free, 1 = taken).
 *
 * tbl     : address of the lock byte
 * returns : 0 on success, non-zero (1) when the lock could not be taken.
 *
 * Fixes: use the byte-sized exclusives __LDREXB/__STREXB to match the
 * uint8_t lock object -- __LDREXW on a uint8_t* performs a 32-bit access
 * reaching past the object.  Also clear the exclusive monitor on the
 * already-locked path so the reservation is not left open.
 */
uint8_t Lock(volatile uint8_t *tbl)
{
    // Get the lock status and see if it is already locked
    if (__LDREXB(tbl) == 0) {
        // if not locked, try to set lock to 1; non-zero STREX means failure
        return (__STREXB(1, tbl) != 0);
    }
    __CLREX();   // drop the open exclusive reservation
    return (1);  // return fail status
}
/* Atomically clear the bit for aEvent in the shared sPendingEvents word. */
static void resetPendingEvent(RadioPendingEvents aEvent)
{
    uint32_t keepMask = ~(1UL << aEvent);
    volatile uint32_t snapshot;

    /* Retry the read-modify-write until the exclusive store succeeds. */
    do
    {
        snapshot = __LDREXW((unsigned long volatile *)&sPendingEvents);
        snapshot &= keepMask;
    } while (__STREXW(snapshot, (unsigned long volatile *)&sPendingEvents));
}
/*
 * Single-shot (weak) 32-bit compare-and-swap.
 *
 * ptr                  : target word
 * expectedCurrentValue : in/out -- expected value; overwritten with the
 *                        observed value when the comparison fails
 * desiredValue         : value to store on a successful match
 *
 * Returns true only when the exclusive store succeeded.
 *
 * NOTE(review): this is a WEAK CAS -- if the STREX fails spuriously
 * (e.g. an interrupt lands between LDREX and STREX) the function
 * returns false WITHOUT updating *expectedCurrentValue, so callers
 * must retry on failure rather than treat false as a definite mismatch.
 */
bool core_util_atomic_cas_u32(uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
{
    uint32_t currentValue = __LDREXW((volatile uint32_t*)ptr);
    if (currentValue != *expectedCurrentValue) {
        /* Mismatch: report what was seen and release the monitor. */
        *expectedCurrentValue = currentValue;
        __CLREX();
        return false;
    }
    /* STREX returns 0 on success, so negate to get a boolean result. */
    return !__STREXW(desiredValue, (volatile uint32_t*)ptr);
}
/*
 * uint32_t specialization of atomic_cas: single-shot (weak)
 * compare-and-swap using ARM exclusive access.
 *
 * ptr                  : target word
 * expectedCurrentValue : in/out -- expected value; overwritten with the
 *                        observed value when the comparison fails
 * desiredValue         : value to store on a successful match
 *
 * Returns true only when the exclusive store succeeded.
 *
 * NOTE(review): weak semantics -- a spurious STREX failure returns
 * false without updating *expectedCurrentValue; callers should loop.
 */
bool atomic_cas<uint32_t>(uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
{
    uint32_t currentValue = __LDREXW(ptr);
    if (currentValue != *expectedCurrentValue) {
        /* Mismatch: report what was seen and release the monitor. */
        *expectedCurrentValue = currentValue;
        __CLREX();
        return false;
    }
    /* STREX returns 0 on success, so negate to get a boolean result. */
    return !__STREXW(desiredValue, ptr);
}
/*
 * Atomically increment *pVar and return the incremented value.
 *
 * Bug fix: the original returned *pVar re-read AFTER the exclusive loop
 * had finished; a concurrent increment between the successful STREX and
 * the return could make the function report a value it never wrote.
 * Returning var + 1 -- the value this call actually stored -- keeps the
 * result consistent with the atomic operation.
 */
static uint32_t atomic_inc(uint32_t *pVar)
{
    uint32_t var;
    int32_t exc;

    do {
        var = __LDREXW(pVar);
        exc = __STREXW((var + 1), pVar);
    } while (exc != 0);

    return var + 1;   /* the value written by this call */
}
/*
 * Strong 32-bit compare-and-swap.  Spurious STREX failures are retried
 * internally; false is returned only on a genuine value mismatch, in
 * which case the observed value is written back through
 * expectedCurrentValue.
 */
bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
{
    for (;;) {
        uint32_t observed = __LDREXW(ptr);
        if (observed != *expectedCurrentValue) {
            *expectedCurrentValue = observed;
            __CLREX();        /* release the reservation before failing */
            return false;
        }
        if (__STREXW(desiredValue, ptr) == 0) {
            return true;      /* exclusive store succeeded */
        }
        /* STREX lost exclusivity -- re-check the value and try again. */
    }
}
/**
  \fn          int32_t Set_Channel_active_flag (uint8_t ch)
  \brief       Protected set of channel active flag
  \param[in]   ch  Channel number (0..7)
  \returns
   - \b  0: function succeeded
   - \b -1: function failed
*/
static int32_t Set_Channel_active_flag (uint8_t ch)
{
  uint32_t flags;
  uint32_t mask = 1U << ch;

  for (;;) {
    flags = __LDREXW (&Channel_active);
    if (flags & mask) {
      __CLREX ();           /* channel already active: release monitor */
      return -1;
    }
    if (__STREXW (flags | mask, &Channel_active) == 0) {
      return 0;             /* bit set atomically */
    }
    /* lost exclusivity -- retry */
  }
}
/* Atomically set the bit for aEvent in sPendingEvents, then signal the
 * system that an event is pending. */
static void setPendingEvent(RadioPendingEvents aEvent)
{
    uint32_t mask = 1UL << aEvent;
    volatile uint32_t snapshot;

    /* Retry the read-modify-write until the exclusive store succeeds. */
    do
    {
        snapshot = __LDREXW((unsigned long volatile *)&sPendingEvents) | mask;
    } while (__STREXW(snapshot, (unsigned long volatile *)&sPendingEvents));

    otSysEventSignalPending();
}
/*
 * Return microseconds since boot by combining the SysTick millisecond
 * counter with the DWT cycle counter for sub-millisecond resolution.
 *
 * The LDREX/STREX pair around sysTickUptime is a consistency check, not
 * a real store: the same value that was read is written back, and the
 * STREX fails (forcing a retry) whenever the tick interrupt updated
 * sysTickUptime while cycle/oldCycle were being sampled.  This keeps
 * the three samples mutually consistent.
 *
 * Assumes sysTickCycleCounter is updated together with sysTickUptime by
 * the tick handler, and that usTicks is cycles-per-microsecond -- TODO
 * confirm against the SysTick ISR.  (cycle - oldCycle) relies on the
 * well-defined unsigned wraparound of the 32-bit cycle counter.
 */
uint32_t micros(void)
{
    register uint32_t oldCycle, cycle, timeMs;
    do {
        timeMs = __LDREXW(&sysTickUptime);   /* open the "seqlock" */
        cycle = *DWT_CYCCNT;                 /* sample cycle counter */
        oldCycle = sysTickCycleCounter;      /* cycle count at last tick */
    } while (__STREXW(timeMs , &sysTickUptime));  /* retry if a tick hit */
    return (timeMs * 1000) + (cycle - oldCycle) / usTicks;
}
static inline void clearPendingEvents(void) { // Clear pending events that could cause race in the MAC layer. volatile uint32_t pendingEvents; uint32_t bitsToRemain = ~(0UL); bitsToRemain &= ~(1UL << kPendingEventSleep); do { pendingEvents = __LDREXW((unsigned long volatile *)&sPendingEvents); pendingEvents &= bitsToRemain; } while (__STREXW(pendingEvents, (unsigned long volatile *)&sPendingEvents)); }
/*
 * Mfs_UnLock
 *
 * Release the per-channel critical-section flag for MfsInfo[Ch] by
 * atomically storing 0 with an LDREX/STREX pair.
 */
void Mfs_UnLock(uint32_t Ch)
{
    Mfs_INFO *p_info = &MfsInfo[Ch];
    uint32_t current;

    for (;;) {
        current = __LDREXW(&(p_info->CritSec));
        (void)current;   /* value unused; read only to pair with STREX */
        if (__STREXW(0, &(p_info->CritSec)) == 0) {
            break;       /* unlock committed */
        }
    }
}
/**
 * \brief Callback function for DMA receiving.
 *
 * Runs when a DMA receive completes: refreshes the ring-buffer count
 * under the semaphore, invalidates the cached receive buffer so the CPU
 * sees DMA-written data, and asserts RTS when the buffer is nearly full
 * so the remote side stops transmitting.
 *
 * NOTE(review): __LDREXW is used here as a plain read of
 * pUsartBuffer->Count with no matching STREX/CLREX, leaving the
 * exclusive monitor open; a plain volatile read would appear to suffice
 * -- confirm the intent.
 */
static void _DmaRxCallback( uint8_t status, void* pArg )
{
    /*dummy*/
    /* Self-assignments silence unused-parameter warnings. */
    status = status;
    pArg = pArg;
    mutexTimeout = 0x7FF;
    while (LockMutex(semaphore, mutexTimeout));   // lock semaphore
    _UpdateCount();
    /* Discard stale cache lines covering the DMA receive buffer. */
    SCB_InvalidateDCache_by_Addr((uint32_t *)pRxBuffer,sizeof(pRxBuffer));
    if (__LDREXW(&pUsartBuffer->Count) >= MAX_FREE_BYTES)
    {
        /* Send signal to Tx side to stop sending data after filling all
         * except one block of buffer */
        BASE_USART->US_CR = US_CR_RTSEN;
    }
    ReleaseMutex(semaphore);
}
/*
 * Atomically increment *pVar and return the incremented value.
 *
 * On Cortex-M0 (no LDREX/STREX) interrupts are masked around the
 * update; on Cortex-M3 an exclusive read-modify-write loop is used;
 * otherwise (host builds) a plain increment is performed.
 *
 * Bug fixes: both protected branches previously returned a re-read of
 * *pVar AFTER leaving the protected region, so a concurrent increment
 * could make the function report a value it never wrote.  Each branch
 * now returns the value it actually stored.
 *
 * NOTE(review): the __CORTEX_M == 0x03 test excludes M4/M7, which also
 * have LDREX/STREX -- confirm whether those parts should take the
 * exclusive-access path too.
 */
unsigned int atomic_incr (unsigned int *pVar)
{
#if ((defined __CORTEX_M) && (__CORTEX_M == 0x00))
  unsigned int result;
  __disable_irq();
  result = *pVar + 1;      /* compute and capture inside the critical section */
  *pVar = result;
  __enable_irq();
  return result;
#elif ((defined __CORTEX_M) && (__CORTEX_M == 0x03))
  unsigned int var;
  int result;
  do {
    var = __LDREXW(pVar);
    result = __STREXW((var+1), pVar);
  } while (result != 0);
  return var + 1;          /* the value this call stored */
#else
  /* No protection available/needed on this target. */
  *pVar = *pVar + 1;
  return *pVar;
#endif /* (__CORTEX_M == ...) */
}
/*
 * Single-shot compare-and-swap: store `now` into *var only if it still
 * holds `old`.  Returns 1 on success, 0 on mismatch or when the
 * exclusive store failed.
 */
int atomic_cas(atomic_int_t *var, int old, int now)
{
    volatile uint32_t *target = (volatile uint32_t *)(&ATOMIC_VALUE(*var));

    /* Load exclusive and compare against the expected value. */
    if ((int)__LDREXW(target) != old) {
        /* Clear memory exclusivity before reporting the mismatch. */
        __CLREX();
        return 0;
    }

    /* Try to write the new value; STREX yields 0 on success. */
    return (__STREXW(now, target) == 0);
}
/*
 * trylock
 *
 * Single attempt to take the lock word at *ressources (0 = free).
 *
 * ressources : address of the lock word
 * returns    : true when this caller acquired the lock, false otherwise
 *              (already held, or the exclusive store was interrupted).
 *
 * Bug fix: the already-locked path returned without releasing the
 * exclusive reservation opened by __LDREXW; __CLREX() is now issued so
 * the monitor is not left open.
 */
bool trylock(uint32_t * ressources)
{
    if( __LDREXW((uint32_t *)ressources) == 0) {
        if(__STREXW( 1, (uint32_t *)ressources) != 0) {
            /* lost exclusivity between the load and the store */
            return false;
        } else {
            // ressources is not used by another thread, we can use them !
            return true;
        }
    } else {
        __CLREX();   /* drop the open exclusive reservation */
        return false;
    }
}
/*
 * Queue a deferred task: claim a free slot in the 32-entry tasks[]
 * table with an LDREX/STREX exclusive write, then record the callback
 * and its two context pointers.
 *
 * pfn       : callback to run later; its (non-zero) address doubles as
 *             the slot's "claimed" marker
 * pContext1 : first opaque argument handed back to pfn
 * pContext2 : second opaque argument handed back to pfn
 *
 * Returns 1 when a slot was claimed, 0 when all 32 slots are in use.
 */
uint8_t task_add(void (*pfn)(void*, void*), void* pContext1, void* pContext2)
{
    for(int i = 0; i < 32; i++)
    {
        uint32_t pfnValue = __LDREXW(&tasks[i].pfnValue);
        if(pfnValue)
            // Already claimed
            continue;
        // Try to claim the slot:
        if(__STREXW((uint32_t)pfn, &tasks[i].pfnValue))
            // Failed, someone else interrupted us, go to the next slot
            continue;
        // Fill out the context fields. We don't have to synchronize, here, because
        // task_run can only be performed at passive level.
        tasks[i].pContext1 = pContext1;
        tasks[i].pContext2 = pContext2;
        // Send-event: presumably wakes a WFE-based scheduler loop -- TODO confirm.
        __SEV();
        return 1;
    }
    my_printf("Task overrun!\r\n");
    return 0;
}
/**
  \fn          void Clear_Channel_active_flag (uint8_t ch)
  \brief       Protected clear of channel active flag
  \param[in]   ch  Channel number (0..7)
*/
static void Clear_Channel_active_flag (uint8_t ch)
{
  uint32_t cleared;

  /* Atomically clear the channel's bit, retrying on lost exclusivity. */
  do {
    cleared = __LDREXW (&Channel_active) & ~(1UL << ch);
  } while (__STREXW (cleared, &Channel_active) != 0);
}
/**
 * \brief Ring buffer management
 * This function copies data from ring buffer to application buffer with a given length.
 * \param pBuff Usart Rx DMA ring buffer
 * \param pDestination Usart application buffer
 * \param Len Num of dat to copy from ring buffer
 * \return function returns number of bytes read from ringbuffer
 *
 * Flow: (1) if a DMA flush timeout was flagged, refresh the buffer
 * count under the semaphore with RTS asserted; (2) copy up to Len
 * unread bytes to the caller, wrapping at the end of the ring;
 * (3) update tail/count in a critical section and re-enable RTS when
 * there is room for more data.
 *
 * NOTE(review): __LDREXW is used only as a read of pBuff->Count with no
 * matching STREX/CLREX -- the exclusive monitor is left open; confirm a
 * plain volatile read was intended.
 */
static uint32_t RingBufferRead(RignBuffer_t *pBuff, uint8_t *pDestination, uint32_t Len)
{
    uint32_t EndAddrs = ((uint32_t)pBuff->pBuffer + pBuff->BuffSize);  /* one past ring end */
    uint32_t UnreadCount = 0;
    uint32_t EnableRTS = 0;
    uint32_t TotalLen = 0;
    uint32_t TailAddrs, BytesLeft;

    /* If timeout has occurred then re calculate the unread number of bytes */
    if (dmaflush)
    {
        mutexTimeout = 0x7FF;
        BASE_USART->US_CR = US_CR_RTSEN;    // disable transmission
        __disable_irq();
        while (LockMutex(semaphore, mutexTimeout));   // lock mutex
        _UpdateCount();
        /* If Circular buffer has still free space to fill */
        if (pBuff->Count < MAX_FREE_BYTES)
            EnableRTS = US_CR_RTSDIS;
        dmaflush = 0;
        memory_sync();
        ReleaseMutex(semaphore);
        BASE_USART->US_CR = EnableRTS;
        __enable_irq();
    }

    /* If there are unread bytes in ringbuffer then copy them to application buffer */
    if (pBuff->Count)
    {
        UnreadCount = __LDREXW(&pBuff->Count);   // unread bytes count
        memory_barrier();
        /* Copy at most Len bytes, capped by what is actually unread. */
        TotalLen = (Len > UnreadCount) ? UnreadCount : Len;

        // if read length surpasses the ring buffer boundary, then loop over
        if ((pBuff->pTail + TotalLen) >= EndAddrs)
        {
            /* Two-part copy: tail..end, then wrap to the buffer start. */
            BytesLeft = (EndAddrs - pBuff->pTail);
            memcpy( pDestination , (uint32_t *)pBuff->pTail, BytesLeft );
            memcpy( (pDestination +(EndAddrs - pBuff->pTail)), (uint32_t *)(pBuff->pBuffer), TotalLen - BytesLeft);
            TailAddrs = ( (uint32_t)pBuff->pBuffer + (TotalLen - BytesLeft));
        }
        else
        {
            memcpy( pDestination , (uint32_t *)pBuff->pTail, TotalLen);
            TailAddrs = pBuff->pTail + TotalLen;
        }

        /* In this part function enable the RTS signal to stop all reception
           disable irq to enter in critical part
           gets a lock on semaphore
           updates Tail pointer and count of ring buffer
           check if RTS need to be disable to accept the data from host
           frees the semaphore and enable irq*/
        BASE_USART->US_CR = US_CR_RTSEN;    // disable transmission
        __disable_irq();
        mutexTimeout = 0x7FF;
        while (LockMutex(semaphore, mutexTimeout));   // lock mutex
        pBuff->pTail = TailAddrs;
        pBuff->Count -=TotalLen;            // update count of ring buffer
        TotalbytesReceived +=TotalLen;
        memory_sync();
#ifdef FULL_DUPLEX
        TimeOutTimer = GetTicks();
#endif
        /* If Circular buffer is read completely */
        if (pUsartBuffer->Count < MAX_FREE_BYTES)
            EnableRTS = US_CR_RTSDIS;
        ReleaseMutex(semaphore);
        BASE_USART->US_CR = EnableRTS;
        __enable_irq();
        printf("\r Total bytes received 0x%x (%u)", \
            (unsigned)TotalbytesReceived, (unsigned)TotalbytesReceived);
        return TotalLen;   // return the number of bytes
    }
    else
        return 0;
}
// avoid some inline assembly:
// http://electronics.stackexchange.com/questions/25690/critical-sections-on-cortex-m3
// Note: __LDREXW and __STREXW are CMSIS functions
/* Atomically increment the word at *addr by one. */
inline void safe_increment(uint32_t *addr)
{
    uint32_t bumped;
    for (;;) {
        bumped = __LDREXW(addr) + 1;
        if (__STREXW(bumped, addr) == 0) {
            break;   /* exclusive store succeeded */
        }
    }
}