void vParTestToggleLED( unsigned portBASE_TYPE uxLED ) { unsigned char ucBit; vTaskSuspendAll(); { if( uxLED < partstMAX_OUTPUT_LED ) { ucBit = ( ( unsigned char ) 1 ) << uxLED; if( ucOutputValue & ucBit ) { ucOutputValue &= ~ucBit; } else { ucOutputValue |= ucBit; } PDCWrite( PDC_LED, ucOutputValue ); } } xTaskResumeAll(); }
static portTASK_FUNCTION( vQueueSendWhenSuspendedTask, pvParameters ) { static unsigned long ulValueToSend = ( unsigned long ) 0; /* Just to stop warning messages. */ ( void ) pvParameters; for( ;; ) { vTaskSuspendAll(); { /* We must not block while the scheduler is suspended! */ if( xQueueSend( xSuspendedTestQueue, ( void * ) &ulValueToSend, priNO_BLOCK ) != pdTRUE ) { xSuspendedQueueSendError = pdTRUE; } } xTaskResumeAll(); vTaskDelay( priSLEEP_TIME ); ++ulValueToSend; } }
void vParTestToggleLED( unsigned portBASE_TYPE uxLED ) { unsigned char ucBit; if( uxLED <= partstMAX_OUTPUT_LED ) { ucBit = ( ( unsigned char ) 1 ) << uxLED; vTaskSuspendAll(); { if( ucCurrentOutputValue & ucBit ) { ucCurrentOutputValue &= ~ucBit; } else { ucCurrentOutputValue |= ucBit; } PORTB = ucCurrentOutputValue; } xTaskResumeAll(); } }
void vParTestToggleLED( unsigned portBASE_TYPE uxLED ) { unsigned short usBit; vTaskSuspendAll(); { if( uxLED < partstMAX_OUTPUT_LED ) { usBit = partstFIRST_LED << uxLED; if( usOutputValue & usBit ) { usOutputValue &= ~usBit; } else { usOutputValue |= usBit; } GPIO_Write( GPIOC, usOutputValue ); } } xTaskResumeAll(); }
void vPortFree( void *pv ) { unsigned char *puc = ( unsigned char * ) pv; xBlockLink *pxLink; if( pv != NULL ) { /* The memory being freed will have an xBlockLink structure immediately before it. */ puc -= heapSTRUCT_SIZE; /* This unexpected casting is to keep some compilers from issuing byte alignment warnings. */ pxLink = ( xBlockLink * ) puc; vTaskSuspendAll(); { /* Add this block to the list of free blocks. */ prvInsertBlockIntoFreeList( ( ( xBlockLink * ) pxLink ) ); xFreeBytesRemaining += pxLink->xBlockSize; } (void)xTaskResumeAll(); } }
void IRAM_ATTR spi_flash_op_block_func(void* arg) { // Disable scheduler on this CPU vTaskSuspendAll(); // Restore interrupts that aren't located in IRAM esp_intr_noniram_disable(); uint32_t cpuid = (uint32_t) arg; // Disable cache so that flash operation can start spi_flash_disable_cache(cpuid, &s_flash_op_cache_state[cpuid]); // s_flash_op_complete flag is cleared on *this* CPU, otherwise the other // CPU may reset the flag back to false before IPC task has a chance to check it // (if it is preempted by an ISR taking non-trivial amount of time) s_flash_op_complete = false; s_flash_op_can_start = true; while (!s_flash_op_complete) { // busy loop here and wait for the other CPU to finish flash operation } // Flash operation is complete, re-enable cache spi_flash_restore_cache(cpuid, s_flash_op_cache_state[cpuid]); // Restore interrupts that aren't located in IRAM esp_intr_noniram_enable(); // Re-enable scheduler xTaskResumeAll(); }
/* * Controller task as described above. */ static void vCounterControlTask( void * pvParameters ) { unsigned long ulLastCounter; short sLoops; short sError = pdFALSE; const char * const pcTaskStartMsg = "Priority manipulation tasks started.\r\n"; const char * const pcTaskFailMsg = "Priority manipulation Task Failed\r\n"; /* Just to stop warning messages. */ ( void ) pvParameters; /* Queue a message for printing to say the task has started. */ vPrintDisplayMessage( &pcTaskStartMsg ); for( ;; ) { /* Start with the counter at zero. */ ulCounter = ( unsigned long ) 0; /* First section : */ /* Check the continuous count task is running. */ for( sLoops = 0; sLoops < priLOOPS; sLoops++ ) { /* Suspend the continuous count task so we can take a mirror of the shared variable without risk of corruption. */ vTaskSuspend( xContinuousIncrementHandle ); ulLastCounter = ulCounter; vTaskResume( xContinuousIncrementHandle ); /* Now delay to ensure the other task has processor time. */ vTaskDelay( priSLEEP_TIME ); /* Check the shared variable again. This time to ensure mutual exclusion the whole scheduler will be locked. This is just for demo purposes! */ vTaskSuspendAll(); { if( ulLastCounter == ulCounter ) { /* The shared variable has not changed. There is a problem with the continuous count task so flag an error. */ sError = pdTRUE; xTaskResumeAll(); vPrintDisplayMessage( &pcTaskFailMsg ); vTaskSuspendAll(); } } xTaskResumeAll(); } /* Second section: */ /* Suspend the continuous counter task so it stops accessing the shared variable. */ vTaskSuspend( xContinuousIncrementHandle ); /* Reset the variable. */ ulCounter = ( unsigned long ) 0; /* Resume the limited count task which has a higher priority than us. We should therefore not return from this call until the limited count task has suspended itself with a known value in the counter variable. The scheduler suspension is not necessary but is included for test purposes. */ vTaskSuspendAll(); vTaskResume( xLimitedIncrementHandle ); xTaskResumeAll(); /* Does the counter variable have the expected value? */ if( ulCounter != priMAX_COUNT ) { sError = pdTRUE; vPrintDisplayMessage( &pcTaskFailMsg ); } if( sError == pdFALSE ) { /* If no errors have occurred then increment the check variable. */ portENTER_CRITICAL(); usCheckVariable++; portEXIT_CRITICAL(); } #if configUSE_PREEMPTION == 0 taskYIELD(); #endif /* Resume the continuous count task and do it all again. */ vTaskResume( xContinuousIncrementHandle ); } }
/** * @brief Set the moisture thresold to water */ void WATER_SetThreshold(uint16_t newThreshold) { vTaskSuspendAll(); m_moistureThreshold = newThreshold; xTaskResumeAll(); }
void TRANSFER_TASK(void *pvParameters) { portBASE_TYPE xStatus; xData ReadValue; char *Cmd; PIDCoff PID_Coff; while(1) { xSemaphoreTake(UART_xCountingSemaphore, portMAX_DELAY); if (uxQueueMessagesWaiting(RxQueue) != NULL) { xStatus = xQueueReceive(RxQueue, &ReadValue, 1); if (xStatus == pdPASS) { if (ReadValue.ID == USART_ID) { if (TSVN_USART_Create_Frame(ReadValue.Value) == End) { Cmd = TSVN_Get_Parameters(1, TSVN_USART_Get_Frame()); if (!strcmp(Cmd, "PID1")) { Cmd = TSVN_Get_Parameters(2, TSVN_USART_Get_Frame()); PID_Coff.Kp = atof(Cmd); Cmd = TSVN_Get_Parameters(3, TSVN_USART_Get_Frame()); PID_Coff.Ki = atof(Cmd); Cmd = TSVN_Get_Parameters(4, TSVN_USART_Get_Frame()); PID_Coff.Kd = atof(Cmd); PID_Init(MOTOR1, PID_Coff); vTaskSuspendAll(); printf("PID MOTOR1: %0.5f\t%0.5f\t%0.5f\n", PID_Coff.Kp, PID_Coff.Ki, PID_Coff.Kd); xTaskResumeAll(); } else if (!strcmp(Cmd, "PID2")) { Cmd = TSVN_Get_Parameters(2, TSVN_USART_Get_Frame()); PID_Coff.Kp = atof(Cmd); Cmd = TSVN_Get_Parameters(3, TSVN_USART_Get_Frame()); PID_Coff.Ki = atof(Cmd); Cmd = TSVN_Get_Parameters(4, TSVN_USART_Get_Frame()); PID_Coff.Kd = atof(Cmd); PID_Init(MOTOR2, PID_Coff); vTaskSuspendAll(); printf("PID MOTOR2: %0.5f\t%0.5f\t%0.5f\n", PID_Coff.Kp, PID_Coff.Ki, PID_Coff.Kd); xTaskResumeAll(); } else if (!strcmp(Cmd, "PID3")) { Cmd = TSVN_Get_Parameters(2, TSVN_USART_Get_Frame()); PID_Coff.Kp = atof(Cmd); Cmd = TSVN_Get_Parameters(3, TSVN_USART_Get_Frame()); PID_Coff.Ki = atof(Cmd); Cmd = TSVN_Get_Parameters(4, TSVN_USART_Get_Frame()); PID_Coff.Kd = atof(Cmd); PID_Init(MOTOR3, PID_Coff); vTaskSuspendAll(); printf("PID MOTOR3: %0.5f\t%0.5f\t%0.5f\n", PID_Coff.Kp, PID_Coff.Ki, PID_Coff.Kd); xTaskResumeAll(); } } } } } } }
void *pvPortMalloc( size_t xWantedSize ) { BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink; static BaseType_t xHeapHasBeenInitialised = pdFALSE; void *pvReturn = NULL; vTaskSuspendAll(); { /* If this is the first call to malloc then the heap will require initialisation to setup the list of free blocks. */ if( xHeapHasBeenInitialised == pdFALSE ) { prvHeapInit(); xHeapHasBeenInitialised = pdTRUE; } /* The wanted size is increased so it can contain a BlockLink_t structure in addition to the requested amount of bytes. */ if( xWantedSize > 0 ) { xWantedSize += heapSTRUCT_SIZE; /* Ensure that blocks are always aligned to the required number of bytes. */ if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0 ) { /* Byte alignment required. */ xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); } } if( ( xWantedSize > 0 ) && ( xWantedSize < configADJUSTED_HEAP_SIZE ) ) { /* Blocks are stored in byte order - traverse the list from the start (smallest) block until one of adequate size is found. */ pxPreviousBlock = &xStart; pxBlock = xStart.pxNextFreeBlock; while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) { pxPreviousBlock = pxBlock; pxBlock = pxBlock->pxNextFreeBlock; } /* If we found the end marker then a block of adequate size was not found. */ if( pxBlock != &xEnd ) { /* Return the memory space - jumping over the BlockLink_t structure at its start. */ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE ); /* This block is being returned for use so must be taken out of the list of free blocks. */ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; /* If the block is larger than required it can be split into two. */ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) { /* This block is to be split into two. Create a new block following the number of bytes requested. The void cast is used to prevent byte alignment warnings from the compiler. */ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); /* Calculate the sizes of two blocks split from the single block. */ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; pxBlock->xBlockSize = xWantedSize; /* Insert the new block into the list of free blocks. */ prvInsertBlockIntoFreeList( ( pxNewBlockLink ) ); } xFreeBytesRemaining -= pxBlock->xBlockSize; } } traceMALLOC( pvReturn, xWantedSize ); } ( void ) xTaskResumeAll(); #if( configUSE_MALLOC_FAILED_HOOK == 1 ) { if( pvReturn == NULL ) { extern void vApplicationMallocFailedHook( void ); vApplicationMallocFailedHook(); } } #endif return pvReturn; }
EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet ) { ListItem_t *pxListItem, *pxNext; ListItem_t const *pxListEnd; List_t *pxList; EventBits_t uxBitsToClear = 0, uxBitsWaitedFor, uxControlBits; EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup; BaseType_t xMatchFound = pdFALSE; /* Check the user is not attempting to set the bits used by the kernel itself. */ configASSERT( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); pxList = &( pxEventBits->xTasksWaitingForBits ); pxListEnd = listGET_END_MARKER( pxList ); /*lint !e826 !e740 The mini list structure is used as the list end to save RAM. This is checked and valid. */ vTaskSuspendAll(); { traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet ); pxListItem = listGET_HEAD_ENTRY( pxList ); /* Set the bits. */ pxEventBits->uxEventBits |= uxBitsToSet; /* See if the new bit value should unblock any tasks. */ while( pxListItem != pxListEnd ) { pxNext = listGET_NEXT( pxListItem ); uxBitsWaitedFor = listGET_LIST_ITEM_VALUE( pxListItem ); xMatchFound = pdFALSE; /* Split the bits waited for from the control bits. */ uxControlBits = uxBitsWaitedFor & eventEVENT_BITS_CONTROL_BYTES; uxBitsWaitedFor &= ~eventEVENT_BITS_CONTROL_BYTES; if( ( uxControlBits & eventWAIT_FOR_ALL_BITS ) == ( EventBits_t ) 0 ) { /* Just looking for single bit being set. */ if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) != ( EventBits_t ) 0 ) { xMatchFound = pdTRUE; } else { mtCOVERAGE_TEST_MARKER(); } } else if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) == uxBitsWaitedFor ) { /* All bits are set. */ xMatchFound = pdTRUE; } else { /* Need all bits to be set, but not all the bits were set. */ } if( xMatchFound != pdFALSE ) { /* The bits match. Should the bits be cleared on exit? */ if( ( uxControlBits & eventCLEAR_EVENTS_ON_EXIT_BIT ) != ( EventBits_t ) 0 ) { uxBitsToClear |= uxBitsWaitedFor; } else { mtCOVERAGE_TEST_MARKER(); } /* Store the actual event flag value in the task's event list item before removing the task from the event list. The eventUNBLOCKED_DUE_TO_BIT_SET bit is set so the task knows that is was unblocked due to its required bits matching, rather than because it timed out. */ ( void ) xTaskRemoveFromUnorderedEventList( pxListItem, pxEventBits->uxEventBits | eventUNBLOCKED_DUE_TO_BIT_SET ); } /* Move onto the next list item. Note pxListItem->pxNext is not used here as the list item may have been removed from the event list and inserted into the ready/pending reading list. */ pxListItem = pxNext; } /* Clear any bits that matched when the eventCLEAR_EVENTS_ON_EXIT_BIT bit was set in the control word. */ pxEventBits->uxEventBits &= ~uxBitsToClear; } ( void ) xTaskResumeAll(); return pxEventBits->uxEventBits; }
EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToSet, const EventBits_t uxBitsToWaitFor, TickType_t xTicksToWait ) { EventBits_t uxOriginalBitValue, uxReturn; EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup; BaseType_t xAlreadyYielded; configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); configASSERT( uxBitsToWaitFor != 0 ); #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) { configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); } #endif vTaskSuspendAll(); { traceEVENT_GROUP_SYNC_START( xEventGroup, uxBitsToSet ); uxOriginalBitValue = pxEventBits->uxEventBits; ( void ) xEventGroupSetBits( xEventGroup, uxBitsToSet ); if( ( ( uxOriginalBitValue | uxBitsToSet ) & uxBitsToWaitFor ) == uxBitsToWaitFor ) { /* All the rendezvous bits are now set - no need to block. */ uxReturn = ( uxOriginalBitValue | uxBitsToSet ); /* Rendezvous always clear the bits. They will have been cleared already unless this is the only task in the rendezvous. */ pxEventBits->uxEventBits &= uxBitsToWaitFor; xTicksToWait = 0; } else { if( xTicksToWait != ( TickType_t ) 0 ) { /* Store the bits that the calling task is waiting for in the task's event list item so the kernel knows when a match is found. Then enter the blocked state. */ vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | eventCLEAR_EVENTS_ON_EXIT_BIT | eventWAIT_FOR_ALL_BITS ), xTicksToWait ); /* This assignment is obsolete as uxReturn will get set after the task unblocks, but some compilers mistakenly generate a warning about uxReturn being returned without being set if the assignment is omitted. */ uxReturn = 0; } else { /* The rendezvous bits were not set, but no block time was specified - just return the current event bit value. */ uxReturn = pxEventBits->uxEventBits; } } } xAlreadyYielded = xTaskResumeAll(); if( xTicksToWait != ( TickType_t ) 0 ) { if( xAlreadyYielded == pdFALSE ) { portYIELD_WITHIN_API(); } else { mtCOVERAGE_TEST_MARKER(); } /* The task blocked to wait for its required bits to be set - at this point either the required bits were set or the block time expired. If the required bits were set they will have been stored in the task's event list item, and they should now be retrieved then cleared. */ uxReturn = uxTaskResetEventItemValue(); if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { /* The task timed out, just return the current event bit value. */ taskENTER_CRITICAL(); { uxReturn = pxEventBits->uxEventBits; /* Although the task got here because it timed out before the bits it was waiting for were set, it is possible that since it unblocked another task has set the bits. If this is the case then it may be required to clear the bits before exiting. */ if( ( uxReturn & uxBitsToWaitFor ) == uxBitsToWaitFor ) { pxEventBits->uxEventBits &= ~uxBitsToWaitFor; } else { mtCOVERAGE_TEST_MARKER(); } } taskEXIT_CRITICAL(); } else { /* The task unblocked because the bits were set. Clear the control bits before returning the value. */ uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES; } } traceEVENT_GROUP_SYNC_END( xEventGroup, uxReturn ); return uxReturn; }
/*-----------------------------------------------------------*/ void* pvPortReAlloc( void *pv, size_t xWantedSize ) { ASSERT( ( pv >= (void*)WMSDK_HEAP_START_ADDR ) || ( pv == NULL ) ); ASSERT( ( pv <= (void*)lastHeapAddress ) || ( pv == NULL ) ); pre_free_hook(pv); unsigned char *puc = ( unsigned char * ) pv; #ifdef ALLOC_TRACE unsigned char *old_ptr= puc; #endif /* ALLOC_TRACE */ xBlockLink *pxLink; if( pv ) { if( !xWantedSize ) { vPortFree( pv ); return NULL; } void *newArea = pvPortMalloc( xWantedSize ); if( newArea ) { /* The memory being freed will have an xBlockLink structure immediately before it. */ puc -= heapSTRUCT_SIZE; /* This casting is to keep the compiler from issuing warnings. */ pxLink = ( void * ) puc; ATRACE("MDC F %10x %10d %10d\r\n", puc + heapSTRUCT_SIZE, BLOCK_SIZE( pxLink ), xFreeBytesRemaining + BLOCK_SIZE(pxLink)); post_free_hook( ( ( unsigned )puc + heapSTRUCT_SIZE ), GET_ACTUAL_SIZE( pxLink ) ); int oldSize = BLOCK_SIZE( pxLink ) - heapSTRUCT_SIZE; int copySize = ( oldSize < xWantedSize ) ? oldSize : xWantedSize; memcpy( newArea, pv, copySize ); randomizeAreaData((unsigned char*) ((unsigned)pxLink + heapSTRUCT_SIZE), BLOCK_SIZE( pxLink ) - heapSTRUCT_SIZE); vTaskSuspendAll(); { /* Add this block to the list of free blocks. */ SET_FREE( pxLink ); xFreeBytesRemaining += BLOCK_SIZE(pxLink); prvInsertBlockIntoFreeList( ( ( xBlockLink * ) pxLink ) ); #ifdef FREERTOS_ENABLE_MALLOC_STATS hI.totalAllocations--; #endif // FREERTOS_ENABLE_MALLOC_STATS } xTaskResumeAll(); return newArea; } } else if( xWantedSize ) return pvPortMalloc( xWantedSize ); else return NULL; return NULL; }
void *pvPortMalloc( size_t xWantedSize ) { xBlockLink *pxBlock = NULL, *pxPreviousBlock, *pxNewBlockLink; void *pvReturn = NULL; if(!xWantedSize) return NULL; pre_alloc_hook( xWantedSize ); vTaskSuspendAll(); { /* If this is the first call to malloc then the heap will require initialisation to setup the list of free blocks. */ if( xHeapHasBeenInitialised == pdFALSE ) { prvHeapInit(); xHeapHasBeenInitialised = pdTRUE; } /* The wanted size is increased so it can contain a xBlockLink structure in addition to the requested amount of bytes. */ if( xWantedSize > 0 ) { xWantedSize += heapSTRUCT_SIZE; /* Ensure that blocks are always aligned to the required number of bytes. */ if( xWantedSize & portBYTE_ALIGNMENT_MASK ) { /* Byte alignment required. */ xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); } } if( ( xWantedSize > 0 ) && ( xWantedSize < configTOTAL_HEAP_SIZE ) ) { /* Blocks are stored in byte order - traverse the list from the start (smallest) block until one of adequate size is found. */ pxPreviousBlock = &xStart; pxBlock = xStart.pxNextFreeBlock; while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock ) ) { pxPreviousBlock = pxBlock; pxBlock = pxBlock->pxNextFreeBlock; } /* If we found the end marker then a block of adequate size was not found. */ if( pxBlock != &xEnd ) { /* Return the memory space - jumping over the xBlockLink structure at its start. */ pvReturn = ( void * ) ( ( ( unsigned char * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE ); #ifdef FREERTOS_ENABLE_MALLOC_STATS hI.totalAllocations++; #endif // FREERTOS_ENABLE_MALLOC_STATS /* This block is being returned for use so must be taken off the list of free blocks. */ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; pxBlock->pxNextFreeBlock = NULL; /* If the block is larger than required it can be split into two. */ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) { /* This block is to be split into two. Create a new block following the number of bytes requested. The void cast is used to prevent byte alignment warnings from the compiler. */ pxNewBlockLink = ( void * ) ( ( ( unsigned char * ) pxBlock ) + xWantedSize ); /* Calculate the sizes of two blocks split from the single block. */ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; /* Assume bit 0 is 0 i.e. BLOCK_ALLOCATED flag is clear */ pxBlock->xBlockSize = xWantedSize; /* Add the new block to the serial list */ pxNewBlockLink->pxPrev = pxBlock; if( ! IS_LAST_BLOCK(pxNewBlockLink) ) NEXT_BLOCK( pxNewBlockLink )->pxPrev = pxNewBlockLink; SET_ALLOCATED(pxBlock); /* insert the new block into the list of free blocks. 
*/ prvInsertBlockIntoFreeList( pxNewBlockLink ); } else { SET_ALLOCATED(pxBlock); } xFreeBytesRemaining -= BLOCK_SIZE(pxBlock); } } } xTaskResumeAll(); #if( configUSE_MALLOC_FAILED_HOOK == 1 ) { if( pvReturn == NULL ) { DTRACE("Heap allocation failed.\n\r" "Requested: %d\n\r" "Available : %d\n\r", xWantedSize, xFreeBytesRemaining); extern void vApplicationMallocFailedHook( void ); vApplicationMallocFailedHook(); } } #else if( pvReturn == NULL ) { DTRACE("Heap allocation failed.\n\r" "Requested: %d\n\r" "Available : %d\n\r", xWantedSize, xFreeBytesRemaining); #ifdef FREERTOS_ENABLE_MALLOC_STATS hI.failedAllocations++; #endif /* FREERTOS_ENABLE_MALLOC_STATS */ } #endif if(pvReturn) { SET_ACTUAL_SIZE( pxBlock ); SET_CALLER_ADDR( pxBlock ); ATRACE("MDC A %10x %6d %10d R: %x\r\n", pvReturn , BLOCK_SIZE( pxBlock ), xFreeBytesRemaining, __builtin_return_address(0)); randomizeAreaData((unsigned char*)pvReturn, BLOCK_SIZE( pxBlock ) - heapSTRUCT_SIZE); post_alloc_hook( pvReturn ); #ifdef FREERTOS_ENABLE_MALLOC_STATS if ((configTOTAL_HEAP_SIZE - xFreeBytesRemaining) > hI.peakHeapUsage) { hI.peakHeapUsage = (configTOTAL_HEAP_SIZE - xFreeBytesRemaining); } #endif } return pvReturn; }
int prvHeapAddMemBank(char *chunk_start, size_t size) { xBlockLink *pxIterator; xBlockLink *pxNewBlock; xBlockLink *pxAllocBlock; xBlockLink *p; /* Ensure that blocks are always aligned to the required number of bytes. */ DTRACE("AddMemBank: Received size: %u\r\n", size); /* Make sure chunk_start is on portBYTE_ALIGNMENT */ if( (unsigned long) chunk_start & portBYTE_ALIGNMENT_MASK ) { chunk_start += ( portBYTE_ALIGNMENT - ( (unsigned long) chunk_start & portBYTE_ALIGNMENT_MASK ) ); size -= ( portBYTE_ALIGNMENT - ( (unsigned long) chunk_start & portBYTE_ALIGNMENT_MASK ) ); } if( size & portBYTE_ALIGNMENT_MASK ) { /* Let go of the last few bytes */ size -= ( size & portBYTE_ALIGNMENT_MASK ) ; } DTRACE("AddMemBank: Fixed size: %u\r\n", size); vTaskSuspendAll(); /* Initialize heap if not already */ if( xHeapHasBeenInitialised == pdFALSE ) { prvHeapInit(); xHeapHasBeenInitialised = pdTRUE; } for( pxIterator = ( xBlockLink * )xHeap.ucHeap; NEXT_BLOCK(pxIterator) != (void *)lastHeapAddress; pxIterator = NEXT_BLOCK(pxIterator) ) { } if (!IS_FREE_BLOCK(pxIterator)) { /* For adding a new chunk it is important that the last part of the * previous chunk is empty (ie end of SRAM0 should be available) */ DTRACE("End of the first bank is not free. Cannot add new memory bank"); return pdFAIL; } if ((chunk_start - (char *)lastHeapAddress) < heapMINIMUM_BLOCK_SIZE) { /* Too small hole in between */ DTRACE("Too Small Hole in between %p %p %d\r\n", chunk_start, lastHeapAddress, heapMINIMUM_BLOCK_SIZE); return pdFAIL; } if (chunk_start < (char *)startHeapAddress) { DTRACE("Adding of a memory bank BEFORE the default heap is not supported"); return pdFAIL; } /* pxIterator should now point to the free block that contains the end * of the previous chunk. * We have to split this now, such that end of the previous chunk * contains the heapSTRUCT. This heapSTRUCT should say that the entire * hole from end of SRAM0 to start of the free space in SRAM1 is a huge * allocation. 
*/ /* Fix the size of the last free block */ /* XXX Check if we are creating a block lesser than the minimum allowed * block size */ DTRACE("AddMemBank: Last Block: %p size %u\r\n", pxIterator, pxIterator->xBlockSize); pxIterator->xBlockSize -= heapSTRUCT_SIZE; unsigned unalignment = (pxIterator->xBlockSize & portBYTE_ALIGNMENT_MASK); if (unalignment) { pxIterator->xBlockSize -= unalignment; } DTRACE("AddMemBank: Fixed Last Block size %u\r\n", pxIterator->xBlockSize); /* Create a new block that marks the hole as allocated */ pxAllocBlock = NEXT_BLOCK(pxIterator); DTRACE("AddMemBank: Alloc Block: %p \r\n", pxAllocBlock); pxAllocBlock->xBlockSize = chunk_start - (char *)lastHeapAddress + heapSTRUCT_SIZE + unalignment; DTRACE("AddMemBank: Alloc Block: %p size %u\r\n", pxAllocBlock, pxAllocBlock->xBlockSize); /* These are never used for an allocated block*/ pxAllocBlock->pxNextFreeBlock = NULL; pxAllocBlock->pxPrev = pxIterator; SET_ALLOCATED(pxAllocBlock); /* Manage all accounting variables */ lastHeapAddress = (size_t)(chunk_start + size); xFreeBytesRemaining += size; configTOTAL_HEAP_SIZE += size; #ifdef FREERTOS_ENABLE_MALLOC_STATS hI.heapSize += size; #endif // FREERTOS_ENABLE_MALLOC_STATS xEnd.xBlockSize = configTOTAL_HEAP_SIZE; /* Create a new block at the start of the chunk_start */ /* Ensure the allocation flags aren't part of the block size */ pxNewBlock = NEXT_BLOCK(pxAllocBlock); DTRACE("AddMemBank: New Block: %p \r\n", pxNewBlock); pxNewBlock->xBlockSize = size; pxNewBlock->pxNextFreeBlock = &xEnd; pxNewBlock->pxPrev = pxAllocBlock; /* Iterate through the list until a block is found that has a larger size */ /* than the block we are inserting. */ for( p = &xStart; (p->pxNextFreeBlock != &xEnd) && (p->pxNextFreeBlock->xBlockSize < size); p = p->pxNextFreeBlock ) { /* There is nothing to do here - just iterate to the correct position. */ } if (p->pxNextFreeBlock == &xEnd) { /* Ours is the largest block */ p->pxNextFreeBlock = pxNewBlock; } else { /* Update the list to include the block being inserted in the correct */ /* position. */ pxNewBlock->pxNextFreeBlock = p->pxNextFreeBlock; p->pxNextFreeBlock = pxNewBlock; } DTRACE("AddMemBank: New Block: %p size %u\r\n", pxNewBlock, pxNewBlock->xBlockSize); xTaskResumeAll(); return pdPASS; }
signed portBASE_TYPE xQueueReceive( xQueueHandle pxQueue, void *pvBuffer, portTickType xTicksToWait ) { signed portBASE_TYPE xReturn; /* This function is very similar to xQueueSend(). See comments within xQueueSend() for a more detailed explanation. Make sure other tasks do not access the queue. */ vTaskSuspendAll(); /* Make sure interrupts do not access the queue. */ prvLockQueue( pxQueue ); /* If there are no messages in the queue we may have to block. */ if( prvIsQueueEmpty( pxQueue ) ) { /* There are no messages in the queue, do we want to block or just leave with nothing? */ if( xTicksToWait > ( portTickType ) 0 ) { vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); taskENTER_CRITICAL(); { prvUnlockQueue( pxQueue ); if( !xTaskResumeAll() ) { taskYIELD(); } vTaskSuspendAll(); prvLockQueue( pxQueue ); } taskEXIT_CRITICAL(); } } taskENTER_CRITICAL(); { if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 ) { pxQueue->pcReadFrom += pxQueue->uxItemSize; if( pxQueue->pcReadFrom >= pxQueue->pcTail ) { pxQueue->pcReadFrom = pxQueue->pcHead; } --( pxQueue->uxMessagesWaiting ); memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->pcReadFrom, ( unsigned ) pxQueue->uxItemSize ); /* Increment the lock count so prvUnlockQueue knows to check for tasks waiting for space to become available on the queue. */ ++( pxQueue->xRxLock ); xReturn = pdPASS; } else { xReturn = pdFAIL; } } taskEXIT_CRITICAL(); /* We no longer require exclusive access to the queue. */ if( prvUnlockQueue( pxQueue ) ) { if( !xTaskResumeAll() ) { taskYIELD(); } } else { xTaskResumeAll(); } return xReturn; }
static void vMemCheckTask( void *pvParameters ) { unsigned long *pulMemCheckTaskRunningCounter; void *pvMem1, *pvMem2, *pvMem3; static long lErrorOccurred = pdFALSE; /* This task is dynamically created then deleted during each cycle of the vErrorChecks task to check the operation of the memory allocator. Each time the task is created memory is allocated for the stack and TCB. Each time the task is deleted this memory is returned to the heap. This task itself exercises the allocator by allocating and freeing blocks. The task executes at the idle priority so does not require a delay. pulMemCheckTaskRunningCounter is incremented each cycle to indicate to the vErrorChecks() task that this task is still executing without error. */ pulMemCheckTaskRunningCounter = ( unsigned long * ) pvParameters; for( ;; ) { if( lErrorOccurred == pdFALSE ) { /* We have never seen an error so increment the counter. */ ( *pulMemCheckTaskRunningCounter )++; } /* Allocate some memory - just to give the allocator some extra exercise. This has to be in a critical section to ensure the task does not get deleted while it has memory allocated. */ vTaskSuspendAll(); { pvMem1 = pvPortMalloc( mainMEM_CHECK_SIZE_1 ); if( pvMem1 == NULL ) { lErrorOccurred = pdTRUE; } else { memset( pvMem1, 0xaa, mainMEM_CHECK_SIZE_1 ); vPortFree( pvMem1 ); } } xTaskResumeAll(); /* Again - with a different size block. */ vTaskSuspendAll(); { pvMem2 = pvPortMalloc( mainMEM_CHECK_SIZE_2 ); if( pvMem2 == NULL ) { lErrorOccurred = pdTRUE; } else { memset( pvMem2, 0xaa, mainMEM_CHECK_SIZE_2 ); vPortFree( pvMem2 ); } } xTaskResumeAll(); /* Again - with a different size block. */ vTaskSuspendAll(); { pvMem3 = pvPortMalloc( mainMEM_CHECK_SIZE_3 ); if( pvMem3 == NULL ) { lErrorOccurred = pdTRUE; } else { memset( pvMem3, 0xaa, mainMEM_CHECK_SIZE_3 ); vPortFree( pvMem3 ); } } xTaskResumeAll(); } }
/* * Controller task as described above. */ static portTASK_FUNCTION( vCounterControlTask, pvParameters ) { uint32_t ulLastCounter; short sLoops; short sError = pdFALSE; /* Just to stop warning messages. */ ( void ) pvParameters; for( ;; ) { /* Start with the counter at zero. */ ulCounter = ( uint32_t ) 0; /* First section : */ /* Check the continuous count task is running. */ for( sLoops = 0; sLoops < priLOOPS; sLoops++ ) { /* Suspend the continuous count task so we can take a mirror of the shared variable without risk of corruption. This is not really needed as the other task raises its priority above this task's priority. */ vTaskSuspend( xContinuousIncrementHandle ); { #if( INCLUDE_eTaskGetState == 1 ) { configASSERT( eTaskGetState( xContinuousIncrementHandle ) == eSuspended ); } #endif /* INCLUDE_eTaskGetState */ ulLastCounter = ulCounter; } vTaskResume( xContinuousIncrementHandle ); #if( configUSE_PREEMPTION == 0 ) taskYIELD(); #endif #if( INCLUDE_eTaskGetState == 1 ) { configASSERT( eTaskGetState( xContinuousIncrementHandle ) == eReady ); } #endif /* INCLUDE_eTaskGetState */ /* Now delay to ensure the other task has processor time. */ vTaskDelay( priSLEEP_TIME ); /* Check the shared variable again. This time to ensure mutual exclusion the whole scheduler will be locked. This is just for demo purposes! */ vTaskSuspendAll(); { if( ulLastCounter == ulCounter ) { /* The shared variable has not changed. There is a problem with the continuous count task so flag an error. */ sError = pdTRUE; } } xTaskResumeAll(); } /* Second section: */ /* Suspend the continuous counter task so it stops accessing the shared variable. */ vTaskSuspend( xContinuousIncrementHandle ); /* Reset the variable. */ ulCounter = ( uint32_t ) 0; #if( INCLUDE_eTaskGetState == 1 ) { configASSERT( eTaskGetState( xLimitedIncrementHandle ) == eSuspended ); } #endif /* INCLUDE_eTaskGetState */ /* Resume the limited count task which has a higher priority than us. We should therefore not return from this call until the limited count task has suspended itself with a known value in the counter variable. */ vTaskResume( xLimitedIncrementHandle ); #if( configUSE_PREEMPTION == 0 ) taskYIELD(); #endif /* This task should not run again until xLimitedIncrementHandle has suspended itself. */ #if( INCLUDE_eTaskGetState == 1 ) { configASSERT( eTaskGetState( xLimitedIncrementHandle ) == eSuspended ); } #endif /* INCLUDE_eTaskGetState */ /* Does the counter variable have the expected value? */ if( ulCounter != priMAX_COUNT ) { sError = pdTRUE; } if( sError == pdFALSE ) { /* If no errors have occurred then increment the check variable. */ portENTER_CRITICAL(); usCheckVariable++; portEXIT_CRITICAL(); } /* Resume the continuous count task and do it all again. */ vTaskResume( xContinuousIncrementHandle ); #if( configUSE_PREEMPTION == 0 ) taskYIELD(); #endif } }
EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup, const EventBits_t uxBitsToWaitFor, const BaseType_t xClearOnExit, const BaseType_t xWaitForAllBits, TickType_t xTicksToWait ) { EventGroup_t *pxEventBits = ( EventGroup_t * ) xEventGroup; EventBits_t uxReturn, uxControlBits = 0; BaseType_t xWaitConditionMet, xAlreadyYielded; /* Check the user is not attempting to wait on the bits used by the kernel itself, and that at least one bit is being requested. */ configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 ); configASSERT( uxBitsToWaitFor != 0 ); #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) { configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) ); } #endif vTaskSuspendAll(); { const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits; traceEVENT_GROUP_WAIT_BITS_START( xEventGroup, uxBitsToWaitFor ); /* Check to see if the wait condition is already met or not. */ xWaitConditionMet = prvTestWaitCondition( uxCurrentEventBits, uxBitsToWaitFor, xWaitForAllBits ); if( xWaitConditionMet != pdFALSE ) { /* The wait condition has already been met so there is no need to block. */ uxReturn = uxCurrentEventBits; xTicksToWait = ( TickType_t ) 0; /* Clear the wait bits if requested to do so. */ if( xClearOnExit != pdFALSE ) { pxEventBits->uxEventBits &= ~uxBitsToWaitFor; } else { mtCOVERAGE_TEST_MARKER(); } } else if( xTicksToWait == ( TickType_t ) 0 ) { /* The wait condition has not been met, but no block time was specified, so just return the current value. */ uxReturn = uxCurrentEventBits; } else { /* The task is going to block to wait for its required bits to be set. uxControlBits are used to remember the specified behaviour of this call to xEventGroupWaitBits() - for use when the event bits unblock the task. */ if( xClearOnExit != pdFALSE ) { uxControlBits |= eventCLEAR_EVENTS_ON_EXIT_BIT; } else { mtCOVERAGE_TEST_MARKER(); } if( xWaitForAllBits != pdFALSE ) { uxControlBits |= eventWAIT_FOR_ALL_BITS; } else { mtCOVERAGE_TEST_MARKER(); } /* Store the bits that the calling task is waiting for in the task's event list item so the kernel knows when a match is found. Then enter the blocked state. */ vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | uxControlBits ), xTicksToWait ); /* This is obsolete as it will get set after the task unblocks, but some compilers mistakenly generate a warning about the variable being returned without being set if it is not done. */ uxReturn = 0; } } xAlreadyYielded = xTaskResumeAll(); if( xTicksToWait != ( TickType_t ) 0 ) { if( xAlreadyYielded == pdFALSE ) { portYIELD_WITHIN_API(); } else { mtCOVERAGE_TEST_MARKER(); } /* The task blocked to wait for its required bits to be set - at this point either the required bits were set or the block time expired. If the required bits were set they will have been stored in the task's event list item, and they should now be retrieved then cleared. */ uxReturn = uxTaskResetEventItemValue(); if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 ) { taskENTER_CRITICAL(); { /* The task timed out, just return the current event bit value. */ uxReturn = pxEventBits->uxEventBits; /* It is possible that the event bits were updated between this task leaving the Blocked state and running again. 
*/ if( prvTestWaitCondition( uxReturn, uxBitsToWaitFor, xWaitForAllBits ) != pdFALSE ) { if( xClearOnExit != pdFALSE ) { pxEventBits->uxEventBits &= ~uxBitsToWaitFor; } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } } taskEXIT_CRITICAL(); } else { /* The task unblocked because the bits were set. Clear the control bits before returning the value. */ uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES; } } traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxReturn ); return uxReturn; }
int _init_startup(void) { /* Import the Exception Vector Base Address. */ extern void _evba; #if configHEAP_INIT extern void __heap_start__; extern void __heap_end__; portBASE_TYPE *pxMem; #endif /* Load the Exception Vector Base Address in the corresponding system register. */ Set_system_register( AVR32_EVBA, ( int ) &_evba ); /* Enable exceptions. */ ENABLE_ALL_EXCEPTIONS(); /* Initialize interrupt handling. */ INTC_init_interrupts(); #if configHEAP_INIT /* Initialize the heap used by malloc. */ for( pxMem = &__heap_start__; pxMem < ( portBASE_TYPE * )&__heap_end__; ) { *pxMem++ = 0xA5A5A5A5; } #endif /* Code section present if and only if the debug trace is activated. */ #if configDBG { static const gpio_map_t DBG_USART_GPIO_MAP = { { configDBG_USART_RX_PIN, configDBG_USART_RX_FUNCTION }, { configDBG_USART_TX_PIN, configDBG_USART_TX_FUNCTION } }; static const usart_options_t DBG_USART_OPTIONS = { .baudrate = configDBG_USART_BAUDRATE, .charlength = 8, .paritytype = USART_NO_PARITY, .stopbits = USART_1_STOPBIT, .channelmode = USART_NORMAL_CHMODE }; /* Initialize the USART used for the debug trace with the configured parameters. */ extern volatile avr32_usart_t *volatile stdio_usart_base; stdio_usart_base = configDBG_USART; gpio_enable_module( DBG_USART_GPIO_MAP, sizeof( DBG_USART_GPIO_MAP ) / sizeof( DBG_USART_GPIO_MAP[0] ) ); usart_init_rs232(configDBG_USART, &DBG_USART_OPTIONS, configPBA_CLOCK_HZ); } #endif // Don't-care value for GCC. return 1; } /*-----------------------------------------------------------*/ /* * malloc, realloc and free are meant to be called through respectively * pvPortMalloc, pvPortRealloc and vPortFree. * The latter functions call the former ones from within sections where tasks * are suspended, so the latter functions are task-safe. __malloc_lock and * __malloc_unlock use the same mechanism to also keep the former functions * task-safe as they may be called directly from Newlib's functions. * However, all these functions are interrupt-unsafe and SHALL THEREFORE NOT BE * CALLED FROM WITHIN AN INTERRUPT, because __malloc_lock and __malloc_unlock do * not call portENTER_CRITICAL and portEXIT_CRITICAL in order not to disable * interrupts during memory allocation management as this may be a very time- * consuming process. */ /* * Lock routine called by Newlib on malloc / realloc / free entry to guarantee a * safe section as memory allocation management uses global data. * See the aforementioned details. */ void __malloc_lock(struct _reent *ptr); void __malloc_lock(struct _reent *ptr) { vTaskSuspendAll(); } /* * Unlock routine called by Newlib on malloc / realloc / free exit to guarantee * a safe section as memory allocation management uses global data. * See the aforementioned details. */ void __malloc_unlock(struct _reent *ptr); void __malloc_unlock(struct _reent *ptr) { xTaskResumeAll(); } /*-----------------------------------------------------------*/ /* Added as there is no such function in FreeRTOS. */ void *pvPortRealloc( void *pv, size_t xWantedSize ) { void *pvReturn; vTaskSuspendAll(); { pvReturn = realloc( pv, xWantedSize ); } xTaskResumeAll(); return pvReturn; }
void *pvPortMalloc( size_t xWantedSize ) { BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink; void *pvReturn = NULL; vTaskSuspendAll(); { /* If this is the first call to malloc then the heap will require initialisation to setup the list of free blocks. */ if( pxEnd == NULL ) { prvHeapInit(); } else { mtCOVERAGE_TEST_MARKER(); } /* Check the requested block size is not so large that the top bit is set. The top bit of the block size member of the BlockLink_t structure is used to determine who owns the block - the application or the kernel, so it must be free. */ if( ( xWantedSize & xBlockAllocatedBit ) == 0 ) { /* The wanted size is increased so it can contain a BlockLink_t structure in addition to the requested amount of bytes. */ if( xWantedSize > 0 ) { xWantedSize += xHeapStructSize; /* Ensure that blocks are always aligned to the required number of bytes. */ if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 ) { /* Byte alignment required. */ xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) ); configASSERT( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) == 0 ); } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) ) { /* Traverse the list from the start (lowest address) block until one of adequate size is found. */ pxPreviousBlock = &xStart; pxBlock = xStart.pxNextFreeBlock; while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) ) { pxPreviousBlock = pxBlock; pxBlock = pxBlock->pxNextFreeBlock; } /* If the end marker was reached then a block of adequate size was not found. */ if( pxBlock != pxEnd ) { /* Return the memory space pointed to - jumping over the BlockLink_t structure at its start. */ pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + xHeapStructSize ); /* This block is being returned for use so must be taken out of the list of free blocks. */ pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock; /* If the block is larger than required it can be split into two. */ if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE ) { /* This block is to be split into two. Create a new block following the number of bytes requested. The void cast is used to prevent byte alignment warnings from the compiler. */ pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize ); configASSERT( ( ( ( size_t ) pxNewBlockLink ) & portBYTE_ALIGNMENT_MASK ) == 0 ); /* Calculate the sizes of two blocks split from the single block. */ pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize; pxBlock->xBlockSize = xWantedSize; /* Insert the new block into the list of free blocks. */ prvInsertBlockIntoFreeList( pxNewBlockLink ); } else { mtCOVERAGE_TEST_MARKER(); } xFreeBytesRemaining -= pxBlock->xBlockSize; if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining ) { xMinimumEverFreeBytesRemaining = xFreeBytesRemaining; } else { mtCOVERAGE_TEST_MARKER(); } /* The block is being returned - it is allocated and owned by the application and has no "next" block. 
*/ pxBlock->xBlockSize |= xBlockAllocatedBit; pxBlock->pxNextFreeBlock = NULL; } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } } else { mtCOVERAGE_TEST_MARKER(); } traceMALLOC( pvReturn, xWantedSize ); } ( void ) xTaskResumeAll(); #if( configUSE_MALLOC_FAILED_HOOK == 1 ) { if( pvReturn == NULL ) { extern void vApplicationMallocFailedHook( void ); vApplicationMallocFailedHook(); } else { mtCOVERAGE_TEST_MARKER(); } } #endif configASSERT( ( ( ( size_t ) pvReturn ) & ( size_t ) portBYTE_ALIGNMENT_MASK ) == 0 ); return pvReturn; }
signed portBASE_TYPE xQueueGenericSend( xQueueHandle pxQueue, const void * const pvItemToQueue, portTickType xTicksToWait, portBASE_TYPE xCopyPosition ) { signed portBASE_TYPE xReturn = pdTRUE; xTimeOutType xTimeOut; do { /* If xTicksToWait is zero then we are not going to block even if there is no room in the queue to post. */ if( xTicksToWait > ( portTickType ) 0 ) { vTaskSuspendAll(); prvLockQueue( pxQueue ); if( xReturn == pdTRUE ) { /* This is the first time through - we need to capture the time while the scheduler is locked to ensure we attempt to block at least once. */ vTaskSetTimeOutState( &xTimeOut ); } if( prvIsQueueFull( pxQueue ) ) { /* Need to call xTaskCheckForTimeout again as time could have passed since it was last called if this is not the first time around this loop. */ if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { traceBLOCKING_ON_QUEUE_SEND( pxQueue ); vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); /* Unlocking the queue means queue events can effect the event list. It is possible that interrupts occurring now remove this task from the event list again - but as the scheduler is suspended the task will go onto the pending ready last instead of the actual ready list. */ prvUnlockQueue( pxQueue ); /* Resuming the scheduler will move tasks from the pending ready list into the ready list - so it is feasible that this task is already in a ready list before it yields - in which case the yield will not cause a context switch unless there is also a higher priority task in the pending ready list. */ if( !xTaskResumeAll() ) { taskYIELD(); } } else { prvUnlockQueue( pxQueue ); ( void ) xTaskResumeAll(); } } else { /* The queue was not full so we can just unlock the scheduler and queue again before carrying on. */ prvUnlockQueue( pxQueue ); ( void ) xTaskResumeAll(); } } /* Higher priority tasks and interrupts can execute during this time and could possible refill the queue - even if we unblocked because space became available. */ taskENTER_CRITICAL(); { /* Is there room on the queue now? To be running we must be the highest priority task wanting to access the queue. */ if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) { traceQUEUE_SEND( pxQueue ); prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition ); xReturn = pdPASS; /* If there was a task waiting for data to arrive on the queue then unblock it now. */ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE ) { if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE ) { /* The unblocked task has a priority higher than our own so yield immediately. */ taskYIELD(); } } } else { /* Setting xReturn to errQUEUE_FULL will force its timeout to be re-evaluated. This is necessary in case interrupts and higher priority tasks accessed the queue between this task being unblocked and subsequently attempting to write to the queue. */ xReturn = errQUEUE_FULL; } } taskEXIT_CRITICAL(); if( xReturn == errQUEUE_FULL ) { if( xTicksToWait > ( portTickType ) 0 ) { if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { xReturn = queueERRONEOUS_UNBLOCK; } else { traceQUEUE_SEND_FAILED( pxQueue ); } } else { traceQUEUE_SEND_FAILED( pxQueue ); } } } while( xReturn == queueERRONEOUS_UNBLOCK ); return xReturn; }
/* * Low-level initialization routine called during startup, before the main * function. */ int __low_level_init(void) { #if configHEAP_INIT #pragma segment = "HEAP" portBASE_TYPE *pxMem; #endif /* Enable exceptions. */ ENABLE_ALL_EXCEPTIONS(); /* Initialize interrupt handling. */ INTC_init_interrupts(); #if configHEAP_INIT { /* Initialize the heap used by malloc. */ for( pxMem = __segment_begin( "HEAP" ); pxMem < ( portBASE_TYPE * ) __segment_end( "HEAP" ); ) { *pxMem++ = 0xA5A5A5A5; } } #endif /* Code section present if and only if the debug trace is activated. */ #if configDBG { static const gpio_map_t DBG_USART_GPIO_MAP = { { configDBG_USART_RX_PIN, configDBG_USART_RX_FUNCTION }, { configDBG_USART_TX_PIN, configDBG_USART_TX_FUNCTION } }; static const usart_options_t DBG_USART_OPTIONS = { .baudrate = configDBG_USART_BAUDRATE, .charlength = 8, .paritytype = USART_NO_PARITY, .stopbits = USART_1_STOPBIT, .channelmode = USART_NORMAL_CHMODE }; /* Initialize the USART used for the debug trace with the configured parameters. */ extern volatile avr32_usart_t *volatile stdio_usart_base; stdio_usart_base = configDBG_USART; gpio_enable_module( DBG_USART_GPIO_MAP, sizeof( DBG_USART_GPIO_MAP ) / sizeof( DBG_USART_GPIO_MAP[0] ) ); usart_init_rs232(configDBG_USART, &DBG_USART_OPTIONS, configCPU_CLOCK_HZ); } #endif /* Request initialization of data segments. */ return 1; } /*-----------------------------------------------------------*/ /* Added as there is no such function in FreeRTOS. */ void *pvPortRealloc( void *pv, size_t xWantedSize ) { void *pvReturn; vTaskSuspendAll(); { pvReturn = realloc( pv, xWantedSize ); } xTaskResumeAll(); return pvReturn; }
signed portBASE_TYPE xQueueGenericReceive( xQueueHandle pxQueue, void * const pvBuffer, portTickType xTicksToWait, portBASE_TYPE xJustPeeking ) { signed portBASE_TYPE xReturn = pdTRUE; xTimeOutType xTimeOut; signed portCHAR *pcOriginalReadPosition; do { /* If there are no messages in the queue we may have to block. */ if( xTicksToWait > ( portTickType ) 0 ) { vTaskSuspendAll(); prvLockQueue( pxQueue ); if( xReturn == pdTRUE ) { /* This is the first time through - we need to capture the time while the scheduler is locked to ensure we attempt to block at least once. */ vTaskSetTimeOutState( &xTimeOut ); } if( prvIsQueueEmpty( pxQueue ) ) { /* Need to call xTaskCheckForTimeout again as time could have passed since it was last called if this is not the first time around this loop. */ if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ); #if ( configUSE_MUTEXES == 1 ) { if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) { portENTER_CRITICAL(); vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder ); portEXIT_CRITICAL(); } } #endif vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait ); prvUnlockQueue( pxQueue ); if( !xTaskResumeAll() ) { taskYIELD(); } } else { prvUnlockQueue( pxQueue ); ( void ) xTaskResumeAll(); } } else { prvUnlockQueue( pxQueue ); ( void ) xTaskResumeAll(); } } /* The two tasks are blocked on the queue, the low priority task is polling/running. */ /* An interrupt occurs here - which unblocks the HP tasks, but they do not run. */ taskENTER_CRITICAL(); { /* Because the interrupt occurred the LP task manages to grab the data as the other two tasks are not yet running. */ if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 ) { /* Remember our read position in case we are just peeking. */ pcOriginalReadPosition = pxQueue->pcReadFrom; prvCopyDataFromQueue( pxQueue, pvBuffer ); if( xJustPeeking == pdFALSE ) { traceQUEUE_RECEIVE( pxQueue ); /* We are actually removing data. */ --( pxQueue->uxMessagesWaiting ); #if ( configUSE_MUTEXES == 1 ) { if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) { /* Record the information required to implement priority inheritance should it become necessary. */ pxQueue->pxMutexHolder = xTaskGetCurrentTaskHandle(); } } #endif if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE ) { if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE ) { taskYIELD(); } } } else { traceQUEUE_PEEK( pxQueue ); /* We are not removing the data, so reset our read pointer. */ pxQueue->pcReadFrom = pcOriginalReadPosition; /* The data is being left in the queue, so see if there are any other tasks waiting for the data. */ if( !listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) ) { /* Tasks that are removed from the event list will get added to the pending ready list as the scheduler is still suspended. */ if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE ) { /* The task waiting has a higher priority than this task. */ taskYIELD(); } } } xReturn = pdPASS; } else { xReturn = errQUEUE_EMPTY; } } taskEXIT_CRITICAL(); if( xReturn == errQUEUE_EMPTY ) { if( xTicksToWait > ( portTickType ) 0 ) { if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE ) { xReturn = queueERRONEOUS_UNBLOCK; } else { traceQUEUE_RECEIVE_FAILED( pxQueue ); } } else { traceQUEUE_RECEIVE_FAILED( pxQueue ); } } } while( xReturn == queueERRONEOUS_UNBLOCK ); return xReturn; }
/** * @brief Set the period for valve open */ void WATER_SetPeriod(uint32_t newPeriod) { vTaskSuspendAll(); m_period = newPeriod; xTaskResumeAll(); }
static void prvEventControllerTask( void *pvParameters ) { const char * const pcTaskStartMsg = "Multi event controller task started.\r\n"; portBASE_TYPE xDummy = 0; /* Just to stop warnings. */ ( void ) pvParameters; vPrintDisplayMessage( &pcTaskStartMsg ); for( ;; ) { /* All tasks are blocked on the queue. When a message is posted one of the two tasks that share the highest priority should unblock to read the queue. The next message written should unblock the other task with the same high priority, and so on in order. No other task should unblock to read data as they have lower priorities. */ prvCheckTaskCounters( evtHIGHEST_PRIORITY_INDEX_1, 1 ); prvCheckTaskCounters( evtHIGHEST_PRIORITY_INDEX_2, 1 ); prvCheckTaskCounters( evtHIGHEST_PRIORITY_INDEX_1, 1 ); prvCheckTaskCounters( evtHIGHEST_PRIORITY_INDEX_2, 1 ); prvCheckTaskCounters( evtHIGHEST_PRIORITY_INDEX_1, 1 ); /* For the rest of these tests we don't need the second 'highest' priority task - so it is suspended. */ vTaskSuspend( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_2 ] ); /* Now suspend the other highest priority task. The medium priority task will then be the task with the highest priority that remains blocked on the queue. */ vTaskSuspend( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_1 ] ); /* This time, when we post onto the queue we will expect the medium priority task to unblock and preempt us. */ prvCheckTaskCounters( evtMEDIUM_PRIORITY_INDEX, 1 ); /* Now try resuming the highest priority task while the scheduler is suspended. The task should start executing as soon as the scheduler is resumed - therefore when we post to the queue again, the highest priority task should again preempt us. */ vTaskSuspendAll(); vTaskResume( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_1 ] ); xTaskResumeAll(); prvCheckTaskCounters( evtHIGHEST_PRIORITY_INDEX_1, 1 ); /* Now we are going to suspend the high and medium priority tasks. The low priority task should then preempt us. Again the task suspension is done with the whole scheduler suspended just for test purposes. */ vTaskSuspendAll(); vTaskSuspend( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_1 ] ); vTaskSuspend( xCreatedTasks[ evtMEDIUM_PRIORITY_INDEX ] ); xTaskResumeAll(); prvCheckTaskCounters( evtLOWEST_PRIORITY_INDEX, 1 ); /* Do the same basic test another few times - selectively suspending and resuming tasks and each time calling prvCheckTaskCounters() passing to the function the number of the task we expected to be unblocked by the post. */ vTaskResume( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_1 ] ); prvCheckTaskCounters( evtHIGHEST_PRIORITY_INDEX_1, 1 ); vTaskSuspendAll(); /* Just for test. */ vTaskSuspendAll(); /* Just for test. */ vTaskSuspendAll(); /* Just for even more test. */ vTaskSuspend( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_1 ] ); xTaskResumeAll(); xTaskResumeAll(); xTaskResumeAll(); prvCheckTaskCounters( evtLOWEST_PRIORITY_INDEX, 1 ); vTaskResume( xCreatedTasks[ evtMEDIUM_PRIORITY_INDEX ] ); prvCheckTaskCounters( evtMEDIUM_PRIORITY_INDEX, 1 ); vTaskResume( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_1 ] ); prvCheckTaskCounters( evtHIGHEST_PRIORITY_INDEX_1, 1 ); /* Now a slight change, first suspend all tasks. */ vTaskSuspend( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_1 ] ); vTaskSuspend( xCreatedTasks[ evtMEDIUM_PRIORITY_INDEX ] ); vTaskSuspend( xCreatedTasks[ evtLOWEST_PRIORITY_INDEX ] ); /* Now when we resume the low priority task and write to the queue 3 times. We expect the low priority task to service the queue three times. 
*/ vTaskResume( xCreatedTasks[ evtLOWEST_PRIORITY_INDEX ] ); prvCheckTaskCounters( evtLOWEST_PRIORITY_INDEX, evtQUEUE_LENGTH ); /* Again suspend all tasks (only the low priority task is not suspended already). */ vTaskSuspend( xCreatedTasks[ evtLOWEST_PRIORITY_INDEX ] ); /* This time we are going to suspend the scheduler, resume the low priority task, then resume the high priority task. In this state we will write to the queue three times. When the scheduler is resumed we expect the high priority task to service all three messages. */ vTaskSuspendAll(); { vTaskResume( xCreatedTasks[ evtLOWEST_PRIORITY_INDEX ] ); vTaskResume( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_1 ] ); for( xDummy = 0; xDummy < evtQUEUE_LENGTH; xDummy++ ) { if( xQueueSend( xQueue, &xDummy, evtNO_DELAY ) != pdTRUE ) { printf( "%s, %d: queue send error\n", __FILE__, __LINE__ ); xHealthStatus = pdFAIL; } } /* The queue should not have been serviced yet!. The scheduler is still suspended. */ if( memcmp( ( void * ) xExpectedTaskCounters, ( void * ) xTaskCounters, sizeof( xExpectedTaskCounters ) ) ) { printf( "%s, %d: memcmp error\n", __FILE__, __LINE__ ); xHealthStatus = pdFAIL; } } xTaskResumeAll(); /* We should have been preempted by resuming the scheduler - so by the time we are running again we expect the high priority task to have removed three items from the queue. */ xExpectedTaskCounters[ evtHIGHEST_PRIORITY_INDEX_1 ] += evtQUEUE_LENGTH; if( memcmp( ( void * ) xExpectedTaskCounters, ( void * ) xTaskCounters, sizeof( xExpectedTaskCounters ) ) ) { printf( "%s, %d: memcmp error\n", __FILE__, __LINE__ ); xHealthStatus = pdFAIL; } /* The medium priority and second high priority tasks are still suspended. Make sure to resume them before starting again. */ vTaskResume( xCreatedTasks[ evtMEDIUM_PRIORITY_INDEX ] ); vTaskResume( xCreatedTasks[ evtHIGHEST_PRIORITY_INDEX_2 ] ); /* Just keep incrementing to show the task is still executing. */ xCheckVariable++; } }
/* * Lock routine called by Newlib on malloc / realloc / free entry to guarantee a * safe section as memory allocation management uses global data. * See the aforementioned details. */ void __malloc_lock(struct _reent *ptr) { vTaskSuspendAll(); }
void vParTestSetLED( unsigned portBASE_TYPE uxLED, portBASE_TYPE xValue ) { portBASE_TYPE xError = pdFALSE; vTaskSuspendAll(); { if( xValue == pdFALSE ) { switch( uxLED ) { case 0 : P3 |= partstOUTPUT_0; break; case 1 : P3 |= partstOUTPUT_1; break; case 2 : P3 |= partstOUTPUT_2; break; case 3 : P3 |= partstOUTPUT_3; break; case 4 : P3 |= partstOUTPUT_4; break; case 5 : P3 |= partstOUTPUT_5; break; case 6 : P3 |= partstOUTPUT_6; break; case 7 : P3 |= partstOUTPUT_7; break; default : /* There are no other LEDs wired in. */ xError = pdTRUE; break; } } else { switch( uxLED ) { case 0 : P3 &= ~partstOUTPUT_0; break; case 1 : P3 &= ~partstOUTPUT_1; break; case 2 : P3 &= ~partstOUTPUT_2; break; case 3 : P3 &= ~partstOUTPUT_3; break; case 4 : P3 &= ~partstOUTPUT_4; break; case 5 : P3 &= ~partstOUTPUT_5; break; case 6 : P3 &= ~partstOUTPUT_6; break; case 7 : P3 &= ~partstOUTPUT_7; break; default : /* There are no other LEDs wired in. */ break; } } } xTaskResumeAll(); }
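For context, a hypothetical caller might drive this routine from a task as below; the task name, LED number and timing are illustrative only, not part of the original source.

/* Illustrative only - blinks LED 0 by calling vParTestSetLED() from a task. */
static void vLEDBlinkTask( void *pvParameters )
{
	( void ) pvParameters;

	for( ;; )
	{
		vParTestSetLED( 0, pdTRUE );    /* LED on. */
		vTaskDelay( ( portTickType ) 500 / portTICK_RATE_MS );
		vParTestSetLED( 0, pdFALSE );   /* LED off. */
		vTaskDelay( ( portTickType ) 500 / portTICK_RATE_MS );
	}
}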
static void prvChangePriorityWhenSuspendedTask( void *pvParameters ) { const char * const pcTaskStartMsg = "Priority change when suspended task started.\r\n"; const char * const pcTaskFailMsg = "Priority change when suspended task failed.\r\n"; /* Just to stop warning messages. */ ( void ) pvParameters; /* Queue a message for printing to say the task has started. */ vPrintDisplayMessage( &pcTaskStartMsg ); for( ;; ) { /* Start with the counter at 0 so we know what the counter should be when we check it next. */ ulPrioritySetCounter = ( unsigned long ) 0; /* Resume the helper task. At this time it has a priority lower than ours so no context switch should occur. */ vTaskResume( xChangePriorityWhenSuspendedHandle ); /* Check to ensure the task we just resumed has not executed. */ portENTER_CRITICAL(); { if( ulPrioritySetCounter != ( unsigned long ) 0 ) { xPriorityRaiseWhenSuspendedError = pdTRUE; vPrintDisplayMessage( &pcTaskFailMsg ); } } portEXIT_CRITICAL(); /* Now try raising the priority while the scheduler is suspended. */ vTaskSuspendAll(); { vTaskPrioritySet( xChangePriorityWhenSuspendedHandle, ( configMAX_PRIORITIES - 1 ) ); /* Again, even though the helper task now has a priority greater than ours, it should not have executed yet because the scheduler is suspended. */ portENTER_CRITICAL(); { if( ulPrioritySetCounter != ( unsigned long ) 0 ) { xPriorityRaiseWhenSuspendedError = pdTRUE; vPrintDisplayMessage( &pcTaskFailMsg ); } } portEXIT_CRITICAL(); } xTaskResumeAll(); /* Now that the scheduler has been resumed the helper task should immediately preempt us and execute. When it executes it will increment ulPrioritySetCounter exactly once before suspending itself, so we should always find the counter set to 1 here. */ portENTER_CRITICAL(); { if( ulPrioritySetCounter != ( unsigned long ) 1 ) { xPriorityRaiseWhenSuspendedError = pdTRUE; vPrintDisplayMessage( &pcTaskFailMsg ); } } portEXIT_CRITICAL(); /* Delay until we try this again. */ vTaskDelay( priSLEEP_TIME * 2 ); /* Set the priority of the helper task back down, ready for the next execution of this task. */ vTaskSuspendAll(); vTaskPrioritySet( xChangePriorityWhenSuspendedHandle, tskIDLE_PRIORITY ); xTaskResumeAll(); } }
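The helper task referenced through xChangePriorityWhenSuspendedHandle is not shown. From the comments above it must increment the shared counter exactly once each time it runs and then suspend itself; a minimal sketch under that assumption (the task name is mine):

/* Hypothetical reconstruction of the helper task - not the original source.  Each
time it is resumed it bumps ulPrioritySetCounter once and suspends itself again. */
static void prvChangePriorityHelperTask( void *pvParameters )
{
	( void ) pvParameters;

	for( ;; )
	{
		/* The controlling task checks this counter to determine exactly when we
		have run. */
		ulPrioritySetCounter++;

		/* Suspend until the controlling task resumes us again. */
		vTaskSuspend( NULL );
	}
}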
signed portBASE_TYPE xQueueSend( xQueueHandle pxQueue, const void *pvItemToQueue, portTickType xTicksToWait ) { signed portBASE_TYPE xReturn; /* Make sure other tasks do not access the queue. */ vTaskSuspendAll(); /* It is important that this is the only thread/ISR that modifies the ready or delayed lists until xTaskResumeAll() is called. Places where the ready/delayed lists are modified include: + vTaskDelay() - Nothing can call vTaskDelay() while the scheduler is suspended, and vTaskDelay() cannot be called from an ISR. + vTaskPrioritySet() - Has a critical section around the access. + vTaskSwitchContext() - This will not get executed while the scheduler is suspended. + prvCheckDelayedTasks() - This will not get executed while the scheduler is suspended. + xTaskCreate() - Has a critical section around the access. + vTaskResume() - Has a critical section around the access. + xTaskResumeAll() - Has a critical section around the access. + xTaskRemoveFromEventList() - Checks to see if the scheduler is suspended. If so then the TCB being removed from the event list is removed from the event list and added to the xPendingReadyList. */ /* Make sure interrupts do not access the queue event list. */ prvLockQueue( pxQueue ); /* It is important that interrupts do not access the event list of the queue being modified here. Places where the event list is modified include: + xQueueSendFromISR(). This checks the lock on the queue to see if it has access. If the queue is locked then the Tx lock count is incremented to signify that a task waiting for data can be made ready once the queue lock is removed. If the queue is not locked then a task can be moved from the event list, but will not be removed from the delayed list or placed in the ready list until the scheduler is unlocked. + xQueueReceiveFromISR(). As per xQueueSendFromISR(). */ /* If the queue is already full we may have to block. */ if( prvIsQueueFull( pxQueue ) ) { /* The queue is full - do we want to block or just leave without posting? */ if( xTicksToWait > ( portTickType ) 0 ) { /* We are going to place ourselves on the xTasksWaitingToSend event list, and will get woken should the delay expire, or space become available on the queue. As detailed above we do not require mutual exclusion on the event list as nothing else can modify it or the ready lists while we have the scheduler suspended and the queue locked. It is possible that an ISR has removed data from the queue since we checked if any was available. If this is the case then the data will have been copied from the queue, and the queue variables updated, but the event list will not yet have been checked to see if anything is waiting as the queue is locked. */ vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait ); /* Force a context switch now as we are blocked. We can do this from within a critical section as the task we are switching to has its own context. When we return here (i.e. we unblock) we will leave the critical section as normal. It is possible that an ISR has caused an event on an unrelated and unlocked queue. If this was the case then the event list for that queue will have been updated but the ready lists left unchanged - instead the readied task will have been added to the pending ready list. */ taskENTER_CRITICAL(); { /* We can safely unlock the queue and scheduler here as interrupts are disabled. We must not yield with anything locked, but we can yield from within a critical section. 
Tasks that have been placed on the pending ready list cannot be tasks that are waiting for events on this queue. See the comments in xTaskRemoveFromEventList(). */ prvUnlockQueue( pxQueue ); /* Resuming the scheduler may cause a yield. If so then there is no point yielding again here. */ if( !xTaskResumeAll() ) { taskYIELD(); } /* Before leaving the critical section we have to ensure exclusive access again. */ vTaskSuspendAll(); prvLockQueue( pxQueue ); } taskEXIT_CRITICAL(); } } /* By the time we get here it is possible that we unblocked because space became available on the queue. It is also possible that an ISR posted to the queue since we left the critical section, so it may be that there is again no space. This would only happen if a task and an ISR post onto the same queue. */ taskENTER_CRITICAL(); { if( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) { /* There is room in the queue, copy the data into the queue. */ prvCopyQueueData( pxQueue, pvItemToQueue ); xReturn = pdPASS; /* Update the TxLock count so prvUnlockQueue() knows to check for tasks waiting for data to become available in the queue. */ ++( pxQueue->xTxLock ); } else { xReturn = errQUEUE_FULL; } } taskEXIT_CRITICAL(); /* We no longer require exclusive access to the queue. prvUnlockQueue() will remove any tasks suspended on a receive if either this function or an ISR has posted onto the queue. */ if( prvUnlockQueue( pxQueue ) ) { /* Resume the scheduler - making ready any tasks that were woken by an event while the scheduler was locked. Resuming the scheduler may cause a yield, in which case there is no point yielding again here. */ if( !xTaskResumeAll() ) { taskYIELD(); } } else { /* Resume the scheduler - making ready any tasks that were woken by an event while the scheduler was locked. */ xTaskResumeAll(); } return xReturn; }
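A typical caller of this API, shown only as a sketch: the queue handle, item type and timings below are assumptions, not part of the original source.

/* Illustrative producer task - posts an incrementing value to a queue of
unsigned longs, blocking for up to 100 ticks if the queue is full. */
static void vProducerTask( void *pvParameters )
{
unsigned long ulValue = 0;
xQueueHandle xTxQueue = ( xQueueHandle ) pvParameters;

	for( ;; )
	{
		if( xQueueSend( xTxQueue, ( void * ) &ulValue, ( portTickType ) 100 ) != pdPASS )
		{
			/* The queue remained full for the whole block time. */
		}

		ulValue++;
		vTaskDelay( ( portTickType ) 10 );
	}
}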