void *pvPortMalloc( size_t xWantedSize )
{
void *pvReturn;

	vTaskSuspendAll();
	pvReturn = tlsf_malloc( xWantedSize );
	( void ) xTaskResumeAll();

	return pvReturn;
}
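
The matching free wrapper follows the same pattern.  A minimal sketch, assuming the single-argument TLSF variant implied by the tlsf_malloc() call above also provides tlsf_free():

void vPortFree( void *pv )
{
	/* Suspend the scheduler so no other task can enter the (non-thread-safe)
	TLSF allocator while the block is returned. */
	vTaskSuspendAll();
	tlsf_free( pv );
	( void ) xTaskResumeAll();
}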
static void vMemCheckTask( void *pvParameters )
{
unsigned long *pulMemCheckTaskRunningCounter;
void *pvMem1, *pvMem2, *pvMem3;
static long lErrorOccurred = pdFALSE;

	/* This task is dynamically created then deleted during each cycle of the
	vErrorChecks task to check the operation of the memory allocator.  Each time
	the task is created memory is allocated for the stack and TCB.  Each time
	the task is deleted this memory is returned to the heap.  This task itself
	exercises the allocator by allocating and freeing blocks.

	The task executes at the idle priority so does not require a delay.

	pulMemCheckTaskRunningCounter is incremented each cycle to indicate to the
	vErrorChecks() task that this task is still executing without error. */

	pulMemCheckTaskRunningCounter = ( unsigned long * ) pvParameters;

	for( ;; )
	{
		if( lErrorOccurred == pdFALSE )
		{
			/* We have never seen an error so increment the counter. */
			( *pulMemCheckTaskRunningCounter )++;
		}
		else
		{
			/* Reset the count so an error is detected by the
			prvCheckOtherTasksAreStillRunning() function. */
			*pulMemCheckTaskRunningCounter = mainCOUNT_INITIAL_VALUE;
		}

		/* Allocate some memory - just to give the allocator some extra
		exercise.  This has to be done with the scheduler suspended to
		ensure the task does not get deleted while it has memory allocated. */
		vTaskSuspendAll();
		{
			pvMem1 = pvPortMalloc( mainMEM_CHECK_SIZE_1 );
			if( pvMem1 == NULL )
			{
				lErrorOccurred = pdTRUE;
			}
			else
			{
				memset( pvMem1, 0xaa, mainMEM_CHECK_SIZE_1 );
				vPortFree( pvMem1 );
			}
		}
		xTaskResumeAll();

		/* Again - with a different size block. */
		vTaskSuspendAll();
		{
			pvMem2 = pvPortMalloc( mainMEM_CHECK_SIZE_2 );
			if( pvMem2 == NULL )
			{
				lErrorOccurred = pdTRUE;
			}
			else
			{
				memset( pvMem2, 0xaa, mainMEM_CHECK_SIZE_2 );
				vPortFree( pvMem2 );
			}
		}
		xTaskResumeAll();

		/* Again - with a different size block. */
		vTaskSuspendAll();
		{
			pvMem3 = pvPortMalloc( mainMEM_CHECK_SIZE_3 );
			if( pvMem3 == NULL )
			{
				lErrorOccurred = pdTRUE;
			}
			else
			{
				memset( pvMem3, 0xaa, mainMEM_CHECK_SIZE_3 );
				vPortFree( pvMem3 );
			}
		}
		xTaskResumeAll();
	}
}
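
A sketch of the create/delete cycle the comment above describes - vErrorChecks() as it typically appears in the FreeRTOS demo applications.  mainCHECK_DELAY and the error handling are illustrative assumptions, not taken from the source:

static void vErrorChecks( void *pvParameters )
{
static unsigned long ulMemCheckTaskRunningCount;
xTaskHandle xCreatedTask;

	for( ;; )
	{
		/* Reset the counter that vMemCheckTask() increments. */
		ulMemCheckTaskRunningCount = mainCOUNT_INITIAL_VALUE;

		/* Creating the task allocates a stack and TCB from the heap... */
		if( xTaskCreate( vMemCheckTask, "MEM_CHECK", configMINIMAL_STACK_SIZE, ( void * ) &ulMemCheckTaskRunningCount, tskIDLE_PRIORITY, &xCreatedTask ) == pdPASS )
		{
			/* ...let it run for a while (mainCHECK_DELAY is hypothetical)... */
			vTaskDelay( mainCHECK_DELAY );

			/* ...then delete it, returning the stack and TCB to the heap. */
			vTaskDelete( xCreatedTask );

			if( ulMemCheckTaskRunningCount == mainCOUNT_INITIAL_VALUE )
			{
				/* The counter never moved, so vMemCheckTask() hit an error. */
			}
		}
	}
}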
Example #3
/*
 * Unlock routine called by Newlib on malloc / realloc / free exit to close
 * the safe section, which is required because memory allocation management
 * uses global data.  See the matching __malloc_lock() routine for details.
 */
void __malloc_unlock(struct _reent *ptr)
{
	xTaskResumeAll();
}
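
For completeness, the lock routine this unlock pairs with is conventionally the mirror image - a sketch of the usual implementation:

void __malloc_lock(struct _reent *ptr)
{
	/* Suspend the scheduler so no other task can enter the allocator while
	its global data is being modified; __malloc_unlock() resumes it. */
	vTaskSuspendAll();
}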
Example #4
signed portBASE_TYPE xQueueSend( xQueueHandle pxQueue, const void *pvItemToQueue, portTickType xTicksToWait )
{
signed portBASE_TYPE xReturn = pdPASS;
xTimeOutType xTimeOut;

	/* Make sure other tasks do not access the queue. */
	vTaskSuspendAll();

	/* Capture the current time status for future reference. */
	vTaskSetTimeOutState( &xTimeOut );

	/* It is important that this is the only thread/ISR that modifies the
	ready or delayed lists until xTaskResumeAll() is called.  Places where
	the ready/delayed lists are modified include:

		+ vTaskDelay() - Nothing can call vTaskDelay() as the scheduler is
		  suspended, and vTaskDelay() cannot be called from an ISR.
		+ vTaskPrioritySet() - Has a critical section around the access.
		+ vTaskSwitchContext() - This will not get executed while the scheduler
		  is suspended.
		+ prvCheckDelayedTasks() - This will not get executed while the
		  scheduler is suspended.
		+ xTaskCreate() - Has a critical section around the access.
		+ vTaskResume() - Has a critical section around the access.
		+ xTaskResumeAll() - Has a critical section around the access.
		+ xTaskRemoveFromEventList() - Checks to see if the scheduler is
		  suspended.  If so then the TCB being removed from the event list
		  is added to the xPendingReadyList rather than the ready list.
	*/

	/* Make sure interrupts do not access the queue event list. */
	prvLockQueue( pxQueue );

	/* It is important that interrupts do not access the event list of the
	queue being modified here.  Places where the event list is modified
	include:

		+ xQueueSendFromISR().  This checks the lock on the queue to see if
		  it has access.  If the queue is locked then the Tx lock count is
		  incremented to signify that a task waiting for data can be made ready
		  once the queue lock is removed.  If the queue is not locked then
		  a task can be moved from the event list, but will not be removed
		  from the delayed list or placed in the ready list until the scheduler
		  is unlocked.

		+ xQueueReceiveFromISR().  As per xQueueSendFromISR().
	*/
		
	/* If the queue is already full we may have to block. */
	do
	{
		if( prvIsQueueFull( pxQueue ) )
		{
			/* The queue is full - do we want to block or just leave without
			posting? */
			if( xTicksToWait > ( portTickType ) 0 )
			{
				/* We are going to place ourselves on the xTasksWaitingToSend event
				list, and will get woken should the delay expire, or space become
				available on the queue.
				
				As detailed above we do not require mutual exclusion on the event
				list as nothing else can modify it or the ready lists while we
				have the scheduler suspended and queue locked.
				
				It is possible that an ISR has removed data from the queue since we
				checked if any was available.  If this is the case then the data
				will have been copied from the queue, and the queue variables
				updated, but the event list will not yet have been checked to see if
				anything is waiting as the queue is locked. */
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
	
				/* Force a context switch now as we are blocked.  We can do
				this from within a critical section as the task we are
				switching to has its own context.  When we return here (i.e. we
				unblock) we will leave the critical section as normal.
				
				It is possible that an ISR has caused an event on an unrelated and
				unlocked queue.  If this was the case then the event list for that
				queue will have been updated but the ready lists left unchanged -
				instead the readied task will have been added to the pending ready
				list. */
				taskENTER_CRITICAL();
				{
					/* We can safely unlock the queue and scheduler here as
					interrupts are disabled.  We must not yield with anything
					locked, but we can yield from within a critical section.
					
					Tasks that have been placed on the pending ready list cannot
					be tasks that are waiting for events on this queue.  See
					the comment in xTaskRemoveFromEventList(). */
					prvUnlockQueue( pxQueue );
	
					/* Resuming the scheduler may cause a yield.  If so then there
					is no point yielding again here. */
					if( !xTaskResumeAll() )
					{
						taskYIELD();
					}

					/* We want to check to see if the queue is still full
					before leaving the critical section.  This is to prevent
					this task placing an item into the queue due to an
					interrupt making space on the queue between critical
					sections (when there might be a higher priority task
					blocked on the queue that cannot run yet because the
					scheduler gets suspended). */
					if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
					{
						/* We unblocked but there is no space in the queue,
						we probably timed out. */
						xReturn = errQUEUE_FULL;
					}
	
					/* Before leaving the critical section we have to ensure
					exclusive access again. */
					vTaskSuspendAll();
					prvLockQueue( pxQueue );				
				}
				taskEXIT_CRITICAL();
			}
		}
			
		/* If xReturn is errQUEUE_FULL then we unblocked when the queue
		was still full.  Don't check it again now as it is possible that
		an interrupt has removed an item from the queue since we left the
		critical section and we don't want to write to the queue in case
		there is a task of higher priority blocked waiting for space to
		be available on the queue.  If this is the case the higher priority
		task will execute when the scheduler is unsuspended. */
		if( xReturn != errQUEUE_FULL )
		{
			/* When we are here it is possible that we unblocked as space became
			available on the queue.  It is also possible that an ISR posted to the
			queue since we left the critical section, so it may be that again there
			is no space.  This would only happen if a task and ISR post onto the
			same queue. */
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
				{
					/* There is room in the queue, copy the data into the queue. */			
					prvCopyQueueData( pxQueue, pvItemToQueue );		
					xReturn = pdPASS;
		
					/* Update the TxLock count so prvUnlockQueue knows to check for
					tasks waiting for data to become available in the queue. */
					++( pxQueue->xTxLock );
				}
				else
				{
					xReturn = errQUEUE_FULL;
				}
			}
			taskEXIT_CRITICAL();
		}

		if( xReturn == errQUEUE_FULL )
		{
			if( xTicksToWait > 0 )
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					xReturn = queueERRONEOUS_UNBLOCK;
				}
			}
		}
	}
	while( xReturn == queueERRONEOUS_UNBLOCK );

	prvUnlockQueue( pxQueue );
	xTaskResumeAll();

	return xReturn;
}
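
A minimal usage sketch for this API (not from the source): a sender task posts to a queue created elsewhere and distinguishes success from a timeout.  The queue length, item type, and tick count are assumptions:

void vSenderTask( void *pvParameters )
{
/* Assumed created elsewhere with xQueueCreate( 8, sizeof( long ) ). */
xQueueHandle xNumberQueue = ( xQueueHandle ) pvParameters;
long lValueToSend = 0;

	for( ;; )
	{
		/* Block for up to 100 ticks waiting for space on the queue. */
		if( xQueueSend( xNumberQueue, &lValueToSend, ( portTickType ) 100 ) == pdPASS )
		{
			lValueToSend++;
		}
		else
		{
			/* errQUEUE_FULL - the timeout expired with the queue still full. */
		}
	}
}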
Example #5
signed portBASE_TYPE xQueueReceive( xQueueHandle pxQueue, void *pvBuffer, portTickType xTicksToWait )
{
signed portBASE_TYPE xReturn = pdTRUE;
xTimeOutType xTimeOut;

	/* This function is very similar to xQueueSend().  See comments within
	xQueueSend() for a more detailed explanation.

	Make sure other tasks do not access the queue. */
	vTaskSuspendAll();

	/* Capture the current time status for future reference. */
	vTaskSetTimeOutState( &xTimeOut );

	/* Make sure interrupts do not access the queue. */
	prvLockQueue( pxQueue );

	do
	{
		/* If there are no messages in the queue we may have to block. */
		if( prvIsQueueEmpty( pxQueue ) )
		{
			/* There are no messages in the queue, do we want to block or just
			leave with nothing? */			
			if( xTicksToWait > ( portTickType ) 0 )
			{
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
				taskENTER_CRITICAL();
				{
					prvUnlockQueue( pxQueue );
					if( !xTaskResumeAll() )
					{
						taskYIELD();
					}

					if( pxQueue->uxMessagesWaiting == ( unsigned portBASE_TYPE ) 0 )
					{
						/* We unblocked but the queue is empty.  We probably
						timed out. */
						xReturn = errQUEUE_EMPTY;
					}
	
					vTaskSuspendAll();
					prvLockQueue( pxQueue );
				}
				taskEXIT_CRITICAL();
			}
		}
	
		if( xReturn != errQUEUE_EMPTY )
		{
			taskENTER_CRITICAL();
			{
				if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
				{
					pxQueue->pcReadFrom += pxQueue->uxItemSize;
					if( pxQueue->pcReadFrom >= pxQueue->pcTail )
					{
						pxQueue->pcReadFrom = pxQueue->pcHead;
					}
					--( pxQueue->uxMessagesWaiting );
					memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
		
					/* Increment the lock count so prvUnlockQueue knows to check for
					tasks waiting for space to become available on the queue. */
					++( pxQueue->xRxLock );
					xReturn = pdPASS;
				}
				else
				{
					xReturn = errQUEUE_EMPTY;
				}
			}
			taskEXIT_CRITICAL();
		}

		if( xReturn == errQUEUE_EMPTY )
		{
			if( xTicksToWait > 0 )
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					xReturn = queueERRONEOUS_UNBLOCK;
				}
			}
		}
	} while( xReturn == queueERRONEOUS_UNBLOCK );

	/* We no longer require exclusive access to the queue. */
	prvUnlockQueue( pxQueue );
	xTaskResumeAll();

	return xReturn;
}
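
The receiving side of the same sketch, mirroring the assumptions made for the sender above:

void vReceiverTask( void *pvParameters )
{
xQueueHandle xNumberQueue = ( xQueueHandle ) pvParameters;
long lReceivedValue;

	for( ;; )
	{
		/* Block for up to 100 ticks waiting for data to arrive. */
		if( xQueueReceive( xNumberQueue, &lReceivedValue, ( portTickType ) 100 ) == pdPASS )
		{
			/* lReceivedValue now holds the item copied from the queue. */
		}
		else
		{
			/* errQUEUE_EMPTY - the timeout expired with nothing to read. */
		}
	}
}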
Example #6
void spi_flash_op_unlock()
{
    xTaskResumeAll();
}
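
A sketch of the matching lock routine, assuming the single-core configuration in which suspending the scheduler alone is enough to keep all other tasks away from flash while its cache is disabled:

void spi_flash_op_lock()
{
    /* No task can run (and so none can touch cached flash) between this
       call and the xTaskResumeAll() in spi_flash_op_unlock(). */
    vTaskSuspendAll();
}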
signed portBASE_TYPE xQueueGenericReceive( xQueueHandle pxQueue, void * const pvBuffer, portTickType xTicksToWait, portBASE_TYPE xJustPeeking )
{
signed portBASE_TYPE xReturn = pdTRUE;
xTimeOutType xTimeOut;
signed portCHAR *pcOriginalReadPosition;

	do
	{
		/* If there are no messages in the queue we may have to block. */
		if( xTicksToWait > ( portTickType ) 0 )
		{
			vTaskSuspendAll();
			prvLockQueue( pxQueue );

			if( xReturn == pdTRUE )
			{
				/* This is the first time through - we need to capture the
				time while the scheduler is locked to ensure we attempt to
				block at least once. */
				vTaskSetTimeOutState( &xTimeOut );
			}

			if( prvIsQueueEmpty( pxQueue ) )
			{
	    		/* Need to call xTaskCheckForTimeOut() again as time could
	    		have passed since it was last called if this is not the
	    		first time around this loop. */
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							portENTER_CRITICAL();
								vTaskPriorityInherit( ( void * ) pxQueue->pxMutexHolder );
							portEXIT_CRITICAL();
						}
					}
					#endif

					vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
					prvUnlockQueue( pxQueue );
					if( !xTaskResumeAll() )
					{
						taskYIELD();
					}
				}
				else
				{
					prvUnlockQueue( pxQueue );
					( void ) xTaskResumeAll();
				}
			}
			else
			{
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}

		taskENTER_CRITICAL();
		{
			if( pxQueue->uxMessagesWaiting > ( unsigned portBASE_TYPE ) 0 )
			{
				/* Remember our read position in case we are just peeking. */
				pcOriginalReadPosition = pxQueue->pcReadFrom;

				prvCopyDataFromQueue( pxQueue, pvBuffer );

				if( xJustPeeking == pdFALSE )
				{
					traceQUEUE_RECEIVE( pxQueue );

					/* We are actually removing data. */
					--( pxQueue->uxMessagesWaiting );

					#if ( configUSE_MUTEXES == 1 )
					{
						if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
						{
							/* Record the information required to implement
							priority inheritance should it become necessary. */
							pxQueue->pxMutexHolder = xTaskGetCurrentTaskHandle();
						}
					}
					#endif

					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) == pdTRUE )
						{
							taskYIELD();
						}
					}
				}
				else
				{
					traceQUEUE_PEEK( pxQueue );

					/* We are not removing the data, so reset our read
					pointer. */
					pxQueue->pcReadFrom = pcOriginalReadPosition;

					/* The data is being left in the queue, so see if there are
					any other tasks waiting for the data. */
					if( !listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) )
					{
						/* Tasks that are removed from the event list will get added to
						the pending ready list as the scheduler is still suspended. */
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority than this task. */
							taskYIELD();
						}
					}

				}

				xReturn = pdPASS;
			}
			else
			{
				xReturn = errQUEUE_EMPTY;
			}
		}
		taskEXIT_CRITICAL();

		if( xReturn == errQUEUE_EMPTY )
		{
			if( xTicksToWait > ( portTickType ) 0 )
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					xReturn = queueERRONEOUS_UNBLOCK;
				}
				else
				{
					traceQUEUE_RECEIVE_FAILED( pxQueue );
				}
			}
			else
			{
				traceQUEUE_RECEIVE_FAILED( pxQueue );
			}
		}

	} while( xReturn == queueERRONEOUS_UNBLOCK );

	return xReturn;
}
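
In FreeRTOS of this vintage the public receive functions are thin macros over xQueueGenericReceive(), differing only in the xJustPeeking argument - paraphrased from queue.h:

#define xQueueReceive( xQueue, pvBuffer, xTicksToWait ) xQueueGenericReceive( ( xQueue ), ( pvBuffer ), ( xTicksToWait ), pdFALSE )
#define xQueuePeek( xQueue, pvBuffer, xTicksToWait ) xQueueGenericReceive( ( xQueue ), ( pvBuffer ), ( xTicksToWait ), pdTRUE )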
signed portBASE_TYPE xQueueGenericSend( xQueueHandle pxQueue, const void * const pvItemToQueue, portTickType xTicksToWait, portBASE_TYPE xCopyPosition )
{
signed portBASE_TYPE xReturn = pdTRUE;
xTimeOutType xTimeOut;

	do
	{
    	/* If xTicksToWait is zero then we are not going to block even
    	if there is no room in the queue to post. */
		if( xTicksToWait > ( portTickType ) 0 )
		{
			vTaskSuspendAll();
			prvLockQueue( pxQueue );

			if( xReturn == pdTRUE )
			{
				/* This is the first time through - we need to capture the
				time while the scheduler is locked to ensure we attempt to
				block at least once. */
				vTaskSetTimeOutState( &xTimeOut );
			}

			if( prvIsQueueFull( pxQueue ) )
			{
	    		/* Need to call xTaskCheckForTimeOut() again as time could
	    		have passed since it was last called if this is not the
	    		first time around this loop. */
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					traceBLOCKING_ON_QUEUE_SEND( pxQueue );
					vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

					/* Unlocking the queue means queue events can affect the
					event list.  It is possible that interrupts occurring now
					remove this task from the event list again - but as the
					scheduler is suspended the task will go onto the pending
					ready list instead of the actual ready list. */
					prvUnlockQueue( pxQueue );

					/* Resuming the scheduler will move tasks from the pending
					ready list into the ready list - so it is feasible that this
					task is already in a ready list before it yields - in which
					case the yield will not cause a context switch unless there
					is also a higher priority task in the pending ready list. */
					if( !xTaskResumeAll() )
					{
						taskYIELD();
					}
				}
				else
				{
					prvUnlockQueue( pxQueue );
					( void ) xTaskResumeAll();
				}
			}
			else
			{
    			/* The queue was not full so we can just unlock the
    			scheduler and queue again before carrying on. */
				prvUnlockQueue( pxQueue );
				( void ) xTaskResumeAll();
			}
		}

  		/* Higher priority tasks and interrupts can execute during
  		this time and could possibly refill the queue - even if we
  		unblocked because space became available. */

		taskENTER_CRITICAL();
		{
  			/* Is there room on the queue now?  To be running we must be
  			the highest priority task wanting to access the queue. */
			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
			{
				traceQUEUE_SEND( pxQueue );
				prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
				xReturn = pdPASS;

				/* If there was a task waiting for data to arrive on the
				queue then unblock it now. */
				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
				{
					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) == pdTRUE )
					{
					    /* The unblocked task has a priority higher than
					    our own so yield immediately. */
					    taskYIELD();
					}
				}
			}
			else
			{
  				/* Setting xReturn to errQUEUE_FULL will force its timeout
  				to be re-evaluated.  This is necessary in case interrupts
  				and higher priority tasks accessed the queue between this
  				task being unblocked and subsequently attempting to write
  				to the queue. */
				xReturn = errQUEUE_FULL;
			}
		}
		taskEXIT_CRITICAL();

		if( xReturn == errQUEUE_FULL )
		{
			if( xTicksToWait > ( portTickType ) 0 )
			{
				if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
				{
					xReturn = queueERRONEOUS_UNBLOCK;
				}
				else
				{
					traceQUEUE_SEND_FAILED( pxQueue );
				}
			}
			else
			{
				traceQUEUE_SEND_FAILED( pxQueue );
			}
		}
	}
	while( xReturn == queueERRONEOUS_UNBLOCK );

	return xReturn;
}
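
Likewise, the public send functions map onto xQueueGenericSend() through the xCopyPosition argument - paraphrased from queue.h of the same vintage:

#define xQueueSend( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )
#define xQueueSendToFront( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_FRONT )
#define xQueueSendToBack( xQueue, pvItemToQueue, xTicksToWait ) xQueueGenericSend( ( xQueue ), ( pvItemToQueue ), ( xTicksToWait ), queueSEND_TO_BACK )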
Example #9
void *pvPortMalloc( size_t xWantedSize )
{
BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
static BaseType_t xHeapHasBeenInitialised = pdFALSE;
void *pvReturn = NULL;

	vTaskSuspendAll();
	{
		/* If this is the first call to malloc then the heap will require
		initialisation to setup the list of free blocks. */
		if( xHeapHasBeenInitialised == pdFALSE )
		{
			prvHeapInit();
			xHeapHasBeenInitialised = pdTRUE;
		}

		/* The wanted size is increased so it can contain a BlockLink_t
		structure in addition to the requested amount of bytes. */
		if( xWantedSize > 0 )
		{
			xWantedSize += heapSTRUCT_SIZE;

			/* Ensure that blocks are always aligned to the required number of bytes. */
			if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0 )
			{
				/* Byte alignment required. */
				xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
			}
		}

		if( ( xWantedSize > 0 ) && ( xWantedSize < configADJUSTED_HEAP_SIZE ) )
		{
			/* Blocks are stored in size order - traverse the list from the
			start (smallest) block until one of adequate size is found. */
			pxPreviousBlock = &xStart;
			pxBlock = xStart.pxNextFreeBlock;
			while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
			{
				pxPreviousBlock = pxBlock;
				pxBlock = pxBlock->pxNextFreeBlock;
			}

			/* If we found the end marker then a block of adequate size was not found. */
			if( pxBlock != &xEnd )
			{
				/* Return the memory space - jumping over the BlockLink_t structure
				at its start. */
				pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE );

				/* This block is being returned for use so must be taken out of the
				list of free blocks. */
				pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;

				/* If the block is larger than required it can be split into two. */
				if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
				{
					/* This block is to be split into two.  Create a new block
					following the number of bytes requested. The void cast is
					used to prevent byte alignment warnings from the compiler. */
					pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );

					/* Calculate the sizes of two blocks split from the single
					block. */
					pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
					pxBlock->xBlockSize = xWantedSize;

					/* Insert the new block into the list of free blocks. */
					prvInsertBlockIntoFreeList( ( pxNewBlockLink ) );
				}

				xFreeBytesRemaining -= pxBlock->xBlockSize;
			}
		}

		traceMALLOC( pvReturn, xWantedSize );
	}
	( void ) xTaskResumeAll();

	#if( configUSE_MALLOC_FAILED_HOOK == 1 )
	{
		if( pvReturn == NULL )
		{
			extern void vApplicationMallocFailedHook( void );
			vApplicationMallocFailedHook();
		}
	}
	#endif

	return pvReturn;
}
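
The allocator above is one half of a pair; the corresponding vPortFree() uses the same vTaskSuspendAll()/xTaskResumeAll() bracket to return a block to the free list.  A sketch closely following heap_2.c:

void vPortFree( void *pv )
{
uint8_t *puc = ( uint8_t * ) pv;
BlockLink_t *pxLink;

	if( pv != NULL )
	{
		/* The memory being freed has a BlockLink_t immediately before it. */
		puc -= heapSTRUCT_SIZE;
		pxLink = ( void * ) puc;

		vTaskSuspendAll();
		{
			/* Add this block to the list of free blocks. */
			prvInsertBlockIntoFreeList( ( BlockLink_t * ) pxLink );
			xFreeBytesRemaining += pxLink->xBlockSize;
			traceFREE( pv, pxLink->xBlockSize );
		}
		( void ) xTaskResumeAll();
	}
}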
Example #10
void *pvPortMalloc( size_t xWantedSize )
{
BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
void *pvReturn = NULL;

	vTaskSuspendAll();
	{
		/* If this is the first call to malloc then the heap will require
		initialisation to setup the list of free blocks. */
		if( pxEnd == NULL )
		{
			prvHeapInit();
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		/* Check the requested block size is not so large that the top bit is
		set.  The top bit of the block size member of the BlockLink_t structure
		is used to determine who owns the block - the application or the
		kernel, so it must be free. */
		if( ( xWantedSize & xBlockAllocatedBit ) == 0 )
		{
			/* The wanted size is increased so it can contain a BlockLink_t
			structure in addition to the requested amount of bytes. */
			if( xWantedSize > 0 )
			{
				xWantedSize += heapSTRUCT_SIZE;

				/* Ensure that blocks are always aligned to the required number
				of bytes. */
				if( ( xWantedSize & portBYTE_ALIGNMENT_MASK ) != 0x00 )
				{
					/* Byte alignment required. */
					xWantedSize += ( portBYTE_ALIGNMENT - ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			if( ( xWantedSize > 0 ) && ( xWantedSize <= xFreeBytesRemaining ) )
			{
				/* Traverse the list from the start	(lowest address) block until
				one	of adequate size is found. */
				pxPreviousBlock = &xStart;
				pxBlock = xStart.pxNextFreeBlock;
				while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock != NULL ) )
				{
					pxPreviousBlock = pxBlock;
					pxBlock = pxBlock->pxNextFreeBlock;
				}

				/* If the end marker was reached then a block of adequate size
				was	not found. */
				if( pxBlock != pxEnd )
				{
					/* Return the memory space pointed to - jumping over the
					BlockLink_t structure at its start. */
					pvReturn = ( void * ) ( ( ( uint8_t * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE );

					/* This block is being returned for use so must be taken out
					of the list of free blocks. */
					pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;

					/* If the block is larger than required it can be split into
					two. */
					if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
					{
						/* This block is to be split into two.  Create a new
						block following the number of bytes requested. The void
						cast is used to prevent byte alignment warnings from the
						compiler. */
						pxNewBlockLink = ( void * ) ( ( ( uint8_t * ) pxBlock ) + xWantedSize );

						/* Calculate the sizes of two blocks split from the
						single block. */
						pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
						pxBlock->xBlockSize = xWantedSize;

						/* Insert the new block into the list of free blocks. */
						prvInsertBlockIntoFreeList( ( pxNewBlockLink ) );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					xFreeBytesRemaining -= pxBlock->xBlockSize;

					if( xFreeBytesRemaining < xMinimumEverFreeBytesRemaining )
					{
						xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					/* The block is being returned - it is allocated and owned
					by the application and has no "next" block. */
					pxBlock->xBlockSize |= xBlockAllocatedBit;
					pxBlock->pxNextFreeBlock = NULL;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		traceMALLOC( pvReturn, xWantedSize );
	}
	( void ) xTaskResumeAll();

	#if( configUSE_MALLOC_FAILED_HOOK == 1 )
	{
		if( pvReturn == NULL )
		{
			extern void vApplicationMallocFailedHook( void );
			vApplicationMallocFailedHook();
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	#endif

	return pvReturn;
}
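
For reference, the ownership bit tested above is established during heap initialisation as the top bit of a size_t - paraphrased from heap_4.c:

#define heapBITS_PER_BYTE	( ( size_t ) 8 )

/* Inside prvHeapInit(): no valid request can be large enough to set the top
bit of a size_t, so that bit is free to mark blocks owned by the application. */
xBlockAllocatedBit = ( ( size_t ) 1 ) << ( ( sizeof( size_t ) * heapBITS_PER_BYTE ) - 1 );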