Example #1
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * A free page of pageblock_order or above on an isolated pageblock
	 * is not allowed to merge (the freepage accounting cannot handle
	 * it), so a free buddy page may still sit next to it.
	 * move_freepages_block() does not merge pages, so we need a
	 * different approach: isolating the page and freeing it again lets
	 * the buddy allocator perform the merge.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
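			/* __find_buddy_pfn() flips bit 'order' of the pfn:
			 * buddy_pfn == pfn ^ (1 << order). */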
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of pageblock_order or above, there can
	 * be no other free pages in the range, so the costly pageblock scan
	 * done by move_freepages_block() can be skipped.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}
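
The buddy lookup above relies on a basic property of the binary buddy allocator: the two buddies of order n have pfns that differ only in bit n. A minimal, self-contained sketch of that computation (mirroring the mainline definition of __find_buddy_pfn(); the sample values assume 4 KiB pages, so order 9 corresponds to a 2 MiB pageblock):

#include <stdio.h>

/* Sketch: the buddy of a 2^order-page block is found by flipping
 * bit 'order' of its page frame number. */
static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	printf("%lu\n", find_buddy_pfn(1024, 9));	/* 1536 */
	printf("%lu\n", find_buddy_pfn(1536, 9));	/* 1024 */
	return 0;
}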
Example #2
void *pvPortMalloc( size_t xWantedSize )
{
	xBlockLink *pxBlock = NULL, *pxPreviousBlock, *pxNewBlockLink;
	void *pvReturn = NULL;

	if( xWantedSize == 0 )
		return NULL;

	pre_alloc_hook( xWantedSize );

	vTaskSuspendAll();
	{
		/* If this is the first call to malloc then the heap will require
		initialisation to set up the list of free blocks. */
		if( xHeapHasBeenInitialised == pdFALSE )
		{
			prvHeapInit();
			xHeapHasBeenInitialised = pdTRUE;
		}

		/* The wanted size is increased so it can contain an xBlockLink
		structure in addition to the requested number of bytes. */
		if( xWantedSize > 0 )
		{
			xWantedSize += heapSTRUCT_SIZE;

			/* Ensure that blocks are always aligned to the required number of bytes. */
			if( xWantedSize & portBYTE_ALIGNMENT_MASK )
			{
				/* Byte alignment required. */
				xWantedSize += ( portBYTE_ALIGNMENT -
						 ( xWantedSize & portBYTE_ALIGNMENT_MASK ) );
			}
		}
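
		/* Worked example (illustrative values): with portBYTE_ALIGNMENT
		== 8 the mask is 0x7. If the size including the header comes to
		38 bytes, 38 & 0x7 == 6, so 8 - 6 == 2 bytes are added and the
		block is rounded up to 40, a multiple of the alignment. */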

		if( ( xWantedSize > 0 ) && ( xWantedSize < configTOTAL_HEAP_SIZE ) )
		{
			/* Blocks are stored in order of their size - traverse the list
			from the start (smallest) block until one of adequate size is
			found. */
			pxPreviousBlock = &xStart;
			pxBlock = xStart.pxNextFreeBlock;
			while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock ) )
			{
				pxPreviousBlock = pxBlock;
				pxBlock = pxBlock->pxNextFreeBlock;
			}
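
			/* Because the free list is kept sorted by ascending size, this
			first-fit walk stops at the smallest block that is large
			enough - effectively a best-fit search. */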

			/* If the end marker was reached then a block of adequate size
			was not found. */
			if( pxBlock != &xEnd )
			{
				/* Return the memory space - jumping over the xBlockLink structure
				at its start. */
				pvReturn = ( void * ) ( ( ( unsigned char * ) pxPreviousBlock->pxNextFreeBlock )
							+ heapSTRUCT_SIZE );

#ifdef FREERTOS_ENABLE_MALLOC_STATS
				hI.totalAllocations++;
#endif /* FREERTOS_ENABLE_MALLOC_STATS */

				/* This block is being returned for use so must be taken off the
				list of free blocks. */
				pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
				pxBlock->pxNextFreeBlock = NULL;

				/* If the block is larger than required it can be split into two. */
				if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
				{
					/* This block is to be split into two.  Create a new block
					following the number of bytes requested. The void cast is
					used to prevent byte alignment warnings from the compiler. */
					pxNewBlockLink = ( void * ) ( ( ( unsigned char * ) pxBlock ) + xWantedSize );

					/* Calculate the sizes of two blocks split from the single
					block. */
					pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
					/* Bit 0 (the BLOCK_ALLOCATED flag) is assumed to be
					clear here; the alignment rounding above guarantees
					this for any portBYTE_ALIGNMENT >= 2. */
					pxBlock->xBlockSize = xWantedSize;

					/* Add the new block to the serial list */
					pxNewBlockLink->pxPrev = pxBlock;
					if( !IS_LAST_BLOCK( pxNewBlockLink ) )
						NEXT_BLOCK( pxNewBlockLink )->pxPrev = pxNewBlockLink;

					SET_ALLOCATED(pxBlock);

					/* Insert the new block into the list of free blocks. */
					prvInsertBlockIntoFreeList( pxNewBlockLink );
				}
				else
				{
					SET_ALLOCATED(pxBlock);
				}
				xFreeBytesRemaining -= BLOCK_SIZE(pxBlock);
			}
		}
	}
	xTaskResumeAll();

#if( configUSE_MALLOC_FAILED_HOOK == 1 )
	{
		if( pvReturn == NULL )
		{
			DTRACE("Heap allocation failed.\n\r"
				       "Requested: %d\n\r"
				       "Available : %d\n\r", xWantedSize, xFreeBytesRemaining);
			extern void vApplicationMallocFailedHook( void );
			vApplicationMallocFailedHook();
		}
	}
#else
	if( pvReturn == NULL ) {
		DTRACE("Heap allocation failed.\n\r"
		      "Requested: %d\n\r"
		      "Available : %d\n\r", xWantedSize, xFreeBytesRemaining);
#ifdef FREERTOS_ENABLE_MALLOC_STATS
		hI.failedAllocations++;
#endif /* FREERTOS_ENABLE_MALLOC_STATS */
	}
#endif

	if( pvReturn != NULL ) {
		SET_ACTUAL_SIZE( pxBlock );
		SET_CALLER_ADDR( pxBlock );
		ATRACE("MDC A %10x %6d %10d R: %x\r\n", pvReturn ,
		       BLOCK_SIZE( pxBlock ),
		       xFreeBytesRemaining, __builtin_return_address(0));
		randomizeAreaData((unsigned char*)pvReturn, 
				  BLOCK_SIZE( pxBlock ) - heapSTRUCT_SIZE);
		post_alloc_hook( pvReturn );

#ifdef FREERTOS_ENABLE_MALLOC_STATS
		if ((configTOTAL_HEAP_SIZE - xFreeBytesRemaining) > hI.peakHeapUsage) {
			hI.peakHeapUsage =
				(configTOTAL_HEAP_SIZE - xFreeBytesRemaining);
		}
#endif
	}
	
	return pvReturn;
}
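
A minimal usage sketch for the allocator above, assuming the matching vPortFree() from the same port is available (as in stock FreeRTOS); the task name and buffer size are illustrative:

/* Hypothetical caller, for illustration: allocate a buffer from the
 * FreeRTOS heap, check for failure, then hand it back with vPortFree(). */
void vExampleTask( void *pvParameters )
{
	unsigned char *pucBuffer = pvPortMalloc( 128 );

	if( pucBuffer != NULL )
	{
		/* ... use the 128-byte buffer ... */
		vPortFree( pucBuffer );
	}

	( void ) pvParameters;
}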