int vHeapSelfTest( int trace ) { { // find out total size of heap xBlockLink *pxBlock = ( xBlockLink * )xHeap.ucHeap; int totalSize = 0; if( trace ) { #ifdef DEBUG_HEAP_EXTRA DTRACE( "%s%11s%11s%11s%11s%11s%11s\n\r", "TAG", "Next Free", "Prev Block", "Block Size", "Act. Size", "Caller", "Block Type"); #else /* ! DEBUG_HEAP_EXTRA */ DTRACE( "%s%11s%11s%11s%11s%11s\n\r", "TAG", "My Addr", "Next Free", "Prev Block", "Block Size", "Block Type"); #endif /* DEBUG_HEAP_EXTRA */ DTRACE( "HST%11x%11x%11x%11d%11c\n\r", &xStart, xStart.pxNextFreeBlock, xStart.pxPrev, xStart.xBlockSize, 'X' ); } /* A counter to prevent infinite loop */ int max_sane_blocks = 10000; while( max_sane_blocks-- ) { if( trace ) { #ifdef DEBUG_HEAP_EXTRA DTRACE( "HST%11x%11x%11d%11d%11x ", pxBlock->pxNextFreeBlock, pxBlock->pxPrev, BLOCK_SIZE( pxBlock ), GET_ACTUAL_SIZE( pxBlock ), GET_CALLER_ADDR( pxBlock ) ); #else /* ! DEBUG_HEAP_EXTRA */ DTRACE( "HST%11x%11x%11x%11d ", pxBlock, pxBlock->pxNextFreeBlock, pxBlock->pxPrev, BLOCK_SIZE( pxBlock ) ); #endif /* DEBUG_HEAP_EXTRA */ if (BLOCK_SIZE( pxBlock ) == 0) { DTRACE("Unknown fault: Cannot find next block\n\r"); break; } if( IS_ALLOCATED_BLOCK(pxBlock) ) { DTRACE( "A\n\r" ); } else { DTRACE( "F\n\r" ); } } totalSize += BLOCK_SIZE( pxBlock ); if( IS_LAST_BLOCK( pxBlock ) ) break; pxBlock = NEXT_BLOCK( pxBlock ); } if( totalSize != configTOTAL_HEAP_SIZE ) return 1; } return 0; }
/*
 * structure_status () shows the status of all the blocks
 *
 * Debug dump of the whole TLSF pool: prints the TLSF control-structure
 * address and index limits, then walks every physical block in every
 * (possibly non-contiguous) memory region and prints it via print_block().
 *
 * block_ptr: pointer to the memory pool previously initialised with
 *            init_memory_pool() (the TLSF_t header lives at its start).
 */
void structure_status (char *block_ptr)
{
	block_header_t *b;
	TLSF_t *ptr_TLSF;
	int end = 0, end2 = 0;	/* end: done with current region; end2: done with all regions */
	__u32 *ptr_following;

	ptr_TLSF = (TLSF_t *) block_ptr;

	/* Validate the pool before touching anything else. */
	if (!ptr_TLSF || ptr_TLSF -> magic_number != MAGIC_NUMBER) {
		PRINT_MSG ("structure_status() error: TLSF structure is not initialized\n");
		PRINT_MSG ("Hint: Execute init_memory_pool() before calling structure_status()");
		return;
	}

	PRINT_DBG_C ("\nTLSF structure address 0x");
	PRINT_DBG_H (ptr_TLSF);
	PRINT_DBG_C ("\nMax. first level index: ");
	PRINT_DBG_D (ptr_TLSF -> max_fl_index);
	PRINT_DBG_C ("\nMax. second level index: ");
	PRINT_DBG_D (ptr_TLSF -> max_sl_index);
	PRINT_DBG_C ("\n\nALL BLOCKS\n");

	/* Head of the linked chain of non-contiguous regions. */
	ptr_following = ptr_TLSF -> following_non_cont_bh;

	/* Outer loop: one iteration per memory region. */
	while (!end2) {
		end = 0;
		/* First block header of this region.  NOTE(review): pointer
		 * arithmetic on a __u32* scales by sizeof(__u32), so this
		 * skips sizeof(__u32 *) *elements* (not bytes) past the
		 * region link word — confirm this offset matches the region
		 * header layout used by add_new_block()/init. */
		b = (block_header_t *) (ptr_following + sizeof (__u32 *));

		/* Inner loop: walk contiguous blocks until the region's
		 * last block is reached. */
		while (!end) {
			print_block (b);
			if (IS_LAST_BLOCK(b))
				end = 1;
			else
				/* Next physical block starts right after this
				 * block's payload. */
				b = (block_header_t *) (b -> ptr.buffer +
					TLSF_WORDS2BYTES (GET_BLOCK_SIZE(b)));
		}

		/* The first word of each region stores the address of the
		 * next region; a zero link terminates the chain. */
		if (!(__u32 *) *ptr_following)
			end2 = 1;
		else {
			ptr_following = (__u32 *) *ptr_following;
		}
	}
}
/*
 * Allocate xWantedSize bytes from the FreeRTOS heap.
 *
 * Free blocks are kept on a size-ordered singly linked list between xStart
 * and xEnd.  The first block large enough is taken; if it is big enough to
 * split, the remainder is re-inserted as a new free block.
 *
 * Returns a pointer to the usable payload (just past the xBlockLink header),
 * or NULL when the request is zero, too large, or cannot be satisfied.
 */
void *pvPortMalloc( size_t xWantedSize )
{
	xBlockLink *pxBlock = NULL, *pxPreviousBlock, *pxNewBlockLink;
	void *pvReturn = NULL;

	if(!xWantedSize)
		return NULL;

	pre_alloc_hook( xWantedSize );

	vTaskSuspendAll();
	{
		/* If this is the first call to malloc then the heap will
		require initialisation to setup the list of free blocks. */
		if( xHeapHasBeenInitialised == pdFALSE )
		{
			prvHeapInit();
			xHeapHasBeenInitialised = pdTRUE;
		}

		/* The wanted size is increased so it can contain a xBlockLink
		structure in addition to the requested amount of bytes. */
		if( xWantedSize > 0 )
		{
			/* FIX: guard against size_t wrap-around.  Without this
			check a near-SIZE_MAX request overflows to a small
			value, passes the configTOTAL_HEAP_SIZE test below and
			corrupts the heap.  On overflow force the request to 0
			so the allocation fails cleanly. */
			if( ( xWantedSize + heapSTRUCT_SIZE ) > xWantedSize )
			{
				xWantedSize += heapSTRUCT_SIZE;

				/* Ensure that blocks are always aligned to the
				required number of bytes. */
				if( xWantedSize & portBYTE_ALIGNMENT_MASK )
				{
					/* Byte alignment required; the pad may
					also wrap at the very top of the
					size_t range. */
					size_t xPad = portBYTE_ALIGNMENT -
						( xWantedSize & portBYTE_ALIGNMENT_MASK );
					if( ( xWantedSize + xPad ) > xWantedSize )
						xWantedSize += xPad;
					else
						xWantedSize = 0;
				}
			}
			else
			{
				xWantedSize = 0;
			}
		}

		if( ( xWantedSize > 0 ) && ( xWantedSize < configTOTAL_HEAP_SIZE ) )
		{
			/* Blocks are stored in byte order - traverse the list
			from the start (smallest) block until one of adequate
			size is found. */
			pxPreviousBlock = &xStart;
			pxBlock = xStart.pxNextFreeBlock;
			while( ( pxBlock->xBlockSize < xWantedSize ) && ( pxBlock->pxNextFreeBlock ) )
			{
				pxPreviousBlock = pxBlock;
				pxBlock = pxBlock->pxNextFreeBlock;
			}

			/* If we found the end marker then a block of adequate
			size was not found. */
			if( pxBlock != &xEnd )
			{
				/* Return the memory space - jumping over the
				xBlockLink structure at its start. */
				pvReturn = ( void * ) ( ( ( unsigned char * ) pxPreviousBlock->pxNextFreeBlock ) + heapSTRUCT_SIZE );
#ifdef FREERTOS_ENABLE_MALLOC_STATS
				hI.totalAllocations++;
#endif // FREERTOS_ENABLE_MALLOC_STATS

				/* This block is being returned for use so must
				be taken off the list of free blocks. */
				pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
				pxBlock->pxNextFreeBlock = NULL;

				/* If the block is larger than required it can
				be split into two. */
				if( ( pxBlock->xBlockSize - xWantedSize ) > heapMINIMUM_BLOCK_SIZE )
				{
					/* This block is to be split into two.
					Create a new block following the number
					of bytes requested.  The void cast is
					used to prevent byte alignment warnings
					from the compiler. */
					pxNewBlockLink = ( void * ) ( ( ( unsigned char * ) pxBlock ) + xWantedSize );

					/* Calculate the sizes of two blocks
					split from the single block. */
					pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
					/* Assume bit 0 is 0 i.e. BLOCK_ALLOCATED
					flag is clear */
					pxBlock->xBlockSize = xWantedSize;

					/* Add the new block to the serial list */
					pxNewBlockLink->pxPrev = pxBlock;
					if( ! IS_LAST_BLOCK(pxNewBlockLink) )
						NEXT_BLOCK( pxNewBlockLink )->pxPrev = pxNewBlockLink;

					SET_ALLOCATED(pxBlock);
					/* insert the new block into the list of
					free blocks. */
					prvInsertBlockIntoFreeList( pxNewBlockLink );
				}
				else
				{
					SET_ALLOCATED(pxBlock);
				}
				xFreeBytesRemaining -= BLOCK_SIZE(pxBlock);
			}
		}
	}
	xTaskResumeAll();

#if( configUSE_MALLOC_FAILED_HOOK == 1 )
	{
		if( pvReturn == NULL )
		{
			DTRACE("Heap allocation failed.\n\r"
			       "Requested: %d\n\r"
			       "Available : %d\n\r", xWantedSize, xFreeBytesRemaining);
			extern void vApplicationMallocFailedHook( void );
			vApplicationMallocFailedHook();
		}
	}
#else
	if( pvReturn == NULL )
	{
		DTRACE("Heap allocation failed.\n\r"
		       "Requested: %d\n\r"
		       "Available : %d\n\r", xWantedSize, xFreeBytesRemaining);
#ifdef FREERTOS_ENABLE_MALLOC_STATS
		hI.failedAllocations++;
#endif /* FREERTOS_ENABLE_MALLOC_STATS */
	}
#endif

	if(pvReturn)
	{
		/* Record debug metadata in the block header and scribble over
		the payload so stale data is never handed out. */
		SET_ACTUAL_SIZE( pxBlock );
		SET_CALLER_ADDR( pxBlock );
		ATRACE("MDC A %10x %6d %10d R: %x\r\n",
		       pvReturn , BLOCK_SIZE( pxBlock ),
		       xFreeBytesRemaining, __builtin_return_address(0));
		randomizeAreaData((unsigned char*)pvReturn,
				  BLOCK_SIZE( pxBlock ) - heapSTRUCT_SIZE);
		post_alloc_hook( pvReturn );

#ifdef FREERTOS_ENABLE_MALLOC_STATS
		if ((configTOTAL_HEAP_SIZE - xFreeBytesRemaining) > hI.peakHeapUsage) {
			hI.peakHeapUsage =
				(configTOTAL_HEAP_SIZE - xFreeBytesRemaining);
		}
#endif
	}

	return pvReturn;
}
/* TBD: Make this function force inline */
/*
 * Insert a newly freed block into the size-ordered free list, coalescing it
 * with physically adjacent free blocks first.
 *
 * pxBlockToInsert must already be marked free (asserted below).  Two merges
 * are attempted: with the next physical block ("front merge") and with the
 * previous physical block ("back merge").  A merged neighbour is unlinked
 * from the free list and its size absorbed; after a back merge the previous
 * block becomes the block to insert.  Finally the (possibly grown) block is
 * linked into the free list at its size-ordered position.
 *
 * NOTE(review): both merge paths locate the neighbour on the free list with
 * an O(n) scan from xStart.
 */
static inline void prvInsertBlockIntoFreeList( xBlockLink * pxBlockToInsert )
{
	xBlockLink *pxIterator;
	xBlockLink *xBlockToMerge;
	size_t xBlockSize;

	ASSERT(IS_FREE_BLOCK(pxBlockToInsert));

	/* We have a block which we are about to declare as a free block.
	 * Lets find out if there is a free block in front of us and back
	 * of us */
	/* TRACE("pxBlockToInsert->pxNextFreeBlock: %x ->pxPrev = %x\n\r",
	   pxBlockToInsert->pxNextFreeBlock, pxBlockToInsert->pxPrev); */

	/* Check for front merge */
	if ( !IS_LAST_BLOCK( pxBlockToInsert ) ) {
		if ( IS_FREE_BLOCK(NEXT_BLOCK( pxBlockToInsert ) ) ) {
			xBlockToMerge = NEXT_BLOCK( pxBlockToInsert );

			/* Find out xBlockToMerge's location on the free list.*/
			for( pxIterator = &xStart;
			     pxIterator->pxNextFreeBlock != xBlockToMerge &&
			     pxIterator->pxNextFreeBlock != NULL;
			     pxIterator = pxIterator->pxNextFreeBlock )
			{}
#ifdef DEBUG_HEAP
			if(! ( pxIterator->pxNextFreeBlock == xBlockToMerge ) ) {
				/*
				 * This is not a good situation. The
				 * problem is that data structures here are
				 * showing that the next block is free. But
				 * ths free block could not be found in the
				 * free list.
				 */
				ATRACE("Target block dump :\n\r");
				ATRACE("pxNextFreeBlock : 0x%x\n\r",
				       xBlockToMerge->pxNextFreeBlock);
				ATRACE("pxPrev : 0x%x\n\r",
				       xBlockToMerge->pxPrev);
				ATRACE("xBlockSize : %d (0x%x)\n\r",
				       xBlockToMerge->xBlockSize,
				       xBlockToMerge->xBlockSize);
#ifdef DEBUG_HEAP_EXTRA
				ATRACE("xActualBlockSize: %d\n\r",
				       xBlockToMerge->xActualBlockSize);
#endif /* DEBUG_HEAP_EXTRA */
				ATRACE("Panic\n\r");
				while(1) {}
			}
#endif /* DEBUG_HEAP */
			ASSERT( pxIterator->pxNextFreeBlock == xBlockToMerge );
			//TRACE("Merge: F\n\r");

			/* Delete node from Free list */
			pxIterator->pxNextFreeBlock = xBlockToMerge->pxNextFreeBlock;

			/* Delete xBlockToMerge node from Serial List */
			if( ! IS_LAST_BLOCK( xBlockToMerge ) )
				NEXT_BLOCK( xBlockToMerge )->pxPrev = pxBlockToInsert;

			/* Update node size */
			pxBlockToInsert->xBlockSize += BLOCK_SIZE( xBlockToMerge );
			/* Now forget about xBlockToMerge */
		}
	}

	/* Check for back merge */
	if ( ! IS_FIRST_BLOCK(pxBlockToInsert) ) {
		if ( IS_FREE_BLOCK( PREV_BLOCK(pxBlockToInsert ))) {
			xBlockToMerge = PREV_BLOCK(pxBlockToInsert);

			/* Find out xBlockToMerge's location on the free list */
			for( pxIterator = &xStart;
			     pxIterator->pxNextFreeBlock != xBlockToMerge &&
			     pxIterator->pxNextFreeBlock != NULL;
			     pxIterator = pxIterator->pxNextFreeBlock )
			{}

			ASSERT( pxIterator->pxNextFreeBlock == xBlockToMerge );
			//TRACE("Merge: R\n\r");

			/* Delete xBlockToMerge node from Free list */
			pxIterator->pxNextFreeBlock = xBlockToMerge->pxNextFreeBlock;

			/* Delete _ pxBlockToInsert _ node from Serial List */
			if( ! IS_LAST_BLOCK( pxBlockToInsert ) )
				NEXT_BLOCK( pxBlockToInsert )->pxPrev = xBlockToMerge;

			/* Update node size */
			xBlockToMerge->xBlockSize += BLOCK_SIZE( pxBlockToInsert );

			/* Now forget about pxBlockToInsert: from here on the
			 * merged predecessor is the block being inserted. */
			pxBlockToInsert = xBlockToMerge;
		}
	}

	xBlockSize = pxBlockToInsert->xBlockSize;

	/* Iterate through the list until a block is found that has a larger size */
	/* than the block we are inserting. */
	for( pxIterator = &xStart;
	     pxIterator->pxNextFreeBlock->xBlockSize < xBlockSize;
	     pxIterator = pxIterator->pxNextFreeBlock )
	{
		/* There is nothing to do here - just iterate to the correct
		position.  (xEnd's size acts as the sentinel that terminates
		this scan.) */
	}

	/* Update the list to include the block being inserted in the correct */
	/* position. */
	pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
	pxIterator->pxNextFreeBlock = pxBlockToInsert;
}
/*
 * TLSF allocator: allocate `size` bytes from the pool rooted at block_ptr.
 *
 * Two-Level Segregated Fit: the request is mapped to a (fl, sl) index pair,
 * a free block is found via the first- and second-level bitmaps in O(1),
 * unlinked from its segregated list, and split if the remainder is at least
 * MIN_SIZE words.
 *
 * Returns a pointer to the block's payload, or NULL when the pool is not
 * initialised, size is 0, or no free block is large enough.
 */
void *MALLOC_FUNCTION_EX (size_t size, char *block_ptr)
{
	TLSF_t *ptr_TLSF;
#ifdef SANITY_CHECK
	__u32 req_size = size;
#endif
	__s32 fl, sl;	/* first/second-level segregated-list indices */
	__u32 old_size, last_block, aux_size, new_size;
	block_header_t *bh, *bh2, *bh3;

	// spark_print("Inside malloc\n");
	ptr_TLSF = (TLSF_t *) block_ptr;

#ifdef SANITY_CHECK
	checking_structure(ptr_TLSF, "Entering Malloc");
	check_range_ptr (block_ptr, "malloc 1");
#endif

	/* Reject uninitialised pools and zero-byte requests. */
	if (!ptr_TLSF || ptr_TLSF -> magic_number != MAGIC_NUMBER) {
		// PRINT_MSG ("malloc() error: TLSF structure is not initialized\n");
		// PRINT_MSG
		// ("Hint: Execute init_memory_pool() before calling malloc()");
		return NULL;
	}

	if (!size) {
		// PRINT_MSG ("malloc() error: requested size must be > 0\n");
		return NULL;
	}

	// Requested size must be translated in TLSF_WORDS
	old_size = BYTES2TLSF_WORDS(size);

	if (old_size < MIN_SIZE) {
		/* Tiny requests are rounded up to the minimum block, which
		 * lives at index (0, 0). */
		size = MIN_SIZE;
		fl = 0;
		sl = 0;
	} else {
		/* Map the size to its (fl, sl) class; `size` is rounded up
		 * to the class boundary by mapping_function. */
		mapping_function (old_size, &fl, &sl, &size, ptr_TLSF);
#ifdef SANITY_CHECK
		check_fl_sl (fl, sl, ptr_TLSF, "malloc 1");
#endif
		/* Search one class up so any block found is guaranteed to
		 * fit; carry into the next first-level bucket if needed. */
		if (++sl == ptr_TLSF -> max_sl_index) {
			fl ++;
			sl = 0;
		}
		/*
		 * This is the reason of the internal fragmentation
		 * The block given is greater that the asked for size
		 */
		// The TLSF structure begins indexing size on MIN_LOG2_SIZE
		fl -= MIN_LOG2_SIZE;
	}

#ifdef SANITY_CHECK
	if (req_size > TLSF_WORDS2BYTES(size)) {
		SANITY_PRINTF("SANITY error: resquested %d given %d\n",
			      req_size, TLSF_WORDS2BYTES(size));
	}
	check_fl_sl_2 (fl, sl, ptr_TLSF, "malloc 2");
#endif

	/*----------------------------------------*/
	/* The search for a free block begins now */
	/*----------------------------------------*/

	/*
	 * Our first try, we take the first free block
	 * from fl_array or its buddy
	 */
	THREAD_LOCK();

	/* Mask off second-level classes smaller than requested and pick the
	 * first non-empty one within the same first-level bucket. */
	sl = ptr_TLSF -> fl_array[fl].bitmapSL & ((~0) << sl);
	if (sl != 0) {
		sl = TLSF_fls(sl);
#ifdef SANITY_CHECK
		check_fl_sl_2 (fl, sl, ptr_TLSF, "malloc 3");
#endif
		goto found;
	}

	/*
	 * On the last case a free block is looked for using the bitmaps
	 */
	/* No fit in this bucket: search strictly larger first-level buckets.
	 * NOTE(review): assumes TLSF_fls() signals "no bit set" with a
	 * non-positive value — confirm against its definition. */
	fl = TLSF_fls(ptr_TLSF -> bitmapFL & ((~0) << (fl + 1)));
	if (fl > 0) {
		sl = TLSF_fls(ptr_TLSF -> fl_array[fl].bitmapSL);
#ifdef SANITY_CHECK
		check_fl_sl_2 (fl, sl, ptr_TLSF, "malloc 4");
#endif
		goto found;
	}

	/*
	 * HUGGGG, NOT ENOUGHT MEMORY
	 * I think that we have done all that we have been able, I'm sorry
	 */
	THREAD_UNLOCK();
	// PRINT_MSG ("malloc() error: Memory pool exhausted!!!\n");
	// PRINT_MSG ("Hint: You can add memory through add_new_block()\n");
	// PRINT_MSG ("Hint: However this is not a real-time guaranteed way\n");
	return NULL;

	/* end of the search */
	/*------------------------------------------------------------*/

	/*
	 * we can say: YESSSSSSSSSSS, we have enought memory!!!!
	 */
found:
	/* Pop the head block of the chosen (fl, sl) segregated list. */
	bh = ptr_TLSF -> fl_array [fl].sl_array [sl];
#ifdef SANITY_CHECK
	check_range_bh (bh, "malloc 1");
	check_mn (bh, "malloc 1");
#endif
	ptr_TLSF -> fl_array [fl].sl_array [sl] = bh -> ptr.free_ptr.next;
#ifdef SANITY_CHECK
	bh3 = ptr_TLSF -> fl_array[fl].sl_array[sl];
	if (bh3 != NULL) {
		check_range_bh (bh3, "malloc 2");
		check_mn (bh3, "malloc 2");
	}
#endif
	if (ptr_TLSF -> fl_array [fl].sl_array [sl]) {
		ptr_TLSF -> fl_array [fl].sl_array [sl] -> ptr.free_ptr.prev = NULL;
	} else {
		/* List became empty: clear its bitmap bits so future
		 * searches skip it. */
		TLSF__clear_bit (sl, ptr_TLSF -> fl_array[fl].bitmapSL);
		if (!ptr_TLSF -> fl_array[fl].bitmapSL)
			TLSF__clear_bit (fl, ptr_TLSF -> bitmapFL);
	}

	/* can bh be splitted? */
	new_size = (int)(GET_BLOCK_SIZE(bh) - size - beg_header_overhead);
	/* The result of the substraction, may be negative...
	   but new_size is unsigned — hence the signed casts below. */
	if ((int) new_size >= (int) MIN_SIZE) {
		/*
		 * Yes, bh will be splitted into two blocks
		 */
		/* The new block will begin at the end of the current block */
		last_block = IS_LAST_BLOCK(bh)?1:0;
		bh -> size = size;
		SET_USED_BLOCK(bh);

		/* Remainder block starts right after bh's payload. */
		bh2 = (block_header_t *) (bh -> ptr.buffer +
			TLSF_WORDS2BYTES (GET_BLOCK_SIZE(bh)));
#ifdef SANITY_CHECK
		bh2 -> mw = MAGIC_NUMBER;
#endif
		bh2 -> prev_phys_block = bh;
		bh2 -> size = new_size;
		if (last_block)
			SET_LAST_BLOCK (bh2);
		//aux_size = GET_BLOCK_SIZE(bh2);

		/* Classify the remainder for re-insertion; oversized
		 * remainders go to the top-most class. */
		if (new_size < ptr_TLSF -> TLSF_max_struct_size) {
			mapping_function (new_size, &fl, &sl, &aux_size, ptr_TLSF);
#ifdef SANITY_CHECK
			check_fl_sl (fl, sl, ptr_TLSF, "malloc 5");
#endif
		} else {
			fl = ptr_TLSF -> max_fl_index - 1;
			sl = ptr_TLSF -> max_sl_index - 1;
		}
		fl -= MIN_LOG2_SIZE;
#ifdef SANITY_CHECK
		check_fl_sl_2 (fl, sl, ptr_TLSF, "malloc 6");
#endif
		init_and_insert_block (ptr_TLSF, bh2, fl, sl);
#ifdef SANITY_CHECK
		check_range_bh (bh2, "malloc 3");
		check_mn (bh2, "malloc 3");
#endif
		/* Fix the back-link of the block that physically follows the
		 * remainder (unless the remainder is now the last block). */
		if (!last_block) {
			bh3 = (block_header_t *) (bh2 -> ptr.buffer +
				TLSF_WORDS2BYTES(new_size));
			bh3 -> prev_phys_block = bh2;
#ifdef SANITY_CHECK
			check_range_bh (bh3, "malloc 4");
			check_mn (bh3, "malloc 4");
#endif
		}
	}
	SET_USED_BLOCK(bh);

	THREAD_UNLOCK();

#ifdef SANITY_CHECK
	checking_structure (ptr_TLSF, "Leaving Malloc");
#endif
	// spark_print("Leaving malloc\n");
	return (void *) bh -> ptr.buffer;
}