bool _Heap_Size_of_alloc_area(
  Heap_Control *heap,
  void *alloc_begin_ptr,
  uintptr_t *alloc_size
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
  Heap_Block *block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
  Heap_Block *next_block = NULL;
  uintptr_t block_size = 0;

  if ( !_Heap_Is_block_in_heap( heap, block ) ) {
    return false;
  }

  block_size = _Heap_Block_size( block );
  next_block = _Heap_Block_at( block, block_size );

  if (
    !_Heap_Is_block_in_heap( heap, next_block )
      || !_Heap_Is_prev_used( next_block )
  ) {
    return false;
  }

  *alloc_size = (uintptr_t) next_block + HEAP_BLOCK_SIZE_OFFSET - alloc_begin;

  return true;
}
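/*
 * Usage sketch (not from the original sources; the heap instance and the
 * request size are made up for illustration): query how much usable space an
 * allocation actually received.  The reported size may exceed the requested
 * size because the heap rounds block sizes up to page_size multiples.
 */
static void example_query_alloc_size( Heap_Control *some_heap )
{
  void *p = _Heap_Allocate( some_heap, 100 );

  if ( p != NULL ) {
    uintptr_t usable = 0;

    if ( _Heap_Size_of_alloc_area( some_heap, p, &usable ) ) {
      /* 'usable' is at least the requested 100 bytes */
      printk( "allocation %p provides %u bytes\n", p, (unsigned) usable );
    }

    _Heap_Free( some_heap, p );
  }
}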
void _Heap_Get_free_information(
  Heap_Control *the_heap,
  Heap_Information *info
)
{
  Heap_Block *the_block;
  Heap_Block *const tail = _Heap_Free_list_tail(the_heap);

  info->number = 0;
  info->largest = 0;
  info->total = 0;

  for (
    the_block = _Heap_Free_list_first(the_heap);
    the_block != tail;
    the_block = the_block->next
  ) {
    uint32_t const the_size = _Heap_Block_size(the_block);

    /* As we always coalesce free blocks, prev block must have been used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    info->number++;
    info->total += the_size;
    if ( info->largest < the_size )
      info->largest = the_size;
  }
}
static Heap_Resize_status _Heap_Resize_block_checked(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t new_alloc_size,
  uintptr_t *old_size,
  uintptr_t *new_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  uintptr_t alloc_size = block_end - alloc_begin + HEAP_ALLOC_BONUS;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );
  uintptr_t next_block_size = _Heap_Block_size( next_block );
  bool next_block_is_free = _Heap_Is_free( next_block );

  _HAssert( _Heap_Is_block_in_heap( heap, next_block ) );
  _HAssert( _Heap_Is_prev_used( next_block ) );

  *old_size = alloc_size;

  if ( next_block_is_free ) {
    block_size += next_block_size;
    alloc_size += next_block_size;
  }

  if ( new_alloc_size > alloc_size ) {
    return HEAP_RESIZE_UNSATISFIED;
  }

  if ( next_block_is_free ) {
    _Heap_Block_set_size( block, block_size );

    _Heap_Free_list_remove( next_block );

    next_block = _Heap_Block_at( block, block_size );
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;

    /* Statistics */
    --stats->free_blocks;
    stats->free_size -= next_block_size;
  }

  block = _Heap_Block_allocate( heap, block, alloc_begin, new_alloc_size );

  block_size = _Heap_Block_size( block );
  next_block = _Heap_Block_at( block, block_size );
  *new_size = (uintptr_t) next_block - alloc_begin + HEAP_ALLOC_BONUS;

  /* Statistics */
  ++stats->resizes;

  return HEAP_RESIZE_SUCCESSFUL;
}
void _Heap_Get_information(
  Heap_Control *the_heap,
  Heap_Information_block *the_info
)
{
  Heap_Block *the_block = the_heap->first_block;
  Heap_Block *const end = the_heap->last_block;

  _HAssert(the_block->prev_size == the_heap->page_size);
  _HAssert(_Heap_Is_prev_used(the_block));

  the_info->Free.number  = 0;
  the_info->Free.total   = 0;
  the_info->Free.largest = 0;
  the_info->Used.number  = 0;
  the_info->Used.total   = 0;
  the_info->Used.largest = 0;

  while ( the_block != end ) {
    uintptr_t const the_size = _Heap_Block_size(the_block);
    Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
    Heap_Information *info;

    if ( _Heap_Is_prev_used(next_block) )
      info = &the_info->Used;
    else
      info = &the_info->Free;

    info->number++;
    info->total += the_size;
    if ( info->largest < the_size )
      info->largest = the_size;

    the_block = next_block;
  }

  /*
   * Handle the last dummy block.  Don't consider this block to be "used"
   * as the client never allocated it.  Make 'Used.total' contain this
   * block's overhead though.
   */
  the_info->Used.total += HEAP_BLOCK_HEADER_SIZE;
}
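/*
 * Usage sketch (illustrative, not from the original sources): gather and print
 * a snapshot of the heap.  The Heap_Information_block fields used here follow
 * the initialization above; the 'some_heap' instance is assumed to exist.
 */
static void example_report_heap( Heap_Control *some_heap )
{
  Heap_Information_block info;

  _Heap_Get_information( some_heap, &info );

  printk(
    "used: %u blocks, %u bytes; free: %u blocks, %u bytes (largest %u)\n",
    (unsigned) info.Used.number,
    (unsigned) info.Used.total,
    (unsigned) info.Free.number,
    (unsigned) info.Free.total,
    (unsigned) info.Free.largest
  );
}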
static Heap_Block *_Heap_Block_allocate_from_end(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  Heap_Block *const new_block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  uintptr_t const new_block_begin = (uintptr_t) new_block;
  uintptr_t const new_block_size = block_end - new_block_begin;

  block_end = new_block_begin;
  block_size = block_end - block_begin;

  _HAssert( block_size >= heap->min_block_size );
  _HAssert( new_block_size >= heap->min_block_size );

  /* Statistics */
  stats->free_size += block_size;

  if ( _Heap_Is_prev_used( block ) ) {
    _Heap_Free_list_insert_after( free_list_anchor, block );

    free_list_anchor = block;

    /* Statistics */
    ++stats->free_blocks;
  } else {
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_begin = (uintptr_t) block;
    block_size += prev_block_size;
  }

  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;

  new_block->prev_size = block_size;
  new_block->size_and_flag = new_block_size;

  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );

  return new_block;
}
void *_Heap_Allocate(
  Heap_Control *the_heap,
  size_t size
)
{
  uint32_t the_size;
  uint32_t search_count;
  Heap_Block *the_block;
  void *ptr = NULL;
  Heap_Statistics *const stats = &the_heap->stats;
  Heap_Block *const tail = _Heap_Tail(the_heap);

  the_size =
    _Heap_Calc_block_size(size, the_heap->page_size, the_heap->min_block_size);
  if (the_size == 0)
    return NULL;

  /* Find large enough free block. */
  for (the_block = _Heap_First(the_heap), search_count = 0;
       the_block != tail;
       the_block = the_block->next, ++search_count)
  {
    /* As we always coalesce free blocks, prev block must have been used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    /*
     * Don't bother to mask out the HEAP_PREV_USED bit as it won't change
     * the result of the comparison.
     */
    if (the_block->size >= the_size) {
      (void) _Heap_Block_allocate(the_heap, the_block, the_size);

      ptr = _Heap_User_area(the_block);

      stats->allocs += 1;
      stats->searches += search_count + 1;

      _HAssert(_Heap_Is_aligned_ptr(ptr, the_heap->page_size));
      break;
    }
  }

  if (stats->max_search < search_count)
    stats->max_search = search_count;

  return ptr;
}
void _Heap_Iterate(
  Heap_Control *heap,
  Heap_Block_visitor visitor,
  void *visitor_arg
)
{
  Heap_Block *current = heap->first_block;
  Heap_Block *end = heap->last_block;
  bool stop = false;

  while ( !stop && current != end ) {
    uintptr_t size = _Heap_Block_size( current );
    Heap_Block *next = _Heap_Block_at( current, size );
    bool used = _Heap_Is_prev_used( next );

    stop = (*visitor)( current, size, used, visitor_arg );

    current = next;
  }
}
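/*
 * Usage sketch (illustrative, not from the original sources): a visitor for
 * _Heap_Iterate() that counts used blocks.  The parameter list is assumed
 * from the call above (block, block size, used flag, opaque argument), and a
 * true return value would stop the iteration.
 */
static bool example_count_used_visitor(
  Heap_Block *block,
  uintptr_t block_size,
  bool block_is_used,
  void *visitor_arg
)
{
  size_t *used_count = visitor_arg;

  (void) block;
  (void) block_size;

  if ( block_is_used ) {
    ++(*used_count);
  }

  return false; /* keep iterating */
}

/* Invoked as: _Heap_Iterate( heap, example_count_used_visitor, &count ); */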
/*
 * Allocate a block of size 'alloc_size' from 'the_block' belonging to
 * 'the_heap'.  Split 'the_block' if possible, otherwise allocate it entirely.
 * When split, make the upper part used, and leave the lower part free.
 * Return the block allocated.
 *
 * NOTE: this is similar to _Heap_Block_allocate(), except that it makes a
 * different part of the split block used and returns the address of the block
 * instead of its size.  We need such a variant for _Heap_Allocate_aligned()
 * as we can't allow the user pointer to be too far from the beginning of the
 * block, so that we can recover the start-of-block address from the user
 * pointer without additional information stored in the heap.
 */
static Heap_Block *block_allocate(
  Heap_Control *the_heap,
  Heap_Block *the_block,
  uint32_t alloc_size
)
{
  Heap_Statistics *const stats = &the_heap->stats;
  uint32_t const block_size = _Heap_Block_size(the_block);
  uint32_t const the_rest = block_size - alloc_size;

  _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
  _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
  _HAssert(alloc_size <= block_size);
  _HAssert(_Heap_Is_prev_used(the_block));

  if (the_rest >= the_heap->min_block_size) {
    /* Split the block so that the lower part stays free and the upper part
       becomes used. */
    the_block->size = the_rest | HEAP_PREV_USED;
    the_block = _Heap_Block_at(the_block, the_rest);
    the_block->prev_size = the_rest;
    the_block->size = alloc_size;
  } else {
    /* Don't split the block as the remainder is either zero or too small to
       be used as a separate free block.  Change 'alloc_size' to the size of
       the block and remove the block from the list of free blocks. */
    _Heap_Block_remove(the_block);
    alloc_size = block_size;
    stats->free_blocks -= 1;
  }

  /* Mark the block as used (in the next block). */
  _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;

  /* Update statistics */
  stats->free_size -= alloc_size;
  if (stats->min_free_size > stats->free_size)
    stats->min_free_size = stats->free_size;
  stats->used_blocks += 1;

  return the_block;
}
bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t alloc_begin = (uintptr_t) alloc_begin_ptr;
  Heap_Block *block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  Heap_Block *next_block = NULL;
  uintptr_t block_size = 0;
  uintptr_t next_block_size = 0;
  bool next_is_free = false;

  if ( !_Heap_Is_block_in_heap( heap, block ) ) {
    return false;
  }

  block_size = _Heap_Block_size( block );
  next_block = _Heap_Block_at( block, block_size );

  if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
    _HAssert( false );
    return false;
  }

  if ( !_Heap_Is_prev_used( next_block ) ) {
    _HAssert( false );
    return false;
  }

  next_block_size = _Heap_Block_size( next_block );
  next_is_free = next_block != heap->last_block
    && !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ) );

  if ( !_Heap_Is_prev_used( block ) ) {
    uintptr_t const prev_size = block->prev_size;
    Heap_Block *const prev_block = _Heap_Block_at( block, -prev_size );

    if ( !_Heap_Is_block_in_heap( heap, prev_block ) ) {
      _HAssert( false );
      return false;
    }

    /* As we always coalesce free blocks, the block that precedes prev_block
       must have been used. */
    if ( !_Heap_Is_prev_used( prev_block ) ) {
      _HAssert( false );
      return false;
    }

    if ( next_is_free ) {       /* coalesce both */
      uintptr_t const size = block_size + prev_size + next_block_size;

      _Heap_Free_list_remove( next_block );
      stats->free_blocks -= 1;
      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      next_block = _Heap_Block_at( prev_block, size );
      _HAssert( !_Heap_Is_prev_used( next_block ) );
      next_block->prev_size = size;
    } else {                    /* coalesce prev */
      uintptr_t const size = block_size + prev_size;

      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
      next_block->prev_size = size;
    }
  } else if ( next_is_free ) {  /* coalesce next */
    uintptr_t const size = block_size + next_block_size;

    _Heap_Free_list_replace( next_block, block );
    block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
    next_block = _Heap_Block_at( block, size );
    next_block->prev_size = size;
  } else {                      /* no coalesce */
    /* Add 'block' to the head of the free blocks list as it tends to
       produce less fragmentation than adding to the tail. */
    _Heap_Free_list_insert_after( _Heap_Free_list_head( heap ), block );
    block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
    next_block->prev_size = block_size;

    /* Statistics */
    ++stats->free_blocks;
    if ( stats->max_free_blocks < stats->free_blocks ) {
      stats->max_free_blocks = stats->free_blocks;
    }
  }

  /* Statistics */
  --stats->used_blocks;
  ++stats->frees;
  stats->free_size += block_size;

  return true;
}
boolean _Heap_Walk(
  Heap_Control *the_heap,
  int source,
  boolean do_dump
)
{
  Heap_Block *the_block = the_heap->start;
  Heap_Block *const end = the_heap->final;
  Heap_Block *const tail = _Heap_Tail(the_heap);
  int error = 0;
  int passes = 0;

  do_dump = FALSE;

  /*
   * We don't want to allow walking the heap until we have
   * transferred control to the user task so we watch the
   * system state.
   */
  /*
  if ( !_System_state_Is_up( _System_state_Get() ) )
    return TRUE;
  */

  if (source < 0)
    source = the_heap->stats.instance;

  if (do_dump == TRUE)
    printk("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
      source, the_block, end,
      _Heap_First(the_heap), _Heap_Last(the_heap),
      the_heap->begin, the_heap->end);

  /*
   * Handle the 1st block
   */
  if (!_Heap_Is_prev_used(the_block)) {
    printk("PASS: %d !HEAP_PREV_USED flag of 1st block isn't set\n", source);
    error = 1;
  }

  if (the_block->prev_size != the_heap->page_size) {
    printk("PASS: %d !prev_size of 1st block isn't page_size\n", source);
    error = 1;
  }

  while ( the_block != end ) {
    uint32_t const the_size = _Heap_Block_size(the_block);
    Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
    boolean prev_used = _Heap_Is_prev_used(the_block);

    if (do_dump) {
      printk("PASS: %d block %p size %d(%c)",
        source, the_block, the_size, (prev_used ? 'U' : 'F'));
      if (prev_used)
        printk(" prev_size %d", the_block->prev_size);
      else
        printk(" (prev_size) %d", the_block->prev_size);
    }

    if (!_Heap_Is_block_in(the_heap, next_block)) {
      if (do_dump) printk("\n");
      printk("PASS: %d !block %p is out of heap\n", source, next_block);
      error = 1;
      break;
    }

    if (!_Heap_Is_prev_used(next_block)) {
      if (do_dump)
        printk(" prev %p next %p", the_block->prev, the_block->next);
      if (_Heap_Block_size(the_block) != next_block->prev_size) {
        if (do_dump) printk("\n");
        printk("PASS: %d !front and back sizes don't match", source);
        error = 1;
      }
      if (!prev_used) {
        if (do_dump || error) printk("\n");
        printk("PASS: %d !two consecutive blocks are free", source);
        error = 1;
      }

      { /* Check if 'the_block' is in the free block list */
        Heap_Block *block = _Heap_First(the_heap);
        while (block != the_block && block != tail)
          block = block->next;
        if (block != the_block) {
          if (do_dump || error) printk("\n");
          printk("PASS: %d !the_block not in the free list", source);
          error = 1;
        }
      }
    }
    if (do_dump || error) printk("\n");

    if (the_size < the_heap->min_block_size) {
      printk("PASS: %d !block size is too small\n", source);
      error = 1;
      break;
    }
    if (!_Heap_Is_aligned( the_size, the_heap->page_size)) {
      printk("PASS: %d !block size is misaligned\n", source);
      error = 1;
    }

    if (++passes > (do_dump ? 10 : 0) && error)
      break;

    the_block = next_block;
  }

  if (the_block != end) {
    printk("PASS: %d !last block address isn't equal to 'final' %p %p\n",
      source, the_block, end);
    error = 1;
  }

  if (_Heap_Block_size(the_block) != the_heap->page_size) {
    printk("PASS: %d !last block's size isn't page_size (%d != %d)\n",
      source, _Heap_Block_size(the_block), the_heap->page_size);
    error = 1;
  }

  if (do_dump && error)
    _Internal_error_Occurred( INTERNAL_ERROR_CORE, TRUE, 0xffff0000 );

  return error;
}
void *_Heap_Allocate_aligned(
  Heap_Control *the_heap,
  size_t size,
  uint32_t alignment
)
{
  uint32_t search_count;
  Heap_Block *the_block;
  void *user_ptr = NULL;
  uint32_t const page_size = the_heap->page_size;
  Heap_Statistics *const stats = &the_heap->stats;
  Heap_Block *const tail = _Heap_Tail(the_heap);
  uint32_t const end_to_user_offs = size - HEAP_BLOCK_HEADER_OFFSET;
  uint32_t const the_size =
    _Heap_Calc_block_size(size, page_size, the_heap->min_block_size);

  if (the_size == 0)
    return NULL;

  if (alignment == 0)
    alignment = CPU_ALIGNMENT;

  /* Find a large enough free block that satisfies the alignment
     requirements. */
  for (the_block = _Heap_First(the_heap), search_count = 0;
       the_block != tail;
       the_block = the_block->next, ++search_count)
  {
    uint32_t const block_size = _Heap_Block_size(the_block);

    /* As we always coalesce free blocks, prev block must have been used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    if (block_size >= the_size) { /* the_block is large enough. */
      _H_uptr_t user_addr;
      _H_uptr_t aligned_user_addr;
      _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));

      /* Calculate 'aligned_user_addr' that will become the user pointer we
         return.  It should be at least 'end_to_user_offs' bytes less than
         the 'block_end' and should be aligned on the 'alignment' boundary.
         Calculations are from the 'block_end' as we are going to split the
         free block so that the upper part of the block becomes the used
         block. */
      _H_uptr_t const block_end = _H_p2u(the_block) + block_size;
      aligned_user_addr = block_end - end_to_user_offs;
      _Heap_Align_down_uptr(&aligned_user_addr, alignment);

      /* 'user_addr' is 'aligned_user_addr' aligned further down to the
         'page_size' boundary.  We need it as blocks' user areas should begin
         only at 'page_size' aligned addresses. */
      user_addr = aligned_user_addr;
      _Heap_Align_down_uptr(&user_addr, page_size);

      /* Make sure the calculated 'user_addr' didn't run out of 'the_block'. */
      if (user_addr >= user_area) {

        /* The block seems to be acceptable.  Check if the remainder of
           'the_block' is less than 'min_block_size' so that 'the_block'
           won't actually be split at the address we assume. */
        if (user_addr - user_area < the_heap->min_block_size) {

          /* The block won't be split, so 'user_addr' will be equal to the
             'user_area'. */
          user_addr = user_area;

          /* We can't allow the distance between 'user_addr' and
             'aligned_user_addr' to be outside of the [0,page_size) range.
             If we do, we will need to store this distance somewhere to be
             able to resurrect the block address from the user pointer.
             (Having the distance within the [0,page_size) range allows
             resurrection by aligning the user pointer down to the nearest
             'page_size' boundary.) */
          if (aligned_user_addr - user_addr >= page_size) {

            /* The user pointer will be too far from 'user_addr'.  See if we
               can make 'aligned_user_addr' close enough to 'user_addr'. */
            aligned_user_addr = user_addr;
            _Heap_Align_up_uptr(&aligned_user_addr, alignment);
            if (aligned_user_addr - user_addr >= page_size) {
              /* No, we can't use the block. */
              aligned_user_addr = 0;
            }
          }
        }

        if (aligned_user_addr) {

          /* The block is indeed acceptable: calculate the size of the block
             to be allocated and perform the allocation. */
          uint32_t const alloc_size =
            block_end - user_addr + HEAP_BLOCK_USER_OFFSET;

          _HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));

          the_block = block_allocate(the_heap, the_block, alloc_size);

          stats->searches += search_count + 1;
          stats->allocs += 1;

          check_result(the_heap, the_block, user_addr,
            aligned_user_addr, size);

          user_ptr = (void*)aligned_user_addr;
          break;
        }
      }
    }
  }

  if (stats->max_search < search_count)
    stats->max_search = search_count;

  return user_ptr;
}
void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const block_size_floor =
    alloc_size + HEAP_BLOCK_HEADER_SIZE - HEAP_ALLOC_BONUS;
  uintptr_t const page_size = heap->page_size;
  Heap_Block *block = NULL;
  uintptr_t alloc_begin = 0;
  uint32_t search_count = 0;
  bool search_again = false;

  if ( block_size_floor < alloc_size ) {
    /* Integer overflow occurred */
    return NULL;
  }

  if ( boundary != 0 ) {
    if ( boundary < alloc_size ) {
      return NULL;
    }

    if ( alignment == 0 ) {
      alignment = page_size;
    }
  }

  do {
    Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );

    block = _Heap_Free_list_first( heap );
    while ( block != free_list_tail ) {
      _HAssert( _Heap_Is_prev_used( block ) );

      _Heap_Protection_block_check( heap, block );

      /*
       * The HEAP_PREV_BLOCK_USED flag is always set in the block
       * size_and_flag field.  Thus the value is about one unit larger than
       * the real block size.  The greater than operator takes this into
       * account.
       */
      if ( block->size_and_flag > block_size_floor ) {
        if ( alignment == 0 ) {
          alloc_begin = _Heap_Alloc_area_of_block( block );
        } else {
          alloc_begin = _Heap_Check_block(
            heap,
            block,
            alloc_size,
            alignment,
            boundary
          );
        }
      }

      /* Statistics */
      ++search_count;

      if ( alloc_begin != 0 ) {
        break;
      }

      block = block->next;
    }

    search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
  } while ( search_again );

  if ( alloc_begin != 0 ) {
    /* Statistics */
    ++stats->allocs;
    stats->searches += search_count;

    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );

    _Heap_Check_allocation(
      heap,
      block,
      alloc_begin,
      alloc_size,
      alignment,
      boundary
    );
  }

  /* Statistics */
  if ( stats->max_search < search_count ) {
    stats->max_search = search_count;
  }

  return (void *) alloc_begin;
}
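/*
 * Usage sketch (illustrative, not from the original sources): request a
 * 128-byte area aligned to 64 bytes that must not cross a 4096-byte boundary.
 * The 'some_heap' instance is assumed to be initialized elsewhere.
 */
static void *example_aligned_bounded_alloc( Heap_Control *some_heap )
{
  void *p = _Heap_Allocate_aligned_with_boundary( some_heap, 128, 64, 4096 );

  if ( p == NULL ) {
    /* No free block satisfied the size, alignment and boundary constraints. */
  }

  return p;
}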
bool _Heap_Walk(
  Heap_Control *heap,
  int source,
  bool dump
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  Heap_Block *const first_block = heap->first_block;
  Heap_Block *const last_block = heap->last_block;
  Heap_Block *block = first_block;
  Heap_Walk_printer printer =
    dump ? _Heap_Walk_print : _Heap_Walk_print_nothing;

  if ( !_System_state_Is_up( _System_state_Get() ) ) {
    return true;
  }

  if ( !_Heap_Walk_check_control( source, printer, heap ) ) {
    return false;
  }

  do {
    uintptr_t const block_begin = (uintptr_t) block;
    uintptr_t const block_size = _Heap_Block_size( block );
    bool const prev_used = _Heap_Is_prev_used( block );
    Heap_Block *const next_block = _Heap_Block_at( block, block_size );
    uintptr_t const next_block_begin = (uintptr_t) next_block;
    bool const is_not_last_block = block != last_block;

    if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: next block 0x%08x not in heap\n",
        block,
        next_block
      );

      return false;
    }

    if ( !_Heap_Is_aligned( block_size, page_size ) && is_not_last_block ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: block size %u not page aligned\n",
        block,
        block_size
      );

      return false;
    }

    if ( block_size < min_block_size && is_not_last_block ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: size %u < min block size %u\n",
        block,
        block_size,
        min_block_size
      );

      return false;
    }

    if ( next_block_begin <= block_begin && is_not_last_block ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: next block 0x%08x is not a successor\n",
        block,
        next_block
      );

      return false;
    }

    if ( !_Heap_Is_prev_used( next_block ) ) {
      if ( !_Heap_Walk_check_free_block( source, printer, heap, block ) ) {
        return false;
      }
    } else if ( prev_used ) {
      (*printer)(
        source,
        false,
        "block 0x%08x: size %u\n",
        block,
        block_size
      );
    } else {
      (*printer)(
        source,
        false,
        "block 0x%08x: size %u, prev_size %u\n",
        block,
        block_size,
        block->prev_size
      );
    }

    block = next_block;
  } while ( block != first_block );

  return true;
}
static bool _Heap_Walk_check_free_block(
  int source,
  Heap_Walk_printer printer,
  Heap_Control *heap,
  Heap_Block *block
)
{
  Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
  Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
  Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
  Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
  bool const prev_used = _Heap_Is_prev_used( block );
  uintptr_t const block_size = _Heap_Block_size( block );
  Heap_Block *const next_block = _Heap_Block_at( block, block_size );

  (*printer)(
    source,
    false,
    "block 0x%08x: size %u, prev 0x%08x%s, next 0x%08x%s\n",
    block,
    block_size,
    block->prev,
    block->prev == first_free_block ?
      " (= first free)"
        : (block->prev == free_list_head ? " (= head)" : ""),
    block->next,
    block->next == last_free_block ?
      " (= last free)"
        : (block->next == free_list_tail ? " (= tail)" : "")
  );

  if ( block_size != next_block->prev_size ) {
    (*printer)(
      source,
      true,
      "block 0x%08x: size %u != size %u (in next block 0x%08x)\n",
      block,
      block_size,
      next_block->prev_size,
      next_block
    );

    return false;
  }

  if ( !prev_used ) {
    (*printer)(
      source,
      true,
      "block 0x%08x: two consecutive blocks are free\n",
      block
    );

    return false;
  }

  if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
    (*printer)(
      source,
      true,
      "block 0x%08x: free block not in free list\n",
      block
    );

    return false;
  }

  return true;
}
static bool _Heap_Walk_check_control(
  int source,
  Heap_Walk_printer printer,
  Heap_Control *heap
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
  Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
  Heap_Block *const first_block = heap->first_block;
  Heap_Block *const last_block = heap->last_block;

  (*printer)(
    source,
    false,
    "page size %u, min block size %u\n"
      "\tarea begin 0x%08x, area end 0x%08x\n"
      "\tfirst block 0x%08x, last block 0x%08x\n"
      "\tfirst free 0x%08x, last free 0x%08x\n",
    page_size,
    min_block_size,
    heap->area_begin,
    heap->area_end,
    first_block,
    last_block,
    first_free_block,
    last_free_block
  );

  if ( page_size == 0 ) {
    (*printer)( source, true, "page size is zero\n" );

    return false;
  }

  if ( !_Addresses_Is_aligned( (void *) page_size ) ) {
    (*printer)(
      source,
      true,
      "page size %u not CPU aligned\n",
      page_size
    );

    return false;
  }

  if ( !_Heap_Is_aligned( min_block_size, page_size ) ) {
    (*printer)(
      source,
      true,
      "min block size %u not page aligned\n",
      min_block_size
    );

    return false;
  }

  if (
    !_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  ) {
    (*printer)(
      source,
      true,
      "first block 0x%08x: alloc area not page aligned\n",
      first_block
    );

    return false;
  }

  if ( !_Heap_Is_prev_used( first_block ) ) {
    (*printer)(
      source,
      true,
      "first block: HEAP_PREV_BLOCK_USED is cleared\n"
    );

    return false;
  }

  if ( _Heap_Is_free( last_block ) ) {
    (*printer)( source, true, "last block: is free\n" );

    return false;
  }

  if (
    _Heap_Block_at( last_block, _Heap_Block_size( last_block ) ) != first_block
  ) {
    (*printer)(
      source,
      true,
      "last block: next block is not the first block\n"
    );

    return false;
  }

  return _Heap_Walk_check_free_list( source, printer, heap );
}
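/*
 * Usage sketch (illustrative, not from the original sources): run a
 * consistency check over 'some_heap' with the block dump enabled.  A false
 * return value indicates a corrupted heap.
 */
static void example_check_heap( Heap_Control *some_heap )
{
  if ( !_Heap_Walk( some_heap, 0 /* source id */, true /* dump */ ) ) {
    printk( "heap is corrupt\n" );
  }
}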
bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t alloc_begin;
  Heap_Block *block;
  Heap_Block *next_block = NULL;
  uintptr_t block_size = 0;
  uintptr_t next_block_size = 0;
  bool next_is_free = false;

  /*
   * If NULL, return true so a free on NULL is considered a valid release.
   * This is a special case that could be handled by the in-heap check;
   * however, that would result in false being returned, which is wrong.
   */
  if ( alloc_begin_ptr == NULL ) {
    return true;
  }

  alloc_begin = (uintptr_t) alloc_begin_ptr;
  block = _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );

  if ( !_Heap_Is_block_in_heap( heap, block ) ) {
    return false;
  }

  _Heap_Protection_block_check( heap, block );

  block_size = _Heap_Block_size( block );
  next_block = _Heap_Block_at( block, block_size );

  if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
    return false;
  }

  _Heap_Protection_block_check( heap, next_block );

  if ( !_Heap_Is_prev_used( next_block ) ) {
    _Heap_Protection_block_error( heap, block );
    return false;
  }

  if ( !_Heap_Protection_determine_block_free( heap, block ) ) {
    return true;
  }

  next_block_size = _Heap_Block_size( next_block );
  next_is_free = next_block != heap->last_block
    && !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ) );

  if ( !_Heap_Is_prev_used( block ) ) {
    uintptr_t const prev_size = block->prev_size;
    Heap_Block *const prev_block = _Heap_Block_at( block, -prev_size );

    if ( !_Heap_Is_block_in_heap( heap, prev_block ) ) {
      _HAssert( false );
      return false;
    }

    /* As we always coalesce free blocks, the block that precedes prev_block
       must have been used. */
    if ( !_Heap_Is_prev_used( prev_block ) ) {
      _HAssert( false );
      return false;
    }

    if ( next_is_free ) {       /* coalesce both */
      uintptr_t const size = block_size + prev_size + next_block_size;

      _Heap_Free_list_remove( next_block );
      stats->free_blocks -= 1;
      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      next_block = _Heap_Block_at( prev_block, size );
      _HAssert( !_Heap_Is_prev_used( next_block ) );
      next_block->prev_size = size;
    } else {                    /* coalesce prev */
      uintptr_t const size = block_size + prev_size;

      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
      next_block->prev_size = size;
    }
  } else if ( next_is_free ) {  /* coalesce next */
    uintptr_t const size = block_size + next_block_size;

    _Heap_Free_list_replace( next_block, block );
    block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
    next_block = _Heap_Block_at( block, size );
    next_block->prev_size = size;
  } else {                      /* no coalesce */
    /* Add 'block' to the head of the free blocks list as it tends to
       produce less fragmentation than adding to the tail. */
    _Heap_Free_list_insert_after( _Heap_Free_list_head( heap ), block );
    block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
    next_block->prev_size = block_size;

    /* Statistics */
    ++stats->free_blocks;
    if ( stats->max_free_blocks < stats->free_blocks ) {
      stats->max_free_blocks = stats->free_blocks;
    }
  }

  /* Statistics */
  --stats->used_blocks;
  ++stats->frees;
  stats->free_size += block_size;

  return true;
}
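/*
 * Behavior sketch (illustrative, not from the original sources): following the
 * checks above, freeing NULL is accepted as a valid release, and a second free
 * of the same pointer is rejected because the HEAP_PREV_BLOCK_USED flag of the
 * following block was already cleared by the first release.  This assumes a
 * plain configuration in which _Heap_Protection_determine_block_free() does
 * not delay the actual release.
 */
static void example_free_edge_cases( Heap_Control *some_heap )
{
  void *p = _Heap_Allocate( some_heap, 32 );

  _HAssert( _Heap_Free( some_heap, NULL ) );  /* NULL is a valid release */

  if ( p != NULL ) {
    _HAssert( _Heap_Free( some_heap, p ) );   /* first free succeeds */
    _HAssert( !_Heap_Free( some_heap, p ) );  /* double free is detected */
  }
}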