static void
check_result(
  Heap_Control* the_heap,
  Heap_Block* the_block,
  _H_uptr_t user_addr,
  _H_uptr_t aligned_user_addr,
  uint32_t size)
{
  _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));
  _H_uptr_t const block_end = _H_p2u(the_block)
    + _Heap_Block_size(the_block) + HEAP_BLOCK_HEADER_OFFSET;
  _H_uptr_t const user_end = aligned_user_addr + size;
  _H_uptr_t const heap_start = _H_p2u(the_heap->start) + HEAP_OVERHEAD;
  _H_uptr_t const heap_end = _H_p2u(the_heap->final)
    + HEAP_BLOCK_HEADER_OFFSET;
  uint32_t const page_size = the_heap->page_size;

  _HAssert(user_addr == user_area);
  _HAssert(aligned_user_addr - user_area < page_size);
  _HAssert(aligned_user_addr >= user_area);
  _HAssert(aligned_user_addr < block_end);
  _HAssert(user_end > user_area);
  _HAssert(user_end <= block_end);
  _HAssert(aligned_user_addr >= heap_start);
  _HAssert(aligned_user_addr < heap_end);
  _HAssert(user_end > heap_start);
  _HAssert(user_end <= heap_end);
}
Example #2
static Heap_Resize_status _Heap_Resize_block_checked(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t new_alloc_size,
  uintptr_t *old_size,
  uintptr_t *new_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  uintptr_t alloc_size = block_end - alloc_begin + HEAP_ALLOC_BONUS;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );
  uintptr_t next_block_size = _Heap_Block_size( next_block );
  bool next_block_is_free = _Heap_Is_free( next_block );

  _HAssert( _Heap_Is_block_in_heap( heap, next_block ) );
  _HAssert( _Heap_Is_prev_used( next_block ) );

  *old_size = alloc_size;

  if ( next_block_is_free ) {
    block_size += next_block_size;
    alloc_size += next_block_size;
  }

  if ( new_alloc_size > alloc_size ) {
    return HEAP_RESIZE_UNSATISFIED;
  }

  if ( next_block_is_free ) {
    _Heap_Block_set_size( block, block_size );

    _Heap_Free_list_remove( next_block );

    next_block = _Heap_Block_at( block, block_size );
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;

    /* Statistics */
    --stats->free_blocks;
    stats->free_size -= next_block_size;
  }

  block = _Heap_Block_allocate( heap, block, alloc_begin, new_alloc_size );

  block_size = _Heap_Block_size( block );
  next_block = _Heap_Block_at( block, block_size );
  *new_size = (uintptr_t) next_block - alloc_begin + HEAP_ALLOC_BONUS;

  /* Statistics */
  ++stats->resizes;

  return HEAP_RESIZE_SUCCESSFUL;
}
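A caller of _Heap_Resize_block_checked() must first map the user pointer back to its block and validate it. The sketch below shows one minimal way to do that, reusing _Heap_Block_of_alloc_area() and _Heap_Is_block_in_heap() from the later examples; it assumes the status enum also provides a HEAP_RESIZE_FATAL_ERROR value for pointers outside the heap, and is not the verbatim RTEMS wrapper.

/* Sketch of a caller for _Heap_Resize_block_checked().  Assumes
   HEAP_RESIZE_FATAL_ERROR exists alongside the two status values used
   above. */
static Heap_Resize_status resize_block_sketch(
  Heap_Control *heap,
  void *alloc_begin_ptr,
  uintptr_t new_alloc_size,
  uintptr_t *old_size,
  uintptr_t *new_size
)
{
  uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
  Heap_Block *const block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );

  *old_size = 0;
  *new_size = 0;

  /* Only hand validated blocks to the checked helper. */
  if ( !_Heap_Is_block_in_heap( heap, block ) ) {
    return HEAP_RESIZE_FATAL_ERROR;
  }

  return _Heap_Resize_block_checked(
    heap, block, alloc_begin, new_alloc_size, old_size, new_size
  );
}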
Example #3
static Heap_Block *_Heap_Block_allocate_from_end(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  Heap_Block *const new_block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  uintptr_t const new_block_begin = (uintptr_t) new_block;
  uintptr_t const new_block_size = block_end - new_block_begin;

  block_end = new_block_begin;
  block_size = block_end - block_begin;

  _HAssert( block_size >= heap->min_block_size );
  _HAssert( new_block_size >= heap->min_block_size );

  /* Statistics */
  stats->free_size += block_size;

  if ( _Heap_Is_prev_used( block ) ) {
    _Heap_Free_list_insert_after( free_list_anchor, block );

    free_list_anchor = block;

    /* Statistics */
    ++stats->free_blocks;
  } else {
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_begin = (uintptr_t) block;
    block_size += prev_block_size;
  }

  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;

  new_block->prev_size = block_size;
  new_block->size_and_flag = new_block_size;

  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );

  return new_block;
}
Example #4
void _Heap_Get_free_information(
  Heap_Control        *the_heap,
  Heap_Information    *info
)
{
  Heap_Block *the_block;
  Heap_Block *const tail = _Heap_Free_list_tail(the_heap);

  info->number = 0;
  info->largest = 0;
  info->total = 0;

  for(the_block = _Heap_Free_list_first(the_heap);
      the_block != tail;
      the_block = the_block->next)
  {
    uint32_t const the_size = _Heap_Block_size(the_block);

    /* As we always coalesce free blocks, prev block must have been used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    info->number++;
    info->total += the_size;
    if ( info->largest < the_size )
        info->largest = the_size;
  }
}
Example #5
void *_Heap_Allocate(
  Heap_Control        *the_heap,
  size_t               size
)
{
  uint32_t  the_size;
  uint32_t  search_count;
  Heap_Block *the_block;
  void       *ptr = NULL;
  Heap_Statistics *const stats = &the_heap->stats;
  Heap_Block *const tail = _Heap_Tail(the_heap);

  the_size =
    _Heap_Calc_block_size(size, the_heap->page_size, the_heap->min_block_size);
  if(the_size == 0)
    return NULL;

  /* Find large enough free block. */
  for(the_block = _Heap_First(the_heap), search_count = 0;
      the_block != tail;
      the_block = the_block->next, ++search_count)
  {
    /* As we always coalesce free blocks, prev block must have been used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    /* Don't bother to mask out the HEAP_PREV_USED bit as it won't change the
       result of the comparison. */
    if(the_block->size >= the_size) {
      (void)_Heap_Block_allocate(the_heap, the_block, the_size );

      ptr = _Heap_User_area(the_block);

      stats->allocs += 1;
      stats->searches += search_count + 1;

      _HAssert(_Heap_Is_aligned_ptr(ptr, the_heap->page_size));
      break;
    }
  }

  if(stats->max_search < search_count)
    stats->max_search = search_count;

  return ptr;
}
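A hypothetical usage sketch for this allocator, assuming a heap that has already been set up with _Heap_Initialize() (shown in the last example) and released with the _Heap_Free() variant from the later examples:

/* Allocate from an already initialized heap and release the memory. */
void example_alloc_usage( Heap_Control *heap )
{
  void *p = _Heap_Allocate( heap, 64 );

  if ( p != NULL ) {
    /* ... use the 64-byte allocation ... */
    (void) _Heap_Free( heap, p );
  }
}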
Example #6
void _Heap_Get_information(
  Heap_Control            *the_heap,
  Heap_Information_block  *the_info
)
{
  Heap_Block *the_block = the_heap->first_block;
  Heap_Block *const end = the_heap->last_block;

  _HAssert(the_block->prev_size == the_heap->page_size);
  _HAssert(_Heap_Is_prev_used(the_block));

  the_info->Free.number  = 0;
  the_info->Free.total   = 0;
  the_info->Free.largest = 0;
  the_info->Used.number  = 0;
  the_info->Used.total   = 0;
  the_info->Used.largest = 0;

  while ( the_block != end ) {
    uintptr_t const     the_size = _Heap_Block_size(the_block);
    Heap_Block *const  next_block = _Heap_Block_at(the_block, the_size);
    Heap_Information  *info;

    if ( _Heap_Is_prev_used(next_block) )
      info = &the_info->Used;
    else
      info = &the_info->Free;

    info->number++;
    info->total += the_size;
    if ( info->largest < the_size )
      info->largest = the_size;

    the_block = next_block;
  }

  /*
   *  Handle the last dummy block. Don't consider this block to be
   *  "used" as the client never allocated it. Make 'Used.total' include
   *  this block's overhead though.
   */
  the_info->Used.total += HEAP_BLOCK_HEADER_SIZE;
}
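A hypothetical reporting sketch that prints the statistics gathered above; the values are cast to unsigned long since the integer types of the information fields differ between heap versions:

#include <stdio.h>

void print_heap_info( Heap_Control *heap )
{
  Heap_Information_block info;

  _Heap_Get_information( heap, &info );

  printf(
    "free: %lu blocks, %lu bytes (largest %lu)\n"
    "used: %lu blocks, %lu bytes (largest %lu)\n",
    (unsigned long) info.Free.number,
    (unsigned long) info.Free.total,
    (unsigned long) info.Free.largest,
    (unsigned long) info.Used.number,
    (unsigned long) info.Used.total,
    (unsigned long) info.Used.largest
  );
}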
Example #7
Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

  Heap_Block *free_list_anchor = NULL;

  _HAssert( alloc_area_begin <= alloc_begin );

  if ( _Heap_Is_free( block ) ) {
    free_list_anchor = block->prev;

    _Heap_Free_list_remove( block );

    /* Statistics */
    --stats->free_blocks;
    ++stats->used_blocks;
    stats->free_size -= _Heap_Block_size( block );
  } else {
    free_list_anchor = _Heap_Free_list_head( heap );
  }

  if ( alloc_area_offset < heap->page_size ) {
    alloc_size += alloc_area_offset;

    block = _Heap_Block_allocate_from_begin(
      heap,
      block,
      free_list_anchor,
      alloc_size
    );
  } else {
    block = _Heap_Block_allocate_from_end(
      heap,
      block,
      free_list_anchor,
      alloc_begin,
      alloc_size
    );
  }

  /* Statistics */
  if ( stats->min_free_size > stats->free_size ) {
    stats->min_free_size = stats->free_size;
  }

  return block;
}
Example #8
/*
 * Allocate a block of size 'alloc_size' from 'the_block' belonging to
 * 'the_heap'. Split 'the_block' if possible; otherwise allocate it entirely.
 * When split, make the upper part used and leave the lower part free.
 * Return the block allocated.
 *
 * NOTE: this is similar to _Heap_Block_allocate(), except it makes a different
 * part of the split block used and returns the address of the block instead of
 * its size. We need this variant for _Heap_Allocate_aligned() as we can't
 * allow the user pointer to be too far from the beginning of the block, so
 * that we can recover the start-of-block address from the user pointer without
 * additional information stored in the heap.
 */
static
Heap_Block *block_allocate(
  Heap_Control  *the_heap,
  Heap_Block    *the_block,
  uint32_t      alloc_size)
{
  Heap_Statistics *const stats = &the_heap->stats;
  uint32_t const block_size = _Heap_Block_size(the_block);
  uint32_t const the_rest = block_size - alloc_size;

  _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
  _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
  _HAssert(alloc_size <= block_size);
  _HAssert(_Heap_Is_prev_used(the_block));

  if(the_rest >= the_heap->min_block_size) {
    /* Split the block so that lower part is still free, and upper part
       becomes used. */
    the_block->size = the_rest | HEAP_PREV_USED;
    the_block = _Heap_Block_at(the_block, the_rest);
    the_block->prev_size = the_rest;
    the_block->size = alloc_size;
  }
  else {
    /* Don't split the block as remainder is either zero or too small to be
       used as a separate free block. Change 'alloc_size' to the size of the
       block and remove the block from the list of free blocks. */
    _Heap_Block_remove(the_block);
    alloc_size = block_size;
    stats->free_blocks -= 1;
  }
  /* Mark the block as used (in the next block). */
  _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
  /* Update statistics */
  stats->free_size -= alloc_size;
  if(stats->min_free_size > stats->free_size)
    stats->min_free_size = stats->free_size;
  stats->used_blocks += 1;
  return the_block;
}
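A stand-alone illustration of the split arithmetic above, with made-up numbers (page_size = min_block_size = 16); it compiles and runs on its own and does not touch a real heap:

#include <assert.h>
#include <stdint.h>

int main( void )
{
  uint32_t const min_block_size = 16;
  uint32_t const block_size = 96;   /* free block being allocated from */
  uint32_t const alloc_size = 32;   /* page-aligned request */
  uint32_t const the_rest = block_size - alloc_size;

  /* the_rest = 64 >= min_block_size, so the block is split: the lower
     64 bytes remain free and the upper 32 bytes become the used block. */
  assert( the_rest >= min_block_size );
  assert( the_rest + alloc_size == block_size );

  return 0;
}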
Example #9
File: heapfree.c  Project: epicsdeb/rtems
bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{
    Heap_Statistics *const stats = &heap->stats;
    uintptr_t alloc_begin = (uintptr_t) alloc_begin_ptr;
    Heap_Block *block =
        _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
    Heap_Block *next_block = NULL;
    uintptr_t block_size = 0;
    uintptr_t next_block_size = 0;
    bool next_is_free = false;

    if ( !_Heap_Is_block_in_heap( heap, block ) ) {
        return false;
    }

    block_size = _Heap_Block_size( block );
    next_block = _Heap_Block_at( block, block_size );

    if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
        _HAssert( false );
        return false;
    }

    if ( !_Heap_Is_prev_used( next_block ) ) {
        _HAssert( false );
        return false;
    }

    next_block_size = _Heap_Block_size( next_block );
    next_is_free = next_block != heap->last_block
                   && !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));

    if ( !_Heap_Is_prev_used( block ) ) {
        uintptr_t const prev_size = block->prev_size;
        Heap_Block * const prev_block = _Heap_Block_at( block, -prev_size );

        if ( !_Heap_Is_block_in_heap( heap, prev_block ) ) {
            _HAssert( false );
            return( false );
        }

        /* As we always coalesce free blocks, the block that precedes
           prev_block must have been used. */
        if ( !_Heap_Is_prev_used ( prev_block) ) {
            _HAssert( false );
            return( false );
        }

        if ( next_is_free ) {       /* coalesce both */
            uintptr_t const size = block_size + prev_size + next_block_size;
            _Heap_Free_list_remove( next_block );
            stats->free_blocks -= 1;
            prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
            next_block = _Heap_Block_at( prev_block, size );
            _HAssert(!_Heap_Is_prev_used( next_block));
            next_block->prev_size = size;
        } else {                      /* coalesce prev */
            uintptr_t const size = block_size + prev_size;
            prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
            next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
            next_block->prev_size = size;
        }
    } else if ( next_is_free ) {    /* coalesce next */
        uintptr_t const size = block_size + next_block_size;
        _Heap_Free_list_replace( next_block, block );
        block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
        next_block  = _Heap_Block_at( block, size );
        next_block->prev_size = size;
    } else {                        /* no coalesce */
        /* Add 'block' to the head of the free blocks list as it tends to
           produce less fragmentation than adding to the tail. */
        _Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
        block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
        next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
        next_block->prev_size = block_size;

        /* Statistics */
        ++stats->free_blocks;
        if ( stats->max_free_blocks < stats->free_blocks ) {
            stats->max_free_blocks = stats->free_blocks;
        }
    }

    /* Statistics */
    --stats->used_blocks;
    ++stats->frees;
    stats->free_size += block_size;

    return( true );
}
Example #10
void *_Heap_Allocate_aligned(
  Heap_Control *the_heap,
  size_t        size,
  uint32_t      alignment
)
{
  uint32_t search_count;
  Heap_Block *the_block;

  void *user_ptr = NULL;
  uint32_t  const page_size = the_heap->page_size;
  Heap_Statistics *const stats = &the_heap->stats;
  Heap_Block *const tail = _Heap_Tail(the_heap);

  uint32_t const end_to_user_offs = size - HEAP_BLOCK_HEADER_OFFSET;

  uint32_t const the_size =
    _Heap_Calc_block_size(size, page_size, the_heap->min_block_size);

  if(the_size == 0)
    return NULL;

  if(alignment == 0)
    alignment = CPU_ALIGNMENT;

  /* Find large enough free block that satisfies the alignment requirements. */

  for(the_block = _Heap_First(the_heap), search_count = 0;
      the_block != tail;
      the_block = the_block->next, ++search_count)
  {
    uint32_t const block_size = _Heap_Block_size(the_block);

    /* As we always coalesce free blocks, prev block must have been used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    if(block_size >= the_size) { /* the_block is large enough. */

      _H_uptr_t user_addr;
      _H_uptr_t aligned_user_addr;
      _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));

      /* Calculate 'aligned_user_addr' that will become the user pointer we
         return. It should be at least 'end_to_user_offs' bytes less than
         'block_end' and should be aligned on the 'alignment' boundary.
         Calculations are from 'block_end' as we are going to split the free
         block so that the upper part of the block becomes the used block. */
      _H_uptr_t const block_end = _H_p2u(the_block) + block_size;
      aligned_user_addr = block_end - end_to_user_offs;
      _Heap_Align_down_uptr(&aligned_user_addr, alignment);

      /* 'user_addr' is 'aligned_user_addr' further aligned down to the
         'page_size' boundary. We need it as blocks' user areas should begin
         only at 'page_size' aligned addresses. */
      user_addr = aligned_user_addr;
      _Heap_Align_down_uptr(&user_addr, page_size);

      /* Make sure the calculated 'user_addr' didn't fall outside 'the_block'. */
      if(user_addr >= user_area) {

        /* The block seems to be acceptable. Check whether the remainder of
           'the_block' is less than 'min_block_size', in which case 'the_block'
           won't actually be split at the address we assume. */
        if(user_addr - user_area < the_heap->min_block_size) {

          /* The block won't be split, so 'user_addr' will be equal to the
             'user_area'. */
          user_addr = user_area;

          /* We can't allow the distance between 'user_addr' and
           'aligned_user_addr' to be outside of [0,page_size) range. If we do,
           we will need to store this distance somewhere to be able to
           resurrect the block address from the user pointer. (Having the
           distance within [0,page_size) range allows resurrection by
           aligning user pointer down to the nearest 'page_size' boundary.) */
          if(aligned_user_addr - user_addr >= page_size) {

            /* The user pointer will be too far from 'user_addr'. See if we
               can make 'aligned_user_addr' close enough to 'user_addr'. */
            aligned_user_addr = user_addr;
            _Heap_Align_up_uptr(&aligned_user_addr, alignment);
            if(aligned_user_addr - user_addr >= page_size) {
              /* No, we can't use the block */
              aligned_user_addr = 0;
            }
          }
        }

        if(aligned_user_addr) {

          /* The block is indeed acceptable: calculate the size of the block
             to be allocated and perform allocation. */
          uint32_t const alloc_size =
            block_end - user_addr + HEAP_BLOCK_USER_OFFSET;

          _HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));

          the_block = block_allocate(the_heap, the_block, alloc_size);

          stats->searches += search_count + 1;
          stats->allocs += 1;

          check_result(the_heap, the_block, user_addr,
            aligned_user_addr, size);

          user_ptr = (void*)aligned_user_addr;
          break;
        }
      }
    }
  }

  if(stats->max_search < search_count)
    stats->max_search = search_count;

  return user_ptr;
}
Example #11
static void _Heap_Check_allocation(
  const Heap_Control *heap,
  const Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const page_size = heap->page_size;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t const block_size = _Heap_Block_size( block );
  uintptr_t const block_end = block_begin + block_size;

  uintptr_t const alloc_end = alloc_begin + alloc_size;

  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

  _HAssert( block_size >= min_block_size );
  _HAssert( block_begin < block_end );
  _HAssert(
    _Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
  );
  _HAssert(
    _Heap_Is_aligned( block_size, page_size )
  );

  _HAssert( alloc_end <= block_end + HEAP_ALLOC_BONUS );
  _HAssert( alloc_area_begin == block_begin + HEAP_BLOCK_HEADER_SIZE );
  _HAssert( alloc_area_offset < page_size );

  _HAssert( _Heap_Is_aligned( alloc_area_begin, page_size ) );
  if ( alignment == 0 ) {
    _HAssert( alloc_begin == alloc_area_begin );
  } else {
    _HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
  }

  if ( boundary != 0 ) {
    uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

    _HAssert( alloc_size <= boundary );
    _HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
  }
}
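A stand-alone illustration of the boundary assertion above: an allocation must not straddle a 'boundary' line. The helper below is hypothetical, not part of the heap API, and mimics the align-down arithmetic with a modulo:

#include <stdint.h>
#include <stdio.h>

static int crosses_boundary(
  uintptr_t alloc_begin,
  uintptr_t alloc_size,
  uintptr_t boundary
)
{
  uintptr_t const alloc_end = alloc_begin + alloc_size;
  uintptr_t const boundary_line = alloc_end - ( alloc_end % boundary );

  return !( boundary_line <= alloc_begin || alloc_end <= boundary_line );
}

int main( void )
{
  printf( "%d\n", crosses_boundary( 192, 64, 256 ) ); /* 0: ends exactly on a line */
  printf( "%d\n", crosses_boundary( 200, 64, 256 ) ); /* 1: straddles the 256 line */
  return 0;
}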
Example #12
void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
    - HEAP_ALLOC_BONUS;
  uintptr_t const page_size = heap->page_size;
  Heap_Block *block = NULL;
  uintptr_t alloc_begin = 0;
  uint32_t search_count = 0;
  bool search_again = false;

  if ( block_size_floor < alloc_size ) {
    /* Integer overflow occurred */
    return NULL;
  }

  if ( boundary != 0 ) {
    if ( boundary < alloc_size ) {
      return NULL;
    }

    if ( alignment == 0 ) {
      alignment = page_size;
    }
  }

  do {
    Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );

    block = _Heap_Free_list_first( heap );
    while ( block != free_list_tail ) {
      _HAssert( _Heap_Is_prev_used( block ) );

      _Heap_Protection_block_check( heap, block );

      /*
       * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
       * field.  Thus the value is about one unit larger than the real block
       * size.  The greater than operator takes this into account.
       */
      if ( block->size_and_flag > block_size_floor ) {
        if ( alignment == 0 ) {
          alloc_begin = _Heap_Alloc_area_of_block( block );
        } else {
          alloc_begin = _Heap_Check_block(
            heap,
            block,
            alloc_size,
            alignment,
            boundary
          );
        }
      }

      /* Statistics */
      ++search_count;

      if ( alloc_begin != 0 ) {
        break;
      }

      block = block->next;
    }

    search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
  } while ( search_again );

  if ( alloc_begin != 0 ) {
    /* Statistics */
    ++stats->allocs;
    stats->searches += search_count;

    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );

    _Heap_Check_allocation(
      heap,
      block,
      alloc_begin,
      alloc_size,
      alignment,
      boundary
    );
  }

  /* Statistics */
  if ( stats->max_search < search_count ) {
    stats->max_search = search_count;
  }

  return (void *) alloc_begin;
}
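The comparison trick in the search loop above can be checked in isolation. The sketch below assumes HEAP_PREV_BLOCK_USED is the low bit (0x1): since block sizes are page aligned, setting that bit makes 'size_and_flag' exactly one larger than the real size, so '>' against the floor is equivalent to '>=' on the unmasked size:

#include <assert.h>
#include <stdint.h>

int main( void )
{
  uintptr_t const prev_used_flag = 0x1;   /* assumed value of the flag */
  uintptr_t const block_size = 128;       /* page aligned: bit 0 clear */
  uintptr_t const size_and_flag = block_size | prev_used_flag;  /* 129 */
  uintptr_t const block_size_floor = 128;

  /* size_and_flag > floor  <=>  block_size >= floor, without masking */
  assert( ( size_and_flag > block_size_floor ) ==
          ( block_size >= block_size_floor ) );

  return 0;
}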
Example #13
File: heapfree.c  Project: chch1028/rtems
bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t alloc_begin;
  Heap_Block *block;
  Heap_Block *next_block = NULL;
  uintptr_t block_size = 0;
  uintptr_t next_block_size = 0;
  bool next_is_free = false;

  /*
   * If the pointer is NULL, return true so that a free on NULL is considered
   * a valid release. This is a special case that could be handled by the
   * in-heap check; however, that would result in false being returned, which
   * is wrong.
   */
  if ( alloc_begin_ptr == NULL ) {
    return true;
  }

  alloc_begin = (uintptr_t) alloc_begin_ptr;
  block = _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );

  if ( !_Heap_Is_block_in_heap( heap, block ) ) {
    return false;
  }

  _Heap_Protection_block_check( heap, block );

  block_size = _Heap_Block_size( block );
  next_block = _Heap_Block_at( block, block_size );

  if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
    return false;
  }

  _Heap_Protection_block_check( heap, next_block );

  if ( !_Heap_Is_prev_used( next_block ) ) {
    _Heap_Protection_block_error( heap, block );
    return false;
  }

  if ( !_Heap_Protection_determine_block_free( heap, block ) ) {
    return true;
  }

  next_block_size = _Heap_Block_size( next_block );
  next_is_free = next_block != heap->last_block
    && !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));

  if ( !_Heap_Is_prev_used( block ) ) {
    uintptr_t const prev_size = block->prev_size;
    Heap_Block * const prev_block = _Heap_Block_at( block, -prev_size );

    if ( !_Heap_Is_block_in_heap( heap, prev_block ) ) {
      _HAssert( false );
      return( false );
    }

    /* As we always coalesce free blocks, the block that precedes prev_block
       must have been used. */
    if ( !_Heap_Is_prev_used ( prev_block) ) {
      _HAssert( false );
      return( false );
    }

    if ( next_is_free ) {       /* coalesce both */
      uintptr_t const size = block_size + prev_size + next_block_size;
      _Heap_Free_list_remove( next_block );
      stats->free_blocks -= 1;
      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      next_block = _Heap_Block_at( prev_block, size );
      _HAssert(!_Heap_Is_prev_used( next_block));
      next_block->prev_size = size;
    } else {                      /* coalesce prev */
      uintptr_t const size = block_size + prev_size;
      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
      next_block->prev_size = size;
    }
  } else if ( next_is_free ) {    /* coalesce next */
    uintptr_t const size = block_size + next_block_size;
    _Heap_Free_list_replace( next_block, block );
    block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
    next_block  = _Heap_Block_at( block, size );
    next_block->prev_size = size;
  } else {                        /* no coalesce */
    /* Add 'block' to the head of the free blocks list as it tends to
       produce less fragmentation than adding to the tail. */
    _Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
    block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
    next_block->prev_size = block_size;

    /* Statistics */
    ++stats->free_blocks;
    if ( stats->max_free_blocks < stats->free_blocks ) {
      stats->max_free_blocks = stats->free_blocks;
    }
  }

  /* Statistics */
  --stats->used_blocks;
  ++stats->frees;
  stats->free_size += block_size;

  return( true );
}
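The NULL contract documented at the top of this variant can be exercised directly; a hypothetical check, not part of the heap sources:

/* Freeing NULL succeeds without touching any heap statistics. */
void check_free_null( Heap_Control *heap )
{
  bool const ok = _Heap_Free( heap, NULL );

  _HAssert( ok );
}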
Example #14
void _Heap_Block_split(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;

  uintptr_t const block_size = _Heap_Block_size( block );

  uintptr_t const used_size =
    _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );

  uintptr_t const free_size = block_size + HEAP_BLOCK_SIZE_OFFSET - used_size;
  uintptr_t const free_size_limit = min_block_size + HEAP_BLOCK_SIZE_OFFSET;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );

  _HAssert( used_size <= block_size + HEAP_BLOCK_SIZE_OFFSET );
  _HAssert( used_size + free_size == block_size + HEAP_BLOCK_SIZE_OFFSET );

  if ( free_size >= free_size_limit ) {
    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
    uintptr_t free_block_size = block_size - used_block_size;

    _HAssert( used_block_size + free_block_size == block_size );

    _Heap_Block_set_size( block, used_block_size );

    /* Statistics */
    stats->free_size += free_block_size;

    if ( _Heap_Is_used( next_block ) ) {
      _Heap_Free_list_insert_after( free_list_anchor, free_block );

      /* Statistics */
      ++stats->free_blocks;
    } else {
      uintptr_t const next_block_size = _Heap_Block_size( next_block );

      _Heap_Free_list_replace( next_block, free_block );

      free_block_size += next_block_size;

      next_block = _Heap_Block_at( free_block, free_block_size );
    }

    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;

    next_block->prev_size = free_block_size;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
  } else {
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
  }
}
Example #15
uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *heap_area_begin_ptr,
  uintptr_t heap_area_size,
  uintptr_t page_size
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t alloc_area_size = 0;
  uintptr_t first_block_begin = 0;
  uintptr_t first_block_size = 0;
  uintptr_t min_block_size = 0;
  uintptr_t overhead = 0;
  Heap_Block *first_block = NULL;
  Heap_Block *last_block = NULL;

  if ( page_size == 0 ) {
    page_size = CPU_ALIGNMENT;
  } else {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );

    if ( page_size < CPU_ALIGNMENT ) {
      /* Integer overflow */
      return 0;
    }
  }
  min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );

  alloc_area_begin = _Heap_Align_up( alloc_area_begin, page_size );
  first_block_begin = alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
  overhead = HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
  first_block_size = heap_area_size - overhead;
  first_block_size = _Heap_Align_down ( first_block_size, page_size );
  alloc_area_size = first_block_size - HEAP_BLOCK_HEADER_SIZE;

  if (
    heap_area_end < heap_area_begin
      || heap_area_size <= overhead
      || first_block_size < min_block_size
  ) {
    /* Invalid area or area too small */
    return 0;
  }

  /* First block */
  first_block = (Heap_Block *) first_block_begin;
  first_block->prev_size = page_size;
  first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
  first_block->next = _Heap_Free_list_tail( heap );
  first_block->prev = _Heap_Free_list_head( heap );

  /*
   * Last block.
   *
   * The next block of the last block is the first block.  Since the first
   * block indicates that the previous block is used, this ensures that the
   * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
   * functions.
   */
  last_block = _Heap_Block_at( first_block, first_block_size );
  last_block->prev_size = first_block_size;
  last_block->size_and_flag = first_block_begin - (uintptr_t) last_block;

  /* Heap control */
  heap->page_size = page_size;
  heap->min_block_size = min_block_size;
  heap->area_begin = heap_area_begin;
  heap->area_end = heap_area_end;
  heap->first_block = first_block;
  heap->last_block = last_block;
  _Heap_Free_list_head( heap )->next = first_block;
  _Heap_Free_list_tail( heap )->prev = first_block;

  /* Statistics */
  stats->size = first_block_size;
  stats->free_size = first_block_size;
  stats->min_free_size = first_block_size;
  stats->free_blocks = 1;
  stats->max_free_blocks = 1;
  stats->used_blocks = 0;
  stats->max_search = 0;
  stats->allocs = 0;
  stats->searches = 0;
  stats->frees = 0;
  stats->resizes = 0;
  stats->instance = instance++;

  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
  );

  return alloc_area_size;
}
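A hypothetical initialization sketch: carve a heap out of a static buffer. Passing page_size = 0 selects CPU_ALIGNMENT, as handled above; the return value is the usable allocation area size, or 0 on failure. The buffer name and wrapper are illustrative only:

static uint8_t heap_area[ 4096 ];
static Heap_Control example_heap;

bool example_heap_setup( void )
{
  uintptr_t const avail =
    _Heap_Initialize( &example_heap, heap_area, sizeof( heap_area ), 0 );

  /* 0 means the area was invalid or too small for even one block */
  return avail != 0;
}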