Example #1
char *static_allocator_allocate(StaticAllocator *sa, zend_uint size)
{
	char *retval;

	/* Fast path: try to satisfy the request from the current block. */
	retval = block_allocate(&sa->Blocks[sa->current_block], size);
	if (retval) {
		return retval;
	}
	/* Current block is exhausted: grow the block array by one entry.
	   erealloc() takes its size in bytes, hence the sizeof(Block) factor. */
	sa->Blocks = (Block *) erealloc(sa->Blocks, ++sa->num_blocks * sizeof(Block));
	sa->current_block++;
	/* Make the new block large enough for this request. */
	block_init(&sa->Blocks[sa->current_block], (size > ALLOCATOR_BLOCK_SIZE) ? size : ALLOCATOR_BLOCK_SIZE);
	retval = block_allocate(&sa->Blocks[sa->current_block], size);
	return retval;
}
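The helpers block_init() and block_allocate() are not shown above. A minimal sketch of the bump-pointer scheme this example assumes follows; the Block layout and the block_allocate() body are illustrative guesses, not the actual definitions from Zend's zend_static_allocator.h.

typedef struct _Block {
	char *pos;	/* next free byte in the block */
	char *end;	/* one past the last usable byte */
} Block;

static char *block_allocate(Block *block, zend_uint size)
{
	char *retval = block->pos;

	/* Bump-pointer allocation: fail when the block cannot hold 'size'
	   more bytes, so the caller can grow the pool. */
	if (block->pos + size > block->end) {
		return NULL;
	}
	block->pos += size;
	return retval;
}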
Example #2
uint32_t nrf_mem_reserve(uint8_t ** pp_buffer, uint32_t * p_size)
{
    VERIFY_MODULE_INITIALIZED();
    NULL_PARAM_CHECK(pp_buffer);
    NULL_PARAM_CHECK(p_size);

    const uint32_t requested_size = (*p_size);

    VERIFY_REQUESTED_SIZE(requested_size);

    NRF_LOG_DEBUG("[MM]: >> nrf_mem_reserve, size 0x%04lX.\r\n", requested_size);

    MM_MUTEX_LOCK();

    const uint32_t block_cat    = get_block_cat(requested_size, TOTAL_BLOCK_COUNT);
    uint32_t       block_index  = m_block_start[block_cat];
    uint32_t       memory_index = m_block_mem_start[block_cat];
    uint32_t       err_code     = (NRF_ERROR_NO_MEM | MEMORY_MANAGER_ERR_BASE);

    NRF_LOG_DEBUG("[MM]: Start index for the pool = 0x%08lX, total block count 0x%08X\r\n",
                  block_index,
                  TOTAL_BLOCK_COUNT);

    // Scan the pool linearly from the first block of the matching size
    // category, looking for a free block.
    for (; block_index < TOTAL_BLOCK_COUNT; block_index++)
    {
        uint32_t block_size = get_block_size(block_index);

        if (is_block_free(block_index))
        {
            NRF_LOG_DEBUG("[MM]: Reserving block 0x%08lX\r\n", block_index);

            // Search succeeded, found free block.
            err_code     = NRF_SUCCESS;

            // Allocate block.
            block_allocate(block_index);

            (*pp_buffer) = &m_memory[memory_index];
            (*p_size)    = block_size;

#ifdef MEM_MANAGER_ENABLE_DIAGNOSTICS
            // Diagnostics builds also track the smallest and largest request
            // sizes; p_min_size and p_max_size are defined by that build
            // configuration (not shown here).
            (*p_min_size) = MIN((*p_min_size), requested_size);
            (*p_max_size) = MAX((*p_max_size), requested_size);
#endif // MEM_MANAGER_ENABLE_DIAGNOSTICS

            break;
        }
        // Block is in use; advance to the start of the next block's memory.
        memory_index += block_size;
    }
    if (err_code != NRF_SUCCESS)
    {
        NRF_LOG_DEBUG("[MM]: Memory reservation result %d, memory %p, size %d!",
                       err_code,
                       (uint32_t)(*pp_buffer),
                       (*p_size));

#ifdef MEM_MANAGER_ENABLE_DIAGNOSTICS
        nrf_mem_diagnose();
#endif // MEM_MANAGER_ENABLE_DIAGNOSTICS
    }

    MM_MUTEX_UNLOCK();

    NRF_LOG_DEBUG("[MM]: << nrf_mem_reserve %p, result 0x%08lX.\r\n",
                  (uint32_t)(*pp_buffer), err_code);

    return err_code;
}
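A brief usage sketch for the API above, assuming the standard nRF5 SDK mem_manager entry points nrf_mem_init(), nrf_mem_reserve() and nrf_free():

#include "mem_manager.h"

static void example_reserve(void)
{
    uint8_t  * p_buffer = NULL;
    uint32_t   size     = 64;   /* in: requested size; out: reserved block size */

    if (nrf_mem_init() != NRF_SUCCESS)
    {
        return;
    }

    if (nrf_mem_reserve(&p_buffer, &size) == NRF_SUCCESS)
    {
        /* 'size' now reports the reserved block's size, which may be
           larger than the 64 bytes requested. */
        nrf_free(p_buffer);
    }
}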
Example #3
void *_Heap_Allocate_aligned(
  Heap_Control *the_heap,
  size_t        size,
  uint32_t      alignment
)
{
  uint32_t search_count;
  Heap_Block *the_block;

  void *user_ptr = NULL;
  uint32_t  const page_size = the_heap->page_size;
  Heap_Statistics *const stats = &the_heap->stats;
  Heap_Block *const tail = _Heap_Tail(the_heap);

  uint32_t const end_to_user_offs = size - HEAP_BLOCK_HEADER_OFFSET;

  uint32_t const the_size =
    _Heap_Calc_block_size(size, page_size, the_heap->min_block_size);

  if(the_size == 0)
    return NULL;

  if(alignment == 0)
    alignment = CPU_ALIGNMENT;

  /* Find large enough free block that satisfies the alignment requirements. */

  for(the_block = _Heap_First(the_heap), search_count = 0;
      the_block != tail;
      the_block = the_block->next, ++search_count)
  {
    uint32_t const block_size = _Heap_Block_size(the_block);

    /* As we always coalesce free blocks, prev block must have been used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    if(block_size >= the_size) { /* the_block is large enough. */

      _H_uptr_t user_addr;
      _H_uptr_t aligned_user_addr;
      _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));

      /* Calculate 'aligned_user_addr' that will become the user pointer we
         return. It should be at least 'end_to_user_offs' bytes less than
         'block_end' and should be aligned on an 'alignment' boundary.
         Calculations start from 'block_end' because we are going to split
         the free block so that its upper part becomes the used block. */
      _H_uptr_t const block_end = _H_p2u(the_block) + block_size;
      aligned_user_addr = block_end - end_to_user_offs;
      _Heap_Align_down_uptr(&aligned_user_addr, alignment);

      /* 'user_addr' is 'aligned_user_addr' aligned further down to the
         'page_size' boundary. We need it because blocks' user areas may
         begin only at 'page_size'-aligned addresses. */
      user_addr = aligned_user_addr;
      _Heap_Align_down_uptr(&user_addr, page_size);

      /* Make sure 'user_addr' calculated didn't run out of 'the_block'. */
      if(user_addr >= user_area) {

        /* The block seems to be acceptable. Check if the remainder of
           'the_block' is less than 'min_block_size' so that 'the_block' won't
           actually be split at the address we assume. */
        if(user_addr - user_area < the_heap->min_block_size) {

          /* The block won't be split, so 'user_addr' will be equal to the
             'user_area'. */
          user_addr = user_area;

          /* We can't allow the distance between 'user_addr' and
           'aligned_user_addr' to be outside of [0,page_size) range. If we do,
           we will need to store this distance somewhere to be able to
           resurrect the block address from the user pointer. (Having the
           distance within [0,page_size) range allows resurrection by
           aligning user pointer down to the nearest 'page_size' boundary.) */
          if(aligned_user_addr - user_addr >= page_size) {

            /* The user pointer will be too far from 'user_addr'. See if we
               can make 'aligned_user_addr' to be close enough to the
               'user_addr'. */
            aligned_user_addr = user_addr;
            _Heap_Align_up_uptr(&aligned_user_addr, alignment);
            if(aligned_user_addr - user_addr >= page_size) {
              /* No, we can't use the block */
              aligned_user_addr = 0;
            }
          }
        }

        if(aligned_user_addr) {

          /* The block is indeed acceptable: calculate the size of the block
             to be allocated and perform allocation. */
          uint32_t const alloc_size =
            block_end - user_addr + HEAP_BLOCK_USER_OFFSET;

          _HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));

          the_block = block_allocate(the_heap, the_block, alloc_size);

          stats->searches += search_count + 1;
          stats->allocs += 1;

          check_result(the_heap, the_block, user_addr,
            aligned_user_addr, size);

          user_ptr = (void*)aligned_user_addr;
          break;
        }
      }
    }
  }

  if(stats->max_search < search_count)
    stats->max_search = search_count;

  return user_ptr;
}
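The [0, page_size) constraint maintained above is what lets the deallocation path recover a block's user area from the pointer handed to the application, simply by aligning that pointer down. An illustrative sketch of the recovery step, not the actual RTEMS routine:

static _H_uptr_t heap_recover_user_addr(void *ptr, uint32_t page_size)
{
  _H_uptr_t addr = _H_p2u(ptr);

  /* Align down to the nearest 'page_size' boundary. Because the allocator
     guaranteed 0 <= aligned_user_addr - user_addr < page_size, this lands
     exactly on the block's user area. */
  return addr - (addr % page_size);
}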