Example no. 1
/**
 *  Collect empty pool chunks
 */
void
jmem_pools_collect_empty (void)
{
  jmem_pools_chunk_t *chunk_p = JERRY_CONTEXT (jmem_free_8_byte_chunk_p);
  JERRY_CONTEXT (jmem_free_8_byte_chunk_p) = NULL;

  while (chunk_p)
  {
    VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
    jmem_pools_chunk_t *const next_p = chunk_p->next_p;
    VALGRIND_NOACCESS_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));

    jmem_heap_free_block (chunk_p, 8);
    JMEM_POOLS_STAT_DEALLOC ();
    chunk_p = next_p;
  }

#ifdef JERRY_CPOINTER_32_BIT
  chunk_p = JERRY_CONTEXT (jmem_free_16_byte_chunk_p);
  JERRY_CONTEXT (jmem_free_16_byte_chunk_p) = NULL;

  while (chunk_p)
  {
    VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
    jmem_pools_chunk_t *const next_p = chunk_p->next_p;
    VALGRIND_NOACCESS_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));

    jmem_heap_free_block (chunk_p, 16);
    JMEM_POOLS_STAT_DEALLOC ();
    chunk_p = next_p;
  }
#endif /* JERRY_CPOINTER_32_BIT */
} /* jmem_pools_collect_empty */
Example no. 2
/**
 * Free the chunk
 */
inline void __attr_hot___ __attr_always_inline___
jmem_pools_free (void *chunk_p, /**< pointer to the chunk */
                 size_t size) /**< size of the chunk */
{
  JERRY_ASSERT (chunk_p != NULL);

  jmem_pools_chunk_t *const chunk_to_free_p = (jmem_pools_chunk_t *) chunk_p;

  VALGRIND_DEFINED_SPACE (chunk_to_free_p, size);

  if (size <= 8)
  {
    chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_8_byte_chunk_p);
    JERRY_CONTEXT (jmem_free_8_byte_chunk_p) = chunk_to_free_p;
  }
  else
  {
#ifdef JERRY_CPOINTER_32_BIT
    JERRY_ASSERT (size <= 16);

    chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_16_byte_chunk_p);
    JERRY_CONTEXT (jmem_free_16_byte_chunk_p) = chunk_to_free_p;
#else /* !JERRY_CPOINTER_32_BIT */
    JERRY_UNREACHABLE ();
#endif /* JERRY_CPOINTER_32_BIT */
  }

  VALGRIND_NOACCESS_SPACE (chunk_to_free_p, size);

  JMEM_POOLS_STAT_FREE_POOL ();
} /* jmem_pools_free */
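The routine above is, at its core, an intrusive LIFO push: the first word of the freed chunk is reused to store the link, and the chunk becomes the new head of the per-size free list. Below is a minimal standalone sketch of that pattern; the names (pool_chunk_t, pool_push, pool_pop) and the single free list are assumptions of the sketch, not part of the examples above.

#include <assert.h>
#include <stddef.h>

/* Hypothetical chunk type: a free chunk stores the link in its first word. */
typedef struct pool_chunk
{
  struct pool_chunk *next_p;
} pool_chunk_t;

static pool_chunk_t *free_list_p = NULL; /* head of the free list */

/* Push a freed chunk onto the list (the next_p/head update mirrors the one above). */
static void
pool_push (void *chunk_p)
{
  assert (chunk_p != NULL);

  pool_chunk_t *chunk = (pool_chunk_t *) chunk_p;
  chunk->next_p = free_list_p;
  free_list_p = chunk;
}

/* Pop a chunk for reuse, or return NULL if the list is empty. */
static void *
pool_pop (void)
{
  pool_chunk_t *chunk = free_list_p;

  if (chunk != NULL)
  {
    free_list_p = chunk->next_p;
  }

  return chunk;
}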
Example no. 3
/**
 * Check pool state consistency
 */
static void
mem_check_pool (mem_pool_state_t __attr_unused___ *pool_p) /**< pool (unused #ifdef JERRY_DISABLE_HEAVY_DEBUG) */
{
#ifndef JERRY_DISABLE_HEAVY_DEBUG
  JERRY_ASSERT (pool_p->free_chunks_number <= MEM_POOL_CHUNKS_NUMBER);

  size_t met_free_chunks_number = 0;
  mem_pool_chunk_index_t chunk_index = pool_p->first_free_chunk;

  while (chunk_index != MEM_POOL_CHUNKS_NUMBER)
  {
    uint8_t *chunk_p = MEM_POOL_CHUNK_ADDRESS (pool_p, chunk_index);
    mem_pool_chunk_index_t *next_free_chunk_index_p = (mem_pool_chunk_index_t*) chunk_p;

    met_free_chunks_number++;

    VALGRIND_DEFINED_SPACE (next_free_chunk_index_p, MEM_POOL_CHUNK_SIZE);

    chunk_index = *next_free_chunk_index_p;

    VALGRIND_NOACCESS_SPACE (next_free_chunk_index_p, MEM_POOL_CHUNK_SIZE);
  }

  JERRY_ASSERT (met_free_chunks_number == pool_p->free_chunks_number);
#else /* !JERRY_DISABLE_HEAVY_DEBUG */
  (void) pool_p;
#endif /* JERRY_DISABLE_HEAVY_DEBUG */
} /* mem_check_pool */
Example no. 4
/**
 * Startup initialization of heap
 */
void
jmem_heap_init (void)
{
#ifndef JERRY_CPOINTER_32_BIT
  JERRY_STATIC_ASSERT (((UINT16_MAX + 1) << JMEM_ALIGNMENT_LOG) >= JMEM_HEAP_SIZE,
                       maximum_heap_size_for_16_bit_compressed_pointers_is_512K);
#endif /* !JERRY_CPOINTER_32_BIT */

  JERRY_ASSERT ((uintptr_t) JERRY_HEAP_CONTEXT (area) % JMEM_ALIGNMENT == 0);

  JERRY_CONTEXT (jmem_heap_limit) = CONFIG_MEM_HEAP_DESIRED_LIMIT;

  jmem_heap_free_t *const region_p = (jmem_heap_free_t *) JERRY_HEAP_CONTEXT (area);

  region_p->size = JMEM_HEAP_AREA_SIZE;
  region_p->next_offset = JMEM_HEAP_END_OF_LIST;

  JERRY_HEAP_CONTEXT (first).size = 0;
  JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);

  JERRY_CONTEXT (jmem_heap_list_skip_p) = &JERRY_HEAP_CONTEXT (first);

  VALGRIND_NOACCESS_SPACE (JERRY_HEAP_CONTEXT (area), JMEM_HEAP_AREA_SIZE);

  JMEM_HEAP_STAT_INIT ();
} /* jmem_heap_init */
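Both heap initializers in this set share the same layout: the whole area starts as a single free region, and the free list is anchored by a zero-sized dummy header whose next_offset points at that region, with list links stored as 32-bit offsets from the heap base rather than raw pointers. The sketch below restates that layout with assumed names (heap_free_t, heap_area, heap_first, END_OF_LIST); the sizes and the GCC-style alignment attribute are assumptions of the sketch.

#include <stdint.h>

#define HEAP_SIZE   512
#define END_OF_LIST 0xffffffffu /* sentinel offset terminating the free list */

/* Assumed free-region header: region size plus the offset of the next free region. */
typedef struct
{
  uint32_t size;
  uint32_t next_offset;
} heap_free_t;

static heap_free_t heap_first; /* zero-sized dummy header anchoring the list */
static uint8_t heap_area[HEAP_SIZE] __attribute__ ((aligned (8)));

/* Offsets are measured from the start of the heap area. */
static uint32_t
offset_from_addr (const heap_free_t *p)
{
  return (uint32_t) ((const uint8_t *) p - heap_area);
}

static heap_free_t *
addr_from_offset (uint32_t offset)
{
  return (heap_free_t *) (heap_area + offset);
}

/* Startup: one free region spans the whole area, and the dummy header points at it. */
static void
heap_init (void)
{
  heap_free_t *region_p = (heap_free_t *) heap_area;

  region_p->size = HEAP_SIZE;
  region_p->next_offset = END_OF_LIST;

  heap_first.size = 0;
  heap_first.next_offset = offset_from_addr (region_p);
}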
Example no. 5
/**
 * Startup initialization of heap
 *
 * Note:
 *      heap start and size should be aligned on MEM_HEAP_CHUNK_SIZE
 */
void
mem_heap_init (uint8_t *heap_start, /**< first address of heap space */
               size_t heap_size)    /**< heap space size */
{
  JERRY_ASSERT (heap_start != NULL);
  JERRY_ASSERT (heap_size != 0);

  JERRY_STATIC_ASSERT ((MEM_HEAP_CHUNK_SIZE & (MEM_HEAP_CHUNK_SIZE - 1u)) == 0);
  JERRY_ASSERT ((uintptr_t) heap_start % MEM_ALIGNMENT == 0);
  JERRY_ASSERT ((uintptr_t) heap_start % MEM_HEAP_CHUNK_SIZE == 0);
  JERRY_ASSERT (heap_size % MEM_HEAP_CHUNK_SIZE == 0);

  JERRY_ASSERT (heap_size <= (1u << MEM_HEAP_OFFSET_LOG));

  mem_heap.heap_start = heap_start;
  mem_heap.heap_size = heap_size;
  mem_heap.limit = CONFIG_MEM_HEAP_DESIRED_LIMIT;

  VALGRIND_NOACCESS_SPACE (heap_start, heap_size);

  mem_init_block_header (mem_heap.heap_start,
                         0,
                         MEM_BLOCK_FREE,
                         mem_block_length_type_t::GENERAL,
                         NULL,
                         NULL);

  mem_heap.first_block_p = (mem_block_header_t*) mem_heap.heap_start;
  mem_heap.last_block_p = mem_heap.first_block_p;

  MEM_HEAP_STAT_INIT ();
} /* mem_heap_init */
Example no. 6
/**
 * Finalize heap
 */
void
mem_heap_finalize (void)
{
  VALGRIND_DEFINED_SPACE (mem_heap.heap_start, mem_heap.heap_size);

  JERRY_ASSERT (mem_heap.first_block_p == mem_heap.last_block_p);
  JERRY_ASSERT (mem_is_block_free (mem_heap.first_block_p));

  VALGRIND_NOACCESS_SPACE (mem_heap.heap_start, mem_heap.heap_size);

  memset (&mem_heap, 0, sizeof (mem_heap));
} /* mem_heap_finalize */
Example no. 7
/**
 * Free the chunk
 */
void
jmem_pools_free (void *chunk_p) /**< pointer to the chunk */
{
  jmem_pools_chunk_t *const chunk_to_free_p = (jmem_pools_chunk_t *) chunk_p;

  VALGRIND_DEFINED_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE);

  chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_chunk_p);
  JERRY_CONTEXT (jmem_free_chunk_p) = chunk_to_free_p;

  VALGRIND_NOACCESS_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE);

  JMEM_POOLS_STAT_FREE_POOL ();
} /* jmem_pools_free */
Example no. 8
/**
 *  Collect empty pool chunks
 */
void
jmem_pools_collect_empty (void)
{
  while (JERRY_CONTEXT (jmem_free_chunk_p))
  {
    VALGRIND_DEFINED_SPACE (JERRY_CONTEXT (jmem_free_chunk_p), sizeof (jmem_pools_chunk_t));
    jmem_pools_chunk_t *const next_p = JERRY_CONTEXT (jmem_free_chunk_p)->next_p;
    VALGRIND_NOACCESS_SPACE (JERRY_CONTEXT (jmem_free_chunk_p), sizeof (jmem_pools_chunk_t));

    jmem_heap_free_block (JERRY_CONTEXT (jmem_free_chunk_p), JMEM_POOL_CHUNK_SIZE);
    JMEM_POOLS_STAT_DEALLOC ();
    JERRY_CONTEXT (jmem_free_chunk_p) = next_p;
  }
} /* jmem_pools_collect_empty */
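Collection is simply draining that list: each cached chunk is handed back to the underlying heap, and the link has to be read before the chunk is released. A minimal sketch, assuming the same hypothetical pool_chunk_t as in the earlier sketch and using free() as a stand-in for jmem_heap_free_block:

#include <stdlib.h>

typedef struct pool_chunk
{
  struct pool_chunk *next_p;
} pool_chunk_t;

static pool_chunk_t *free_list_p; /* head of the cached-chunk list */

/* Return every cached chunk to the underlying allocator. */
static void
pool_collect_empty (void)
{
  pool_chunk_t *chunk_p = free_list_p;
  free_list_p = NULL;

  while (chunk_p != NULL)
  {
    /* Save the link first: after free() the chunk must not be touched,
     * which is what the VALGRIND_DEFINED/NOACCESS bracket enforces above. */
    pool_chunk_t *next_p = chunk_p->next_p;
    free (chunk_p);
    chunk_p = next_p;
  }
}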
Example no. 9
/**
 * Initialization of memory pool.
 *
 * Pool will be located in the segment [pool_start; pool_start + pool_size).
 * Part of pool space will be used for bitmap and the rest will store chunks.
 */
void
mem_pool_init (mem_pool_state_t *pool_p, /**< pool */
               size_t pool_size)         /**< pool size */
{
  JERRY_ASSERT (pool_p != NULL);
  JERRY_ASSERT ((size_t)MEM_POOL_SPACE_START (pool_p) % MEM_ALIGNMENT == 0);

  JERRY_STATIC_ASSERT (MEM_POOL_CHUNK_SIZE % MEM_ALIGNMENT == 0);
  JERRY_STATIC_ASSERT (MEM_POOL_MAX_CHUNKS_NUMBER_LOG <= sizeof (mem_pool_chunk_index_t) * JERRY_BITSINBYTE);
  JERRY_ASSERT (sizeof (mem_pool_chunk_index_t) <= MEM_POOL_CHUNK_SIZE);

  JERRY_ASSERT (MEM_POOL_SIZE == sizeof (mem_pool_state_t) + MEM_POOL_CHUNKS_NUMBER * MEM_POOL_CHUNK_SIZE);
  JERRY_ASSERT (MEM_POOL_CHUNKS_NUMBER >= CONFIG_MEM_LEAST_CHUNK_NUMBER_IN_POOL);

  JERRY_ASSERT (pool_size == MEM_POOL_SIZE);

  /*
   * All chunks are free right after initialization
   */
  pool_p->free_chunks_number = (mem_pool_chunk_index_t) MEM_POOL_CHUNKS_NUMBER;
  JERRY_ASSERT (pool_p->free_chunks_number == MEM_POOL_CHUNKS_NUMBER);

  /*
   * Chunk with zero index is first free chunk in the pool now
   */
  pool_p->first_free_chunk = 0;

  for (mem_pool_chunk_index_t chunk_index = 0;
       chunk_index < MEM_POOL_CHUNKS_NUMBER;
       chunk_index++)
  {
    mem_pool_chunk_index_t *next_free_chunk_index_p = (mem_pool_chunk_index_t*) MEM_POOL_CHUNK_ADDRESS (pool_p,
                                                                                                        chunk_index);

    *next_free_chunk_index_p = (mem_pool_chunk_index_t) (chunk_index + 1u);

    VALGRIND_NOACCESS_SPACE (next_free_chunk_index_p, MEM_POOL_CHUNK_SIZE);
  }

  mem_check_pool (pool_p);
} /* mem_pool_init */
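The initialization loop above threads a free list through the pool by storing, in each chunk, the index of the next free chunk, with the chunk count itself acting as the end-of-list sentinel. The standalone sketch below shows that index-threaded list together with the matching allocation step; all names and sizes (CHUNK_SIZE, CHUNKS_COUNT, pool_space) are assumptions of the sketch.

#include <stdint.h>
#include <string.h>

#define CHUNK_SIZE   8   /* assumed chunk size in bytes */
#define CHUNKS_COUNT 32  /* assumed chunk count; also the end-of-list sentinel */

typedef uint8_t chunk_index_t;

static uint8_t pool_space[CHUNKS_COUNT * CHUNK_SIZE];
static chunk_index_t first_free_chunk;

/* Thread the free list through the chunks: chunk i links to chunk i + 1,
 * and the last chunk links to CHUNKS_COUNT (the sentinel). */
static void
pool_init (void)
{
  for (chunk_index_t i = 0; i < CHUNKS_COUNT; i++)
  {
    chunk_index_t next = (chunk_index_t) (i + 1u);
    memcpy (pool_space + i * CHUNK_SIZE, &next, sizeof (next));
  }

  first_free_chunk = 0;
}

/* Pop the first free chunk, or return NULL if the pool is exhausted. */
static void *
pool_alloc_chunk (void)
{
  if (first_free_chunk == CHUNKS_COUNT)
  {
    return NULL;
  }

  uint8_t *chunk_p = pool_space + first_free_chunk * CHUNK_SIZE;
  memcpy (&first_free_chunk, chunk_p, sizeof (first_free_chunk));

  return chunk_p;
}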
Example no. 10
/**
 * Free the chunk
 */
void __attr_always_inline___
mem_pools_free (uint8_t *chunk_p) /**< pointer to the chunk */
{
    mem_check_pools ();

    mem_pool_chunk_t *chunk_to_free_p = (mem_pool_chunk_t *) chunk_p;

    chunk_to_free_p->u.free.next_p = mem_free_chunk_p;
    mem_free_chunk_p = chunk_to_free_p;

    VALGRIND_FREYA_FREELIKE_SPACE (chunk_to_free_p);
    VALGRIND_NOACCESS_SPACE (chunk_to_free_p, MEM_POOL_CHUNK_SIZE);

#ifndef JERRY_NDEBUG
    mem_free_chunks_number++;
#endif /* !JERRY_NDEBUG */

    MEM_POOLS_STAT_FREE_CHUNK ();

    mem_check_pools ();
} /* mem_pools_free */
Example no. 11
/**
 * Check correctness of pool allocator state
 */
static void
mem_check_pools (void)
{
#ifndef JERRY_DISABLE_HEAVY_DEBUG
    size_t free_chunks_met = 0;

    for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
            free_chunk_iter_p != NULL;
            free_chunk_iter_p = next_free_chunk_p)
    {
        VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

        next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

        VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

        free_chunks_met++;
    }

    JERRY_ASSERT (free_chunks_met == mem_free_chunks_number);
#endif /* !JERRY_DISABLE_HEAVY_DEBUG */
} /* mem_check_pools */
Example no. 12
/**
 * Startup initialization of heap
 */
void
jmem_heap_init (void)
{
  JERRY_STATIC_ASSERT ((1u << JMEM_HEAP_OFFSET_LOG) >= JMEM_HEAP_SIZE,
                       two_pow_mem_heap_offset_should_not_be_less_than_mem_heap_size);

  JERRY_ASSERT ((uintptr_t) JERRY_HEAP_CONTEXT (area) % JMEM_ALIGNMENT == 0);

  JERRY_CONTEXT (jmem_heap_limit) = CONFIG_MEM_HEAP_DESIRED_LIMIT;

  jmem_heap_free_t *const region_p = (jmem_heap_free_t *) JERRY_HEAP_CONTEXT (area);

  region_p->size = JMEM_HEAP_AREA_SIZE;
  region_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST);

  JERRY_HEAP_CONTEXT (first).size = 0;
  JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);

  JERRY_CONTEXT (jmem_heap_list_skip_p) = &JERRY_HEAP_CONTEXT (first);

  VALGRIND_NOACCESS_SPACE (JERRY_HEAP_CONTEXT (area), JMEM_HEAP_AREA_SIZE);

  JMEM_HEAP_STAT_INIT ();
} /* jmem_heap_init */
Example no. 13
/**
 * Free the chunk in the pool
 */
void
mem_pool_free_chunk (mem_pool_state_t *pool_p,  /**< pool */
                     uint8_t *chunk_p)         /**< chunk pointer */
{
  JERRY_ASSERT (pool_p->free_chunks_number < MEM_POOL_CHUNKS_NUMBER);
  JERRY_ASSERT (mem_pool_is_chunk_inside (pool_p, chunk_p));
  JERRY_ASSERT (((uintptr_t) chunk_p - (uintptr_t) MEM_POOL_SPACE_START (pool_p)) % MEM_POOL_CHUNK_SIZE == 0);

  mem_check_pool (pool_p);

  const size_t chunk_byte_offset = (size_t) (chunk_p - MEM_POOL_SPACE_START (pool_p));
  const mem_pool_chunk_index_t chunk_index = (mem_pool_chunk_index_t) (chunk_byte_offset / MEM_POOL_CHUNK_SIZE);

  mem_pool_chunk_index_t *next_free_chunk_index_p = (mem_pool_chunk_index_t*) chunk_p;

  *next_free_chunk_index_p = pool_p->first_free_chunk;

  pool_p->first_free_chunk = chunk_index;
  pool_p->free_chunks_number++;

  VALGRIND_NOACCESS_SPACE (next_free_chunk_index_p, MEM_POOL_CHUNK_SIZE);

  mem_check_pool (pool_p);
} /* mem_pool_free_chunk */
Example no. 14
/**
 * Long path for mem_pools_alloc
 */
static void __attr_noinline___
mem_pools_alloc_longpath (void)
{
    mem_check_pools ();

    JERRY_ASSERT (mem_free_chunk_p == NULL);

    JERRY_ASSERT (MEM_POOL_SIZE <= mem_heap_get_chunked_block_data_size ());
    JERRY_ASSERT (MEM_POOL_CHUNKS_NUMBER >= 1);

    MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t*) mem_heap_alloc_chunked_block (MEM_HEAP_ALLOC_LONG_TERM);

    if (mem_free_chunk_p != NULL)
    {
        /* some chunks were freed due to GC invoked by heap allocator */
        MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();
        mem_heap_free_block (pool_start_p);

        return;
    }

#ifndef JERRY_NDEBUG
    mem_free_chunks_number += MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

    JERRY_STATIC_ASSERT (MEM_POOL_CHUNK_SIZE % MEM_ALIGNMENT == 0);
    JERRY_STATIC_ASSERT (sizeof (mem_pool_chunk_t) == MEM_POOL_CHUNK_SIZE);
    JERRY_STATIC_ASSERT (sizeof (mem_pool_chunk_index_t) <= MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT ((mem_pool_chunk_index_t) MEM_POOL_CHUNKS_NUMBER == MEM_POOL_CHUNKS_NUMBER);
    JERRY_ASSERT (MEM_POOL_SIZE == MEM_POOL_CHUNKS_NUMBER * MEM_POOL_CHUNK_SIZE);

    JERRY_ASSERT (((uintptr_t) pool_start_p) % MEM_ALIGNMENT == 0);

    mem_pool_chunk_t *prev_free_chunk_p = NULL;

    for (mem_pool_chunk_index_t chunk_index = 0;
            chunk_index < MEM_POOL_CHUNKS_NUMBER;
            chunk_index++)
    {
        mem_pool_chunk_t *chunk_p = pool_start_p + chunk_index;

        if (prev_free_chunk_p != NULL)
        {
            prev_free_chunk_p->u.free.next_p = chunk_p;
        }

        prev_free_chunk_p = chunk_p;
    }

    prev_free_chunk_p->u.free.next_p = NULL;

#ifdef JERRY_VALGRIND
    for (mem_pool_chunk_index_t chunk_index = 0;
            chunk_index < MEM_POOL_CHUNKS_NUMBER;
            chunk_index++)
    {
        mem_pool_chunk_t *chunk_p = pool_start_p + chunk_index;

        VALGRIND_NOACCESS_SPACE (chunk_p, MEM_POOL_CHUNK_SIZE);
    }
#endif /* JERRY_VALGRIND */

    mem_free_chunk_p = pool_start_p;

    MEM_POOLS_STAT_ALLOC_POOL ();

    mem_check_pools ();
} /* mem_pools_alloc_longpath */
Example no. 15
/**
 * Collect chunks from empty pools and free the pools
 */
void
mem_pools_collect_empty (void)
{
    /*
     * Hint magic number in header of pools with free pool-first chunks
     */
    const uint16_t hint_magic_num_value = 0x7e89;

    /*
     * Collection-time chunk lists
     */
    mem_pool_chunk_t *first_chunks_list_p = NULL;
    mem_pool_chunk_t *non_first_chunks_list_p = NULL;

    /*
     * At first stage collect free pool-first chunks to separate collection-time lists
     * and change their layout from mem_pool_chunk_t::u::free to mem_pool_chunk_t::u::pool_gc
     */
    {
        mem_pool_chunk_t tmp_header;
        tmp_header.u.free.next_p = mem_free_chunk_p;

        for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                *prev_free_chunk_p = &tmp_header,
                *next_free_chunk_p;
                free_chunk_iter_p != NULL;
                free_chunk_iter_p = next_free_chunk_p)
        {
            mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

            VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

            next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

            if (pool_start_p == free_chunk_iter_p)
            {
                /*
                 * The chunk is first at its pool
                 *
                 * Remove the chunk from common list of free chunks
                 */
                prev_free_chunk_p->u.free.next_p = next_free_chunk_p;

                /*
                 * Initialize the pool-first chunk as a pool header and insert it into the list of free pool-first chunks
                 */
                free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
                free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
                free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
                free_chunk_iter_p->u.pool_gc.traversal_check_flag = false;

                MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, first_chunks_list_p);
                first_chunks_list_p = free_chunk_iter_p;
            }
            else
            {
                prev_free_chunk_p = free_chunk_iter_p;
            }
        }

        mem_free_chunk_p = tmp_header.u.free.next_p;
    }

    if (first_chunks_list_p == NULL)
    {
        /* there are no empty pools */

        return;
    }

    /*
     * At second stage we collect all free non-pool-first chunks, for which corresponding pool-first chunks are free,
     * and link them into the corresponding mem_pool_chunk_t::u::pool_gc::free_list_cp list, while also maintaining
     * the corresponding mem_pool_chunk_t::u::pool_gc::free_chunks_num:
     *  - at first, for each non-pool-first free chunk we check whether traversal check flag is cleared in corresponding
     *    first chunk in the same pool, and move those chunks, for which the condition is true,
     *    to separate temporary list.
     *
     *  - then, we flip the traversal check flags for each of free pool-first chunks.
     *
     *  - at last, we perform almost the same as in the first step, but check only the non-pool-first chunks from the
     *    temporary list, and send the chunks, for which the corresponding traversal check flag is cleared, back to
     *    the common list of free chunks; the rest of the chunks from the temporary list are linked to the
     *    corresponding pool-first chunks. Also, a counter of the linked free chunks is maintained in every free
     *    pool-first chunk.
     */
    {
        {
            mem_pool_chunk_t tmp_header;
            tmp_header.u.free.next_p = mem_free_chunk_p;

            for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                    *prev_free_chunk_p = &tmp_header,
                    *next_free_chunk_p;
                    free_chunk_iter_p != NULL;
                    free_chunk_iter_p = next_free_chunk_p)
            {
                mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

                next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

                /*
                 * The magic number doesn't guarantee that the chunk is actually a free pool-first chunk,
                 * so we test the traversal check flag after flipping values of the flags in every
                 * free pool-first chunk.
                 */
                uint16_t magic_num_field;
                bool traversal_check_flag;

                mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

                /*
                 * During this traversal the flag in the free header chunks is in cleared state
                 */
                if (!traversal_check_flag
                        && magic_num_field == hint_magic_num_value)
                {
                    free_chunk_iter_p->u.free.next_p = non_first_chunks_list_p;
                    non_first_chunks_list_p = free_chunk_iter_p;

                    prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
                }
                else
                {
                    prev_free_chunk_p = free_chunk_iter_p;
                }
            }

            mem_free_chunk_p = tmp_header.u.free.next_p;
        }

        {
            /*
             * Now, flip the traversal check flag in free pool-first chunks
             */
            for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
                    first_chunks_iter_p != NULL;
                    first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                          first_chunks_iter_p->u.pool_gc.next_first_cp))
            {
                JERRY_ASSERT (!first_chunks_iter_p->u.pool_gc.traversal_check_flag);

                first_chunks_iter_p->u.pool_gc.traversal_check_flag = true;
            }
        }

        {
            for (mem_pool_chunk_t *non_first_chunks_iter_p = non_first_chunks_list_p, *next_p;
                    non_first_chunks_iter_p != NULL;
                    non_first_chunks_iter_p = next_p)
            {
                next_p = non_first_chunks_iter_p->u.free.next_p;

                mem_pool_chunk_t *pool_start_p;
                pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (non_first_chunks_iter_p);

                uint16_t magic_num_field;
                bool traversal_check_flag;

                mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

                JERRY_ASSERT (magic_num_field == hint_magic_num_value);

#ifndef JERRY_DISABLE_HEAVY_DEBUG
                bool is_occured = false;

                for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
                        first_chunks_iter_p != NULL;
                        first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                              first_chunks_iter_p->u.pool_gc.next_first_cp))
                {
                    if (pool_start_p == first_chunks_iter_p)
                    {
                        is_occured = true;
                        break;
                    }
                }

                JERRY_ASSERT (is_occured == traversal_check_flag);
#endif /* !JERRY_DISABLE_HEAVY_DEBUG */

                /*
                 * During this traversal the flag in the free header chunks is in set state
                 *
                 * If the flag is set, it is guaranteed that the pool-first chunk,
                 * from the same pool, as the current non-pool-first chunk, is free
                 * and is placed in the corresponding list of free pool-first chunks.
                 */
                if (traversal_check_flag)
                {
                    pool_start_p->u.pool_gc.free_chunks_num++;

                    non_first_chunks_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                            pool_start_p->u.pool_gc.free_list_cp);
                    MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, non_first_chunks_iter_p);
                }
                else
                {
                    non_first_chunks_iter_p->u.free.next_p = mem_free_chunk_p;
                    mem_free_chunk_p = non_first_chunks_iter_p;
                }
            }
        }

        non_first_chunks_list_p = NULL;
    }

    /*
     * At the third stage we check each free pool-first chunk in the collection-time list for the counted
     * number of free chunks in the pool containing the chunk.
     *
     * If the number equals the number of chunks in the pool, then the pool is empty and is freed;
     * otherwise, the free chunks of the pool are returned to the common list of free chunks.
     */
    for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p, *next_p;
            first_chunks_iter_p != NULL;
            first_chunks_iter_p = next_p)
    {
        next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                     first_chunks_iter_p->u.pool_gc.next_first_cp);

        JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.hint_magic_num == hint_magic_num_value);
        JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.traversal_check_flag);
        JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.free_chunks_num <= MEM_POOL_CHUNKS_NUMBER);

        if (first_chunks_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
        {
#ifndef JERRY_NDEBUG
            mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

            MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();
            mem_heap_free_block (first_chunks_iter_p);

            MEM_POOLS_STAT_FREE_POOL ();
        }
        else
        {
            mem_pool_chunk_t *first_chunk_p = first_chunks_iter_p;

            /*
             * Convert layout of first chunk from collection-time pool-first chunk's layout to the common free chunk layout
             */
            first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                           first_chunks_iter_p->u.pool_gc.free_list_cp);

            /*
             * Link local pool's list of free chunks into the common list of free chunks
             */
            for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
                    ;
                    pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
            {
                JERRY_ASSERT (pool_chunks_iter_p != NULL);

                if (pool_chunks_iter_p->u.free.next_p == NULL)
                {
                    pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;

                    break;
                }
            }

            mem_free_chunk_p = first_chunk_p;
        }
    }

#ifdef JERRY_VALGRIND
    /*
     * Valgrind-mode specific pass that marks all free chunks inaccessible
     */
    for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
            free_chunk_iter_p != NULL;
            free_chunk_iter_p = next_free_chunk_p)
    {
        next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

        VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
    }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
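Stripped of the magic-number hints, the traversal-check flags and the valgrind bookkeeping, the three stages boil down to: count how many chunks of each pool sit on the global free list, keep the chunks of partially used pools, and release the pools whose every chunk is free. The much-simplified sketch below shows only that core idea; the pool_t descriptor, the pool_p back-pointer in each free chunk, and the malloc/free backing are all assumptions of the sketch (the real code derives a chunk's pool from its chunked heap block instead).

#include <stddef.h>
#include <stdlib.h>

#define CHUNKS_PER_POOL 8

typedef struct chunk
{
  struct chunk *next_p;  /* link in the global free-chunk list */
  struct pool *pool_p;   /* owning pool (assumption of this sketch) */
} chunk_t;

typedef struct pool
{
  struct pool *next_p;   /* link in the list of all pools */
  chunk_t *chunks_p;     /* storage for CHUNKS_PER_POOL chunks */
  size_t free_count;     /* recomputed during collection */
} pool_t;

static pool_t *pool_list_p;   /* all live pools */
static chunk_t *free_chunk_p; /* global free-chunk list */

/* Release every pool whose chunks are all on the global free list. */
static void
pools_collect_empty (void)
{
  /* Stage 1: count free chunks per pool. */
  for (pool_t *p = pool_list_p; p != NULL; p = p->next_p)
  {
    p->free_count = 0;
  }

  for (chunk_t *c = free_chunk_p; c != NULL; c = c->next_p)
  {
    c->pool_p->free_count++;
  }

  /* Stage 2: unlink free chunks that belong to fully free pools;
   * they will disappear together with their pool. */
  chunk_t **chunk_link_p = &free_chunk_p;

  while (*chunk_link_p != NULL)
  {
    chunk_t *c = *chunk_link_p;

    if (c->pool_p->free_count == CHUNKS_PER_POOL)
    {
      *chunk_link_p = c->next_p;
    }
    else
    {
      chunk_link_p = &c->next_p;
    }
  }

  /* Stage 3: free the empty pools themselves. */
  pool_t **pool_link_p = &pool_list_p;

  while (*pool_link_p != NULL)
  {
    pool_t *p = *pool_link_p;

    if (p->free_count == CHUNKS_PER_POOL)
    {
      *pool_link_p = p->next_p;
      free (p->chunks_p);
      free (p);
    }
    else
    {
      pool_link_p = &p->next_p;
    }
  }
}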
Example no. 16
/**
 * Free the memory block.
 */
void
mem_heap_free_block (void *ptr) /**< pointer to beginning of data space of the block */
{
  uint8_t *uint8_ptr = (uint8_t*) ptr;

  /* checking that uint8_ptr points to the heap */
  JERRY_ASSERT (uint8_ptr >= mem_heap.heap_start
                && uint8_ptr <= mem_heap.heap_start + mem_heap.heap_size);

  mem_check_heap ();

  mem_block_header_t *block_p = (mem_block_header_t*) uint8_ptr - 1;

  VALGRIND_DEFINED_STRUCT (block_p);

  mem_block_header_t *prev_block_p = mem_get_next_block_by_direction (block_p, MEM_DIRECTION_PREV);
  mem_block_header_t *next_block_p = mem_get_next_block_by_direction (block_p, MEM_DIRECTION_NEXT);

  JERRY_ASSERT (mem_heap.limit >= mem_heap.allocated_chunks * MEM_HEAP_CHUNK_SIZE);

  size_t chunks = mem_get_block_chunks_count_from_data_size (mem_get_block_data_space_size (block_p));
  JERRY_ASSERT (mem_heap.allocated_chunks >= chunks);
  mem_heap.allocated_chunks -= chunks;

  if (mem_heap.allocated_chunks * MEM_HEAP_CHUNK_SIZE * 3 <= mem_heap.limit)
  {
    mem_heap.limit /= 2;
  }
  else if (mem_heap.allocated_chunks * MEM_HEAP_CHUNK_SIZE + CONFIG_MEM_HEAP_DESIRED_LIMIT <= mem_heap.limit)
  {
    mem_heap.limit -= CONFIG_MEM_HEAP_DESIRED_LIMIT;
  }

  JERRY_ASSERT (mem_heap.limit >= mem_heap.allocated_chunks * MEM_HEAP_CHUNK_SIZE);

  MEM_HEAP_STAT_FREE_BLOCK (block_p);

  VALGRIND_NOACCESS_SPACE (uint8_ptr, block_p->allocated_bytes);

  JERRY_ASSERT (!mem_is_block_free (block_p));

  /* marking the block free */
  block_p->allocated_bytes = 0;
  block_p->length_type = mem_block_length_type_t::GENERAL;

  if (next_block_p != NULL)
  {
    VALGRIND_DEFINED_STRUCT (next_block_p);

    if (mem_is_block_free (next_block_p))
    {
      /* merge with the next block */
      MEM_HEAP_STAT_FREE_BLOCK_MERGE ();

      mem_block_header_t *next_next_block_p = mem_get_next_block_by_direction (next_block_p, MEM_DIRECTION_NEXT);

      VALGRIND_NOACCESS_STRUCT (next_block_p);

      next_block_p = next_next_block_p;

      VALGRIND_DEFINED_STRUCT (next_block_p);

      mem_set_block_next (block_p, next_block_p);
      if (next_block_p != NULL)
      {
        mem_set_block_prev (next_block_p, block_p);
      }
      else
      {
        mem_heap.last_block_p = block_p;
      }
    }

    VALGRIND_NOACCESS_STRUCT (next_block_p);
  }

  if (prev_block_p != NULL)
  {
    VALGRIND_DEFINED_STRUCT (prev_block_p);

    if (mem_is_block_free (prev_block_p))
    {
      /* merge with the previous block */
      MEM_HEAP_STAT_FREE_BLOCK_MERGE ();

      mem_set_block_next (prev_block_p, next_block_p);
      if (next_block_p != NULL)
      {
        VALGRIND_DEFINED_STRUCT (next_block_p);

        mem_set_block_prev (next_block_p, prev_block_p);

        VALGRIND_NOACCESS_STRUCT (next_block_p);
      }
      else
      {
        mem_heap.last_block_p = prev_block_p;
      }
    }

    VALGRIND_NOACCESS_STRUCT (prev_block_p);
  }

  VALGRIND_NOACCESS_STRUCT (block_p);

  mem_check_heap ();
} /* mem_heap_free_block */
Example no. 17
void
mem_pools_collect_empty (void)
{
  /*
   * Hint magic number in header of pools with free first chunks
   */
  const uint16_t hint_magic_num_value = 0x7e89;

  /*
   * In the first pass, collect pointers to those free chunks that are first in their pools
   * into separate lists (collection-time pool lists) and turn them into headers of the corresponding pools
   */

  /*
   * Number of collection-time pool lists
   */
  constexpr uint32_t pool_lists_number = 8;

  /*
   * Collection-time pool lists
   */
  mem_pool_chunk_t *pool_lists_p[pool_lists_number];
  for (uint32_t i = 0; i < pool_lists_number; i++)
  {
    pool_lists_p[i] = NULL;
  }

  /*
   * Number of the pools, included into the lists
   */
  uint32_t pools_in_lists_number = 0;

  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *prev_free_chunk_p = NULL, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    if (pool_start_p == free_chunk_iter_p)
    {
      /*
       * The chunk is first at its pool
       *
       * Remove the chunk from common list of free chunks
       */
      if (prev_free_chunk_p == NULL)
      {
        JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

        mem_free_chunk_p = next_free_chunk_p;
      }
      else
      {
        prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
      }

      pools_in_lists_number++;

      uint8_t list_id = pools_in_lists_number % pool_lists_number;

      /*
       * Initialize pool header and insert the pool into one of lists
       */
      free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
      free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
      free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
      free_chunk_iter_p->u.pool_gc.list_id = list_id;

      MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, pool_lists_p[list_id]);
      pool_lists_p[list_id] = free_chunk_iter_p;
    }
    else
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  if (pools_in_lists_number == 0)
  {
    /* there are no empty pools */

    return;
  }

  /*
   * In the second pass we check, for all remaining free chunks, whether they are in pools that were included in
   * the collection-time pool lists.
   *
   * For each such chunk, try to find the corresponding pool by iterating over the list.
   *
   * If the pool is found in a list (so the first chunk of the pool is free), increment the counter
   * of free chunks in the pool, and move the chunk from the global free-chunk list to the collection-time
   * local list of the corresponding pool's free chunks.
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *prev_free_chunk_p = NULL, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    bool is_chunk_moved_to_local_list = false;

#ifdef JERRY_VALGRIND
    /*
     * If the chunk is not free, there may be undefined bytes at hint_magic_num and list_id fields.
     *
     * Although this is correct for the routine, valgrind issues a warning about using uninitialized data
     * in a conditional expression. To suppress the false-positive warning, the chunk is temporarily marked
     * as defined, and after reading the hint magic number and list identifier, the valgrind state of the chunk is restored.
     */
    uint8_t vbits[MEM_POOL_CHUNK_SIZE];
    unsigned status;

    status = VALGRIND_GET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);

    VALGRIND_DEFINED_SPACE (pool_start_p, MEM_POOL_CHUNK_SIZE);
#endif /* JERRY_VALGRIND */

    /*
     * The magic number doesn't guarantee that the chunk is actually a pool header,
     * so it is only an optimization to reduce the number of unnecessary iterations over
     * pool lists.
     */
    uint16_t magic_num_field = pool_start_p->u.pool_gc.hint_magic_num;
    uint8_t id_to_search_in = pool_start_p->u.pool_gc.list_id;

#ifdef JERRY_VALGRIND
    status = VALGRIND_SET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);
#endif /* JERRY_VALGRIND */

    if (magic_num_field == hint_magic_num_value)
    {
      /*
       * The first chunk may be free.
       *
       * If so, it is included in the list of free pool-first chunks.
       */

      if (id_to_search_in < pool_lists_number)
      {
        for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[id_to_search_in];
             pool_list_iter_p != NULL;
             pool_list_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                    pool_list_iter_p->u.pool_gc.next_first_cp))
        {
          if (pool_list_iter_p == pool_start_p)
          {
            /*
             * The first chunk is actually free.
             *
             * So, increment the free-chunks counter in it.
             */
            pool_start_p->u.pool_gc.free_chunks_num++;

            /*
             * It is possible that the corresponding pool is empty
             *
             * Moving current chunk from common list of free chunks to temporary list, local to the pool
             */
            if (prev_free_chunk_p == NULL)
            {
              JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

              mem_free_chunk_p = next_free_chunk_p;
            }
            else
            {
              prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
            }

            free_chunk_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                                   pool_start_p->u.pool_gc.free_list_cp);
            MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, free_chunk_iter_p);

            is_chunk_moved_to_local_list = true;

            break;
          }
        }
      }
    }

    if (!is_chunk_moved_to_local_list)
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  /*
   * In the third pass we check, for each pool in the collection-time pool lists, the counted
   * number of free chunks in the pool.
   *
   * If the number equals the number of chunks in the pool, then the pool is empty and is freed;
   * otherwise, the free chunks of the pool are returned to the common list of free chunks.
   */
  for (uint8_t list_id = 0; list_id < pool_lists_number; list_id++)
  {
    for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[list_id], *next_p;
         pool_list_iter_p != NULL;
         pool_list_iter_p = next_p)
    {
      next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                   pool_list_iter_p->u.pool_gc.next_first_cp);

      if (pool_list_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
      {
#ifndef JERRY_NDEBUG
        mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

        MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();
        mem_heap_free_block (pool_list_iter_p);

        MEM_POOLS_STAT_FREE_POOL ();
      }
      else
      {
        mem_pool_chunk_t *first_chunk_p = pool_list_iter_p;

        /*
         * Convert layout of first chunk from collection-time pool header to common free chunk
         */
        first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                           pool_list_iter_p->u.pool_gc.free_list_cp);

        /*
         * Link local pool's list of free chunks into global list of free chunks
         */
        for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
             ;
             pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
        {
          JERRY_ASSERT (pool_chunks_iter_p != NULL);

          if (pool_chunks_iter_p->u.free.next_p == NULL)
          {
            pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;

            break;
          }
        }

        mem_free_chunk_p = first_chunk_p;
      }
    }
  }

#ifdef JERRY_VALGRIND
  /*
   * Valgrind-mode specific pass that marks all free chunks inaccessible
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
  }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
Example no. 18
/**
 * Free the memory block.
 */
void __attr_hot___
jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the block */
                      const size_t size) /**< size of allocated region */
{
  VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST;

  /* checking that ptr points to the heap */
  JERRY_ASSERT (jmem_is_heap_pointer (ptr));
  JERRY_ASSERT (size > 0);
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));

  VALGRIND_FREYA_FREELIKE_SPACE (ptr);
  VALGRIND_NOACCESS_SPACE (ptr, size);
  JMEM_HEAP_STAT_FREE_ITER ();

  jmem_heap_free_t *block_p = (jmem_heap_free_t *) ptr;
  jmem_heap_free_t *prev_p;
  jmem_heap_free_t *next_p;

  VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  if (block_p > JERRY_CONTEXT (jmem_heap_list_skip_p))
  {
    prev_p = JERRY_CONTEXT (jmem_heap_list_skip_p);
    JMEM_HEAP_STAT_SKIP ();
  }
  else
  {
    prev_p = &JERRY_HEAP_CONTEXT (first);
    JMEM_HEAP_STAT_NONSKIP ();
  }

  JERRY_ASSERT (jmem_is_heap_pointer (block_p));
  const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);

  VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  // Find position of region in the list
  while (prev_p->next_offset < block_offset)
  {
    jmem_heap_free_t *const next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (next_p));

    VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
    VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
    prev_p = next_p;

    JMEM_HEAP_STAT_FREE_ITER ();
  }

  next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
  VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));

  /* Realign size */
  const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;

  VALGRIND_DEFINED_SPACE (block_p, sizeof (jmem_heap_free_t));
  VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  // Update prev
  if (jmem_heap_get_region_end (prev_p) == block_p)
  {
    // Can be merged
    prev_p->size += (uint32_t) aligned_size;
    VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
    block_p = prev_p;
  }
  else
  {
    block_p->size = (uint32_t) aligned_size;
    prev_p->next_offset = block_offset;
  }

  VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
  // Update next
  if (jmem_heap_get_region_end (block_p) == next_p)
  {
    if (unlikely (next_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
    {
      JERRY_CONTEXT (jmem_heap_list_skip_p) = block_p;
    }

    // Can be merged
    block_p->size += next_p->size;
    block_p->next_offset = next_p->next_offset;

  }
  else
  {
    block_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
  }

  JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

  VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  VALGRIND_NOACCESS_SPACE (block_p, size);
  VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));

  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) > 0);
  JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_size;

  while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_MEM_HEAP_DESIRED_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_MEM_HEAP_DESIRED_LIMIT;
  }

  VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
  JMEM_HEAP_STAT_FREE (size);
} /* jmem_heap_free_block */
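Ignoring the compressed offsets, the valgrind bracketing and the skip pointer, the free path above is classic coalescing in an address-ordered free list: find the insertion point, merge with the previous region if it ends exactly where the freed block starts, and merge with the next region if the freed block ends exactly where it starts. The following is a standalone sketch under those simplifications; region_t, heap_free_region and the plain-pointer list are assumptions of the sketch.

#include <stddef.h>
#include <stdint.h>

/* Assumed free-region header (plain pointers instead of compressed offsets). */
typedef struct region
{
  size_t size;           /* size of the free region in bytes */
  struct region *next_p; /* next free region in address order */
} region_t;

static region_t free_list_head = { 0, NULL }; /* zero-sized dummy list head */

/* End address of a free region. */
static uint8_t *
region_end (region_t *region_p)
{
  return (uint8_t *) region_p + region_p->size;
}

/* Insert a freed block (at least sizeof (region_t) bytes) into the
 * address-ordered list and coalesce it with its neighbours. */
static void
heap_free_region (void *ptr, size_t size)
{
  region_t *block_p = (region_t *) ptr;
  region_t *prev_p = &free_list_head;

  /* Find the last free region that starts before the freed block. */
  while (prev_p->next_p != NULL && (uint8_t *) prev_p->next_p < (uint8_t *) block_p)
  {
    prev_p = prev_p->next_p;
  }

  region_t *next_p = prev_p->next_p;

  if (prev_p != &free_list_head && region_end (prev_p) == (uint8_t *) block_p)
  {
    /* The previous region ends exactly at the freed block: merge. */
    prev_p->size += size;
    block_p = prev_p;
  }
  else
  {
    /* Stand-alone region: link it after prev_p. */
    block_p->size = size;
    block_p->next_p = next_p;
    prev_p->next_p = block_p;
  }

  if (next_p != NULL && region_end (block_p) == (uint8_t *) next_p)
  {
    /* The (possibly merged) block ends exactly at the next region: merge. */
    block_p->size += next_p->size;
    block_p->next_p = next_p->next_p;
  }
}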
Example no. 19
/**
 * Allocation of memory region.
 *
 * See also:
 *          jmem_heap_alloc_block
 *
 * @return pointer to allocated memory block - if allocation is successful,
 *         NULL - if there is not enough memory.
 */
static __attr_hot___
void *jmem_heap_alloc_block_internal (const size_t size)
{
  // Align size
  const size_t required_size = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT;
  jmem_heap_free_t *data_space_p = NULL;

  VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  // Fast path for 8 byte chunks, first region is guaranteed to be sufficient
  if (required_size == JMEM_ALIGNMENT
      && likely (JERRY_HEAP_CONTEXT (first).next_offset != JMEM_HEAP_END_OF_LIST))
  {
    data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (data_space_p));

    VALGRIND_DEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
    JERRY_CONTEXT (jmem_heap_allocated_size) += JMEM_ALIGNMENT;
    JMEM_HEAP_STAT_ALLOC_ITER ();

    if (data_space_p->size == JMEM_ALIGNMENT)
    {
      JERRY_HEAP_CONTEXT (first).next_offset = data_space_p->next_offset;
    }
    else
    {
      JERRY_ASSERT (data_space_p->size > JMEM_ALIGNMENT);

      jmem_heap_free_t *remaining_p;
      remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset) + 1;

      VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
      remaining_p->size = data_space_p->size - JMEM_ALIGNMENT;
      remaining_p->next_offset = data_space_p->next_offset;
      VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

      JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
    }

    VALGRIND_UNDEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));

    if (unlikely (data_space_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
    {
      JERRY_CONTEXT (jmem_heap_list_skip_p) = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    }
  }
  // Slow path for larger regions
  else
  {
    uint32_t current_offset = JERRY_HEAP_CONTEXT (first).next_offset;
    jmem_heap_free_t *prev_p = &JERRY_HEAP_CONTEXT (first);

    while (current_offset != JMEM_HEAP_END_OF_LIST)
    {
      jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (current_offset);
      JERRY_ASSERT (jmem_is_heap_pointer (current_p));
      VALGRIND_DEFINED_SPACE (current_p, sizeof (jmem_heap_free_t));
      JMEM_HEAP_STAT_ALLOC_ITER ();

      const uint32_t next_offset = current_p->next_offset;
      JERRY_ASSERT (next_offset == JMEM_HEAP_END_OF_LIST
                    || jmem_is_heap_pointer (JMEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset)));

      if (current_p->size >= required_size)
      {
        // Region is sufficiently big, store address
        data_space_p = current_p;
        JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;

        // Region was larger than necessary
        if (current_p->size > required_size)
        {
          // Get address of remaining space
          jmem_heap_free_t *const remaining_p = (jmem_heap_free_t *) ((uint8_t *) current_p + required_size);

          // Update metadata
          VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
          remaining_p->size = current_p->size - (uint32_t) required_size;
          remaining_p->next_offset = next_offset;
          VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

          // Update list
          VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
          VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }
        // Block is an exact fit
        else
        {
          // Remove the region from the list
          VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = next_offset;
          VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }

        JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

        // Found enough space
        break;
      }

      VALGRIND_NOACCESS_SPACE (current_p, sizeof (jmem_heap_free_t));
      // Next in list
      prev_p = current_p;
      current_offset = next_offset;
    }
  }

  while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) += CONFIG_MEM_HEAP_DESIRED_LIMIT;
  }

  VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  if (unlikely (!data_space_p))
  {
    return NULL;
  }

  JERRY_ASSERT ((uintptr_t) data_space_p % JMEM_ALIGNMENT == 0);
  VALGRIND_UNDEFINED_SPACE (data_space_p, size);
  JMEM_HEAP_STAT_ALLOC (size);

  return (void *) data_space_p;
} /* jmem_heap_alloc_block_internal */
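The slow path of the allocator above is a first-fit scan over the same address-ordered list: the first region at least as large as the aligned request is taken, and any surplus becomes a new free region that replaces the old one in the list. Below is a matching sketch with plain pointers, assuming a non-zero request size; region_t, heap_alloc_region and the granularity choice are assumptions of the sketch (the granularity is set to sizeof (region_t) so that a split remainder can always hold a header).

#include <stddef.h>
#include <stdint.h>

/* Assumed free-region header (plain pointers instead of compressed offsets). */
typedef struct region
{
  size_t size;
  struct region *next_p;
} region_t;

/* Allocation granularity: large enough that a split remainder can hold a header. */
#define ALIGNMENT (sizeof (region_t))

static region_t free_list_head; /* zero-sized dummy head of the free list */

/* First-fit allocation with splitting of oversized regions. */
static void *
heap_alloc_region (size_t size)
{
  /* Round the request up to the allocation granularity. */
  const size_t required = (size + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;

  for (region_t *prev_p = &free_list_head; prev_p->next_p != NULL; prev_p = prev_p->next_p)
  {
    region_t *current_p = prev_p->next_p;

    if (current_p->size < required)
    {
      continue; /* too small, keep scanning */
    }

    if (current_p->size > required)
    {
      /* Split: the remainder becomes a new free region in place of the old one. */
      region_t *remaining_p = (region_t *) ((uint8_t *) current_p + required);
      remaining_p->size = current_p->size - required;
      remaining_p->next_p = current_p->next_p;
      prev_p->next_p = remaining_p;
    }
    else
    {
      /* Exact fit: simply unlink the region. */
      prev_p->next_p = current_p->next_p;
    }

    return current_p;
  }

  return NULL; /* no region is large enough */
}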
Example no. 20
/**
 * Finalize heap
 */
void jmem_heap_finalize (void)
{
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) == 0);
  VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_t));
} /* jmem_heap_finalize */