/**
 * Collect empty pool chunks
 */
void
jmem_pools_collect_empty (void)
{
  jmem_pools_chunk_t *chunk_p = JERRY_CONTEXT (jmem_free_8_byte_chunk_p);
  JERRY_CONTEXT (jmem_free_8_byte_chunk_p) = NULL;

  while (chunk_p)
  {
    VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
    jmem_pools_chunk_t *const next_p = chunk_p->next_p;
    VALGRIND_NOACCESS_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));

    jmem_heap_free_block (chunk_p, 8);
    JMEM_POOLS_STAT_DEALLOC ();
    chunk_p = next_p;
  }

#ifdef JERRY_CPOINTER_32_BIT
  chunk_p = JERRY_CONTEXT (jmem_free_16_byte_chunk_p);
  JERRY_CONTEXT (jmem_free_16_byte_chunk_p) = NULL;

  while (chunk_p)
  {
    VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
    jmem_pools_chunk_t *const next_p = chunk_p->next_p;
    VALGRIND_NOACCESS_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));

    jmem_heap_free_block (chunk_p, 16);
    JMEM_POOLS_STAT_DEALLOC ();
    chunk_p = next_p;
  }
#endif /* JERRY_CPOINTER_32_BIT */
} /* jmem_pools_collect_empty */
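The walk above has to capture next_p before handing the chunk back to the heap, since the link lives inside the chunk being freed. A minimal standalone sketch of the same pattern, using malloc/free and a hypothetical demo_node_t in place of the engine's chunk list:

#include <stdlib.h>

typedef struct demo_node_t
{
  struct demo_node_t *next_p;
} demo_node_t;

static void
demo_free_all (demo_node_t *list_p)
{
  while (list_p != NULL)
  {
    demo_node_t *const next_p = list_p->next_p; /* read the link first... */
    free (list_p); /* ...because the node's bytes are invalid after this */
    list_p = next_p;
  }
}

int
main (void)
{
  demo_node_t *list_p = NULL;

  for (int i = 0; i < 3; i++)
  {
    demo_node_t *node_p = (demo_node_t *) malloc (sizeof (demo_node_t));
    node_p->next_p = list_p;
    list_p = node_p;
  }

  demo_free_all (list_p);
  return 0;
}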
/**
 * Allocate a chunk of specified size
 *
 * @return pointer to allocated chunk, if allocation was successful,
 *         or NULL - if not enough memory.
 */
inline void * __attr_hot___ __attr_always_inline___
jmem_pools_alloc (size_t size) /**< size of the chunk */
{
#ifdef JMEM_GC_BEFORE_EACH_ALLOC
  jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH);
#endif /* JMEM_GC_BEFORE_EACH_ALLOC */

  if (size <= 8)
  {
    if (JERRY_CONTEXT (jmem_free_8_byte_chunk_p) != NULL)
    {
      const jmem_pools_chunk_t *const chunk_p = JERRY_CONTEXT (jmem_free_8_byte_chunk_p);

      JMEM_POOLS_STAT_REUSE ();

      VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
      JERRY_CONTEXT (jmem_free_8_byte_chunk_p) = chunk_p->next_p;
      VALGRIND_UNDEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));

      return (void *) chunk_p;
    }
    else
    {
      JMEM_POOLS_STAT_NEW_ALLOC ();
      return (void *) jmem_heap_alloc_block (8);
    }
  }

#ifdef JERRY_CPOINTER_32_BIT
  JERRY_ASSERT (size <= 16);

  if (JERRY_CONTEXT (jmem_free_16_byte_chunk_p) != NULL)
  {
    const jmem_pools_chunk_t *const chunk_p = JERRY_CONTEXT (jmem_free_16_byte_chunk_p);

    JMEM_POOLS_STAT_REUSE ();

    VALGRIND_DEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));
    JERRY_CONTEXT (jmem_free_16_byte_chunk_p) = chunk_p->next_p;
    VALGRIND_UNDEFINED_SPACE (chunk_p, sizeof (jmem_pools_chunk_t));

    return (void *) chunk_p;
  }
  else
  {
    JMEM_POOLS_STAT_NEW_ALLOC ();
    return (void *) jmem_heap_alloc_block (16);
  }
#else /* !JERRY_CPOINTER_32_BIT */
  JERRY_UNREACHABLE ();
  return NULL;
#endif /* JERRY_CPOINTER_32_BIT */
} /* jmem_pools_alloc */
/**
 * Check pool state consistency
 */
static void
mem_check_pool (mem_pool_state_t __attr_unused___ *pool_p) /**< pool (unused #ifdef JERRY_DISABLE_HEAVY_DEBUG) */
{
#ifndef JERRY_DISABLE_HEAVY_DEBUG
  JERRY_ASSERT (pool_p->free_chunks_number <= MEM_POOL_CHUNKS_NUMBER);

  size_t met_free_chunks_number = 0;
  mem_pool_chunk_index_t chunk_index = pool_p->first_free_chunk;

  while (chunk_index != MEM_POOL_CHUNKS_NUMBER)
  {
    uint8_t *chunk_p = MEM_POOL_CHUNK_ADDRESS (pool_p, chunk_index);
    mem_pool_chunk_index_t *next_free_chunk_index_p = (mem_pool_chunk_index_t *) chunk_p;

    met_free_chunks_number++;

    VALGRIND_DEFINED_SPACE (next_free_chunk_index_p, MEM_POOL_CHUNK_SIZE);
    chunk_index = *next_free_chunk_index_p;
    VALGRIND_NOACCESS_SPACE (next_free_chunk_index_p, MEM_POOL_CHUNK_SIZE);
  }

  JERRY_ASSERT (met_free_chunks_number == pool_p->free_chunks_number);
#else /* !JERRY_DISABLE_HEAVY_DEBUG */
  (void) pool_p;
#endif /* JERRY_DISABLE_HEAVY_DEBUG */
} /* mem_check_pool */
/**
 * Free the chunk
 */
inline void __attr_hot___ __attr_always_inline___
jmem_pools_free (void *chunk_p, /**< pointer to the chunk */
                 size_t size) /**< size of the chunk */
{
  JERRY_ASSERT (chunk_p != NULL);

  jmem_pools_chunk_t *const chunk_to_free_p = (jmem_pools_chunk_t *) chunk_p;

  VALGRIND_DEFINED_SPACE (chunk_to_free_p, size);

  if (size <= 8)
  {
    chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_8_byte_chunk_p);
    JERRY_CONTEXT (jmem_free_8_byte_chunk_p) = chunk_to_free_p;
  }
  else
  {
#ifdef JERRY_CPOINTER_32_BIT
    JERRY_ASSERT (size <= 16);

    chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_16_byte_chunk_p);
    JERRY_CONTEXT (jmem_free_16_byte_chunk_p) = chunk_to_free_p;
#else /* !JERRY_CPOINTER_32_BIT */
    JERRY_UNREACHABLE ();
#endif /* JERRY_CPOINTER_32_BIT */
  }

  VALGRIND_NOACCESS_SPACE (chunk_to_free_p, size);

  JMEM_POOLS_STAT_FREE_POOL ();
} /* jmem_pools_free */
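Taken together, jmem_pools_alloc and jmem_pools_free implement a LIFO cache of fixed-size chunks in front of the heap allocator: freeing pushes the chunk onto an intrusive list, and the next allocation of the same size class pops it back. A minimal standalone model of that behavior, with a hypothetical demo_chunk_t standing in for jmem_pools_chunk_t and malloc standing in for jmem_heap_alloc_block:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* A free chunk stores only the link to the next free chunk, overlaid on
 * the chunk's own bytes, just as jmem_pools_chunk_t does. */
typedef struct demo_chunk_t
{
  struct demo_chunk_t *next_p;
} demo_chunk_t;

static demo_chunk_t *demo_free_list_p = NULL;

/* Mirrors jmem_pools_free: push the chunk onto the free list (LIFO). */
static void
demo_pool_free (void *chunk_p)
{
  demo_chunk_t *const chunk_to_free_p = (demo_chunk_t *) chunk_p;
  chunk_to_free_p->next_p = demo_free_list_p;
  demo_free_list_p = chunk_to_free_p;
}

/* Mirrors jmem_pools_alloc: pop a cached chunk if one exists, otherwise
 * fall back to the underlying allocator. */
static void *
demo_pool_alloc (size_t size)
{
  if (demo_free_list_p != NULL)
  {
    demo_chunk_t *const chunk_p = demo_free_list_p;
    demo_free_list_p = chunk_p->next_p;
    return (void *) chunk_p;
  }
  return malloc (size);
}

int
main (void)
{
  void *a = demo_pool_alloc (8);
  demo_pool_free (a); /* chunk is cached, not returned to the allocator */
  void *b = demo_pool_alloc (8);
  printf ("reused: %s\n", (a == b) ? "yes" : "no"); /* prints "yes" */
  return 0;
}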
/**
 * Allocate a pool chunk of size JMEM_POOL_CHUNK_SIZE
 *
 * @return pointer to allocated chunk, if allocation was successful,
 *         or NULL - if not enough memory.
 */
inline void * __attr_hot___ __attr_always_inline___
jmem_pools_alloc (void)
{
#ifdef JMEM_GC_BEFORE_EACH_ALLOC
  jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH);
#endif /* JMEM_GC_BEFORE_EACH_ALLOC */

  if (JERRY_CONTEXT (jmem_free_chunk_p) != NULL)
  {
    const jmem_pools_chunk_t *const chunk_p = JERRY_CONTEXT (jmem_free_chunk_p);

    JMEM_POOLS_STAT_REUSE ();

    VALGRIND_DEFINED_SPACE (chunk_p, JMEM_POOL_CHUNK_SIZE);
    JERRY_CONTEXT (jmem_free_chunk_p) = chunk_p->next_p;
    VALGRIND_UNDEFINED_SPACE (chunk_p, JMEM_POOL_CHUNK_SIZE);

    return (void *) chunk_p;
  }
  else
  {
    JMEM_POOLS_STAT_NEW_ALLOC ();
    return (void *) jmem_heap_alloc_block (JMEM_POOL_CHUNK_SIZE);
  }
} /* jmem_pools_alloc */
/**
 * Finalize heap
 */
void
mem_heap_finalize (void)
{
  VALGRIND_DEFINED_SPACE (mem_heap.heap_start, mem_heap.heap_size);

  JERRY_ASSERT (mem_heap.first_block_p == mem_heap.last_block_p);
  JERRY_ASSERT (mem_is_block_free (mem_heap.first_block_p));

  VALGRIND_NOACCESS_SPACE (mem_heap.heap_start, mem_heap.heap_size);

  memset (&mem_heap, 0, sizeof (mem_heap));
} /* mem_heap_finalize */
/**
 * Free the chunk
 */
inline void __attr_hot___ __attr_always_inline___
jmem_pools_free (void *chunk_p) /**< pointer to the chunk */
{
  jmem_pools_chunk_t *const chunk_to_free_p = (jmem_pools_chunk_t *) chunk_p;

  VALGRIND_DEFINED_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE);

  chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_chunk_p);
  JERRY_CONTEXT (jmem_free_chunk_p) = chunk_to_free_p;

  VALGRIND_NOACCESS_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE);

  JMEM_POOLS_STAT_FREE_POOL ();
} /* jmem_pools_free */
/**
 * Collect empty pool chunks
 */
void
jmem_pools_collect_empty (void)
{
  while (JERRY_CONTEXT (jmem_free_chunk_p))
  {
    VALGRIND_DEFINED_SPACE (JERRY_CONTEXT (jmem_free_chunk_p), sizeof (jmem_pools_chunk_t));
    jmem_pools_chunk_t *const next_p = JERRY_CONTEXT (jmem_free_chunk_p)->next_p;
    VALGRIND_NOACCESS_SPACE (JERRY_CONTEXT (jmem_free_chunk_p), sizeof (jmem_pools_chunk_t));

    jmem_heap_free_block (JERRY_CONTEXT (jmem_free_chunk_p), JMEM_POOL_CHUNK_SIZE);
    JMEM_POOLS_STAT_DEALLOC ();
    JERRY_CONTEXT (jmem_free_chunk_p) = next_p;
  }
} /* jmem_pools_collect_empty */
/**
 * Allocate a pool chunk
 *
 * @return pointer to allocated chunk (allocation always succeeds here:
 *         if the free list is empty, mem_pools_alloc_longpath refills it,
 *         as the assertion in the loop below guarantees)
 */
uint8_t * __attr_always_inline___
mem_pools_alloc (void)
{
#ifdef MEM_GC_BEFORE_EACH_ALLOC
  mem_run_try_to_give_memory_back_callbacks (MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);
#endif /* MEM_GC_BEFORE_EACH_ALLOC */

  mem_check_pools ();

  do
  {
    if (mem_free_chunk_p != NULL)
    {
      mem_pool_chunk_t *chunk_p = mem_free_chunk_p;

      MEM_POOLS_STAT_ALLOC_CHUNK ();
#ifndef JERRY_NDEBUG
      mem_free_chunks_number--;
#endif /* !JERRY_NDEBUG */

      VALGRIND_DEFINED_SPACE (chunk_p, MEM_POOL_CHUNK_SIZE);
      mem_free_chunk_p = chunk_p->u.free.next_p;
      VALGRIND_UNDEFINED_SPACE (chunk_p, MEM_POOL_CHUNK_SIZE);

      mem_check_pools ();

      VALGRIND_FREYA_MALLOCLIKE_SPACE (chunk_p, MEM_POOL_CHUNK_SIZE);
      return (uint8_t *) chunk_p;
    }
    else
    {
      mem_pools_alloc_longpath ();

      /* the assertion guarantees that there will be no more than two iterations */
      JERRY_ASSERT (mem_free_chunk_p != NULL);
    }
  } while (true);
} /* mem_pools_alloc */
/**
 * Check correctness of pool allocator state
 */
static void
mem_check_pools (void)
{
#ifndef JERRY_DISABLE_HEAVY_DEBUG
  size_t free_chunks_met = 0;

  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

    free_chunks_met++;
  }

  JERRY_ASSERT (free_chunks_met == mem_free_chunks_number);
#endif /* !JERRY_DISABLE_HEAVY_DEBUG */
} /* mem_check_pools */
/**
 * Helper for reading the magic number and traversal check flag fields of a pool-first chunk
 * that suppresses valgrind's warnings about undefined values.
 *
 * A pool-first chunk can be either allocated or free.
 *
 * As chunks are marked as undefined upon allocation, some chunks can still be
 * fully or partially marked as undefined.
 *
 * Nevertheless, the fields are read and their values are used to determine
 * whether the chunk is actually a free pool-first chunk.
 *
 * See also:
 *          Description of collection algorithm in mem_pools_collect_empty
 */
static void __attr_always_inline___
mem_pools_collect_read_magic_num_and_flag (mem_pool_chunk_t *pool_first_chunk_p, /**< a pool-first chunk */
                                           uint16_t *out_magic_num_field_value_p, /**< out: value of magic num field,
                                                                                   *        read from the chunk */
                                           bool *out_traversal_check_flag_p) /**< out: value of traversal check flag
                                                                              *        field, read from the chunk */
{
  JERRY_ASSERT (pool_first_chunk_p != NULL);
  JERRY_ASSERT (out_magic_num_field_value_p != NULL);
  JERRY_ASSERT (out_traversal_check_flag_p != NULL);

#ifdef JERRY_VALGRIND
  /*
   * If the chunk is not free, there may be undefined bytes at the hint_magic_num and traversal_check_flag fields.
   *
   * Although this is correct for the routine, valgrind issues a warning about use of uninitialized data
   * in a conditional expression. To suppress the false-positive warning, the chunk is temporarily marked
   * as defined, and after reading the hint magic number and flag, the valgrind state of the chunk is restored.
   */
  uint8_t vbits[MEM_POOL_CHUNK_SIZE];
  unsigned status;

  status = VALGRIND_GET_VBITS (pool_first_chunk_p, vbits, MEM_POOL_CHUNK_SIZE);
  JERRY_ASSERT (status == 0 || status == 1);

  VALGRIND_DEFINED_SPACE (pool_first_chunk_p, MEM_POOL_CHUNK_SIZE);
#endif /* JERRY_VALGRIND */

  uint16_t magic_num_field = pool_first_chunk_p->u.pool_gc.hint_magic_num;
  bool traversal_check_flag = pool_first_chunk_p->u.pool_gc.traversal_check_flag;

#ifdef JERRY_VALGRIND
  status = VALGRIND_SET_VBITS (pool_first_chunk_p, vbits, MEM_POOL_CHUNK_SIZE);
  JERRY_ASSERT (status == 0 || status == 1);
#endif /* JERRY_VALGRIND */

  *out_magic_num_field_value_p = magic_num_field;
  *out_traversal_check_flag_p = traversal_check_flag;
} /* mem_pools_collect_read_magic_num_and_flag */
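The save/define/read/restore dance above generalizes beyond this helper. A minimal sketch of the same pattern using plain memcheck client requests (VALGRIND_GET_VBITS, VALGRIND_MAKE_MEM_DEFINED, VALGRIND_SET_VBITS); peek_bytes is a hypothetical helper, and the 64-byte limit is a demo assumption:

#include <string.h>
#include <valgrind/memcheck.h> /* client requests; they compile to no-ops when not run under valgrind */

/* Read possibly-undefined bytes without tripping memcheck: save the validity
 * (V) bits, force the range "defined" so the read is warning-free, copy the
 * bytes out, then put the original V-bits back exactly as they were. */
unsigned
peek_bytes (const void *p, unsigned char *out_p, size_t size)
{
  unsigned char vbits[64];

  if (size > sizeof (vbits))
  {
    return 2; /* demo limit exceeded */
  }

  unsigned status = (unsigned) VALGRIND_GET_VBITS (p, vbits, size); /* save */
  VALGRIND_MAKE_MEM_DEFINED (p, size); /* suppress the undefined-value warning */
  memcpy (out_p, p, size); /* the actual read */
  status |= (unsigned) VALGRIND_SET_VBITS (p, vbits, size); /* restore */

  return status; /* 0 when not under valgrind, 1 on success, as asserted above */
}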
/**
 * Allocate a chunk in the pool
 */
uint8_t *
mem_pool_alloc_chunk (mem_pool_state_t *pool_p) /**< pool */
{
  mem_check_pool (pool_p);

  JERRY_ASSERT (pool_p->free_chunks_number != 0);
  JERRY_ASSERT (pool_p->first_free_chunk < MEM_POOL_CHUNKS_NUMBER);

  mem_pool_chunk_index_t chunk_index = pool_p->first_free_chunk;
  uint8_t *chunk_p = MEM_POOL_CHUNK_ADDRESS (pool_p, chunk_index);

  VALGRIND_DEFINED_SPACE (chunk_p, MEM_POOL_CHUNK_SIZE);

  mem_pool_chunk_index_t *next_free_chunk_index_p = (mem_pool_chunk_index_t *) chunk_p;
  pool_p->first_free_chunk = *next_free_chunk_index_p;
  pool_p->free_chunks_number--;

  VALGRIND_UNDEFINED_SPACE (chunk_p, MEM_POOL_CHUNK_SIZE);

  mem_check_pool (pool_p);

  return chunk_p;
} /* mem_pool_alloc_chunk */
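The pool's free list is encoded in place: each free chunk's first bytes store the index of the next free chunk, and MEM_POOL_CHUNKS_NUMBER itself acts as the end-of-list sentinel (as the loop condition in mem_check_pool shows). A self-contained model of that encoding; the names and sizes below are illustrative, not the engine's:

#include <stdint.h>
#include <stdio.h>

#define CHUNK_SIZE 8
#define CHUNKS_NUMBER 32 /* also serves as the end-of-list sentinel */

typedef uint8_t chunk_index_t;

static uint8_t pool_space[CHUNK_SIZE * CHUNKS_NUMBER];
static chunk_index_t first_free_chunk;

static uint8_t *
chunk_address (chunk_index_t index)
{
  return pool_space + (size_t) index * CHUNK_SIZE;
}

static void
pool_init (void)
{
  /* Thread all chunks into one list: chunk i points to chunk i + 1,
   * so the last chunk holds the sentinel value CHUNKS_NUMBER. */
  for (chunk_index_t i = 0; i < CHUNKS_NUMBER; i++)
  {
    *(chunk_index_t *) chunk_address (i) = (chunk_index_t) (i + 1);
  }
  first_free_chunk = 0;
}

static uint8_t *
pool_alloc_chunk (void)
{
  if (first_free_chunk == CHUNKS_NUMBER)
  {
    return NULL; /* pool exhausted */
  }

  uint8_t *chunk_p = chunk_address (first_free_chunk);
  first_free_chunk = *(chunk_index_t *) chunk_p; /* follow the stored index */
  return chunk_p;
}

int
main (void)
{
  pool_init ();
  uint8_t *p = pool_alloc_chunk ();
  printf ("first chunk at offset %td\n", p - pool_space); /* prints 0 */
  return 0;
}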
/**
 * Collect chunks from empty pools and free the pools
 */
void
mem_pools_collect_empty (void)
{
  /*
   * Hint magic number in header of pools with free pool-first chunks
   */
  const uint16_t hint_magic_num_value = 0x7e89;

  /*
   * Collection-time chunk lists
   */
  mem_pool_chunk_t *first_chunks_list_p = NULL;
  mem_pool_chunk_t *non_first_chunks_list_p = NULL;

  /*
   * At first stage collect free pool-first chunks to separate collection-time lists
   * and change their layout from mem_pool_chunk_t::u::free to mem_pool_chunk_t::u::pool_gc
   */
  {
    mem_pool_chunk_t tmp_header;
    tmp_header.u.free.next_p = mem_free_chunk_p;

    for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                          *prev_free_chunk_p = &tmp_header,
                          *next_free_chunk_p;
         free_chunk_iter_p != NULL;
         free_chunk_iter_p = next_free_chunk_p)
    {
      mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

      VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

      next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

      if (pool_start_p == free_chunk_iter_p)
      {
        /*
         * The chunk is first at its pool
         *
         * Remove the chunk from the common list of free chunks
         */
        prev_free_chunk_p->u.free.next_p = next_free_chunk_p;

        /*
         * Initialize the pool-first chunk as a pool header and insert it into the list of free pool-first chunks
         */
        free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
        free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
        free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
        free_chunk_iter_p->u.pool_gc.traversal_check_flag = false;

        MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, first_chunks_list_p);
        first_chunks_list_p = free_chunk_iter_p;
      }
      else
      {
        prev_free_chunk_p = free_chunk_iter_p;
      }
    }

    mem_free_chunk_p = tmp_header.u.free.next_p;
  }

  if (first_chunks_list_p == NULL)
  {
    /* there are no empty pools */
    return;
  }

  /*
   * At second stage we collect all free non-pool-first chunks, for which corresponding pool-first chunks are free,
   * and link them into the corresponding mem_pool_chunk_t::u::pool_gc::free_list_cp list, while also maintaining
   * the corresponding mem_pool_chunk_t::u::pool_gc::free_chunks_num:
   *   - at first, for each non-pool-first free chunk we check whether the traversal check flag is cleared in the
   *     corresponding first chunk of the same pool, and move those chunks, for which the condition is true,
   *     to a separate temporary list.
   *
   *   - then, we flip the traversal check flags in each of the free pool-first chunks.
   *
   *   - at last, we perform almost the same as at the first step, but check only non-pool-first chunks from the
   *     temporary list, and send the chunks, for which the corresponding traversal check flag is cleared, back to
   *     the common list of free chunks; the rest of the chunks from the temporary list are linked to the
   *     corresponding pool-first chunks. Also, a counter of the linked free chunks is maintained in every free
   *     pool-first chunk.
   */
  {
    {
      mem_pool_chunk_t tmp_header;
      tmp_header.u.free.next_p = mem_free_chunk_p;

      for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                            *prev_free_chunk_p = &tmp_header,
                            *next_free_chunk_p;
           free_chunk_iter_p != NULL;
           free_chunk_iter_p = next_free_chunk_p)
      {
        mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

        next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

        /*
         * The magic number doesn't guarantee that the chunk is actually a free pool-first chunk,
         * so we test the traversal check flag after flipping the values of the flags in every
         * free pool-first chunk.
         */
        uint16_t magic_num_field;
        bool traversal_check_flag;

        mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

        /*
         * During this traversal the flag in the free header chunks is in cleared state
         */
        if (!traversal_check_flag && magic_num_field == hint_magic_num_value)
        {
          free_chunk_iter_p->u.free.next_p = non_first_chunks_list_p;
          non_first_chunks_list_p = free_chunk_iter_p;

          prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
        }
        else
        {
          prev_free_chunk_p = free_chunk_iter_p;
        }
      }

      mem_free_chunk_p = tmp_header.u.free.next_p;
    }

    {
      /*
       * Now, flip the traversal check flag in the free pool-first chunks
       */
      for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
           first_chunks_iter_p != NULL;
           first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                     first_chunks_iter_p->u.pool_gc.next_first_cp))
      {
        JERRY_ASSERT (!first_chunks_iter_p->u.pool_gc.traversal_check_flag);

        first_chunks_iter_p->u.pool_gc.traversal_check_flag = true;
      }
    }

    {
      for (mem_pool_chunk_t *non_first_chunks_iter_p = non_first_chunks_list_p, *next_p;
           non_first_chunks_iter_p != NULL;
           non_first_chunks_iter_p = next_p)
      {
        next_p = non_first_chunks_iter_p->u.free.next_p;

        mem_pool_chunk_t *pool_start_p;
        pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (non_first_chunks_iter_p);

        uint16_t magic_num_field;
        bool traversal_check_flag;

        mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

        JERRY_ASSERT (magic_num_field == hint_magic_num_value);

#ifndef JERRY_DISABLE_HEAVY_DEBUG
        bool is_occurred = false;

        for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
             first_chunks_iter_p != NULL;
             first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                       first_chunks_iter_p->u.pool_gc.next_first_cp))
        {
          if (pool_start_p == first_chunks_iter_p)
          {
            is_occurred = true;
            break;
          }
        }

        JERRY_ASSERT (is_occurred == traversal_check_flag);
#endif /* !JERRY_DISABLE_HEAVY_DEBUG */

        /*
         * During this traversal the flag in the free header chunks is in set state
         *
         * If the flag is set, it is guaranteed that the pool-first chunk,
         * from the same pool as the current non-pool-first chunk, is free
         * and is placed in the corresponding list of free pool-first chunks.
         */
        if (traversal_check_flag)
        {
          pool_start_p->u.pool_gc.free_chunks_num++;

          non_first_chunks_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                                       pool_start_p->u.pool_gc.free_list_cp);
          MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, non_first_chunks_iter_p);
        }
        else
        {
          non_first_chunks_iter_p->u.free.next_p = mem_free_chunk_p;
          mem_free_chunk_p = non_first_chunks_iter_p;
        }
      }
    }

    non_first_chunks_list_p = NULL;
  }

  /*
   * At third stage we check each free pool-first chunk in the collection-time list against the counted
   * number of free chunks in the pool containing the chunk.
   *
   * If the number is equal to the number of chunks in the pool, the pool is empty and is freed;
   * otherwise, the free chunks of the pool are returned to the common list of free chunks.
   */
  for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p, *next_p;
       first_chunks_iter_p != NULL;
       first_chunks_iter_p = next_p)
  {
    next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t, first_chunks_iter_p->u.pool_gc.next_first_cp);

    JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.hint_magic_num == hint_magic_num_value);
    JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.traversal_check_flag);
    JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.free_chunks_num <= MEM_POOL_CHUNKS_NUMBER);

    if (first_chunks_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
    {
#ifndef JERRY_NDEBUG
      mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

      MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();

      mem_heap_free_block (first_chunks_iter_p);

      MEM_POOLS_STAT_FREE_POOL ();
    }
    else
    {
      mem_pool_chunk_t *first_chunk_p = first_chunks_iter_p;

      /*
       * Convert layout of the first chunk from collection-time pool-first chunk layout
       * to the common free chunk layout
       */
      first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                         first_chunks_iter_p->u.pool_gc.free_list_cp);

      /*
       * Link the local pool's list of free chunks into the common list of free chunks
       */
      for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
           ;
           pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
      {
        JERRY_ASSERT (pool_chunks_iter_p != NULL);

        if (pool_chunks_iter_p->u.free.next_p == NULL)
        {
          pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;

          break;
        }
      }

      mem_free_chunk_p = first_chunk_p;
    }
  }

#ifdef JERRY_VALGRIND
  /*
   * Valgrind-mode specific pass that marks all free chunks inaccessible
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
  }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
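The traversal-check-flag trick deserves a worked example: a chunk whose bytes happen to contain the magic number cannot be told apart from a real free pool-first chunk by the magic alone, but only a chunk the collector itself tracks sees its flag go from cleared to set between the two passes. A standalone model of that two-pass membership test; demo_header_t and all names below are hypothetical:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct
{
  unsigned hint_magic_num;
  bool traversal_check_flag;
} demo_header_t;

#define DEMO_MAGIC 0x7e89u

int
main (void)
{
  demo_header_t real = { DEMO_MAGIC, false }; /* genuine header: flag starts cleared */
  demo_header_t fake = { DEMO_MAGIC, true };  /* arbitrary bytes that mimic the magic number */

  demo_header_t *known_headers[] = { &real }; /* the list the collector actually maintains */
  demo_header_t *candidates[] = { &real, &fake };
  bool clear_before[2];

  /* Pass 1: remember which candidates had a cleared flag. */
  for (size_t i = 0; i < 2; i++)
  {
    clear_before[i] = !candidates[i]->traversal_check_flag;
  }

  /* Pass 2: flip the flag, but only in the headers we track. */
  for (size_t i = 0; i < 1; i++)
  {
    known_headers[i]->traversal_check_flag = true;
  }

  /* Pass 3: only a tracked header can go from cleared to set. */
  for (size_t i = 0; i < 2; i++)
  {
    bool is_tracked = clear_before[i] && candidates[i]->traversal_check_flag;
    printf ("candidate %zu is %sa tracked header\n", i, is_tracked ? "" : "not ");
  }

  return 0;
}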
/**
 * Free the memory block.
 */
void __attr_hot___
jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the block */
                      const size_t size) /**< size of allocated region */
{
  VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST;

  /* checking that ptr points to the heap */
  JERRY_ASSERT (jmem_is_heap_pointer (ptr));
  JERRY_ASSERT (size > 0);
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));

  VALGRIND_FREYA_FREELIKE_SPACE (ptr);
  VALGRIND_NOACCESS_SPACE (ptr, size);
  JMEM_HEAP_STAT_FREE_ITER ();

  jmem_heap_free_t *block_p = (jmem_heap_free_t *) ptr;
  jmem_heap_free_t *prev_p;
  jmem_heap_free_t *next_p;

  VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  if (block_p > JERRY_CONTEXT (jmem_heap_list_skip_p))
  {
    prev_p = JERRY_CONTEXT (jmem_heap_list_skip_p);
    JMEM_HEAP_STAT_SKIP ();
  }
  else
  {
    prev_p = &JERRY_HEAP_CONTEXT (first);
    JMEM_HEAP_STAT_NONSKIP ();
  }

  JERRY_ASSERT (jmem_is_heap_pointer (block_p));
  const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);

  VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));

  /* Find position of region in the list. */
  while (prev_p->next_offset < block_offset)
  {
    next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (next_p));

    VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
    VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
    prev_p = next_p;

    JMEM_HEAP_STAT_FREE_ITER ();
  }

  next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
  VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));

  /* Realign size. */
  const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;

  VALGRIND_DEFINED_SPACE (block_p, sizeof (jmem_heap_free_t));
  VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));

  /* Update prev. */
  if (jmem_heap_get_region_end (prev_p) == block_p)
  {
    /* Can be merged. */
    prev_p->size += (uint32_t) aligned_size;
    VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
    block_p = prev_p;
  }
  else
  {
    block_p->size = (uint32_t) aligned_size;
    prev_p->next_offset = block_offset;
  }

  VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));

  /* Update next. */
  if (jmem_heap_get_region_end (block_p) == next_p)
  {
    if (unlikely (next_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
    {
      JERRY_CONTEXT (jmem_heap_list_skip_p) = block_p;
    }

    /* Can be merged. */
    block_p->size += next_p->size;
    block_p->next_offset = next_p->next_offset;
  }
  else
  {
    block_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
  }

  JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

  VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  VALGRIND_NOACCESS_SPACE (block_p, size);
  VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));

  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) > 0);
  JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_size;

  while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_MEM_HEAP_DESIRED_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_MEM_HEAP_DESIRED_LIMIT;
  }

  VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
  JMEM_HEAP_STAT_FREE (size);
} /* jmem_heap_free_block */
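Both merge branches above reduce to one geometric test: a free region can absorb its successor exactly when it ends at the successor's first byte. A standalone sketch of that test, with demo_free_t standing in for jmem_heap_free_t and demo_region_end for jmem_heap_get_region_end (both names are hypothetical):

#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint32_t next_offset;
  uint32_t size;
} demo_free_t;

/* union guarantees suitable alignment for demo_free_t overlays */
static union
{
  uint8_t bytes[64];
  demo_free_t align;
} heap;

static demo_free_t *
demo_region_end (demo_free_t *p)
{
  return (demo_free_t *) ((uint8_t *) p + p->size);
}

int
main (void)
{
  demo_free_t *prev_p = (demo_free_t *) heap.bytes;
  prev_p->size = 16;

  demo_free_t *block_p = (demo_free_t *) (heap.bytes + 16); /* immediately after prev_p */
  demo_free_t *far_p = (demo_free_t *) (heap.bytes + 32);   /* gap in between */

  printf ("adjacent merges: %d\n", demo_region_end (prev_p) == block_p); /* 1 */
  printf ("distant merges:  %d\n", demo_region_end (prev_p) == far_p);   /* 0 */
  return 0;
}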
/**
 * Allocation of memory region.
 *
 * See also:
 *          jmem_heap_alloc_block
 *
 * @return pointer to allocated memory block - if allocation is successful,
 *         NULL - if there is not enough memory.
 */
static __attr_hot___ void *
jmem_heap_alloc_block_internal (const size_t size)
{
  /* Align size. */
  const size_t required_size = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT;
  jmem_heap_free_t *data_space_p = NULL;

  VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  /* Fast path for 8 byte chunks, first region is guaranteed to be sufficient. */
  if (required_size == JMEM_ALIGNMENT
      && likely (JERRY_HEAP_CONTEXT (first).next_offset != JMEM_HEAP_END_OF_LIST))
  {
    data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (data_space_p));

    VALGRIND_DEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
    JERRY_CONTEXT (jmem_heap_allocated_size) += JMEM_ALIGNMENT;
    JMEM_HEAP_STAT_ALLOC_ITER ();

    if (data_space_p->size == JMEM_ALIGNMENT)
    {
      JERRY_HEAP_CONTEXT (first).next_offset = data_space_p->next_offset;
    }
    else
    {
      JERRY_ASSERT (data_space_p->size > JMEM_ALIGNMENT);

      jmem_heap_free_t *remaining_p;
      remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset) + 1;

      VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
      remaining_p->size = data_space_p->size - JMEM_ALIGNMENT;
      remaining_p->next_offset = data_space_p->next_offset;
      VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

      JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
    }

    VALGRIND_UNDEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));

    if (unlikely (data_space_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
    {
      JERRY_CONTEXT (jmem_heap_list_skip_p) = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    }
  }
  /* Slow path for larger regions. */
  else
  {
    uint32_t current_offset = JERRY_HEAP_CONTEXT (first).next_offset;
    jmem_heap_free_t *prev_p = &JERRY_HEAP_CONTEXT (first);

    while (current_offset != JMEM_HEAP_END_OF_LIST)
    {
      jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (current_offset);
      JERRY_ASSERT (jmem_is_heap_pointer (current_p));
      VALGRIND_DEFINED_SPACE (current_p, sizeof (jmem_heap_free_t));
      JMEM_HEAP_STAT_ALLOC_ITER ();

      const uint32_t next_offset = current_p->next_offset;
      JERRY_ASSERT (next_offset == JMEM_HEAP_END_OF_LIST
                    || jmem_is_heap_pointer (JMEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset)));

      if (current_p->size >= required_size)
      {
        /* Region is sufficiently big, store address. */
        data_space_p = current_p;
        JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;

        /* Region was larger than necessary. */
        if (current_p->size > required_size)
        {
          /* Get address of remaining space. */
          jmem_heap_free_t *const remaining_p = (jmem_heap_free_t *) ((uint8_t *) current_p + required_size);

          /* Update metadata. */
          VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
          remaining_p->size = current_p->size - (uint32_t) required_size;
          remaining_p->next_offset = next_offset;
          VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

          /* Update list. */
          VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
          VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }
        /* Block is an exact fit. */
        else
        {
          /* Remove the region from the list. */
          VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = next_offset;
          VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }

        JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

        /* Found enough space. */
        break;
      }

      VALGRIND_NOACCESS_SPACE (current_p, sizeof (jmem_heap_free_t));

      /* Next in list. */
      prev_p = current_p;
      current_offset = next_offset;
    }
  }

  while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) += CONFIG_MEM_HEAP_DESIRED_LIMIT;
  }

  VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  if (unlikely (!data_space_p))
  {
    return NULL;
  }

  JERRY_ASSERT ((uintptr_t) data_space_p % JMEM_ALIGNMENT == 0);
  VALGRIND_UNDEFINED_SPACE (data_space_p, size);
  JMEM_HEAP_STAT_ALLOC (size);

  return (void *) data_space_p;
} /* jmem_heap_alloc_block_internal */
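The required_size computation rounds every request up to the next multiple of JMEM_ALIGNMENT, which is why an 8-byte request can take the fast path while a 9-byte request cannot. A worked example of the rounding, assuming the usual JMEM_ALIGNMENT of 8 (the value here is a demo assumption):

#include <stdio.h>

#define JMEM_ALIGNMENT 8

int
main (void)
{
  for (size_t size = 1; size <= 17; size += 8)
  {
    /* Same expression as in jmem_heap_alloc_block_internal. */
    size_t required = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT;
    printf ("size %2zu -> required %2zu\n", size, required); /* 1->8, 9->16, 17->24 */
  }
  return 0;
}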
/**
 * Collect chunks from empty pools and free the pools
 */
void
mem_pools_collect_empty (void)
{
  /*
   * Hint magic number in header of pools with free first chunks
   */
  const uint16_t hint_magic_num_value = 0x7e89;

  /*
   * At first pass collect pointers to those of free chunks that are first at their pools
   * to separate lists (collection-time pool lists) and change them to headers of corresponding pools
   */

  /*
   * Number of collection-time pool lists
   */
  constexpr uint32_t pool_lists_number = 8;

  /*
   * Collection-time pool lists
   */
  mem_pool_chunk_t *pool_lists_p[pool_lists_number];
  for (uint32_t i = 0; i < pool_lists_number; i++)
  {
    pool_lists_p[i] = NULL;
  }

  /*
   * Number of the pools included into the lists
   */
  uint32_t pools_in_lists_number = 0;

  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *prev_free_chunk_p = NULL, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    if (pool_start_p == free_chunk_iter_p)
    {
      /*
       * The chunk is first at its pool
       *
       * Remove the chunk from the common list of free chunks
       */
      if (prev_free_chunk_p == NULL)
      {
        JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

        mem_free_chunk_p = next_free_chunk_p;
      }
      else
      {
        prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
      }

      pools_in_lists_number++;

      uint8_t list_id = pools_in_lists_number % pool_lists_number;

      /*
       * Initialize the pool header and insert the pool into one of the lists
       */
      free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
      free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
      free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
      free_chunk_iter_p->u.pool_gc.list_id = list_id;

      MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, pool_lists_p[list_id]);
      pool_lists_p[list_id] = free_chunk_iter_p;
    }
    else
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  if (pools_in_lists_number == 0)
  {
    /* there are no empty pools */
    return;
  }

  /*
   * At second pass we check, for every remaining free chunk, whether it is in a pool that was included
   * into the collection-time pool lists.
   *
   * For each such chunk, we try to find the corresponding pool by iterating over the list.
   *
   * If the pool is found in a list (so, the first chunk of the pool is free), the counter of free chunks
   * in the pool is incremented, and the chunk is moved from the global free chunks list to the
   * collection-time local list of the corresponding pool's free chunks.
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *prev_free_chunk_p = NULL, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    bool is_chunk_moved_to_local_list = false;

#ifdef JERRY_VALGRIND
    /*
     * If the chunk is not free, there may be undefined bytes at the hint_magic_num and list_id fields.
     *
     * Although this is correct for the routine, valgrind issues a warning about use of uninitialized data
     * in a conditional expression. To suppress the false-positive warning, the chunk is temporarily marked
     * as defined, and after reading the hint magic number and list identifier, the valgrind state of the
     * chunk is restored.
     */
    uint8_t vbits[MEM_POOL_CHUNK_SIZE];
    unsigned status;

    status = VALGRIND_GET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);

    VALGRIND_DEFINED_SPACE (pool_start_p, MEM_POOL_CHUNK_SIZE);
#endif /* JERRY_VALGRIND */

    /*
     * The magic number doesn't guarantee that the chunk is actually a pool header,
     * so it is only an optimization to reduce the number of unnecessary iterations
     * over the pool lists.
     */
    uint16_t magic_num_field = pool_start_p->u.pool_gc.hint_magic_num;
    uint8_t id_to_search_in = pool_start_p->u.pool_gc.list_id;

#ifdef JERRY_VALGRIND
    status = VALGRIND_SET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);
#endif /* JERRY_VALGRIND */

    if (magic_num_field == hint_magic_num_value)
    {
      /*
       * Maybe the first chunk is free.
       *
       * If it is so, it is included in the list of pool's first free chunks.
       */
      if (id_to_search_in < pool_lists_number)
      {
        for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[id_to_search_in];
             pool_list_iter_p != NULL;
             pool_list_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                    pool_list_iter_p->u.pool_gc.next_first_cp))
        {
          if (pool_list_iter_p == pool_start_p)
          {
            /*
             * The first chunk is actually free.
             *
             * So, increment the free chunks counter in it.
             */
            pool_start_p->u.pool_gc.free_chunks_num++;

            /*
             * It is possible that the corresponding pool is empty.
             *
             * Move the current chunk from the common list of free chunks to the temporary list,
             * local to the pool.
             */
            if (prev_free_chunk_p == NULL)
            {
              JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

              mem_free_chunk_p = next_free_chunk_p;
            }
            else
            {
              prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
            }

            free_chunk_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                                   pool_start_p->u.pool_gc.free_list_cp);
            MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, free_chunk_iter_p);

            is_chunk_moved_to_local_list = true;

            break;
          }
        }
      }
    }

    if (!is_chunk_moved_to_local_list)
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  /*
   * At third pass we check each pool in the collection-time pool lists against the counted
   * number of free chunks in the pool.
   *
   * If the number is equal to the number of chunks in the pool, the pool is empty and is freed;
   * otherwise, the free chunks of the pool are returned to the common list of free chunks.
   */
  for (uint8_t list_id = 0; list_id < pool_lists_number; list_id++)
  {
    for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[list_id], *next_p;
         pool_list_iter_p != NULL;
         pool_list_iter_p = next_p)
    {
      next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t, pool_list_iter_p->u.pool_gc.next_first_cp);

      if (pool_list_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
      {
#ifndef JERRY_NDEBUG
        mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

        MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();

        mem_heap_free_block (pool_list_iter_p);

        MEM_POOLS_STAT_FREE_POOL ();
      }
      else
      {
        mem_pool_chunk_t *first_chunk_p = pool_list_iter_p;

        /*
         * Convert layout of the first chunk from collection-time pool header to common free chunk
         */
        first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                           pool_list_iter_p->u.pool_gc.free_list_cp);

        /*
         * Link the local pool's list of free chunks into the global list of free chunks
         */
        for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
             ;
             pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
        {
          JERRY_ASSERT (pool_chunks_iter_p != NULL);

          if (pool_chunks_iter_p->u.free.next_p == NULL)
          {
            pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;

            break;
          }
        }

        mem_free_chunk_p = first_chunk_p;
      }
    }
  }

#ifdef JERRY_VALGRIND
  /*
   * Valgrind-mode specific pass that marks all free chunks inaccessible
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
  }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
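The list_id assignment spreads collected pools round-robin across the eight collection-time lists, so the second pass scans roughly one eighth of the collected pools per candidate chunk instead of all of them. A small standalone sketch of the bucketing (values mirror the constants above):

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  const uint32_t pool_lists_number = 8;

  /* Same expression as in the first pass above: the n-th collected pool
   * lands in list n mod 8. */
  for (uint32_t pools_in_lists_number = 1; pools_in_lists_number <= 10; pools_in_lists_number++)
  {
    uint8_t list_id = (uint8_t) (pools_in_lists_number % pool_lists_number);
    printf ("pool #%u -> list %u\n", pools_in_lists_number, list_id);
  }

  return 0;
}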