/**
 * Free the chunk
 */
void
mem_pools_free (uint8_t *chunk_p) /**< pointer to the chunk */
{
  mem_pool_state_t *pool_state = mem_pools, *prev_pool_state_p = NULL;

  /*
   * Search for the pool containing the specified chunk.
   */
  while (!mem_pool_is_chunk_inside (pool_state, chunk_p))
  {
    prev_pool_state_p = pool_state;
    pool_state = MEM_CP_GET_NON_NULL_POINTER (mem_pool_state_t, pool_state->next_pool_cp);
  }

  /*
   * Free the chunk
   */
  mem_pool_free_chunk (pool_state, chunk_p);
  mem_free_chunks_number++;

  MEM_POOLS_STAT_FREE_CHUNK ();

  /*
   * If all chunks of the pool are free, free the pool itself.
   */
  if (pool_state->free_chunks_number == MEM_POOL_CHUNKS_NUMBER)
  {
    if (prev_pool_state_p != NULL)
    {
      prev_pool_state_p->next_pool_cp = pool_state->next_pool_cp;
    }
    else
    {
      mem_pools = MEM_CP_GET_POINTER (mem_pool_state_t, pool_state->next_pool_cp);
    }

    mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;

    mem_heap_free_block ((uint8_t *) pool_state);

    MEM_POOLS_STAT_FREE_POOL ();
  }
  else if (mem_pools != pool_state)
  {
    /*
     * Move the pool with the newly freed chunk to the head of the pool list,
     * so that a subsequent allocation finds a pool with a free chunk quickly.
     */
    JERRY_ASSERT (prev_pool_state_p != NULL);

    prev_pool_state_p->next_pool_cp = pool_state->next_pool_cp;
    MEM_CP_SET_NON_NULL_POINTER (pool_state->next_pool_cp, mem_pools);
    mem_pools = pool_state;
  }
} /* mem_pools_free */
/**
 * Long path for mem_pools_alloc
 *
 * @return true - if there is a free chunk in mem_pools,
 *         false - otherwise (not enough memory).
 */
static bool __attr_noinline___
mem_pools_alloc_longpath (void)
{
  /*
   * If there are no free chunks, allocate a new pool.
   */
  if (mem_free_chunks_number == 0)
  {
    mem_pool_state_t *pool_state = (mem_pool_state_t *) mem_heap_alloc_block (MEM_POOL_SIZE,
                                                                              MEM_HEAP_ALLOC_LONG_TERM);

    JERRY_ASSERT (pool_state != NULL);

    mem_pool_init (pool_state, MEM_POOL_SIZE);

    MEM_CP_SET_POINTER (pool_state->next_pool_cp, mem_pools);

    mem_pools = pool_state;

    mem_free_chunks_number += MEM_POOL_CHUNKS_NUMBER;

    MEM_POOLS_STAT_ALLOC_POOL ();
  }
  else
  {
    /*
     * There is definitely at least one pool of the specified type with at least one free chunk.
     *
     * Search for the pool.
     */
    mem_pool_state_t *pool_state = mem_pools, *prev_pool_state_p = NULL;

    while (pool_state->first_free_chunk == MEM_POOL_CHUNKS_NUMBER)
    {
      prev_pool_state_p = pool_state;
      pool_state = MEM_CP_GET_NON_NULL_POINTER (mem_pool_state_t, pool_state->next_pool_cp);
    }

    JERRY_ASSERT (prev_pool_state_p != NULL && pool_state != mem_pools);

    /*
     * Move the found pool to the head of the pool list,
     * so that the fast path finds a free chunk immediately.
     */
    prev_pool_state_p->next_pool_cp = pool_state->next_pool_cp;
    MEM_CP_SET_NON_NULL_POINTER (pool_state->next_pool_cp, mem_pools);
    mem_pools = pool_state;
  }

  return true;
} /* mem_pools_alloc_longpath */
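/*
 * A minimal sketch of the fast path that mem_pools_alloc_longpath backs up.
 * This is an illustration, not the engine's actual mem_pools_alloc: the name
 * mem_pools_alloc_sketch is hypothetical, and mem_pool_alloc_chunk is assumed
 * to pop one free chunk from the given pool. The point is the invariant the
 * long path establishes: on return, the pool at the head of the list has a
 * free chunk, so the fast path only ever has to look at the head pool.
 */
uint8_t *
mem_pools_alloc_sketch (void)
{
  if (mem_pools == NULL || mem_pools->first_free_chunk == MEM_POOL_CHUNKS_NUMBER)
  {
    /* no free chunk in the head pool: allocate a new pool or rotate the list */
    if (!mem_pools_alloc_longpath ())
    {
      return NULL; /* not enough memory */
    }
  }

  /* the long path guarantees a free chunk in the head pool */
  JERRY_ASSERT (mem_pools != NULL
                && mem_pools->first_free_chunk != MEM_POOL_CHUNKS_NUMBER);

  mem_free_chunks_number--;

  return mem_pool_alloc_chunk (mem_pools); /* pop the first free chunk of the head pool */
} /* mem_pools_alloc_sketch */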
/**
 * Collect chunks from empty pools and free the pools
 */
void
mem_pools_collect_empty (void)
{
  /*
   * Hint magic number in header of pools with free pool-first chunks
   */
  const uint16_t hint_magic_num_value = 0x7e89;

  /*
   * Collection-time chunk lists
   */
  mem_pool_chunk_t *first_chunks_list_p = NULL;
  mem_pool_chunk_t *non_first_chunks_list_p = NULL;

  /*
   * At first stage collect free pool-first chunks to separate collection-time lists
   * and change their layout from mem_pool_chunk_t::u::free to mem_pool_chunk_t::u::pool_gc
   */
  {
    mem_pool_chunk_t tmp_header;
    tmp_header.u.free.next_p = mem_free_chunk_p;

    for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                          *prev_free_chunk_p = &tmp_header,
                          *next_free_chunk_p;
         free_chunk_iter_p != NULL;
         free_chunk_iter_p = next_free_chunk_p)
    {
      mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

      VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

      next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

      if (pool_start_p == free_chunk_iter_p)
      {
        /*
         * The chunk is first in its pool
         *
         * Remove the chunk from the common list of free chunks
         */
        prev_free_chunk_p->u.free.next_p = next_free_chunk_p;

        /*
         * Initialize the pool-first chunk as a pool header and insert it
         * into the list of free pool-first chunks
         */
        free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
        free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
        free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
        free_chunk_iter_p->u.pool_gc.traversal_check_flag = false;

        MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, first_chunks_list_p);
        first_chunks_list_p = free_chunk_iter_p;
      }
      else
      {
        prev_free_chunk_p = free_chunk_iter_p;
      }
    }

    mem_free_chunk_p = tmp_header.u.free.next_p;
  }

  if (first_chunks_list_p == NULL)
  {
    /* there are no empty pools */

    return;
  }

  /*
   * At second stage we collect all free non-pool-first chunks, for which the corresponding
   * pool-first chunks are free, and link them into the corresponding
   * mem_pool_chunk_t::u::pool_gc::free_list_cp list, while also maintaining the corresponding
   * mem_pool_chunk_t::u::pool_gc::free_chunks_num:
   *
   * - at first, for each non-pool-first free chunk, we check whether the traversal check flag
   *   is cleared in the corresponding first chunk of the same pool, and move those chunks,
   *   for which the condition is true, to a separate temporary list;
   *
   * - then, we flip the traversal check flag in each of the free pool-first chunks;
   *
   * - at last, we perform almost the same as at the first step, but check only non-pool-first
   *   chunks from the temporary list, and send those chunks, for which the corresponding
   *   traversal check flag is cleared, back to the common list of free chunks; the rest of the
   *   chunks from the temporary list are linked to the corresponding pool-first chunks.
   *   Also, the counter of linked free chunks is maintained in every free pool-first chunk.
   */
  {
    {
      mem_pool_chunk_t tmp_header;
      tmp_header.u.free.next_p = mem_free_chunk_p;

      for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                            *prev_free_chunk_p = &tmp_header,
                            *next_free_chunk_p;
           free_chunk_iter_p != NULL;
           free_chunk_iter_p = next_free_chunk_p)
      {
        mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

        next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

        /*
         * The magic number doesn't guarantee that the chunk is actually a free pool-first chunk,
         * so we test the traversal check flag after flipping the values of the flags in every
         * free pool-first chunk.
         */
        uint16_t magic_num_field;
        bool traversal_check_flag;

        mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

        /*
         * During this traversal the flag in the free header chunks is in cleared state
         */
        if (!traversal_check_flag && magic_num_field == hint_magic_num_value)
        {
          free_chunk_iter_p->u.free.next_p = non_first_chunks_list_p;
          non_first_chunks_list_p = free_chunk_iter_p;

          prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
        }
        else
        {
          prev_free_chunk_p = free_chunk_iter_p;
        }
      }

      mem_free_chunk_p = tmp_header.u.free.next_p;
    }

    {
      /*
       * Now, flip the traversal check flag in free pool-first chunks
       */
      for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
           first_chunks_iter_p != NULL;
           first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                     first_chunks_iter_p->u.pool_gc.next_first_cp))
      {
        JERRY_ASSERT (!first_chunks_iter_p->u.pool_gc.traversal_check_flag);

        first_chunks_iter_p->u.pool_gc.traversal_check_flag = true;
      }
    }

    {
      for (mem_pool_chunk_t *non_first_chunks_iter_p = non_first_chunks_list_p, *next_p;
           non_first_chunks_iter_p != NULL;
           non_first_chunks_iter_p = next_p)
      {
        next_p = non_first_chunks_iter_p->u.free.next_p;

        mem_pool_chunk_t *pool_start_p;
        pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (non_first_chunks_iter_p);

        uint16_t magic_num_field;
        bool traversal_check_flag;

        mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

        JERRY_ASSERT (magic_num_field == hint_magic_num_value);

#ifndef JERRY_DISABLE_HEAVY_DEBUG
        bool is_occurred = false;

        for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
             first_chunks_iter_p != NULL;
             first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                       first_chunks_iter_p->u.pool_gc.next_first_cp))
        {
          if (pool_start_p == first_chunks_iter_p)
          {
            is_occurred = true;
            break;
          }
        }

        JERRY_ASSERT (is_occurred == traversal_check_flag);
#endif /* !JERRY_DISABLE_HEAVY_DEBUG */

        /*
         * During this traversal the flag in the free header chunks is in set state
         *
         * If the flag is set, it is guaranteed that the pool-first chunk of the same pool
         * as the current non-pool-first chunk is free, and is placed in the corresponding
         * list of free pool-first chunks.
         */
        if (traversal_check_flag)
        {
          pool_start_p->u.pool_gc.free_chunks_num++;

          non_first_chunks_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                                       pool_start_p->u.pool_gc.free_list_cp);
          MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, non_first_chunks_iter_p);
        }
        else
        {
          non_first_chunks_iter_p->u.free.next_p = mem_free_chunk_p;
          mem_free_chunk_p = non_first_chunks_iter_p;
        }
      }
    }

    non_first_chunks_list_p = NULL;
  }

  /*
   * At third stage we check each free pool-first chunk in the collection-time list for the counted
   * number of free chunks in the pool containing the chunk.
   *
   * If the number is equal to the number of chunks in the pool, then the pool is empty and is freed;
   * otherwise, the free chunks of the pool are returned to the common list of free chunks.
   */
  for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p, *next_p;
       first_chunks_iter_p != NULL;
       first_chunks_iter_p = next_p)
  {
    next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t, first_chunks_iter_p->u.pool_gc.next_first_cp);

    JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.hint_magic_num == hint_magic_num_value);
    JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.traversal_check_flag);
    JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.free_chunks_num <= MEM_POOL_CHUNKS_NUMBER);

    if (first_chunks_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
    {
#ifndef JERRY_NDEBUG
      mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

      MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();

      mem_heap_free_block (first_chunks_iter_p);

      MEM_POOLS_STAT_FREE_POOL ();
    }
    else
    {
      mem_pool_chunk_t *first_chunk_p = first_chunks_iter_p;

      /*
       * Convert the layout of the first chunk from the collection-time pool-first chunk layout
       * to the common free chunk layout
       */
      first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                         first_chunks_iter_p->u.pool_gc.free_list_cp);

      /*
       * Link the local pool's list of free chunks into the common list of free chunks
       */
      for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
           ;
           pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
      {
        JERRY_ASSERT (pool_chunks_iter_p != NULL);

        if (pool_chunks_iter_p->u.free.next_p == NULL)
        {
          pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;

          break;
        }
      }

      mem_free_chunk_p = first_chunk_p;
    }
  }

#ifdef JERRY_VALGRIND
  /*
   * Valgrind-mode specific pass that marks all free chunks inaccessible
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
  }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
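/*
 * A minimal, self-contained illustration (hypothetical names, not engine code) of the
 * batch flag-flip membership test used by the second stage above: read each candidate's
 * flag, flip the flag in every true list member, then read again. Only true members
 * observe the change; memory outside the list is never written between the two reads,
 * so its (possibly coincidental) value stays stable.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct header
{
  bool traversal_check_flag;
  struct header *next_p;
} header_t;

static void
flip_all (header_t *list_p) /* flip the flag in every true list member */
{
  for (header_t *iter_p = list_p; iter_p != NULL; iter_p = iter_p->next_p)
  {
    iter_p->traversal_check_flag = !iter_p->traversal_check_flag;
  }
}

int
main (void)
{
  header_t member = { false, NULL };   /* linked into the list */
  header_t outsider = { false, NULL }; /* looks similar, but not linked */
  header_t *list_p = &member;

  /* pass 1: both candidates read 'false' -- not distinguishable yet */
  bool member_before = member.traversal_check_flag;
  bool outsider_before = outsider.traversal_check_flag;

  flip_all (list_p);

  /* pass 2: only the true member observed the flip */
  printf ("member changed: %d\n", member.traversal_check_flag != member_before);       /* 1 */
  printf ("outsider changed: %d\n", outsider.traversal_check_flag != outsider_before); /* 0 */

  return 0;
}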
/**
 * Register bytecode and idx map from snapshot
 *
 * NOTE:
 *      If the is_copy flag is set, the bytecode is copied from the snapshot;
 *      otherwise, the bytecode is referenced directly from the snapshot.
 *
 * @return pointer to byte-code header, upon success,
 *         NULL - upon failure (i.e., in case snapshot format is not valid)
 */
const bytecode_data_header_t *
serializer_load_bytecode_with_idx_map (const uint8_t *bytecode_and_idx_map_p, /**< buffer with instructions array
                                                                               *   and idx to literals map from
                                                                               *   snapshot */
                                       uint32_t bytecode_size, /**< size of instructions array */
                                       uint32_t idx_to_lit_map_size, /**< size of the idx to literals map */
                                       const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map of in-snapshot
                                                                                             *   literal offsets
                                                                                             *   to literal identifiers,
                                                                                             *   created in literal
                                                                                             *   storage */
                                       uint32_t literals_num, /**< number of literals */
                                       bool is_copy) /**< flag, indicating whether the passed in-snapshot data
                                                      *   should be copied to the engine's memory (true),
                                                      *   or can be referenced until the engine is stopped
                                                      *   (i.e. until the call to jerry_cleanup) */
{
  const uint8_t *idx_to_lit_map_p = bytecode_and_idx_map_p + bytecode_size;

  size_t instructions_number = bytecode_size / sizeof (vm_instr_t);
  size_t blocks_count = JERRY_ALIGNUP (instructions_number, BLOCK_SIZE) / BLOCK_SIZE;

  uint32_t idx_num_total;
  size_t idx_to_lit_map_offset = 0;
  if (!jrt_read_from_buffer_by_offset (idx_to_lit_map_p,
                                       idx_to_lit_map_size,
                                       &idx_to_lit_map_offset,
                                       &idx_num_total))
  {
    return NULL;
  }

  const size_t bytecode_alloc_size = JERRY_ALIGNUP (bytecode_size, MEM_ALIGNMENT);
  const size_t hash_table_size = lit_id_hash_table_get_size_for_table (idx_num_total, blocks_count);
  const size_t header_and_hash_table_size = JERRY_ALIGNUP (sizeof (bytecode_data_header_t) + hash_table_size,
                                                           MEM_ALIGNMENT);
  const size_t alloc_size = header_and_hash_table_size + (is_copy ? bytecode_alloc_size : 0);

  uint8_t *buffer_p = (uint8_t *) mem_heap_alloc_block (alloc_size, MEM_HEAP_ALLOC_LONG_TERM);
  bytecode_data_header_t *header_p = (bytecode_data_header_t *) buffer_p;

  vm_instr_t *instrs_p;
  vm_instr_t *snapshot_instrs_p = (vm_instr_t *) bytecode_and_idx_map_p;
  if (is_copy)
  {
    instrs_p = (vm_instr_t *) (buffer_p + header_and_hash_table_size);
    memcpy (instrs_p, snapshot_instrs_p, bytecode_size);
  }
  else
  {
    instrs_p = snapshot_instrs_p;
  }

  uint8_t *lit_id_hash_table_buffer_p = buffer_p + sizeof (bytecode_data_header_t);
  if (lit_id_hash_table_load_from_snapshot (blocks_count,
                                            idx_num_total,
                                            idx_to_lit_map_p + idx_to_lit_map_offset,
                                            idx_to_lit_map_size - idx_to_lit_map_offset,
                                            lit_map_p,
                                            literals_num,
                                            lit_id_hash_table_buffer_p,
                                            hash_table_size)
      && (vm_instr_counter_t) instructions_number == instructions_number)
  {
    MEM_CP_SET_NON_NULL_POINTER (header_p->lit_id_hash_cp, lit_id_hash_table_buffer_p);
    header_p->instrs_p = instrs_p;
    header_p->instrs_count = (vm_instr_counter_t) instructions_number;
    MEM_CP_SET_POINTER (header_p->next_header_cp, first_bytecode_header_p);

    first_bytecode_header_p = header_p;

    return header_p;
  }
  else
  {
    mem_heap_free_block (buffer_p);

    return NULL;
  }
} /* serializer_load_bytecode_with_idx_map */
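/*
 * Layout of the block allocated by serializer_load_bytecode_with_idx_map
 * (a sketch derived from the offsets used in the code above, not a normative
 * description of the snapshot format):
 *
 *   buffer_p
 *   +------------------------+-------------------+-----------------------------------+
 *   | bytecode_data_header_t | lit id hash table | bytecode copy (only when is_copy) |
 *   +------------------------+-------------------+-----------------------------------+
 *   ^                        ^                   ^
 *   header_p                 lit_id_hash_table_  buffer_p + header_and_hash_table_size
 *                            buffer_p
 *
 * When is_copy is false, the instructions stay in the snapshot buffer and only the
 * header and hash table are allocated.
 */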
/**
 * Register bytecode and supplementary data of all scopes from snapshot
 *
 * NOTE:
 *      If the is_copy flag is set, the bytecode is copied from the snapshot;
 *      otherwise, the bytecode is referenced directly from the snapshot.
 *
 * @return pointer to byte-code header, upon success,
 *         NULL - upon failure (i.e., in case snapshot format is not valid)
 */
const bytecode_data_header_t *
bc_load_bytecode_data (const uint8_t *snapshot_data_p, /**< buffer with instructions array
                                                        *   and idx to literals map from
                                                        *   snapshot */
                       size_t snapshot_size, /**< remaining size of snapshot */
                       const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map of in-snapshot
                                                                             *   literal offsets
                                                                             *   to literal identifiers,
                                                                             *   created in literal
                                                                             *   storage */
                       uint32_t literals_num, /**< number of literals */
                       bool is_copy, /**< flag, indicating whether the passed in-snapshot data
                                      *   should be copied to the engine's memory (true),
                                      *   or can be referenced until the engine is stopped
                                      *   (i.e. until the call to jerry_cleanup) */
                       uint32_t expected_scopes_num) /**< scopes number read from snapshot header */
{
  uint32_t snapshot_offset = 0;
  uint32_t out_bytecode_data_size = 0;
  uint32_t scopes_num = 0;

  bytecode_data_header_t *bc_header_p = bc_load_bytecode_with_idx_map (snapshot_data_p,
                                                                       snapshot_size,
                                                                       lit_map_p,
                                                                       literals_num,
                                                                       is_copy,
                                                                       &out_bytecode_data_size);

  scopes_num++;
  snapshot_offset += out_bytecode_data_size;
  JERRY_ASSERT (snapshot_offset <= snapshot_size);

  bytecode_data_header_t *next_to_handle_list_p = bc_header_p;

  while (next_to_handle_list_p != NULL)
  {
    mem_cpointer_t *declarations_p = MEM_CP_GET_POINTER (mem_cpointer_t, next_to_handle_list_p->declarations_cp);
    uint32_t child_scope_index = 0;

    /* find the first child scope slot that is not filled in yet */
    while (child_scope_index < next_to_handle_list_p->func_scopes_count
           && declarations_p[child_scope_index] != MEM_CP_NULL)
    {
      child_scope_index++;
    }

    if (child_scope_index == next_to_handle_list_p->func_scopes_count)
    {
      /* all child scopes of the current header are filled in: remove it from the work list */
      bytecode_data_header_t *bc_header_list_iter_p = MEM_CP_GET_POINTER (bytecode_data_header_t,
                                                                          next_to_handle_list_p->next_header_cp);

      next_to_handle_list_p->next_header_cp = MEM_CP_NULL;
      next_to_handle_list_p = bc_header_list_iter_p;

      if (next_to_handle_list_p == NULL)
      {
        break;
      }
      else
      {
        continue;
      }
    }

    JERRY_ASSERT (snapshot_offset < snapshot_size);

    bytecode_data_header_t *next_header_p = bc_load_bytecode_with_idx_map (snapshot_data_p + snapshot_offset,
                                                                           snapshot_size - snapshot_offset,
                                                                           lit_map_p,
                                                                           literals_num,
                                                                           is_copy,
                                                                           &out_bytecode_data_size);

    scopes_num++;

    snapshot_offset += out_bytecode_data_size;
    JERRY_ASSERT (snapshot_offset <= snapshot_size);

    MEM_CP_SET_NON_NULL_POINTER (declarations_p[child_scope_index], next_header_p);

    if (next_header_p->func_scopes_count > 0)
    {
      JERRY_ASSERT (next_header_p->next_header_cp == MEM_CP_NULL);

      MEM_CP_SET_POINTER (next_header_p->next_header_cp, next_to_handle_list_p);
      next_to_handle_list_p = next_header_p;
    }
  }

  if (expected_scopes_num != scopes_num)
  {
    return NULL;
  }

  MEM_CP_SET_POINTER (bc_header_p->next_header_cp, first_bytecode_header_p);
  first_bytecode_header_p = bc_header_p;

  return bc_header_p;
} /* bc_load_bytecode_data */
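/*
 * Sketch of the traversal above (a reading of the code, not an authoritative
 * specification of the snapshot format): scopes appear in the snapshot in the
 * order of a depth-first pre-order walk of the scope tree, and next_header_cp
 * is temporarily reused as a work-list link while the tree is being
 * reconstructed, so no extra allocation is needed during loading. For a tree
 *
 *   global
 *   |- f
 *   |  |- g
 *   |- h
 *
 * the headers are loaded in the order: global, f, g, h; g is attached as
 * f's child before the walk returns to fill in global's second child slot.
 */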
/**
 * Dump a single scopes tree node into bytecode
 *
 * @return pointer to the bytecode header of the outermost scope
 */
bytecode_data_header_t *
bc_dump_single_scope (scopes_tree scope_p) /**< a node of the scopes tree */
{
  const size_t entries_count = scope_p->max_uniq_literals_num;
  const vm_instr_counter_t instrs_count = scopes_tree_instrs_num (scope_p);
  const size_t blocks_count = JERRY_ALIGNUP (instrs_count, BLOCK_SIZE) / BLOCK_SIZE;
  const size_t func_scopes_count = scopes_tree_child_scopes_num (scope_p);
  const uint16_t var_decls_count = linked_list_get_length (scope_p->var_decls);

  const size_t bytecode_size = JERRY_ALIGNUP (instrs_count * sizeof (vm_instr_t), MEM_ALIGNMENT);
  const size_t hash_table_size = lit_id_hash_table_get_size_for_table (entries_count, blocks_count);
  const size_t declarations_area_size = JERRY_ALIGNUP (func_scopes_count * sizeof (mem_cpointer_t)
                                                       + var_decls_count * sizeof (lit_cpointer_t),
                                                       MEM_ALIGNMENT);
  const size_t header_and_tables_size = JERRY_ALIGNUP ((sizeof (bytecode_data_header_t)
                                                        + hash_table_size
                                                        + declarations_area_size),
                                                       MEM_ALIGNMENT);

  uint8_t *buffer_p = (uint8_t *) mem_heap_alloc_block (bytecode_size + header_and_tables_size,
                                                        MEM_HEAP_ALLOC_LONG_TERM);

  lit_id_hash_table *lit_id_hash_p = lit_id_hash_table_init (buffer_p + sizeof (bytecode_data_header_t),
                                                             hash_table_size,
                                                             entries_count,
                                                             blocks_count);

  mem_cpointer_t *declarations_p = (mem_cpointer_t *) (buffer_p + sizeof (bytecode_data_header_t) + hash_table_size);

  for (size_t i = 0; i < func_scopes_count; i++)
  {
    declarations_p[i] = MEM_CP_NULL;
  }

  scopes_tree_dump_var_decls (scope_p, (lit_cpointer_t *) (declarations_p + func_scopes_count));

  vm_instr_t *bytecode_p = (vm_instr_t *) (buffer_p + header_and_tables_size);

  JERRY_ASSERT (scope_p->max_uniq_literals_num >= lit_id_hash_p->current_bucket_pos);

  bytecode_data_header_t *header_p = (bytecode_data_header_t *) buffer_p;

  if ((uint16_t) func_scopes_count != func_scopes_count)
  {
    jerry_fatal (ERR_OUT_OF_MEMORY);
  }

  bc_fill_bytecode_data_header (header_p,
                                lit_id_hash_p,
                                bytecode_p,
                                declarations_p,
                                (uint16_t) func_scopes_count,
                                var_decls_count,
                                scope_p->strict_mode,
                                scope_p->ref_arguments,
                                scope_p->ref_eval,
                                scope_p->is_vars_and_args_to_regs_possible,
                                false,
                                false);

  JERRY_ASSERT (scope_p->bc_header_cp == MEM_CP_NULL);
  MEM_CP_SET_NON_NULL_POINTER (scope_p->bc_header_cp, header_p);

  return header_p;
} /* bc_dump_single_scope */
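/*
 * Layout of the block allocated by bc_dump_single_scope (a sketch derived from
 * the offsets used above, not a normative description of the format):
 *
 *   buffer_p
 *   +------------------------+-------------------+------------------------+------------------+----------+
 *   | bytecode_data_header_t | lit id hash table | child scope cpointers  | var decl literal | bytecode |
 *   |                        |                   | (func_scopes_count)    | cpointers        |          |
 *   +------------------------+-------------------+------------------------+------------------+----------+
 *   ^ header_p                                   ^ declarations_p                            ^ bytecode_p
 *
 * The child scope slots are initialized to MEM_CP_NULL here and filled in later,
 * which is also what bc_load_bytecode_data relies on when reconstructing the tree.
 */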
/**
 * Collect chunks from empty pools and free the pools
 */
void
mem_pools_collect_empty (void)
{
  /*
   * Hint magic number in header of pools with free first chunks
   */
  const uint16_t hint_magic_num_value = 0x7e89;

  /*
   * At first pass collect pointers to those of the free chunks that are first in their pools
   * to separate lists (collection-time pool lists) and change them to headers of the
   * corresponding pools
   */

  /*
   * Number of collection-time pool lists
   */
  constexpr uint32_t pool_lists_number = 8;

  /*
   * Collection-time pool lists
   */
  mem_pool_chunk_t *pool_lists_p[pool_lists_number];
  for (uint32_t i = 0; i < pool_lists_number; i++)
  {
    pool_lists_p[i] = NULL;
  }

  /*
   * Number of the pools included into the lists
   */
  uint32_t pools_in_lists_number = 0;

  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p,
                        *prev_free_chunk_p = NULL,
                        *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    if (pool_start_p == free_chunk_iter_p)
    {
      /*
       * The chunk is first in its pool
       *
       * Remove the chunk from the common list of free chunks
       */
      if (prev_free_chunk_p == NULL)
      {
        JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

        mem_free_chunk_p = next_free_chunk_p;
      }
      else
      {
        prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
      }

      pools_in_lists_number++;

      uint8_t list_id = pools_in_lists_number % pool_lists_number;

      /*
       * Initialize the pool header and insert the pool into one of the lists
       */
      free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
      free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
      free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
      free_chunk_iter_p->u.pool_gc.list_id = list_id;

      MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, pool_lists_p[list_id]);
      pool_lists_p[list_id] = free_chunk_iter_p;
    }
    else
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  if (pools_in_lists_number == 0)
  {
    /* there are no empty pools */

    return;
  }

  /*
   * At second pass we check, for all remaining free chunks, whether they are in pools that
   * were included into the collection-time pool lists.
   *
   * For each of the chunks, try to find the corresponding pool through iterating over the list.
   *
   * If the pool of a chunk is found in a list (i.e. the first chunk of the pool is free),
   * increment the counter of free chunks in the pool, and move the chunk from the global
   * free chunks list to the collection-time local list of the corresponding pool's free chunks.
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p,
                        *prev_free_chunk_p = NULL,
                        *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    bool is_chunk_moved_to_local_list = false;

#ifdef JERRY_VALGRIND
    /*
     * If the chunk is not free, there may be undefined bytes at the hint_magic_num and list_id fields.
     *
     * Although this is correct for the routine, valgrind issues a warning about the use of
     * uninitialized data in a conditional expression. To suppress the false-positive warning,
     * the chunk is temporarily marked as defined, and after reading the hint magic number
     * and list identifier, the valgrind state of the chunk is restored.
     */
    uint8_t vbits[MEM_POOL_CHUNK_SIZE];
    unsigned status;

    status = VALGRIND_GET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);

    VALGRIND_DEFINED_SPACE (pool_start_p, MEM_POOL_CHUNK_SIZE);
#endif /* JERRY_VALGRIND */

    /*
     * The magic number doesn't guarantee that the chunk is actually a pool header,
     * so it is only an optimization to reduce the number of unnecessary iterations
     * over the pool lists.
     */
    uint16_t magic_num_field = pool_start_p->u.pool_gc.hint_magic_num;
    uint8_t id_to_search_in = pool_start_p->u.pool_gc.list_id;

#ifdef JERRY_VALGRIND
    status = VALGRIND_SET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);
#endif /* JERRY_VALGRIND */

    if (magic_num_field == hint_magic_num_value)
    {
      /*
       * Maybe the first chunk is free.
       *
       * If it is so, it is included in the list of the pool's free first chunks.
       */
      if (id_to_search_in < pool_lists_number)
      {
        for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[id_to_search_in];
             pool_list_iter_p != NULL;
             pool_list_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                    pool_list_iter_p->u.pool_gc.next_first_cp))
        {
          if (pool_list_iter_p == pool_start_p)
          {
            /*
             * The first chunk is actually free.
             *
             * So, increment the free chunks counter in it.
             */
            pool_start_p->u.pool_gc.free_chunks_num++;

            /*
             * It is possible that the corresponding pool is empty.
             *
             * Move the current chunk from the common list of free chunks to the temporary list,
             * local to the pool.
             */
            if (prev_free_chunk_p == NULL)
            {
              JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

              mem_free_chunk_p = next_free_chunk_p;
            }
            else
            {
              prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
            }

            free_chunk_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                                   pool_start_p->u.pool_gc.free_list_cp);
            MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, free_chunk_iter_p);

            is_chunk_moved_to_local_list = true;

            break;
          }
        }
      }
    }

    if (!is_chunk_moved_to_local_list)
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  /*
   * At third pass we check, for each pool in the collection-time pool lists, the counted
   * number of free chunks in the pool.
   *
   * If the number is equal to the number of chunks in the pool, then the pool is empty and is freed;
   * otherwise, the free chunks of the pool are returned to the common list of free chunks.
   */
  for (uint8_t list_id = 0; list_id < pool_lists_number; list_id++)
  {
    for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[list_id], *next_p;
         pool_list_iter_p != NULL;
         pool_list_iter_p = next_p)
    {
      next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t, pool_list_iter_p->u.pool_gc.next_first_cp);

      if (pool_list_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
      {
#ifndef JERRY_NDEBUG
        mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

        MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();

        mem_heap_free_block (pool_list_iter_p);

        MEM_POOLS_STAT_FREE_POOL ();
      }
      else
      {
        mem_pool_chunk_t *first_chunk_p = pool_list_iter_p;

        /*
         * Convert the layout of the first chunk from the collection-time pool header layout
         * to the common free chunk layout
         */
        first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                           pool_list_iter_p->u.pool_gc.free_list_cp);

        /*
         * Link the local pool's list of free chunks into the global list of free chunks
         */
        for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
             ;
             pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
        {
          JERRY_ASSERT (pool_chunks_iter_p != NULL);

          if (pool_chunks_iter_p->u.free.next_p == NULL)
          {
            pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;

            break;
          }
        }

        mem_free_chunk_p = first_chunk_p;
      }
    }
  }

#ifdef JERRY_VALGRIND
  /*
   * Valgrind-mode specific pass that marks all free chunks inaccessible
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
  }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
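/*
 * Note on the design (an inference from the code above, not a statement from the
 * authors): distributing the collected pool headers across pool_lists_number (8)
 * collection-time lists, keyed by the list_id stored in each header, lets the
 * second-pass membership search scan only the single list named by the candidate
 * header -- on average about 1/8 of the collected pools -- instead of one long list.
 */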
/**
 * Function object creation operation.
 *
 * See also: ECMA-262 v5, 13.2
 *
 * @return pointer to newly created Function object
 */
ecma_object_t *
ecma_op_create_function_object (ecma_string_t *formal_parameter_list_p[], /**< formal parameters list */
                                ecma_length_t formal_parameters_number, /**< formal parameters list's length */
                                ecma_object_t *scope_p, /**< function's scope */
                                bool is_strict, /**< 'strict' flag */
                                bool do_instantiate_arguments_object, /**< should an Arguments object be instantiated
                                                                       *   for the function object upon call */
                                const vm_instr_t *instrs_p, /**< byte-code array */
                                vm_instr_counter_t first_instr_pos) /**< position of the first instruction
                                                                     *   of the function's body */
{
  // 1., 4., 13.
  ecma_object_t *prototype_obj_p = ecma_builtin_get (ECMA_BUILTIN_ID_FUNCTION_PROTOTYPE);

  ecma_object_t *f = ecma_create_object (prototype_obj_p, true, ECMA_OBJECT_TYPE_FUNCTION);

  ecma_deref_object (prototype_obj_p);

  // 2., 6., 7., 8.
  /*
   * We don't set up [[Get]], [[Call]], [[Construct]], [[HasInstance]] for each function object.
   * Instead we set the object's type to ECMA_OBJECT_TYPE_FUNCTION,
   * which defines which version of the routine should be used on demand.
   */

  // 3.
  /*
   * The [[Class]] property is not stored explicitly for objects of ECMA_OBJECT_TYPE_FUNCTION type.
   *
   * See also: ecma_object_get_class_name
   */

  // 9.
  ecma_property_t *scope_prop_p = ecma_create_internal_property (f, ECMA_INTERNAL_PROPERTY_SCOPE);
  ECMA_SET_POINTER (scope_prop_p->u.internal_property.value, scope_p);

  // 10., 11.
  ecma_property_t *formal_parameters_prop_p = ecma_create_internal_property (f,
                                                                             ECMA_INTERNAL_PROPERTY_FORMAL_PARAMETERS);
  if (formal_parameters_number != 0)
  {
    /*
     * Reverse the formal parameter list
     */
    for (ecma_length_t i = 0; i < formal_parameters_number / 2; i++)
    {
      ecma_string_t *tmp_p = formal_parameter_list_p[i];
      formal_parameter_list_p[i] = formal_parameter_list_p[formal_parameters_number - 1u - i];
      formal_parameter_list_p[formal_parameters_number - 1u - i] = tmp_p;
    }

    ecma_collection_header_t *formal_parameters_collection_p = ecma_new_strings_collection (formal_parameter_list_p,
                                                                                            formal_parameters_number);
    ECMA_SET_POINTER (formal_parameters_prop_p->u.internal_property.value, formal_parameters_collection_p);
  }
  else
  {
    JERRY_ASSERT (formal_parameters_prop_p->u.internal_property.value == ECMA_NULL_POINTER);
  }

  // 12.
  ecma_property_t *bytecode_prop_p = ecma_create_internal_property (f, ECMA_INTERNAL_PROPERTY_CODE_BYTECODE);
  MEM_CP_SET_NON_NULL_POINTER (bytecode_prop_p->u.internal_property.value, instrs_p);

  ecma_property_t *code_prop_p = ecma_create_internal_property (f, ECMA_INTERNAL_PROPERTY_CODE_FLAGS_AND_OFFSET);
  code_prop_p->u.internal_property.value = ecma_pack_code_internal_property_value (is_strict,
                                                                                   do_instantiate_arguments_object,
                                                                                   first_instr_pos);

  // 14.
  ecma_number_t *len_p = ecma_alloc_number ();
  *len_p = ecma_uint32_to_number (formal_parameters_number);

  // 15.
  ecma_property_descriptor_t length_prop_desc = ecma_make_empty_property_descriptor ();
  length_prop_desc.is_value_defined = true;
  length_prop_desc.value = ecma_make_number_value (len_p);

  ecma_string_t *magic_string_length_p = ecma_get_magic_string (LIT_MAGIC_STRING_LENGTH);
  ecma_completion_value_t completion = ecma_op_object_define_own_property (f,
                                                                           magic_string_length_p,
                                                                           &length_prop_desc,
                                                                           false);
  ecma_deref_ecma_string (magic_string_length_p);

  JERRY_ASSERT (ecma_is_completion_value_normal_true (completion)
                || ecma_is_completion_value_normal_false (completion));

  ecma_dealloc_number (len_p);
  len_p = NULL;

  // 16.
  ecma_object_t *proto_p = ecma_op_create_object_object_noarg ();

  // 17.
  ecma_property_descriptor_t prop_desc = ecma_make_empty_property_descriptor ();
  {
    prop_desc.is_value_defined = true;
    prop_desc.value = ecma_make_object_value (f);

    prop_desc.is_writable_defined = true;
    prop_desc.is_writable = true;

    prop_desc.is_enumerable_defined = true;
    prop_desc.is_enumerable = false;

    prop_desc.is_configurable_defined = true;
    prop_desc.is_configurable = true;
  }

  ecma_string_t *magic_string_constructor_p = ecma_get_magic_string (LIT_MAGIC_STRING_CONSTRUCTOR);
  ecma_op_object_define_own_property (proto_p, magic_string_constructor_p, &prop_desc, false);
  ecma_deref_ecma_string (magic_string_constructor_p);

  // 18.
  prop_desc.value = ecma_make_object_value (proto_p);
  prop_desc.is_configurable = false;

  ecma_string_t *magic_string_prototype_p = ecma_get_magic_string (LIT_MAGIC_STRING_PROTOTYPE);
  ecma_op_object_define_own_property (f, magic_string_prototype_p, &prop_desc, false);
  ecma_deref_ecma_string (magic_string_prototype_p);

  ecma_deref_object (proto_p);

  // 19.
  if (is_strict)
  {
    ecma_object_t *thrower_p = ecma_builtin_get (ECMA_BUILTIN_ID_TYPE_ERROR_THROWER);

    prop_desc = ecma_make_empty_property_descriptor ();
    {
      prop_desc.is_enumerable_defined = true;
      prop_desc.is_enumerable = false;

      prop_desc.is_configurable_defined = true;
      prop_desc.is_configurable = false;

      prop_desc.is_get_defined = true;
      prop_desc.get_p = thrower_p;

      prop_desc.is_set_defined = true;
      prop_desc.set_p = thrower_p;
    }

    ecma_string_t *magic_string_caller_p = ecma_get_magic_string (LIT_MAGIC_STRING_CALLER);
    ecma_op_object_define_own_property (f, magic_string_caller_p, &prop_desc, false);
    ecma_deref_ecma_string (magic_string_caller_p);

    ecma_string_t *magic_string_arguments_p = ecma_get_magic_string (LIT_MAGIC_STRING_ARGUMENTS);
    ecma_op_object_define_own_property (f, magic_string_arguments_p, &prop_desc, false);
    ecma_deref_ecma_string (magic_string_arguments_p);

    ecma_deref_object (thrower_p);
  }

  return f;
} /* ecma_op_create_function_object */
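/*
 * Observable result of the steps above, written out in JavaScript terms
 * (an illustration matching the property descriptors set in the code,
 * per ECMA-262 v5, 13.2):
 *
 *   function f (a, b) { }
 *
 *   Object.getOwnPropertyDescriptor (f, 'length');
 *   // { value: 2, writable: false, enumerable: false, configurable: false }
 *
 *   Object.getOwnPropertyDescriptor (f, 'prototype');
 *   // { value: {...}, writable: true, enumerable: false, configurable: false }
 *
 *   f.prototype.constructor === f; // true
 *
 *   // and, for strict-mode functions, 'caller' and 'arguments' are accessor
 *   // properties whose getter and setter are the [[ThrowTypeError]] function
 */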