/**
 * Delete bytecode and associated hash table
 */
void
bc_remove_bytecode_data (const bytecode_data_header_t *bytecode_data_p) /**< byte-code scope data header */
{
  bytecode_data_header_t *prev_header_p = NULL;
  bytecode_data_header_t *cur_header_p = first_bytecode_header_p;

  while (cur_header_p != NULL)
  {
    if (cur_header_p == bytecode_data_p)
    {
      if (prev_header_p)
      {
        prev_header_p->next_header_cp = cur_header_p->next_header_cp;
      }
      else
      {
        first_bytecode_header_p = MEM_CP_GET_POINTER (bytecode_data_header_t, cur_header_p->next_header_cp);
      }

      cur_header_p->next_header_cp = MEM_CP_NULL;
      bc_free_bytecode_data (cur_header_p);
      break;
    }

    prev_header_p = cur_header_p;
    cur_header_p = MEM_CP_GET_POINTER (bytecode_data_header_t, cur_header_p->next_header_cp);
  }
} /* bc_remove_bytecode_data */
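/*
 * Illustrative sketch (not part of the engine): the MEM_CP_* macros used throughout store pointers
 * as small offsets ("compressed pointers") relative to the heap base, so that a link such as
 * next_header_cp fits in 16 bits instead of a full machine word. A minimal standalone model of the
 * idea, with hypothetical names (heap_base, cp_t, CP_ALIGNMENT), might look like this:
 */
#include <assert.h>
#include <stdint.h>

typedef uint16_t cp_t;         /* compressed pointer: offset measured in CP_ALIGNMENT units */
#define CP_NULL ((cp_t) 0)     /* offset 0 is reserved as null, assuming the first slot is never handed out */
#define CP_ALIGNMENT 8         /* assumed alignment of all heap blocks */

static uint8_t heap_area[CP_ALIGNMENT * UINT16_MAX];
static uint8_t *heap_base = heap_area;

static cp_t
cp_compress (void *ptr) /* full pointer -> 16-bit offset */
{
  uintptr_t offset = (uintptr_t) ((uint8_t *) ptr - heap_base);
  assert (offset % CP_ALIGNMENT == 0 && offset / CP_ALIGNMENT <= UINT16_MAX);
  return (cp_t) (offset / CP_ALIGNMENT);
}

static void *
cp_decompress (cp_t cp) /* 16-bit offset -> full pointer */
{
  return heap_base + (uintptr_t) cp * CP_ALIGNMENT;
}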
/**
 * Free memory occupied by bytecode data
 */
static void
bc_free_bytecode_data (bytecode_data_header_t *bytecode_data_p) /**< byte-code scope data header */
{
  bytecode_data_header_t *next_to_handle_list_p = bytecode_data_p;

  while (next_to_handle_list_p != NULL)
  {
    bytecode_data_header_t *bc_header_list_iter_p = next_to_handle_list_p;
    next_to_handle_list_p = NULL;

    while (bc_header_list_iter_p != NULL)
    {
      bytecode_data_header_t *header_p = bc_header_list_iter_p;

      bc_header_list_iter_p = MEM_CP_GET_POINTER (bytecode_data_header_t, header_p->next_header_cp);

      mem_cpointer_t *declarations_p = MEM_CP_GET_POINTER (mem_cpointer_t, header_p->declarations_cp);

      for (uint32_t index = 0; index < header_p->func_scopes_count; index++)
      {
        bytecode_data_header_t *child_scope_header_p = MEM_CP_GET_NON_NULL_POINTER (bytecode_data_header_t,
                                                                                    declarations_p[index]);

        JERRY_ASSERT (child_scope_header_p->next_header_cp == MEM_CP_NULL);

        MEM_CP_SET_POINTER (child_scope_header_p->next_header_cp, next_to_handle_list_p);

        next_to_handle_list_p = child_scope_header_p;
      }

      mem_heap_free_block (header_p);
    }

    JERRY_ASSERT (bc_header_list_iter_p == NULL);
  }
} /* bc_free_bytecode_data */
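/*
 * Illustrative sketch (standalone, hypothetical types): bc_free_bytecode_data above frees a whole
 * tree of scopes iteratively. Child scopes are pushed onto an intrusive worklist that reuses the
 * next-header link of each header, so freeing needs neither recursion nor extra allocation.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct scope_t
{
  struct scope_t *next_p;      /* intrusive worklist link (unused while the scope is alive) */
  struct scope_t **children_p; /* array of child scopes */
  uint32_t children_count;
} scope_t;

static void
scope_free_tree (scope_t *root_p)
{
  scope_t *worklist_p = root_p; /* list of scopes not yet freed */
  root_p->next_p = NULL;

  while (worklist_p != NULL)
  {
    scope_t *scope_p = worklist_p;
    worklist_p = scope_p->next_p;

    /* push children before freeing the parent */
    for (uint32_t i = 0; i < scope_p->children_count; i++)
    {
      scope_p->children_p[i]->next_p = worklist_p;
      worklist_p = scope_p->children_p[i];
    }

    free (scope_p->children_p);
    free (scope_p);
  }
}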
/**
 * Delete bytecode and associated hash table
 */
void
serializer_remove_bytecode_data (const bytecode_data_header_t *bytecode_data_p) /**< pointer to bytecode data which
                                                                                 *   should be deleted */
{
  bytecode_data_header_t *prev_header_p = NULL;
  bytecode_data_header_t *cur_header_p = first_bytecode_header_p;

  while (cur_header_p != NULL)
  {
    if (cur_header_p == bytecode_data_p)
    {
      if (prev_header_p)
      {
        prev_header_p->next_header_cp = cur_header_p->next_header_cp;
      }
      else
      {
        first_bytecode_header_p = MEM_CP_GET_POINTER (bytecode_data_header_t, cur_header_p->next_header_cp);
      }

      mem_heap_free_block (cur_header_p);
      break;
    }

    prev_header_p = cur_header_p;

    /* advance the iterator; without this step the loop would never terminate
     * when the header being removed is not the first one in the list */
    cur_header_p = MEM_CP_GET_POINTER (bytecode_data_header_t, cur_header_p->next_header_cp);
  }
} /* serializer_remove_bytecode_data */
/**
 * Convert literal id (operand value of instruction) to compressed pointer to literal
 *
 * Bytecode is divided into blocks of fixed size, and each block has an independent encoding of variable names,
 * which are represented by 8-bit numbers - ids.
 * This function performs conversion from id to literal.
 *
 * @return compressed pointer to literal
 */
lit_cpointer_t
serializer_get_literal_cp_by_uid (uint8_t id, /**< literal idx */
                                  const bytecode_data_header_t *bytecode_data_p, /**< pointer to bytecode */
                                  vm_instr_counter_t oc) /**< position in the bytecode */
{
  lit_id_hash_table *lit_id_hash = null_hash;

  if (bytecode_data_p)
  {
    lit_id_hash = MEM_CP_GET_POINTER (lit_id_hash_table, bytecode_data_p->lit_id_hash_cp);
  }
  else
  {
    lit_id_hash = MEM_CP_GET_POINTER (lit_id_hash_table, first_bytecode_header_p->lit_id_hash_cp);
  }

  if (lit_id_hash == null_hash)
  {
    return INVALID_LITERAL;
  }

  return lit_id_hash_table_lookup (lit_id_hash, id, oc);
} /* serializer_get_literal_cp_by_uid */
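/*
 * Illustrative sketch (standalone, hypothetical layout - the real lit_id_hash_table is organized
 * differently): the lookup above maps a (uid, instruction position) pair to a literal. Because every
 * fixed-size block of instructions has its own uid encoding, a lookup first selects the block by the
 * instruction counter, then resolves the 8-bit uid inside that block's own table.
 */
#include <stdint.h>

#define BLOCK_SIZE 64u      /* assumed number of instructions per block */

typedef uint16_t lit_cp_t;  /* stand-in for lit_cpointer_t */

typedef struct
{
  lit_cp_t *buckets_p;       /* per-block literal tables, laid out contiguously */
  uint32_t *block_offsets_p; /* start index of each block's table */
} uid_map_t;

static lit_cp_t
uid_map_lookup (const uid_map_t *map_p, /* map to search in */
                uint8_t uid,            /* block-local literal id */
                uint32_t oc)            /* instruction counter */
{
  uint32_t block_index = oc / BLOCK_SIZE; /* which block's encoding applies at this position */
  return map_p->buckets_p[map_p->block_offsets_p[block_index] + uid];
}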
void
serializer_free (void)
{
  lit_finalize ();

  while (first_bytecode_header_p != NULL)
  {
    bytecode_data_header_t *header_p = first_bytecode_header_p;
    first_bytecode_header_p = MEM_CP_GET_POINTER (bytecode_data_header_t, header_p->next_header_cp);

    mem_heap_free_block (header_p);
  }
}
/**
 * Free all bytecode data which was allocated
 */
void
bc_finalize (void)
{
  while (first_bytecode_header_p != NULL)
  {
    bytecode_data_header_t *header_p = first_bytecode_header_p;
    first_bytecode_header_p = MEM_CP_GET_POINTER (bytecode_data_header_t, header_p->next_header_cp);

    header_p->next_header_cp = MEM_CP_NULL;
    bc_free_bytecode_data (header_p);
  }
} /* bc_finalize */
/**
 * Free the chunk
 */
void
mem_pools_free (uint8_t *chunk_p) /**< pointer to the chunk */
{
  mem_pool_state_t *pool_state_p = mem_pools, *prev_pool_state_p = NULL;

  /*
   * Search for the pool containing the specified chunk.
   */
  while (!mem_pool_is_chunk_inside (pool_state_p, chunk_p))
  {
    prev_pool_state_p = pool_state_p;
    pool_state_p = MEM_CP_GET_NON_NULL_POINTER (mem_pool_state_t, pool_state_p->next_pool_cp);
  }

  /*
   * Free the chunk.
   */
  mem_pool_free_chunk (pool_state_p, chunk_p);
  mem_free_chunks_number++;

  MEM_POOLS_STAT_FREE_CHUNK ();

  /*
   * If all chunks of the pool are free, free the pool itself.
   */
  if (pool_state_p->free_chunks_number == MEM_POOL_CHUNKS_NUMBER)
  {
    if (prev_pool_state_p != NULL)
    {
      prev_pool_state_p->next_pool_cp = pool_state_p->next_pool_cp;
    }
    else
    {
      mem_pools = MEM_CP_GET_POINTER (mem_pool_state_t, pool_state_p->next_pool_cp);
    }

    mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;

    mem_heap_free_block ((uint8_t *) pool_state_p);

    MEM_POOLS_STAT_FREE_POOL ();
  }
  else if (mem_pools != pool_state_p)
  {
    /*
     * Otherwise, move the pool that just gained a free chunk to the head of the pool list,
     * so that the next allocation's search finds it first.
     */
    JERRY_ASSERT (prev_pool_state_p != NULL);

    prev_pool_state_p->next_pool_cp = pool_state_p->next_pool_cp;

    MEM_CP_SET_NON_NULL_POINTER (pool_state_p->next_pool_cp, mem_pools);
    mem_pools = pool_state_p;
  }
} /* mem_pools_free */
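/*
 * Illustrative sketch (standalone, simplified types): the final branch of mem_pools_free above
 * applies a move-to-front heuristic. Pools that recently gained a free chunk are kept near the head
 * of the pool list, so the linear search performed on allocation tends to terminate quickly. A
 * minimal model of the relinking:
 */
#include <stddef.h>

typedef struct pool_t
{
  struct pool_t *next_p;
  unsigned free_chunks; /* number of free chunks in this pool */
} pool_t;

static pool_t *pools_head_p; /* global pool list, searched front to back on allocation */

static void
pool_move_to_front (pool_t *pool_p, /* pool that just gained a free chunk */
                    pool_t *prev_p) /* its predecessor in the list, or NULL if already at head */
{
  if (prev_p != NULL)
  {
    prev_p->next_p = pool_p->next_p; /* unlink from current position */
    pool_p->next_p = pools_head_p;   /* relink at the head */
    pools_head_p = pool_p;
  }
}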
/**
 * Dump byte-code and idx-to-literal map to snapshot
 *
 * @return true, upon success (i.e. buffer size is enough),
 *         false - otherwise.
 */
bool
serializer_dump_bytecode_with_idx_map (uint8_t *buffer_p, /**< buffer to dump to */
                                       size_t buffer_size, /**< buffer size */
                                       size_t *in_out_buffer_offset_p, /**< in-out: buffer write offset */
                                       const bytecode_data_header_t *bytecode_data_p, /**< byte-code data */
                                       const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map from literal
                                                                                             *   identifiers in
                                                                                             *   literal storage
                                                                                             *   to literal offsets
                                                                                             *   in snapshot */
                                       uint32_t literals_num, /**< number of literals */
                                       uint32_t *out_bytecode_size_p, /**< out: size of dumped instructions array */
                                       uint32_t *out_idx_to_lit_map_size_p) /**< out: size of dumped
                                                                             *   idx-to-literals map */
{
  JERRY_ASSERT (bytecode_data_p->next_header_cp == MEM_CP_NULL);

  vm_instr_counter_t instrs_num = bytecode_data_p->instrs_count;

  const size_t instrs_array_size = sizeof (vm_instr_t) * instrs_num;
  if (*in_out_buffer_offset_p + instrs_array_size > buffer_size)
  {
    return false;
  }

  memcpy (buffer_p + *in_out_buffer_offset_p, bytecode_data_p->instrs_p, instrs_array_size);
  *in_out_buffer_offset_p += instrs_array_size;

  *out_bytecode_size_p = (uint32_t) (sizeof (vm_instr_t) * instrs_num);

  lit_id_hash_table *lit_id_hash_p = MEM_CP_GET_POINTER (lit_id_hash_table, bytecode_data_p->lit_id_hash_cp);
  uint32_t idx_to_lit_map_size = lit_id_hash_table_dump_for_snapshot (buffer_p,
                                                                      buffer_size,
                                                                      in_out_buffer_offset_p,
                                                                      lit_id_hash_p,
                                                                      lit_map_p,
                                                                      literals_num,
                                                                      instrs_num);

  if (idx_to_lit_map_size == 0)
  {
    return false;
  }
  else
  {
    *out_idx_to_lit_map_size_p = idx_to_lit_map_size;
    return true;
  }
} /* serializer_dump_bytecode_with_idx_map */
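/*
 * Illustrative sketch (standalone, hypothetical helper name): the dump functions in this file share
 * one convention - a write offset advances through a caller-provided buffer, and every write is
 * bounds-checked against the total capacity before it happens, so a too-small buffer makes the whole
 * dump fail cleanly. A minimal helper in this style could look like:
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
write_to_buffer (uint8_t *buffer_p,   /* output buffer */
                 size_t buffer_size,  /* total capacity */
                 size_t *offset_p,    /* in-out: current write position */
                 const void *data_p,  /* bytes to append */
                 size_t data_size)    /* number of bytes to append */
{
  if (*offset_p + data_size > buffer_size)
  {
    return false; /* not enough room: the caller aborts the dump */
  }

  memcpy (buffer_p + *offset_p, data_p, data_size);
  *offset_p += data_size;
  return true;
}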
/**
 * Convert literal id (operand value of instruction) to compressed pointer to literal
 *
 * Bytecode is divided into blocks of fixed size, and each block has an independent encoding of variable names,
 * which are represented by 8-bit numbers - ids.
 * This function performs conversion from id to literal.
 *
 * @return compressed pointer to literal
 */
lit_cpointer_t
bc_get_literal_cp_by_uid (uint8_t id, /**< literal idx */
                          const bytecode_data_header_t *bytecode_data_p, /**< pointer to bytecode */
                          vm_instr_counter_t oc) /**< position in the bytecode */
{
  JERRY_ASSERT (bytecode_data_p);

  lit_id_hash_table *lit_id_hash = MEM_CP_GET_POINTER (lit_id_hash_table, bytecode_data_p->lit_id_hash_cp);

  if (lit_id_hash == NULL)
  {
    return INVALID_LITERAL;
  }

  return lit_id_hash_table_lookup (lit_id_hash, id, oc);
} /* bc_get_literal_cp_by_uid */
void
serializer_free (void)
{
  if (bytecode_data.strings_buffer)
  {
    mem_heap_free_block ((uint8_t *) bytecode_data.strings_buffer);
  }

  lit_finalize ();

  while (bytecode_data.instrs_p != NULL)
  {
    insts_data_header_t *header_p = GET_BYTECODE_HEADER (bytecode_data.instrs_p);
    bytecode_data.instrs_p = MEM_CP_GET_POINTER (vm_instr_t, header_p->next_instrs_cp);

    mem_heap_free_block (header_p);
  }
}
/**
 * Collect chunks from empty pools and free the pools
 */
void
mem_pools_collect_empty (void)
{
  /*
   * Hint magic number in header of pools with free pool-first chunks
   */
  const uint16_t hint_magic_num_value = 0x7e89;

  /*
   * Collection-time chunk lists
   */
  mem_pool_chunk_t *first_chunks_list_p = NULL;
  mem_pool_chunk_t *non_first_chunks_list_p = NULL;

  /*
   * At first stage collect free pool-first chunks to separate collection-time lists
   * and change their layout from mem_pool_chunk_t::u::free to mem_pool_chunk_t::u::pool_gc
   */
  {
    mem_pool_chunk_t tmp_header;
    tmp_header.u.free.next_p = mem_free_chunk_p;

    for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                          *prev_free_chunk_p = &tmp_header,
                          *next_free_chunk_p;
         free_chunk_iter_p != NULL;
         free_chunk_iter_p = next_free_chunk_p)
    {
      mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

      VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

      next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

      if (pool_start_p == free_chunk_iter_p)
      {
        /*
         * The chunk is first at its pool
         *
         * Remove the chunk from the common list of free chunks
         */
        prev_free_chunk_p->u.free.next_p = next_free_chunk_p;

        /*
         * Initialize the pool-first chunk as a pool header and insert it into the list of free pool-first chunks
         */
        free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
        free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
        free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
        free_chunk_iter_p->u.pool_gc.traversal_check_flag = false;

        MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, first_chunks_list_p);
        first_chunks_list_p = free_chunk_iter_p;
      }
      else
      {
        prev_free_chunk_p = free_chunk_iter_p;
      }
    }

    mem_free_chunk_p = tmp_header.u.free.next_p;
  }

  if (first_chunks_list_p == NULL)
  {
    /* there are no empty pools */

    return;
  }

  /*
   * At second stage we collect all free non-pool-first chunks, for which the corresponding pool-first chunks
   * are free, and link them into the corresponding mem_pool_chunk_t::u::pool_gc::free_list_cp list, while also
   * maintaining the corresponding mem_pool_chunk_t::u::pool_gc::free_chunks_num:
   *
   * - at first, for each non-pool-first free chunk we check whether the traversal check flag is cleared in the
   *   corresponding first chunk of the same pool, and move those chunks, for which the condition holds,
   *   to a separate temporary list;
   *
   * - then, we flip the traversal check flag in each of the free pool-first chunks;
   *
   * - at last, we perform almost the same as at the first step, but check only non-pool-first chunks from the
   *   temporary list, and send the chunks, for which the corresponding traversal check flag is cleared, back to
   *   the common list of free chunks, while the rest of the chunks from the temporary list are linked to the
   *   corresponding pool-first chunks. Also, the counter of linked free chunks is maintained in every free
   *   pool-first chunk.
   */
  {
    {
      mem_pool_chunk_t tmp_header;
      tmp_header.u.free.next_p = mem_free_chunk_p;

      for (mem_pool_chunk_t *free_chunk_iter_p = tmp_header.u.free.next_p,
                            *prev_free_chunk_p = &tmp_header,
                            *next_free_chunk_p;
           free_chunk_iter_p != NULL;
           free_chunk_iter_p = next_free_chunk_p)
      {
        mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

        next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

        /*
         * The magic number doesn't guarantee that the chunk is actually a free pool-first chunk,
         * so we also test the traversal check flag after flipping the values of the flags in every
         * free pool-first chunk.
         */
        uint16_t magic_num_field;
        bool traversal_check_flag;

        mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

        /*
         * During this traversal the flag in the free header chunks is in cleared state
         */
        if (!traversal_check_flag && magic_num_field == hint_magic_num_value)
        {
          free_chunk_iter_p->u.free.next_p = non_first_chunks_list_p;
          non_first_chunks_list_p = free_chunk_iter_p;

          prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
        }
        else
        {
          prev_free_chunk_p = free_chunk_iter_p;
        }
      }

      mem_free_chunk_p = tmp_header.u.free.next_p;
    }

    {
      /*
       * Now, flip the traversal check flag in free pool-first chunks
       */
      for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
           first_chunks_iter_p != NULL;
           first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                     first_chunks_iter_p->u.pool_gc.next_first_cp))
      {
        JERRY_ASSERT (!first_chunks_iter_p->u.pool_gc.traversal_check_flag);

        first_chunks_iter_p->u.pool_gc.traversal_check_flag = true;
      }
    }

    {
      for (mem_pool_chunk_t *non_first_chunks_iter_p = non_first_chunks_list_p, *next_p;
           non_first_chunks_iter_p != NULL;
           non_first_chunks_iter_p = next_p)
      {
        next_p = non_first_chunks_iter_p->u.free.next_p;

        mem_pool_chunk_t *pool_start_p;
        pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (non_first_chunks_iter_p);

        uint16_t magic_num_field;
        bool traversal_check_flag;

        mem_pools_collect_read_magic_num_and_flag (pool_start_p, &magic_num_field, &traversal_check_flag);

        JERRY_ASSERT (magic_num_field == hint_magic_num_value);

#ifndef JERRY_DISABLE_HEAVY_DEBUG
        bool is_occured = false;

        for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p;
             first_chunks_iter_p != NULL;
             first_chunks_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                       first_chunks_iter_p->u.pool_gc.next_first_cp))
        {
          if (pool_start_p == first_chunks_iter_p)
          {
            is_occured = true;
            break;
          }
        }

        JERRY_ASSERT (is_occured == traversal_check_flag);
#endif /* !JERRY_DISABLE_HEAVY_DEBUG */

        /*
         * During this traversal the flag in the free header chunks is in set state
         *
         * If the flag is set, it is guaranteed that the pool-first chunk
         * from the same pool as the current non-pool-first chunk is free
         * and is placed in the corresponding list of free pool-first chunks.
         */
        if (traversal_check_flag)
        {
          pool_start_p->u.pool_gc.free_chunks_num++;

          non_first_chunks_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                                       pool_start_p->u.pool_gc.free_list_cp);
          MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, non_first_chunks_iter_p);
        }
        else
        {
          non_first_chunks_iter_p->u.free.next_p = mem_free_chunk_p;
          mem_free_chunk_p = non_first_chunks_iter_p;
        }
      }
    }

    non_first_chunks_list_p = NULL;
  }

  /*
   * At third stage we check each free pool-first chunk in the collection-time list against the counted
   * number of free chunks in the pool containing the chunk.
   *
   * If the number is equal to the number of chunks in the pool, then the pool is empty, and so is freed;
   * otherwise, the free chunks of the pool are returned to the common list of free chunks.
   */
  for (mem_pool_chunk_t *first_chunks_iter_p = first_chunks_list_p, *next_p;
       first_chunks_iter_p != NULL;
       first_chunks_iter_p = next_p)
  {
    next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t, first_chunks_iter_p->u.pool_gc.next_first_cp);

    JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.hint_magic_num == hint_magic_num_value);
    JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.traversal_check_flag);
    JERRY_ASSERT (first_chunks_iter_p->u.pool_gc.free_chunks_num <= MEM_POOL_CHUNKS_NUMBER);

    if (first_chunks_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
    {
#ifndef JERRY_NDEBUG
      mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

      MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();

      mem_heap_free_block (first_chunks_iter_p);

      MEM_POOLS_STAT_FREE_POOL ();
    }
    else
    {
      mem_pool_chunk_t *first_chunk_p = first_chunks_iter_p;

      /*
       * Convert the layout of the first chunk from the collection-time pool-first chunk layout
       * to the common free chunk layout
       */
      first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                         first_chunks_iter_p->u.pool_gc.free_list_cp);

      /*
       * Link the local pool's list of free chunks into the common list of free chunks
       */
      for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
           ;
           pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
      {
        JERRY_ASSERT (pool_chunks_iter_p != NULL);

        if (pool_chunks_iter_p->u.free.next_p == NULL)
        {
          pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;
          break;
        }
      }

      mem_free_chunk_p = first_chunk_p;
    }
  }

#ifdef JERRY_VALGRIND
  /*
   * Valgrind-mode specific pass that marks all free chunks inaccessible
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
  }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
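/*
 * Illustrative sketch (standalone, simplified types): the two-pass trick above distinguishes genuine
 * free pool-first chunks from allocated chunks whose payload happens to contain the magic number.
 * The key observation: only chunks that really sit on the list of free pool-first chunks have their
 * traversal flag flipped between the two reads, and the bytes of a live chunk do not change during
 * collection, so a coincidental magic number alone can never produce a false positive.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef struct
{
  uint16_t magic;  /* hint only: may coincide with payload bytes of a live chunk */
  bool check_flag; /* flipped between the passes, but only in genuine free headers */
} header_t;

/* Pass 1: a chunk is a candidate if the magic matches and the flag is still cleared. */
static bool
is_candidate (const header_t *h_p, uint16_t magic_value)
{
  return h_p->magic == magic_value && !h_p->check_flag;
}

/* Between the passes: flip the flag in headers known (from the free list) to be genuine. */
static void
flip_known_free (header_t **free_headers_p, size_t count)
{
  for (size_t i = 0; i < count; i++)
  {
    free_headers_p[i]->check_flag = true;
  }
}

/* Pass 2: a candidate is genuine iff it observed the flip. */
static bool
is_genuine (const header_t *h_p)
{
  return h_p->check_flag;
}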
/**
 * Register bytecode and supplementary data of all scopes from snapshot
 *
 * NOTE:
 *      If is_copy flag is set, bytecode is copied from the snapshot; otherwise, bytecode is referenced
 *      directly from the snapshot.
 *
 * @return pointer to byte-code header, upon success,
 *         NULL - upon failure (i.e. in case snapshot format is not valid)
 */
const bytecode_data_header_t *
bc_load_bytecode_data (const uint8_t *snapshot_data_p, /**< buffer with instructions array
                                                        *   and idx-to-literals map from snapshot */
                       size_t snapshot_size, /**< remaining size of snapshot */
                       const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map of in-snapshot
                                                                             *   literal offsets
                                                                             *   to literal identifiers,
                                                                             *   created in literal
                                                                             *   storage */
                       uint32_t literals_num, /**< number of literals */
                       bool is_copy, /**< flag indicating whether the passed in-snapshot data
                                      *   should be copied to engine's memory (true),
                                      *   or can be referenced until the engine is stopped
                                      *   (i.e. until the call to jerry_cleanup) */
                       uint32_t expected_scopes_num) /**< number of scopes read from snapshot header */
{
  uint32_t snapshot_offset = 0;
  uint32_t out_bytecode_data_size = 0;
  uint32_t scopes_num = 0;

  bytecode_data_header_t *bc_header_p = bc_load_bytecode_with_idx_map (snapshot_data_p,
                                                                       snapshot_size,
                                                                       lit_map_p,
                                                                       literals_num,
                                                                       is_copy,
                                                                       &out_bytecode_data_size);

  scopes_num++;
  snapshot_offset += out_bytecode_data_size;
  JERRY_ASSERT (snapshot_offset <= snapshot_size);

  bytecode_data_header_t *next_to_handle_list_p = bc_header_p;

  while (next_to_handle_list_p != NULL)
  {
    mem_cpointer_t *declarations_p = MEM_CP_GET_POINTER (mem_cpointer_t, next_to_handle_list_p->declarations_cp);

    /* find the first child scope slot which is not filled in yet */
    uint32_t child_scope_index = 0;
    while (child_scope_index < next_to_handle_list_p->func_scopes_count
           && declarations_p[child_scope_index] != MEM_CP_NULL)
    {
      child_scope_index++;
    }

    if (child_scope_index == next_to_handle_list_p->func_scopes_count)
    {
      /* all child scopes of the current header are filled in: move to the next header in the worklist */
      bytecode_data_header_t *bc_header_list_iter_p = MEM_CP_GET_POINTER (bytecode_data_header_t,
                                                                          next_to_handle_list_p->next_header_cp);

      next_to_handle_list_p->next_header_cp = MEM_CP_NULL;
      next_to_handle_list_p = bc_header_list_iter_p;

      continue;
    }

    JERRY_ASSERT (snapshot_offset < snapshot_size);

    bytecode_data_header_t *next_header_p = bc_load_bytecode_with_idx_map (snapshot_data_p + snapshot_offset,
                                                                           snapshot_size - snapshot_offset,
                                                                           lit_map_p,
                                                                           literals_num,
                                                                           is_copy,
                                                                           &out_bytecode_data_size);

    scopes_num++;

    snapshot_offset += out_bytecode_data_size;
    JERRY_ASSERT (snapshot_offset <= snapshot_size);

    MEM_CP_SET_NON_NULL_POINTER (declarations_p[child_scope_index], next_header_p);

    if (next_header_p->func_scopes_count > 0)
    {
      JERRY_ASSERT (next_header_p->next_header_cp == MEM_CP_NULL);

      MEM_CP_SET_POINTER (next_header_p->next_header_cp, next_to_handle_list_p);
      next_to_handle_list_p = next_header_p;
    }
  }

  if (expected_scopes_num != scopes_num)
  {
    return NULL;
  }

  MEM_CP_SET_POINTER (bc_header_p->next_header_cp, first_bytecode_header_p);
  first_bytecode_header_p = bc_header_p;

  return bc_header_p;
} /* bc_load_bytecode_data */
/**
 * Dump bytecode and supplementary data of all existing scopes to snapshot
 *
 * @return true if snapshot was dumped successfully,
 *         false otherwise
 */
bool
bc_save_bytecode_data (uint8_t *buffer_p, /**< buffer to dump to */
                       size_t buffer_size, /**< buffer size */
                       size_t *in_out_buffer_offset_p, /**< in-out: buffer write offset */
                       const bytecode_data_header_t *bytecode_data_p, /**< byte-code data */
                       const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map from literal
                                                                             *   identifiers in
                                                                             *   literal storage
                                                                             *   to literal offsets
                                                                             *   in snapshot */
                       uint32_t literals_num, /**< number of literals */
                       uint32_t *out_scopes_num) /**< out: number of scopes written */
{
  bytecode_data_header_t *next_to_handle_list_p = first_bytecode_header_p;

  while (next_to_handle_list_p != NULL)
  {
    if (next_to_handle_list_p == bytecode_data_p)
    {
      break;
    }

    next_to_handle_list_p = MEM_CP_GET_POINTER (bytecode_data_header_t, next_to_handle_list_p->next_header_cp);
  }

  JERRY_ASSERT (next_to_handle_list_p);
  JERRY_ASSERT (next_to_handle_list_p->next_header_cp == MEM_CP_NULL);

  *out_scopes_num = 0;

  while (next_to_handle_list_p != NULL)
  {
    bytecode_data_header_t *bc_header_list_iter_p = next_to_handle_list_p;
    next_to_handle_list_p = NULL;

    mem_cpointer_t *declarations_p = MEM_CP_GET_POINTER (mem_cpointer_t, bc_header_list_iter_p->declarations_cp);

    if (!bc_save_bytecode_with_idx_map (buffer_p,
                                        buffer_size,
                                        in_out_buffer_offset_p,
                                        bc_header_list_iter_p,
                                        lit_map_p,
                                        literals_num))
    {
      return false;
    }

    (*out_scopes_num)++;

    next_to_handle_list_p = MEM_CP_GET_POINTER (bytecode_data_header_t, bc_header_list_iter_p->next_header_cp);

    /* push child scopes in reverse order, so that they are popped from the worklist in declaration order */
    for (uint32_t index = bc_header_list_iter_p->func_scopes_count; index > 0; index--)
    {
      bytecode_data_header_t *child_scope_header_p = MEM_CP_GET_NON_NULL_POINTER (bytecode_data_header_t,
                                                                                  declarations_p[index - 1]);

      JERRY_ASSERT (child_scope_header_p->next_header_cp == MEM_CP_NULL);

      MEM_CP_SET_POINTER (child_scope_header_p->next_header_cp, next_to_handle_list_p);

      next_to_handle_list_p = child_scope_header_p;
    }

    bc_header_list_iter_p->next_header_cp = MEM_CP_NULL;
  }

  return true;
} /* bc_save_bytecode_data */
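/*
 * Illustrative sketch (standalone, hypothetical types): bc_save_bytecode_data above walks the scope
 * tree with an intrusive worklist. Pushing the children of each scope in reverse order means they are
 * popped, and therefore written to the snapshot, in declaration order, yielding a pre-order layout
 * that bc_load_bytecode_data can consume sequentially.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct node_t
{
  struct node_t *next_p; /* intrusive worklist link */
  struct node_t **children_p;
  uint32_t children_count;
  int id;
} node_t;

static void
emit_preorder (node_t *root_p)
{
  node_t *worklist_p = root_p;
  root_p->next_p = NULL;

  while (worklist_p != NULL)
  {
    node_t *node_p = worklist_p;
    worklist_p = node_p->next_p;

    printf ("%d ", node_p->id); /* stands in for "write the scope to the snapshot" */

    /* reverse push: last child first, so that the first child is popped next */
    for (uint32_t i = node_p->children_count; i > 0; i--)
    {
      node_p->children_p[i - 1]->next_p = worklist_p;
      worklist_p = node_p->children_p[i - 1];
    }
  }
}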
/**
 * Dump byte-code and idx-to-literal map of a single scope to snapshot
 *
 * @return true, upon success (i.e. buffer size is enough),
 *         false - otherwise.
 */
static bool
bc_save_bytecode_with_idx_map (uint8_t *buffer_p, /**< buffer to dump to */
                               size_t buffer_size, /**< buffer size */
                               size_t *in_out_buffer_offset_p, /**< in-out: buffer write offset */
                               const bytecode_data_header_t *bytecode_data_p, /**< byte-code data */
                               const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map from literal
                                                                                     *   identifiers in
                                                                                     *   literal storage
                                                                                     *   to literal offsets
                                                                                     *   in snapshot */
                               uint32_t literals_num) /**< number of literals */
{
  JERRY_ASSERT (JERRY_ALIGNUP (*in_out_buffer_offset_p, MEM_ALIGNMENT) == *in_out_buffer_offset_p);

  jerry_snapshot_bytecode_header_t bytecode_header;
  bytecode_header.func_scopes_count = bytecode_data_p->func_scopes_count;
  bytecode_header.var_decls_count = bytecode_data_p->var_decls_count;
  bytecode_header.is_strict = bytecode_data_p->is_strict;
  bytecode_header.is_ref_arguments_identifier = bytecode_data_p->is_ref_arguments_identifier;
  bytecode_header.is_ref_eval_identifier = bytecode_data_p->is_ref_eval_identifier;
  bytecode_header.is_args_moved_to_regs = bytecode_data_p->is_args_moved_to_regs;
  bytecode_header.is_no_lex_env = bytecode_data_p->is_no_lex_env;

  size_t bytecode_header_offset = *in_out_buffer_offset_p;

  /* Dump instructions */
  *in_out_buffer_offset_p += JERRY_ALIGNUP (sizeof (jerry_snapshot_bytecode_header_t), MEM_ALIGNMENT);

  vm_instr_counter_t instrs_num = bytecode_data_p->instrs_count;

  const size_t instrs_array_size = sizeof (vm_instr_t) * instrs_num;
  if (*in_out_buffer_offset_p + instrs_array_size > buffer_size)
  {
    return false;
  }

  memcpy (buffer_p + *in_out_buffer_offset_p, bytecode_data_p->instrs_p, instrs_array_size);
  *in_out_buffer_offset_p += instrs_array_size;

  bytecode_header.instrs_size = (uint32_t) (sizeof (vm_instr_t) * instrs_num);

  /* Dump variable declarations */
  mem_cpointer_t *func_scopes_p = MEM_CP_GET_POINTER (mem_cpointer_t, bytecode_data_p->declarations_cp);
  lit_cpointer_t *var_decls_p = (lit_cpointer_t *) (func_scopes_p + bytecode_data_p->func_scopes_count);
  uint32_t null_var_decls_num = 0;

  for (uint32_t i = 0; i < bytecode_header.var_decls_count; ++i)
  {
    lit_cpointer_t lit_cp = var_decls_p[i];

    if (lit_cp.packed_value == MEM_CP_NULL)
    {
      null_var_decls_num++;
      continue;
    }

    uint32_t offset = bc_find_lit_offset (lit_cp, lit_map_p, literals_num);

    if (!jrt_write_to_buffer_by_offset (buffer_p, buffer_size, in_out_buffer_offset_p, &offset, sizeof (offset)))
    {
      return false;
    }
  }

  bytecode_header.var_decls_count -= null_var_decls_num;

  /* Dump uid->lit_cp hash table */
  lit_id_hash_table *lit_id_hash_p = MEM_CP_GET_POINTER (lit_id_hash_table, bytecode_data_p->lit_id_hash_cp);
  uint32_t idx_to_lit_map_size = lit_id_hash_table_dump_for_snapshot (buffer_p,
                                                                      buffer_size,
                                                                      in_out_buffer_offset_p,
                                                                      lit_id_hash_p,
                                                                      lit_map_p,
                                                                      literals_num,
                                                                      instrs_num);

  if (idx_to_lit_map_size == 0)
  {
    return false;
  }

  bytecode_header.idx_to_lit_map_size = idx_to_lit_map_size;

  /* Align to write the next bytecode data at an aligned address */
  bytecode_header.size = (uint32_t) (*in_out_buffer_offset_p - bytecode_header_offset);
  JERRY_ASSERT (bytecode_header.size == JERRY_ALIGNUP (sizeof (jerry_snapshot_bytecode_header_t), MEM_ALIGNMENT)
                                        + bytecode_header.instrs_size
                                        + bytecode_header.var_decls_count * sizeof (uint32_t)
                                        + idx_to_lit_map_size);

  if (!bc_align_data_in_output_buffer (&bytecode_header.size,
                                       buffer_p,
                                       buffer_size,
                                       in_out_buffer_offset_p))
  {
    return false;
  }

  /* Dump the header at the saved offset */
  if (!jrt_write_to_buffer_by_offset (buffer_p,
                                      buffer_size,
                                      &bytecode_header_offset,
                                      &bytecode_header,
                                      sizeof (bytecode_header)))
  {
    return false;
  }

  return true;
} /* bc_save_bytecode_with_idx_map */
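/*
 * Illustrative sketch (standalone, hypothetical types): bc_save_bytecode_with_idx_map reserves space
 * for the header, writes the variable-length payload after it, and only then writes the completed
 * header back at the remembered offset - fields such as instrs_size and idx_to_lit_map_size are only
 * known once the payload has been dumped. The buffer here is assumed large enough for brevity.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct
{
  uint32_t payload_size; /* known only after the payload is written */
} record_header_t;

static size_t
write_record (uint8_t *buffer_p,        /* output buffer */
              size_t offset,            /* where the record starts */
              const uint8_t *payload_p, /* payload bytes */
              size_t payload_size)      /* payload length */
{
  size_t header_offset = offset;      /* 1. remember where the header belongs */
  offset += sizeof (record_header_t); /* 2. skip over the header slot */

  memcpy (buffer_p + offset, payload_p, payload_size); /* 3. dump the payload */
  offset += payload_size;

  record_header_t header = { (uint32_t) payload_size };
  memcpy (buffer_p + header_offset, &header, sizeof (header)); /* 4. back-patch the header */

  return offset; /* next free position */
}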
void
mem_pools_collect_empty (void)
{
  /*
   * Hint magic number in header of pools with free first chunks
   */
  const uint16_t hint_magic_num_value = 0x7e89;

  /*
   * At first pass collect pointers to those of the free chunks that are first at their pools
   * to separate lists (collection-time pool lists) and change them to headers of the corresponding pools
   */

  /*
   * Number of collection-time pool lists
   */
  constexpr uint32_t pool_lists_number = 8;

  /*
   * Collection-time pool lists
   */
  mem_pool_chunk_t *pool_lists_p[pool_lists_number];
  for (uint32_t i = 0; i < pool_lists_number; i++)
  {
    pool_lists_p[i] = NULL;
  }

  /*
   * Number of the pools included into the lists
   */
  uint32_t pools_in_lists_number = 0;

  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p,
                        *prev_free_chunk_p = NULL,
                        *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    VALGRIND_DEFINED_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    if (pool_start_p == free_chunk_iter_p)
    {
      /*
       * The chunk is first at its pool
       *
       * Remove the chunk from the common list of free chunks
       */
      if (prev_free_chunk_p == NULL)
      {
        JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

        mem_free_chunk_p = next_free_chunk_p;
      }
      else
      {
        prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
      }

      pools_in_lists_number++;

      uint8_t list_id = pools_in_lists_number % pool_lists_number;

      /*
       * Initialize the pool header and insert the pool into one of the lists
       */
      free_chunk_iter_p->u.pool_gc.free_list_cp = MEM_CP_NULL;
      free_chunk_iter_p->u.pool_gc.free_chunks_num = 1; /* the first chunk */
      free_chunk_iter_p->u.pool_gc.hint_magic_num = hint_magic_num_value;
      free_chunk_iter_p->u.pool_gc.list_id = list_id;

      MEM_CP_SET_POINTER (free_chunk_iter_p->u.pool_gc.next_first_cp, pool_lists_p[list_id]);
      pool_lists_p[list_id] = free_chunk_iter_p;
    }
    else
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  if (pools_in_lists_number == 0)
  {
    /* there are no empty pools */

    return;
  }

  /*
   * At second pass we check for all of the remaining free chunks whether they are in pools that were
   * included into the collection-time pool lists.
   *
   * For each such chunk, try to find the corresponding pool through iterating the list.
   *
   * If a pool is found in a list for a chunk (so, the first chunk of the pool is free), increment the
   * counter of free chunks in the pool, and move the chunk from the global free chunks list to the
   * collection-time local list of the corresponding pool's free chunks.
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p,
                        *prev_free_chunk_p = NULL,
                        *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    mem_pool_chunk_t *pool_start_p = (mem_pool_chunk_t *) mem_heap_get_chunked_block_start (free_chunk_iter_p);

    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    bool is_chunk_moved_to_local_list = false;

#ifdef JERRY_VALGRIND
    /*
     * If the chunk is not free, there may be undefined bytes at the hint_magic_num and list_id fields.
     *
     * Although this is correct for the routine, valgrind issues a warning about the use of uninitialized
     * data in a conditional expression. To suppress the false-positive warning, the chunk is temporarily
     * marked as defined, and after reading the hint magic number and list identifier, the valgrind state
     * of the chunk is restored.
     */
    uint8_t vbits[MEM_POOL_CHUNK_SIZE];
    unsigned status;

    status = VALGRIND_GET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);

    VALGRIND_DEFINED_SPACE (pool_start_p, MEM_POOL_CHUNK_SIZE);
#endif /* JERRY_VALGRIND */

    /*
     * The magic number doesn't guarantee that the chunk is actually a pool header,
     * so it is only an optimization to reduce the number of unnecessary iterations
     * over the pool lists.
     */
    uint16_t magic_num_field = pool_start_p->u.pool_gc.hint_magic_num;
    uint8_t id_to_search_in = pool_start_p->u.pool_gc.list_id;

#ifdef JERRY_VALGRIND
    status = VALGRIND_SET_VBITS (pool_start_p, vbits, MEM_POOL_CHUNK_SIZE);
    JERRY_ASSERT (status == 0 || status == 1);
#endif /* JERRY_VALGRIND */

    if (magic_num_field == hint_magic_num_value)
    {
      /*
       * Maybe, the first chunk is free.
       *
       * If it is so, it is included in the list of the pool's free first chunks.
       */
      if (id_to_search_in < pool_lists_number)
      {
        for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[id_to_search_in];
             pool_list_iter_p != NULL;
             pool_list_iter_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                    pool_list_iter_p->u.pool_gc.next_first_cp))
        {
          if (pool_list_iter_p == pool_start_p)
          {
            /*
             * The first chunk is actually free.
             *
             * So, increment the free chunks counter in it.
             */
            pool_start_p->u.pool_gc.free_chunks_num++;

            /*
             * It is possible that the corresponding pool is empty.
             *
             * Move the current chunk from the common list of free chunks to the temporary list,
             * local to the pool.
             */
            if (prev_free_chunk_p == NULL)
            {
              JERRY_ASSERT (mem_free_chunk_p == free_chunk_iter_p);

              mem_free_chunk_p = next_free_chunk_p;
            }
            else
            {
              prev_free_chunk_p->u.free.next_p = next_free_chunk_p;
            }

            free_chunk_iter_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                                   pool_start_p->u.pool_gc.free_list_cp);
            MEM_CP_SET_NON_NULL_POINTER (pool_start_p->u.pool_gc.free_list_cp, free_chunk_iter_p);

            is_chunk_moved_to_local_list = true;

            break;
          }
        }
      }
    }

    if (!is_chunk_moved_to_local_list)
    {
      prev_free_chunk_p = free_chunk_iter_p;
    }
  }

  /*
   * At third pass we check each pool in the collection-time pool lists against the counted
   * number of free chunks in the pool.
   *
   * If the number is equal to the number of chunks in the pool, then the pool is empty, and so is freed;
   * otherwise, the free chunks of the pool are returned to the common list of free chunks.
   */
  for (uint8_t list_id = 0; list_id < pool_lists_number; list_id++)
  {
    for (mem_pool_chunk_t *pool_list_iter_p = pool_lists_p[list_id], *next_p;
         pool_list_iter_p != NULL;
         pool_list_iter_p = next_p)
    {
      next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t, pool_list_iter_p->u.pool_gc.next_first_cp);

      if (pool_list_iter_p->u.pool_gc.free_chunks_num == MEM_POOL_CHUNKS_NUMBER)
      {
#ifndef JERRY_NDEBUG
        mem_free_chunks_number -= MEM_POOL_CHUNKS_NUMBER;
#endif /* !JERRY_NDEBUG */

        MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST ();

        mem_heap_free_block (pool_list_iter_p);

        MEM_POOLS_STAT_FREE_POOL ();
      }
      else
      {
        mem_pool_chunk_t *first_chunk_p = pool_list_iter_p;

        /*
         * Convert the layout of the first chunk from the collection-time pool header to a common free chunk
         */
        first_chunk_p->u.free.next_p = MEM_CP_GET_POINTER (mem_pool_chunk_t,
                                                           pool_list_iter_p->u.pool_gc.free_list_cp);

        /*
         * Link the local pool's list of free chunks into the global list of free chunks
         */
        for (mem_pool_chunk_t *pool_chunks_iter_p = first_chunk_p;
             ;
             pool_chunks_iter_p = pool_chunks_iter_p->u.free.next_p)
        {
          JERRY_ASSERT (pool_chunks_iter_p != NULL);

          if (pool_chunks_iter_p->u.free.next_p == NULL)
          {
            pool_chunks_iter_p->u.free.next_p = mem_free_chunk_p;
            break;
          }
        }

        mem_free_chunk_p = first_chunk_p;
      }
    }
  }

#ifdef JERRY_VALGRIND
  /*
   * Valgrind-mode specific pass that marks all free chunks inaccessible
   */
  for (mem_pool_chunk_t *free_chunk_iter_p = mem_free_chunk_p, *next_free_chunk_p;
       free_chunk_iter_p != NULL;
       free_chunk_iter_p = next_free_chunk_p)
  {
    next_free_chunk_p = free_chunk_iter_p->u.free.next_p;

    VALGRIND_NOACCESS_SPACE (free_chunk_iter_p, MEM_POOL_CHUNK_SIZE);
  }
#endif /* JERRY_VALGRIND */
} /* mem_pools_collect_empty */
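/*
 * Illustrative sketch (standalone, simplified types): this variant spreads the free pool-first chunks
 * over several collection-time lists and records the list index in the header, so the second pass only
 * searches one short list per chunk instead of every free pool header. A minimal model of the
 * partitioned membership test:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define LISTS_NUM 8u

typedef struct pool_header_t
{
  struct pool_header_t *next_p;
  uint8_t list_id; /* which of the LISTS_NUM lists this header was inserted into */
} pool_header_t;

static pool_header_t *lists_p[LISTS_NUM];

static void
insert_header (pool_header_t *header_p, uint32_t insertion_counter)
{
  header_p->list_id = (uint8_t) (insertion_counter % LISTS_NUM); /* round-robin distribution */
  header_p->next_p = lists_p[header_p->list_id];
  lists_p[header_p->list_id] = header_p;
}

static bool
is_known_free_header (const pool_header_t *candidate_p)
{
  if (candidate_p->list_id >= LISTS_NUM) /* untrusted field: may be payload garbage */
  {
    return false;
  }

  /* search only the one list the header claims membership of */
  for (const pool_header_t *iter_p = lists_p[candidate_p->list_id]; iter_p != NULL; iter_p = iter_p->next_p)
  {
    if (iter_p == candidate_p)
    {
      return true;
    }
  }

  return false;
}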