/**
 * Write alignment bytes to output buffer to align 'in_out_size' to MEM_ALIGNMENT
 *
 * @return true - if alignment bytes were written successfully,
 *         false - otherwise
 */
bool
bc_align_data_in_output_buffer (uint32_t *in_out_size, /**< in: unaligned size, out: aligned size */
                                uint8_t *buffer_p, /**< buffer where to write */
                                size_t buffer_size, /**< buffer size */
                                size_t *in_out_buffer_offset_p) /**< current offset in buffer */
{
  const uint32_t size_aligned = JERRY_ALIGNUP (*in_out_size, MEM_ALIGNMENT);

  if (size_aligned == *in_out_size)
  {
    /* Size is already aligned - nothing to write */
    return true;
  }

  JERRY_ASSERT (size_aligned > *in_out_size);

  uint8_t zero_byte = 0;

  /* Emit one zero byte per missing padding position */
  for (uint32_t bytes_left = (uint32_t) (size_aligned - *in_out_size);
       bytes_left > 0;
       bytes_left--)
  {
    if (!jrt_write_to_buffer_by_offset (buffer_p,
                                        buffer_size,
                                        in_out_buffer_offset_p,
                                        &zero_byte,
                                        sizeof (zero_byte)))
    {
      return false;
    }
  }

  *in_out_size = size_aligned;

  return true;
} /* bc_align_data_in_output_buffer */
/**
 * Recommend allocation size based on chunk size.
 *
 * @return recommended allocation size
 */
size_t __attr_pure___
mem_heap_recommend_allocation_size (size_t minimum_allocation_size) /**< minimum allocation size */
{
  /* Account for the block header, round the total up to whole heap chunks,
   * then report how much data area fits into that chunk-aligned footprint. */
  const size_t size_with_header = minimum_allocation_size + sizeof (mem_block_header_t);
  const size_t chunk_aligned_size = JERRY_ALIGNUP (size_with_header, MEM_HEAP_CHUNK_SIZE);

  return chunk_aligned_size - sizeof (mem_block_header_t);
} /* mem_heap_recommend_allocation_size */
/** * Merge scopes tree into bytecode * * @return pointer to generated bytecode */ const bytecode_data_header_t * serializer_merge_scopes_into_bytecode (void) { const size_t buckets_count = scopes_tree_count_literals_in_blocks (current_scope); const vm_instr_counter_t instrs_count = scopes_tree_count_instructions (current_scope); const size_t blocks_count = JERRY_ALIGNUP (instrs_count, BLOCK_SIZE) / BLOCK_SIZE; const size_t bytecode_size = JERRY_ALIGNUP (instrs_count * sizeof (vm_instr_t), MEM_ALIGNMENT); const size_t hash_table_size = lit_id_hash_table_get_size_for_table (buckets_count, blocks_count); const size_t header_and_hash_table_size = JERRY_ALIGNUP (sizeof (bytecode_data_header_t) + hash_table_size, MEM_ALIGNMENT); uint8_t *buffer_p = (uint8_t*) mem_heap_alloc_block (bytecode_size + header_and_hash_table_size, MEM_HEAP_ALLOC_LONG_TERM); lit_id_hash_table *lit_id_hash = lit_id_hash_table_init (buffer_p + sizeof (bytecode_data_header_t), hash_table_size, buckets_count, blocks_count); vm_instr_t *bytecode_p = scopes_tree_raw_data (current_scope, buffer_p + header_and_hash_table_size, bytecode_size, lit_id_hash); bytecode_data_header_t *header_p = (bytecode_data_header_t *) buffer_p; MEM_CP_SET_POINTER (header_p->lit_id_hash_cp, lit_id_hash); header_p->instrs_p = bytecode_p; header_p->instrs_count = instrs_count; MEM_CP_SET_POINTER (header_p->next_header_cp, first_bytecode_header_p); first_bytecode_header_p = header_p; if (print_instrs) { lit_dump_literals (); serializer_print_instrs (header_p); } return header_p; } /* serializer_merge_scopes_into_bytecode */
const vm_instr_t * serializer_merge_scopes_into_bytecode (void) { bytecode_data.instrs_count = scopes_tree_count_instructions (current_scope); const size_t buckets_count = scopes_tree_count_literals_in_blocks (current_scope); const size_t blocks_count = (size_t) bytecode_data.instrs_count / BLOCK_SIZE + 1; const vm_instr_counter_t instrs_count = scopes_tree_count_instructions (current_scope); const size_t bytecode_array_size = JERRY_ALIGNUP (sizeof (insts_data_header_t) + instrs_count * sizeof (vm_instr_t), MEM_ALIGNMENT); const size_t lit_id_hash_table_size = JERRY_ALIGNUP (lit_id_hash_table_get_size_for_table (buckets_count, blocks_count), MEM_ALIGNMENT); uint8_t *buffer_p = (uint8_t*) mem_heap_alloc_block (bytecode_array_size + lit_id_hash_table_size, MEM_HEAP_ALLOC_LONG_TERM); lit_id_hash_table *lit_id_hash = lit_id_hash_table_init (buffer_p + bytecode_array_size, lit_id_hash_table_size, buckets_count, blocks_count); const vm_instr_t *instrs_p = scopes_tree_raw_data (current_scope, buffer_p, bytecode_array_size, lit_id_hash); insts_data_header_t *header_p = (insts_data_header_t*) buffer_p; MEM_CP_SET_POINTER (header_p->next_instrs_cp, bytecode_data.instrs_p); header_p->instructions_number = instrs_count; bytecode_data.instrs_p = instrs_p; if (print_instrs) { lit_dump_literals (); serializer_print_instrs (instrs_p, bytecode_data.instrs_count); } return instrs_p; }
/**
 * Calculate minimum chunks count needed for block with specified size of allocated data area.
 *
 * @return chunks count
 */
static size_t
mem_get_block_chunks_count_from_data_size (size_t block_allocated_size) /**< size of block's allocated area */
{
  /* Total footprint is the block header plus the data area,
   * rounded up to a whole number of heap chunks. */
  const size_t total_size = sizeof (mem_block_header_t) + block_allocated_size;

  return JERRY_ALIGNUP (total_size, MEM_HEAP_CHUNK_SIZE) / MEM_HEAP_CHUNK_SIZE;
} /* mem_get_block_chunks_count_from_data_size */
/** * Register bytecode and idx map from snapshot * * NOTE: * If is_copy flag is set, bytecode is copied from snapshot, else bytecode is referenced directly * from snapshot * * @return pointer to byte-code header, upon success, * NULL - upon failure (i.e., in case snapshot format is not valid) */ const bytecode_data_header_t * serializer_load_bytecode_with_idx_map (const uint8_t *bytecode_and_idx_map_p, /**< buffer with instructions array * and idx to literals map from * snapshot */ uint32_t bytecode_size, /**< size of instructions array */ uint32_t idx_to_lit_map_size, /**< size of the idx to literals map */ const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map of in-snapshot * literal offsets * to literal identifiers, * created in literal * storage */ uint32_t literals_num, /**< number of literals */ bool is_copy) /** flag, indicating whether the passed in-snapshot data * should be copied to engine's memory (true), * or it can be referenced until engine is stopped * (i.e. until call to jerry_cleanup) */ { const uint8_t *idx_to_lit_map_p = bytecode_and_idx_map_p + bytecode_size; size_t instructions_number = bytecode_size / sizeof (vm_instr_t); size_t blocks_count = JERRY_ALIGNUP (instructions_number, BLOCK_SIZE) / BLOCK_SIZE; uint32_t idx_num_total; size_t idx_to_lit_map_offset = 0; if (!jrt_read_from_buffer_by_offset (idx_to_lit_map_p, idx_to_lit_map_size, &idx_to_lit_map_offset, &idx_num_total)) { return NULL; } const size_t bytecode_alloc_size = JERRY_ALIGNUP (bytecode_size, MEM_ALIGNMENT); const size_t hash_table_size = lit_id_hash_table_get_size_for_table (idx_num_total, blocks_count); const size_t header_and_hash_table_size = JERRY_ALIGNUP (sizeof (bytecode_data_header_t) + hash_table_size, MEM_ALIGNMENT); const size_t alloc_size = header_and_hash_table_size + (is_copy ? 
bytecode_alloc_size : 0); uint8_t *buffer_p = (uint8_t*) mem_heap_alloc_block (alloc_size, MEM_HEAP_ALLOC_LONG_TERM); bytecode_data_header_t *header_p = (bytecode_data_header_t *) buffer_p; vm_instr_t *instrs_p; vm_instr_t *snapshot_instrs_p = (vm_instr_t *) bytecode_and_idx_map_p; if (is_copy) { instrs_p = (vm_instr_t *) (buffer_p + header_and_hash_table_size); memcpy (instrs_p, snapshot_instrs_p, bytecode_size); } else { instrs_p = snapshot_instrs_p; } uint8_t *lit_id_hash_table_buffer_p = buffer_p + sizeof (bytecode_data_header_t); if (lit_id_hash_table_load_from_snapshot (blocks_count, idx_num_total, idx_to_lit_map_p + idx_to_lit_map_offset, idx_to_lit_map_size - idx_to_lit_map_offset, lit_map_p, literals_num, lit_id_hash_table_buffer_p, hash_table_size) && (vm_instr_counter_t) instructions_number == instructions_number) { MEM_CP_SET_NON_NULL_POINTER (header_p->lit_id_hash_cp, lit_id_hash_table_buffer_p); header_p->instrs_p = instrs_p; header_p->instrs_count = (vm_instr_counter_t) instructions_number; MEM_CP_SET_POINTER (header_p->next_header_cp, first_bytecode_header_p); first_bytecode_header_p = header_p; return header_p; } else { mem_heap_free_block (buffer_p); return NULL; } } /* serializer_load_bytecode_with_idx_map */
/**
 * Register bytecode and supplementary data of a single scope from snapshot
 *
 * NOTE:
 *      If is_copy flag is set, bytecode is copied from snapshot, else bytecode is referenced directly
 *      from snapshot
 *
 * @return pointer to byte-code header, upon success,
 *         NULL - upon failure (i.e., in case snapshot format is not valid)
 */
static bytecode_data_header_t *
bc_load_bytecode_with_idx_map (const uint8_t *snapshot_data_p, /**< buffer with instructions array
                                                                *   and idx to literals map from
                                                                *   snapshot */
                               size_t snapshot_size, /**< remaining size of snapshot */
                               const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map of in-snapshot
                                                                                     *   literal offsets
                                                                                     *   to literal identifiers,
                                                                                     *   created in literal
                                                                                     *   storage */
                               uint32_t literals_num, /**< number of literals */
                               bool is_copy, /** flag, indicating whether the passed in-snapshot data
                                              *  should be copied to engine's memory (true),
                                              *  or it can be referenced until engine is stopped
                                              *  (i.e. until call to jerry_cleanup) */
                               uint32_t *out_bytecode_data_size) /**< out: size occupied by bytecode data
                                                                  *   in snapshot */
{
  /* Read the per-scope snapshot header from the start of the remaining data */
  size_t buffer_offset = 0;
  jerry_snapshot_bytecode_header_t bytecode_header;
  if (!jrt_read_from_buffer_by_offset (snapshot_data_p,
                                       snapshot_size,
                                       &buffer_offset,
                                       &bytecode_header,
                                       sizeof (bytecode_header)))
  {
    return NULL;
  }

  *out_bytecode_data_size = bytecode_header.size;

  /* Skip the alignment padding that follows the header in the snapshot */
  buffer_offset += (JERRY_ALIGNUP (sizeof (jerry_snapshot_bytecode_header_t), MEM_ALIGNMENT)
                    - sizeof (jerry_snapshot_bytecode_header_t));

  JERRY_ASSERT (bytecode_header.size <= snapshot_size);

  /* Read uid->lit_cp hash table size.
   * In-snapshot layout after the aligned header is:
   * [instructions][var decl literal offsets][idx->lit map]
   * NOTE(review): the stray second '+' below is a harmless unary plus. */
  const uint8_t *idx_to_lit_map_p = (snapshot_data_p + buffer_offset +
                                     + bytecode_header.instrs_size
                                     + bytecode_header.var_decls_count * sizeof (uint32_t));

  size_t instructions_number = bytecode_header.instrs_size / sizeof (vm_instr_t);
  size_t blocks_count = JERRY_ALIGNUP (instructions_number, BLOCK_SIZE) / BLOCK_SIZE;

  uint32_t idx_num_total;
  size_t idx_to_lit_map_offset = 0;
  if (!jrt_read_from_buffer_by_offset (idx_to_lit_map_p,
                                       bytecode_header.idx_to_lit_map_size,
                                       &idx_to_lit_map_offset,
                                       &idx_num_total,
                                       sizeof (idx_num_total)))
  {
    return NULL;
  }

  /* Alloc bytecode_header for runtime.
   * Engine-side layout:
   * [bytecode_data_header_t][hash table][declarations][instructions, only if is_copy] */
  const size_t bytecode_alloc_size = JERRY_ALIGNUP (bytecode_header.instrs_size, MEM_ALIGNMENT);
  const size_t hash_table_size = lit_id_hash_table_get_size_for_table (idx_num_total, blocks_count);
  const size_t declarations_area_size = JERRY_ALIGNUP (bytecode_header.func_scopes_count * sizeof (mem_cpointer_t)
                                                       + bytecode_header.var_decls_count * sizeof (lit_cpointer_t),
                                                       MEM_ALIGNMENT);
  const size_t header_and_tables_size = JERRY_ALIGNUP ((sizeof (bytecode_data_header_t)
                                                        + hash_table_size
                                                        + declarations_area_size),
                                                       MEM_ALIGNMENT);
  const size_t alloc_size = header_and_tables_size + (is_copy ? bytecode_alloc_size : 0);

  uint8_t *buffer_p = (uint8_t*) mem_heap_alloc_block (alloc_size, MEM_HEAP_ALLOC_LONG_TERM);
  bytecode_data_header_t *header_p = (bytecode_data_header_t *) buffer_p;

  vm_instr_t *instrs_p;
  vm_instr_t *snapshot_instrs_p = (vm_instr_t *) (snapshot_data_p + buffer_offset);
  if (is_copy)
  {
    /* Copy instructions into the engine's own allocation */
    instrs_p = (vm_instr_t *) (buffer_p + header_and_tables_size);
    memcpy (instrs_p, snapshot_instrs_p, bytecode_header.instrs_size);
  }
  else
  {
    /* Reference the snapshot's instructions in place */
    instrs_p = snapshot_instrs_p;
  }

  buffer_offset += bytecode_header.instrs_size; /* buffer_offset is now offset of variable declarations */

  /* Read uid->lit_cp hash table */
  uint8_t *lit_id_hash_table_buffer_p = buffer_p + sizeof (bytecode_data_header_t);
  if (!(lit_id_hash_table_load_from_snapshot (blocks_count,
                                              idx_num_total,
                                              idx_to_lit_map_p + idx_to_lit_map_offset,
                                              bytecode_header.idx_to_lit_map_size - idx_to_lit_map_offset,
                                              lit_map_p,
                                              literals_num,
                                              lit_id_hash_table_buffer_p,
                                              hash_table_size)
        && (vm_instr_counter_t) instructions_number == instructions_number))
  {
    mem_heap_free_block (buffer_p);
    return NULL;
  }

  /* Fill with NULLs child scopes declarations for this scope */
  mem_cpointer_t *declarations_p = (mem_cpointer_t *) (buffer_p + sizeof (bytecode_data_header_t) + hash_table_size);
  memset (declarations_p, 0, bytecode_header.func_scopes_count * sizeof (mem_cpointer_t));

  /* Read variable declarations for this scope: each snapshot literal offset is
   * translated to an engine literal pointer through lit_map_p */
  lit_cpointer_t *var_decls_p = (lit_cpointer_t *) (declarations_p + bytecode_header.func_scopes_count);
  for (uint32_t i = 0; i < bytecode_header.var_decls_count; i++)
  {
    uint32_t lit_offset_from_snapshot;
    if (!jrt_read_from_buffer_by_offset (snapshot_data_p,
                                         buffer_offset + bytecode_header.var_decls_count * sizeof (uint32_t),
                                         &buffer_offset,
                                         &lit_offset_from_snapshot,
                                         sizeof (lit_offset_from_snapshot)))
    {
      mem_heap_free_block (buffer_p);
      return NULL;
    }

    /**
     * TODO: implement binary search here
     */
    lit_cpointer_t lit_cp = NOT_A_LITERAL;
    uint32_t j;
    for (j = 0; j < literals_num; j++)
    {
      if (lit_map_p[j].literal_offset == lit_offset_from_snapshot)
      {
        lit_cp.packed_value = lit_map_p[j].literal_id.packed_value;
        break;
      }
    }

    /* Unresolvable literal offset means the snapshot is invalid */
    if (j == literals_num)
    {
      mem_heap_free_block (buffer_p);
      return NULL;
    }

    var_decls_p[i] = lit_cp;
  }

  /* Fill bytecode_data_header.
   * NOTE(review): bytecode_header.is_args_moved_to_regs is passed twice in
   * a row below; the sibling call in bc_dump_single_scope passes distinct
   * values in these two positions - confirm against the
   * bc_fill_bytecode_data_header prototype whether this is intentional. */
  bc_fill_bytecode_data_header (header_p,
                                (lit_id_hash_table *) lit_id_hash_table_buffer_p,
                                instrs_p,
                                declarations_p,
                                (uint16_t) bytecode_header.func_scopes_count,
                                (uint16_t) bytecode_header.var_decls_count,
                                bytecode_header.is_strict,
                                bytecode_header.is_ref_arguments_identifier,
                                bytecode_header.is_ref_eval_identifier,
                                bytecode_header.is_args_moved_to_regs,
                                bytecode_header.is_args_moved_to_regs,
                                bytecode_header.is_no_lex_env);

  return header_p;
} /* bc_load_bytecode_with_idx_map */
/**
 * Dump byte-code and idx-to-literal map of a single scope to snapshot
 *
 * @return true, upon success (i.e. buffer size is enough),
 *         false - otherwise.
 */
static bool
bc_save_bytecode_with_idx_map (uint8_t *buffer_p, /**< buffer to dump to */
                               size_t buffer_size, /**< buffer size */
                               size_t *in_out_buffer_offset_p, /**< in-out: buffer write offset */
                               const bytecode_data_header_t *bytecode_data_p, /**< byte-code data */
                               const lit_mem_to_snapshot_id_map_entry_t *lit_map_p, /**< map from literal
                                                                                     *   identifiers in
                                                                                     *   literal storage
                                                                                     *   to literal offsets
                                                                                     *   in snapshot */
                               uint32_t literals_num) /**< literals number */
{
  /* Each scope's dump must start at an aligned offset (the loader relies on it) */
  JERRY_ASSERT (JERRY_ALIGNUP (*in_out_buffer_offset_p, MEM_ALIGNMENT) == *in_out_buffer_offset_p);

  /* Stage the snapshot header locally; it is written to the buffer LAST,
   * because its size / var_decls_count fields are only known after dumping */
  jerry_snapshot_bytecode_header_t bytecode_header;
  bytecode_header.func_scopes_count = bytecode_data_p->func_scopes_count;
  bytecode_header.var_decls_count = bytecode_data_p->var_decls_count;
  bytecode_header.is_strict = bytecode_data_p->is_strict;
  bytecode_header.is_ref_arguments_identifier = bytecode_data_p->is_ref_arguments_identifier;
  bytecode_header.is_ref_eval_identifier = bytecode_data_p->is_ref_eval_identifier;
  bytecode_header.is_args_moved_to_regs = bytecode_data_p->is_args_moved_to_regs;
  bytecode_header.is_no_lex_env = bytecode_data_p->is_no_lex_env;

  /* Remember where the header goes; skip past its aligned size for now */
  size_t bytecode_header_offset = *in_out_buffer_offset_p;

  /* Dump instructions */
  *in_out_buffer_offset_p += JERRY_ALIGNUP (sizeof (jerry_snapshot_bytecode_header_t), MEM_ALIGNMENT);

  vm_instr_counter_t instrs_num = bytecode_data_p->instrs_count;
  const size_t instrs_array_size = sizeof (vm_instr_t) * instrs_num;
  if (*in_out_buffer_offset_p + instrs_array_size > buffer_size)
  {
    return false;
  }
  memcpy (buffer_p + *in_out_buffer_offset_p, bytecode_data_p->instrs_p, instrs_array_size);
  *in_out_buffer_offset_p += instrs_array_size;

  bytecode_header.instrs_size = (uint32_t) (sizeof (vm_instr_t) * instrs_num);

  /* Dump variable declarations: one uint32_t literal offset per non-null
   * declaration; null entries are counted and excluded from the header */
  mem_cpointer_t *func_scopes_p = MEM_CP_GET_POINTER (mem_cpointer_t, bytecode_data_p->declarations_cp);
  lit_cpointer_t *var_decls_p = (lit_cpointer_t *) (func_scopes_p + bytecode_data_p->func_scopes_count);
  uint32_t null_var_decls_num = 0;
  for (uint32_t i = 0; i < bytecode_header.var_decls_count; ++i)
  {
    lit_cpointer_t lit_cp = var_decls_p[i];

    if (lit_cp.packed_value == MEM_CP_NULL)
    {
      null_var_decls_num++;
      continue;
    }

    uint32_t offset = bc_find_lit_offset (lit_cp, lit_map_p, literals_num);
    if (!jrt_write_to_buffer_by_offset (buffer_p, buffer_size, in_out_buffer_offset_p, &offset, sizeof (offset)))
    {
      return false;
    }
  }
  /* The header records only the declarations actually written */
  bytecode_header.var_decls_count -= null_var_decls_num;

  /* Dump uid->lit_cp hash table */
  lit_id_hash_table *lit_id_hash_p = MEM_CP_GET_POINTER (lit_id_hash_table, bytecode_data_p->lit_id_hash_cp);
  uint32_t idx_to_lit_map_size = lit_id_hash_table_dump_for_snapshot (buffer_p,
                                                                      buffer_size,
                                                                      in_out_buffer_offset_p,
                                                                      lit_id_hash_p,
                                                                      lit_map_p,
                                                                      literals_num,
                                                                      instrs_num);

  /* Zero return signals the dump did not fit into the buffer */
  if (idx_to_lit_map_size == 0)
  {
    return false;
  }

  bytecode_header.idx_to_lit_map_size = idx_to_lit_map_size;

  /* Align to write next bytecode data at aligned address */
  bytecode_header.size = (uint32_t) (*in_out_buffer_offset_p - bytecode_header_offset);
  JERRY_ASSERT (bytecode_header.size == JERRY_ALIGNUP (sizeof (jerry_snapshot_bytecode_header_t), MEM_ALIGNMENT)
                                        + bytecode_header.instrs_size
                                        + bytecode_header.var_decls_count * sizeof (uint32_t)
                                        + idx_to_lit_map_size);

  if (!bc_align_data_in_output_buffer (&bytecode_header.size,
                                       buffer_p,
                                       buffer_size,
                                       in_out_buffer_offset_p))
  {
    return false;
  }

  /* Dump header at the saved offset */
  if (!jrt_write_to_buffer_by_offset (buffer_p,
                                      buffer_size,
                                      &bytecode_header_offset,
                                      &bytecode_header,
                                      sizeof (bytecode_header)))
  {
    return false;
  }

  return true;
} /* bc_save_bytecode_with_idx_map */
/**
 * Dump single scopes tree into bytecode
 *
 * @return pointer to bytecode header of the outer most scope
 */
bytecode_data_header_t *
bc_dump_single_scope (scopes_tree scope_p) /**< a node of scopes tree */
{
  const size_t entries_count = scope_p->max_uniq_literals_num;
  const vm_instr_counter_t instrs_count = scopes_tree_instrs_num (scope_p);
  const size_t blocks_count = JERRY_ALIGNUP (instrs_count, BLOCK_SIZE) / BLOCK_SIZE;
  const size_t func_scopes_count = scopes_tree_child_scopes_num (scope_p);
  /* NOTE(review): implicit narrowing to uint16_t here - unlike
   * func_scopes_count below, this value is not range-checked */
  const uint16_t var_decls_count = linked_list_get_length (scope_p->var_decls);

  /* Sizes of the regions of the single long-term allocation:
   * [bytecode_data_header_t][hash table][declarations][instructions] */
  const size_t bytecode_size = JERRY_ALIGNUP (instrs_count * sizeof (vm_instr_t), MEM_ALIGNMENT);
  const size_t hash_table_size = lit_id_hash_table_get_size_for_table (entries_count, blocks_count);
  const size_t declarations_area_size = JERRY_ALIGNUP (func_scopes_count * sizeof (mem_cpointer_t)
                                                       + var_decls_count * sizeof (lit_cpointer_t),
                                                       MEM_ALIGNMENT);
  const size_t header_and_tables_size = JERRY_ALIGNUP ((sizeof (bytecode_data_header_t)
                                                        + hash_table_size
                                                        + declarations_area_size),
                                                       MEM_ALIGNMENT);

  uint8_t *buffer_p = (uint8_t *) mem_heap_alloc_block (bytecode_size + header_and_tables_size,
                                                        MEM_HEAP_ALLOC_LONG_TERM);

  /* Hash table follows the header */
  lit_id_hash_table *lit_id_hash_p = lit_id_hash_table_init (buffer_p + sizeof (bytecode_data_header_t),
                                                             hash_table_size,
                                                             entries_count,
                                                             blocks_count);

  /* Declarations area: child-scope pointers (filled in later, start as NULL)
   * followed by variable declaration literal pointers */
  mem_cpointer_t *declarations_p = (mem_cpointer_t *) (buffer_p + sizeof (bytecode_data_header_t) + hash_table_size);

  for (size_t i = 0; i < func_scopes_count; i++)
  {
    declarations_p[i] = MEM_CP_NULL;
  }

  scopes_tree_dump_var_decls (scope_p, (lit_cpointer_t *) (declarations_p + func_scopes_count));

  /* Instructions occupy the tail of the allocation */
  vm_instr_t *bytecode_p = (vm_instr_t *) (buffer_p + header_and_tables_size);

  JERRY_ASSERT (scope_p->max_uniq_literals_num >= lit_id_hash_p->current_bucket_pos);

  bytecode_data_header_t *header_p = (bytecode_data_header_t *) buffer_p;

  /* Guard the narrowing cast below: a child-scope count that does not fit
   * in uint16_t is treated as resource exhaustion */
  if ((uint16_t) func_scopes_count != func_scopes_count)
  {
    jerry_fatal (ERR_OUT_OF_MEMORY);
  }

  bc_fill_bytecode_data_header (header_p,
                                lit_id_hash_p,
                                bytecode_p,
                                declarations_p,
                                (uint16_t) func_scopes_count,
                                var_decls_count,
                                scope_p->strict_mode,
                                scope_p->ref_arguments,
                                scope_p->ref_eval,
                                scope_p->is_vars_and_args_to_regs_possible,
                                false,
                                false);

  /* Link the freshly-dumped header back into the scopes tree node */
  JERRY_ASSERT (scope_p->bc_header_cp == MEM_CP_NULL);
  MEM_CP_SET_NON_NULL_POINTER (scope_p->bc_header_cp, header_p);

  return header_p;
} /* bc_dump_single_scope */