/**
 * Record memory statistics upon entering an opcode's execution and print
 * an indented header line for it; increases the global indentation level
 * so nested opcode executions are visually nested in the trace.
 */
static void
interp_mem_stats_opcode_enter (opcode_counter_t opcode_position, /**< position of the opcode */
                               mem_heap_stats_t *out_heap_stats_p, /**< out: heap stats snapshot */
                               mem_pools_stats_t *out_pools_stats_p) /**< out: pools stats snapshot */
{
  if (likely (!interp_mem_stats_enabled))
  {
    return;
  }

  /* Clamp to the maximum so the prefix buffer indices below stay in bounds. */
  const uint32_t indent = JERRY_MIN (interp_mem_stats_print_indentation,
                                     INTERP_MEM_PRINT_INDENTATION_MAX);

  /* Build "    |" style prefix: spaces, a bar marker, then the terminator. */
  char prefix[INTERP_MEM_PRINT_INDENTATION_MAX + 2];
  memset (prefix, ' ', sizeof (prefix));
  prefix[indent] = '|';
  prefix[indent + 1] = '\0';

  /* Snapshot current heap / pools statistics for the caller to diff later. */
  interp_mem_get_stats (out_heap_stats_p, out_pools_stats_p, true, false);

  opcode_t opcode = vm_get_opcode (opcode_position);

  printf ("%s-- Opcode: %s (position %u) --\n",
          prefix, __op_names[opcode.op_idx], (uint32_t) opcode_position);

  /* Deepen indentation for any opcodes executed inside this one. */
  interp_mem_stats_print_indentation += INTERP_MEM_PRINT_INDENTATION_STEP;
}
/**
 * Get scope code flags from opcode specified by opcode counter
 *
 * The opcode at the given position must be a 'meta' opcode of type
 * OPCODE_META_TYPE_SCOPE_CODE_FLAGS; this is asserted in debug builds.
 *
 * @return mask of scope code flags
 */
opcode_scope_code_flags_t
vm_get_scope_flags (opcode_counter_t counter) /**< opcode counter */
{
  const opcode_t meta_opcode = vm_get_opcode (counter);

  JERRY_ASSERT (meta_opcode.op_idx == __op__idx_meta
                && meta_opcode.data.meta.type == OPCODE_META_TYPE_SCOPE_CODE_FLAGS);

  return (opcode_scope_code_flags_t) meta_opcode.data.meta.data_1;
} /* vm_get_scope_flags */
/**
 * 'Try' opcode handler.
 *
 * Runs the try block, then — depending on the meta opcodes that follow —
 * an optional catch block (only if the try completed with a throw) and an
 * optional finally block. The frame's position is advanced past the whole
 * try/catch/finally construct before returning.
 *
 * See also: ECMA-262 v5, 12.14
 *
 * @return completion value
 *         Returned value must be freed with ecma_free_completion_value
 */
ecma_completion_value_t
opfunc_try_block (opcode_t opdata, /**< operation data */
                  vm_frame_ctx_t *frame_ctx_p) /**< interpreter context */
{
  /* The opcode carries the try block's length as a split 16-bit offset. */
  const idx_t block_end_oc_idx_1 = opdata.data.try_block.oc_idx_1;
  const idx_t block_end_oc_idx_2 = opdata.data.try_block.oc_idx_2;
  const opcode_counter_t try_end_oc = (opcode_counter_t) (
    calc_opcode_counter_from_idx_idx (block_end_oc_idx_1, block_end_oc_idx_2) + frame_ctx_p->pos);

  frame_ctx_p->pos++;

  /* Execute the try block body. */
  vm_run_scope_t run_scope_try = { frame_ctx_p->pos, try_end_oc };
  ecma_completion_value_t try_completion = vm_loop (frame_ctx_p, &run_scope_try);

  /* Either the block completed abruptly somewhere inside the scope, or it
   * ran to completion exactly at the scope's end. */
  JERRY_ASSERT ((!ecma_is_completion_value_empty (try_completion) && frame_ctx_p->pos <= try_end_oc)
                || (ecma_is_completion_value_empty (try_completion) && frame_ctx_p->pos == try_end_oc));
  frame_ctx_p->pos = try_end_oc;

  /* A meta opcode (catch / finally / end) must follow the try block. */
  opcode_t next_opcode = vm_get_opcode (frame_ctx_p->opcodes_p, frame_ctx_p->pos);
  JERRY_ASSERT (next_opcode.op_idx == __op__idx_meta);

  if (next_opcode.data.meta.type == OPCODE_META_TYPE_CATCH)
  {
    const opcode_counter_t catch_end_oc = (opcode_counter_t) (
      read_meta_opcode_counter (OPCODE_META_TYPE_CATCH, frame_ctx_p) + frame_ctx_p->pos);
    frame_ctx_p->pos++;

    /* The catch body runs only when the try block threw. */
    if (ecma_is_completion_value_throw (try_completion))
    {
      /* Read the identifier that names the caught exception. */
      next_opcode = vm_get_opcode (frame_ctx_p->opcodes_p, frame_ctx_p->pos);
      JERRY_ASSERT (next_opcode.op_idx == __op__idx_meta);
      JERRY_ASSERT (next_opcode.data.meta.type == OPCODE_META_TYPE_CATCH_EXCEPTION_IDENTIFIER);

      lit_cpointer_t catch_exc_val_var_name_lit_cp = serializer_get_literal_cp_by_uid (next_opcode.data.meta.data_1,
                                                                                       frame_ctx_p->opcodes_p,
                                                                                       frame_ctx_p->pos);
      frame_ctx_p->pos++;

      ecma_string_t *catch_exc_var_name_str_p = ecma_new_ecma_string_from_lit_cp (catch_exc_val_var_name_lit_cp);

      /* Create a new declarative lexical environment that binds the catch
       * identifier to the thrown value (ECMA-262 v5, 12.14). */
      ecma_object_t *old_env_p = frame_ctx_p->lex_env_p;
      ecma_object_t *catch_env_p = ecma_create_decl_lex_env (old_env_p);
      ecma_completion_value_t completion = ecma_op_create_mutable_binding (catch_env_p,
                                                                           catch_exc_var_name_str_p,
                                                                           false);
      JERRY_ASSERT (ecma_is_completion_value_empty (completion));
      completion = ecma_op_set_mutable_binding (catch_env_p,
                                                catch_exc_var_name_str_p,
                                                ecma_get_completion_value_value (try_completion),
                                                false);
      JERRY_ASSERT (ecma_is_completion_value_empty (completion));

      ecma_deref_ecma_string (catch_exc_var_name_str_p);

      /* Run the catch body in the new environment; its completion replaces
       * the try block's throw completion. */
      frame_ctx_p->lex_env_p = catch_env_p;
      ecma_free_completion_value (try_completion);
      vm_run_scope_t run_scope_catch = { frame_ctx_p->pos, catch_end_oc };
      try_completion = vm_loop (frame_ctx_p, &run_scope_catch);

      /* Restore the previous lexical environment. */
      frame_ctx_p->lex_env_p = old_env_p;
      ecma_deref_object (catch_env_p);

      JERRY_ASSERT ((!ecma_is_completion_value_empty (try_completion) && frame_ctx_p->pos <= catch_end_oc)
                    || (ecma_is_completion_value_empty (try_completion) && frame_ctx_p->pos == catch_end_oc));
    }

    /* Skip past the catch block whether or not it was executed. */
    frame_ctx_p->pos = catch_end_oc;
  }

  next_opcode = vm_get_opcode (frame_ctx_p->opcodes_p, frame_ctx_p->pos);
  JERRY_ASSERT (next_opcode.op_idx == __op__idx_meta);

  if (next_opcode.data.meta.type == OPCODE_META_TYPE_FINALLY)
  {
    const opcode_counter_t finally_end_oc = (opcode_counter_t) (
      read_meta_opcode_counter (OPCODE_META_TYPE_FINALLY, frame_ctx_p) + frame_ctx_p->pos);
    frame_ctx_p->pos++;

    /* The finally block always runs. */
    vm_run_scope_t run_scope_finally = { frame_ctx_p->pos, finally_end_oc };
    ecma_completion_value_t finally_completion = vm_loop (frame_ctx_p, &run_scope_finally);

    JERRY_ASSERT ((!ecma_is_completion_value_empty (finally_completion) && frame_ctx_p->pos <= finally_end_oc)
                  || (ecma_is_completion_value_empty (finally_completion) && frame_ctx_p->pos == finally_end_oc));
    frame_ctx_p->pos = finally_end_oc;

    /* An abrupt finally completion overrides the try/catch result
     * (ECMA-262 v5, 12.14). */
    if (!ecma_is_completion_value_empty (finally_completion))
    {
      ecma_free_completion_value (try_completion);
      try_completion = finally_completion;
    }
  }

  /* Consume the closing 'end of try/catch/finally' meta opcode. */
  next_opcode = vm_get_opcode (frame_ctx_p->opcodes_p, frame_ctx_p->pos++);
  JERRY_ASSERT (next_opcode.op_idx == __op__idx_meta);
  JERRY_ASSERT (next_opcode.data.meta.type == OPCODE_META_TYPE_END_TRY_CATCH_FINALLY);

  return try_completion;
} /* opfunc_try_block */
int main(int ac, char** av) { t_vm* vm = vm_initialize(); t_process* process = (t_process*) malloc(sizeof(t_process)); int32 i; t_display* display; int32 update_display = 0; int32 was_pressed = 0; t_ring_buffer* ring_buffer; ring_buffer = ring_buffer_initialize(10, free); if (load_cores(vm, ac, av) <= 0) return -1; display = display_initialize(800, 600); vm_set_print_callback(vm, main_debug_print, ring_buffer); if (1) { while (vm->process_count && !display_should_exit(display)) { vm->cycle_current++; update_display = 1; int process_count = vm->process_count; for (i = 0; i < process_count; ++i) { t_process* process = vm->processes[i]; if (process->cycle_wait <= 0) { update_display = 0; vm_reset_process_io_op(process); if (process->current_opcode) vm_execute(vm, process); vm_get_opcode(vm, process); } else process->cycle_wait--; } if (vm->cycle_current > vm->cycle_to_die) { vm->cycle_current = 0; vm_kill_process_if_no_live(vm); } vm_clean_dead_process(vm); // update_display = 0; if (display_update_input(display) || update_display == 0) { display_print_ring_buffer(display, 0, 0, ring_buffer); display_step(vm, display); } } } else { int32 execute_one = 0; int32 current_keys_state[GLFW_KEY_LAST]; int32 previous_keys_state[GLFW_KEY_LAST]; memset(previous_keys_state, 0, GLFW_KEY_LAST * sizeof(int32)); memset(current_keys_state, 0, GLFW_KEY_LAST * sizeof(int32)); display_step(vm, display); while (vm->process_count && !display_should_exit(display)) { int32 executed = 0; int32 print_processes; int32 process_count = 0; current_keys_state[GLFW_KEY_S] = display_key_pressed(display, GLFW_KEY_S); current_keys_state[GLFW_KEY_P] = display_key_pressed(display, GLFW_KEY_P); if (!execute_one) execute_one = previous_keys_state[GLFW_KEY_S] && !current_keys_state[GLFW_KEY_S]; print_processes = previous_keys_state[GLFW_KEY_P] && !current_keys_state[GLFW_KEY_P]; memcpy(previous_keys_state, current_keys_state, sizeof(int32) * GLFW_KEY_LAST); if (execute_one) vm->cycle_current++; for (i = 0; 
i < vm->process_count; ++i) { t_process* process = vm->processes[i]; if (print_processes) vm_debug_print_process(vm, process); if (execute_one) { if (process->cycle_wait <= 0) { vm_reset_process_io_op(process); vm_execute(vm, process); vm_get_opcode(vm, process); executed++; if (vm->live_count >= NBR_LIVE) { vm->live_count = 0; vm->cycle_to_die -= vm->cycle_delta; } } process->cycle_wait--; } } if (executed) execute_one = 0; if (vm->cycle_current > vm->cycle_to_die) { vm->cycle_current = 0; vm_kill_process_if_no_live(vm); } vm_clean_dead_process(vm); executed += display_update_input(display); if (executed) display_step(vm, display); else glfwPollEvents(); } } ring_buffer_destroy(ring_buffer); display_destroy(display); vm_destroy(vm); }
/**
 * Record memory statistics upon leaving an opcode's execution: update the
 * context's peak counters, print the heap/pool deltas relative to the
 * snapshot taken at opcode entry, and restore the indentation level.
 *
 * Fix: the `%+5d` delta fields were passed `(uint32_t)` arguments (a
 * signed/unsigned format mismatch — UB per C11 fprintf); they are now cast
 * to `(int32_t)`. The final printf's `opcode_position` argument also gains
 * the `(uint32_t)` cast used everywhere else for consistency with `%u`.
 */
static void
interp_mem_stats_opcode_exit (int_data_t *int_data_p, /**< interpreter context */
                              opcode_counter_t opcode_position, /**< position of the opcode */
                              mem_heap_stats_t *heap_stats_before_p, /**< heap stats at opcode entry */
                              mem_pools_stats_t *pools_stats_before_p) /**< pools stats at opcode entry */
{
  if (likely (!interp_mem_stats_enabled))
  {
    return;
  }

  /* Undo the indentation increase done at opcode entry. */
  interp_mem_stats_print_indentation -= INTERP_MEM_PRINT_INDENTATION_STEP;

  /* Clamp to the maximum so the prefix buffer indices below stay in bounds. */
  const uint32_t indentation = JERRY_MIN (interp_mem_stats_print_indentation,
                                          INTERP_MEM_PRINT_INDENTATION_MAX);
  char indent_prefix[INTERP_MEM_PRINT_INDENTATION_MAX + 2];
  memset (indent_prefix, ' ', sizeof (indent_prefix));
  indent_prefix[indentation] = '|';
  indent_prefix[indentation + 1] = '\0';

  mem_heap_stats_t heap_stats_after;
  mem_pools_stats_t pools_stats_after;
  interp_mem_get_stats (&heap_stats_after, &pools_stats_after, false, true);

  /* Track per-context peaks across all opcodes executed in this context. */
  int_data_p->context_peak_allocated_heap_bytes = JERRY_MAX (int_data_p->context_peak_allocated_heap_bytes,
                                                             heap_stats_after.allocated_bytes);
  int_data_p->context_peak_waste_heap_bytes = JERRY_MAX (int_data_p->context_peak_waste_heap_bytes,
                                                         heap_stats_after.waste_bytes);
  int_data_p->context_peak_pools_count = JERRY_MAX (int_data_p->context_peak_pools_count,
                                                    pools_stats_after.pools_count);
  int_data_p->context_peak_allocated_pool_chunks = JERRY_MAX (int_data_p->context_peak_allocated_pool_chunks,
                                                              pools_stats_after.allocated_chunks);

  opcode_t opcode = vm_get_opcode (opcode_position);

  printf ("%s Allocated heap bytes: %5u -> %5u (%+5d, local %5u, peak %5u)\n",
          indent_prefix,
          (uint32_t) heap_stats_before_p->allocated_bytes,
          (uint32_t) heap_stats_after.allocated_bytes,
          (int32_t) (heap_stats_after.allocated_bytes - heap_stats_before_p->allocated_bytes),
          (uint32_t) (heap_stats_after.peak_allocated_bytes - JERRY_MAX (heap_stats_before_p->allocated_bytes,
                                                                         heap_stats_after.allocated_bytes)),
          (uint32_t) heap_stats_after.global_peak_allocated_bytes);

  /* Print the remaining stats only when they changed during the opcode. */
  if (heap_stats_before_p->waste_bytes != heap_stats_after.waste_bytes)
  {
    printf ("%s Waste heap bytes: %5u -> %5u (%+5d, local %5u, peak %5u)\n",
            indent_prefix,
            (uint32_t) heap_stats_before_p->waste_bytes,
            (uint32_t) heap_stats_after.waste_bytes,
            (int32_t) (heap_stats_after.waste_bytes - heap_stats_before_p->waste_bytes),
            (uint32_t) (heap_stats_after.peak_waste_bytes - JERRY_MAX (heap_stats_before_p->waste_bytes,
                                                                       heap_stats_after.waste_bytes)),
            (uint32_t) heap_stats_after.global_peak_waste_bytes);
  }

  if (pools_stats_before_p->pools_count != pools_stats_after.pools_count)
  {
    printf ("%s Pools: %5u -> %5u (%+5d, local %5u, peak %5u)\n",
            indent_prefix,
            (uint32_t) pools_stats_before_p->pools_count,
            (uint32_t) pools_stats_after.pools_count,
            (int32_t) (pools_stats_after.pools_count - pools_stats_before_p->pools_count),
            (uint32_t) (pools_stats_after.peak_pools_count - JERRY_MAX (pools_stats_before_p->pools_count,
                                                                        pools_stats_after.pools_count)),
            (uint32_t) pools_stats_after.global_peak_pools_count);
  }

  if (pools_stats_before_p->allocated_chunks != pools_stats_after.allocated_chunks)
  {
    printf ("%s Allocated pool chunks: %5u -> %5u (%+5d, local %5u, peak %5u)\n",
            indent_prefix,
            (uint32_t) pools_stats_before_p->allocated_chunks,
            (uint32_t) pools_stats_after.allocated_chunks,
            (int32_t) (pools_stats_after.allocated_chunks - pools_stats_before_p->allocated_chunks),
            (uint32_t) (pools_stats_after.peak_allocated_chunks - JERRY_MAX (pools_stats_before_p->allocated_chunks,
                                                                             pools_stats_after.allocated_chunks)),
            (uint32_t) pools_stats_after.global_peak_allocated_chunks);
  }

  printf ("%s-- End of execution of opcode %s (position %u) --\n\n",
          indent_prefix, __op_names[opcode.op_idx], (uint32_t) opcode_position);
}