U_32 si_get_inline_depth(StackIterator* si)
{
    //
    // Here we assume that JIT data blocks can store only InlineInfo
    // A better idea is to extend JIT_Data_Block with some type information
    // Example:
    //
    // enum JIT_Data_Block_Type { InlineInfo, Empty }
    //
    // struct JIT_Data_Block {
    //     JIT_Data_Block *next;
    //     JIT_Data_Block_Type type;
    //     char bytes[1];
    // };
    //
    // void *Method::allocate_JIT_data_block(size_t size, JIT *jit, JIT_Data_Block_Type)
    //

    ASSERT_NO_INTERPRETER
    CodeChunkInfo* cci = si_get_code_chunk_info(si);
    if (cci != NULL && cci->has_inline_info()) {
        return cci->get_jit()->get_inline_depth(
                cci->get_inline_info(),
                // FIXME64: no support for large methods
                (U_32)((POINTER_SIZE_INT)si_get_ip(si) - (POINTER_SIZE_INT)cci->get_code_block_addr()));
    }
    return 0;
}
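
The comment at the top of this function only sketches the proposed typed data blocks. Below is a minimal sketch of what the tagged allocator could look like, assuming a hypothetical pool allocator and per-method list head (allocate_jit_data_mem and _jit_data_blocks do not exist in the real Method class; offsetof requires <cstddef>):

enum JIT_Data_Block_Type { InlineInfo, Empty };

struct JIT_Data_Block {
    JIT_Data_Block *next;
    JIT_Data_Block_Type type;
    char bytes[1];              // payload starts here
};

// Hypothetical allocator: reserves room for the header plus the payload and
// tags the block, so callers such as si_get_inline_depth() could check the
// type before interpreting the bytes as InlineInfo.
void *Method::allocate_JIT_data_block(size_t size, JIT *jit,
                                      JIT_Data_Block_Type type)
{
    size_t total = offsetof(JIT_Data_Block, bytes) + size;
    JIT_Data_Block *block =
        (JIT_Data_Block *)allocate_jit_data_mem(total, jit); // assumed helper
    block->type = type;
    block->next = _jit_data_blocks;   // assumed per-method list head
    _jit_data_blocks = block;
    return block->bytes;
}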
Example #2
static void ti_enumerate_thread_stack(TIEnv* ti_env, StackIterator* si)
{
    ASSERT_NO_INTERPRETER

    TIIterationState *state = ti_env->iteration_state;
    state->depth = 0;

    while (!si_is_past_end(si)) {

        CodeChunkInfo* cci = si_get_code_chunk_info(si);
        if (cci) {
            state->method = (jmethodID)cci->get_method();
            state->root_kind = JVMTI_HEAP_ROOT_STACK_LOCAL;
            // FIXME: set up frame base (platform dependent!)
            cci->get_jit()->get_root_set_from_stack_frame(cci->get_method(), 0, si_get_jit_context(si));
        } else {
            state->method = (jmethodID)m2n_get_method(si_get_m2n(si));
            state->root_kind = JVMTI_HEAP_ROOT_JNI_LOCAL;
            oh_enumerate_handles(m2n_get_local_handles(si_get_m2n(si)));
        }
        state->depth += 1;
        si_goto_previous(si);
    }
    si_free(si);
}
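
ti_enumerate_thread_stack() expects a fully constructed iterator and frees it itself via si_free(). A minimal sketch of a caller, assuming a si_create_from_native(VM_thread*) constructor along the lines of the stack iterator API (treat the exact name and signature as an assumption):

static void ti_enumerate_stack_of(TIEnv* ti_env, VM_thread* thread)
{
    // Assumed constructor: builds an iterator positioned at the most recent
    // frame of an already-suspended thread.
    StackIterator* si = si_create_from_native(thread);

    // Walks every frame, reporting JIT frames as stack-local roots and M2N
    // (native) frames as JNI-local roots; the callee frees the iterator.
    ti_enumerate_thread_stack(ti_env, si);
}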
Example #3
Method_Handle si_get_method(StackIterator* si)
{
    ASSERT_NO_INTERPRETER
    CodeChunkInfo* cci = si_get_code_chunk_info(si);
    if (cci)
        return cci->get_method();
    else
        return m2n_get_method(si_get_m2n(si));
}
Example #4
File: compile.cpp  Project: dacut/juliet
JIT_Result compile_do_compilation_jit(Method* method, JIT* jit)
{
    // Time stamp for counting the total compilation time
    apr_time_t start;

    Global_Env * vm_env = VM_Global_State::loader_env;

    assert(method);
    assert(jit);

    if (!parallel_jit) {
        vm_env->p_jit_a_method_lock->_lock();
        // MikhailF reports that each JIT in a recompilation chain has its own
        // JIT* pointer.
        // If, in addition to recompilation chains, one adds recompilation loops,
        // this check can be skipped, or main_code_chunk_id should be modified.

        if (NULL != method->get_chunk_info_no_create_mt(jit, CodeChunkInfo::main_code_chunk_id)) {
            vm_env->p_jit_a_method_lock->_unlock();
            return JIT_SUCCESS;
        }
    }

    OpenMethodExecutionParams flags = {0}; 
    jvmti_get_compilation_flags(&flags);
    flags.exe_insert_write_barriers = gc_requires_barriers();

    Compilation_Handle ch;
    ch.env = VM_Global_State::loader_env;
    ch.jit = jit;

    start = apr_time_now();

    TRACE("compile_do_compilation_jit(): calling jit->compile_method_with_params() for method " << method );

    JIT_Result res = jit->compile_method_with_params(&ch, method, flags);

    TRACE("compile_do_compilation_jit(): returned from jit->compile_method_with_params() for method " << method );

    UNSAFE_REGION_START
    // Non-atomic increment of a statistics counter;
    // the conversion is from microseconds to milliseconds
    vm_env->total_compilation_time += ((apr_time_now() - start)/1000);
    UNSAFE_REGION_END

    if (JIT_SUCCESS != res) {
        if (!parallel_jit) {
            vm_env->p_jit_a_method_lock->_unlock();
        }
        return res;
    }

    method->lock();
    for (CodeChunkInfo* cci = method->get_first_JIT_specific_info();  cci;  cci = cci->_next) {
        if (cci->get_jit() == jit) {
            compile_flush_generated_code_block((U_8*)cci->get_code_block_addr(), cci->get_code_block_size());
            // We assume the main chunk starts at the entry point
            if (cci->get_id() == CodeChunkInfo::main_code_chunk_id) {
                method->set_code_addr(cci->get_code_block_addr());
            }
        }
    }

    // Commit the compilation by marking the method as compiled
    method->set_state(Method::ST_Compiled);
    method->do_jit_recompiled_method_callbacks();
    method->apply_vtable_patches();
    method->unlock();
    if (!parallel_jit) {
        vm_env->p_jit_a_method_lock->_unlock();
    }

    // Find TI environment
    DebugUtilsTI *ti = vm_env->TI;

    // Call TI callbacks
    if (jvmti_should_report_event(JVMTI_EVENT_COMPILED_METHOD_LOAD)
        && ti->getPhase() == JVMTI_PHASE_LIVE)
    {
        jvmti_send_chunks_compiled_method_load_event(method);
    }
    return JIT_SUCCESS;
}
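
The compile_flush_generated_code_block() call above matters on architectures whose instruction caches are not coherent with data writes: the freshly written code must be flushed before set_code_addr() publishes it. A minimal sketch of such a flush, using GCC's __builtin___clear_cache as a stand-in for the platform-specific real implementation:

static void flush_code_block_sketch(U_8* addr, size_t size)
{
    // x86/x86-64 keep the instruction cache coherent with stores, so a
    // flush is unnecessary there; ARM and PowerPC require it.
#if defined(__GNUC__) && !defined(_IA32_) && !defined(_EM64T_)
    __builtin___clear_cache((char*)addr, (char*)addr + size);
#endif
}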
Example #5
// This function can be a safe point and should be called with disable recursion = 1
static ManagedObject * exn_propagate_exception(
    StackIterator * si,
    ManagedObject ** exn_obj,
    Class_Handle exn_class,
    Method_Handle exn_constr,
    U_8 * jit_exn_constr_args,
    jvalue* vm_exn_constr_args)
{
    assert(!hythread_is_suspend_enabled());
    ASSERT_RAISE_AREA;
    ASSERT_NO_INTERPRETER;

    assert(*exn_obj || exn_class);

    // Save the throw context
    StackIterator *throw_si = (StackIterator*) STD_ALLOCA(si_size());
    memcpy(throw_si, si, si_size());

    // Skip the first frame if it is an M2nFrame (which is always a transition
    // from managed code to the throw code). The M2nFrame will be removed from
    // the thread's M2nFrame list when control is transferred or the context is
    // copied to registers.
    if (si_is_native(si)) {
        si_goto_previous(si);
    }

    Method *interrupted_method;
    NativeCodePtr interrupted_method_location;
    JIT *interrupted_method_jit;
    bool restore_guard_page = p_TLS_vmthread->restore_guard_page;

    if (!si_is_native(si))
    {
        CodeChunkInfo *interrupted_cci = si_get_code_chunk_info(si);
        assert(interrupted_cci);
        interrupted_method = interrupted_cci->get_method();
        interrupted_method_location = si_get_ip(si);
        interrupted_method_jit = interrupted_cci->get_jit();
    }
    else
    {
        interrupted_method = m2n_get_method(si_get_m2n(si));
        interrupted_method_location = 0;
        interrupted_method_jit = 0;
    }

    if (NULL != *exn_obj)
    {
        // Gregory - When *exn_obj is NULL it means we're called from exn_athrow_regs,
        // where the IP points exactly to the right location. But when *exn_obj is
        // not NULL, we're called from exn_throw_for_JIT, where *exn_obj has already
        // been constructed and was thrown by code via athrow. In that case the IP
        // reported by the stack iterator is past the athrow bytecode and must be
        // moved back so that it lies within the interrupted method's bytecode.

        interrupted_method_location = (NativeCodePtr)((POINTER_SIZE_INT)interrupted_method_location - 1);

        // Determine the type of the exception for the type tests below.
        exn_class = (*exn_obj)->vt()->clss;
    }

#ifdef VM_STATS
    assert(exn_class);
    exn_class->class_thrown();
    UNSAFE_REGION_START
    VM_Statistics::get_vm_stats().num_exceptions++;
    UNSAFE_REGION_END
#endif // VM_STATS

    // Remove single step breakpoints which could have been set on the
    // exception bytecode
    DebugUtilsTI *ti = VM_Global_State::loader_env->TI;
    if (ti->isEnabled() && ti->is_single_step_enabled())
    {
        jvmti_thread_t jvmti_thread = jthread_self_jvmti();
        ti->vm_brpt->lock();
        if (NULL != jvmti_thread->ss_state)
        {
            jvmti_remove_single_step_breakpoints(ti, jvmti_thread);
        }
        ti->vm_brpt->unlock();
    }

    // When the VM is in its shutdown stage we still need to execute "finally"
    // clauses to release monitors and propagate the exception to the upper frames.
    Class_Handle search_exn_class = !VM_Global_State::loader_env->IsVmShutdowning()
        ? exn_class : VM_Global_State::loader_env->JavaLangObject_Class;

    if (!si_is_native(si))
    {
        bool same_frame = true;
        while (!si_is_past_end(si) && !si_is_native(si)) {
            CodeChunkInfo *cci = si_get_code_chunk_info(si);
            assert(cci);
            Method *method = cci->get_method();
            JIT *jit = cci->get_jit();
            assert(method && jit);
            NativeCodePtr ip = si_get_ip(si);
            bool is_ip_past = !!si_get_jit_context(si)->is_ip_past;

#ifdef VM_STATS
            cci->num_throws++;
#endif // VM_STATS

            // Examine this frame's exception handlers looking for a match
            unsigned num_handlers = cci->get_num_target_exception_handlers();
            for (unsigned i = 0; i < num_handlers; i++) {
                Target_Exception_Handler_Ptr handler =
                    cci->get_target_exception_handler_info(i);
                if (!handler)
                    continue;
                if (handler->is_in_range(ip, is_ip_past)
                    && handler->is_assignable(search_exn_class)) {
                    // Found a handler that catches the exception.
#ifdef VM_STATS
                    cci->num_catches++;
                    if (same_frame) {
                        VM_Statistics::get_vm_stats().num_exceptions_caught_same_frame++;
                    }
                    if (handler->is_exc_obj_dead()) {
                        VM_Statistics::get_vm_stats().num_exceptions_dead_object++;
                        if (!*exn_obj) {
                            VM_Statistics::get_vm_stats().num_exceptions_object_not_created++;
                        }
                    }
#endif // VM_STATS

                    if (restore_guard_page) {
                        bool res = check_stack_size_enough_for_exception_catch(si_get_sp(si));
                        // There must always be enough stack; otherwise program
                        // behavior is unspecified: finally blocks and monitor
                        // exits would not be executed
                        assert(res);
                        if (!res) {
                            break;
                        }

                    }

                    // Setup handler context
                    jit->fix_handler_context(method, si_get_jit_context(si));
                    si_set_ip(si, handler->get_handler_ip(), false);

                    // Start single step in exception handler
                    if (ti->isEnabled() && ti->is_single_step_enabled())
                    {
                        jvmti_thread_t jvmti_thread = jthread_self_jvmti();
                        ti->vm_brpt->lock();
                        if (NULL != jvmti_thread->ss_state)
                        {
                            uint16 bc;
                            NativeCodePtr ip = handler->get_handler_ip();
                            OpenExeJpdaError UNREF result =
                                jit->get_bc_location_for_native(method, ip, &bc);
                            assert(EXE_ERROR_NONE == result);

                            jvmti_StepLocation method_start = {(Method *)method, ip, bc, false};

                            jvmti_set_single_step_breakpoints(ti, jvmti_thread,
                                &method_start, 1);
                        }
                        ti->vm_brpt->unlock();
                    }

                    // Create exception if necessary
                    if (!*exn_obj && !handler->is_exc_obj_dead()) {
                        assert(!exn_raised());

                        *exn_obj = create_lazy_exception(exn_class, exn_constr,
                            jit_exn_constr_args, vm_exn_constr_args);
                    }

                    if (jvmti_is_exception_event_requested()) {
                        // Create exception if necessary
                        if (NULL == *exn_obj) {
                            *exn_obj = create_lazy_exception(exn_class, exn_constr,
                                jit_exn_constr_args, vm_exn_constr_args);
                        }

                        // Reload exception object pointer because it could have
                        // moved while calling JVMTI callback

                        *exn_obj = jvmti_jit_exception_event_callback_call(*exn_obj,
                            interrupted_method_jit, interrupted_method,
                            interrupted_method_location,
                            jit, method, handler->get_handler_ip());
                    }

                    CTRACE(("setting return pointer to %p", exn_obj));

                    si_set_return_pointer(si, (void **) exn_obj);
                    //si_free(throw_si);
                    return NULL;
                }
            }

            // No appropriate handler found, undo synchronization
            vm_monitor_exit_synchronized_method(si);

            jvalue ret_val = {(jlong)0};
            jvmti_process_method_exception_exit_event(
                reinterpret_cast<jmethodID>(method), JNI_TRUE, ret_val, si);

            // Goto previous frame
            si_goto_previous(si);
            same_frame = false;
        }
    }
    // Exception propagates to the native code
    assert(si_is_native(si));

    // The current thread's exception is set to the exception object, and 0/NULL
    // is returned to the native code
    if (*exn_obj == NULL) {
        *exn_obj = create_lazy_exception(exn_class, exn_constr,
            jit_exn_constr_args, vm_exn_constr_args);
    }
    assert(!hythread_is_suspend_enabled());

    CodeChunkInfo *catch_cci = si_get_code_chunk_info(si);
    Method *catch_method = NULL;
    if (catch_cci)
        catch_method = catch_cci->get_method();

    // Reload exception object pointer because it could have
    // moved while calling JVMTI callback
    if (exn_raised()) {
        //si_free(throw_si);
        return NULL;
    }

    *exn_obj = jvmti_jit_exception_event_callback_call(*exn_obj,
        interrupted_method_jit, interrupted_method, interrupted_method_location,
        NULL, NULL, NULL);

    //si_free(throw_si);
    return *exn_obj;
}   //exn_propagate_exception
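
The handler search in the loop above hinges on two tests: is_in_range(), which compares the faulting IP against the handler's native code range (is_ip_past compensates for return addresses that point one past the throwing instruction), and is_assignable(), which checks the catch type. A minimal sketch of the range test as a free function under assumed parameter names (_start_ip and _end_ip are illustrative, not Harmony's actual layout):

bool is_in_range_sketch(NativeCodePtr _start_ip, NativeCodePtr _end_ip,
                        NativeCodePtr ip, bool is_ip_past)
{
    // A return address points one past the throwing instruction, so shift
    // it back before comparing against the half-open handler range.
    POINTER_SIZE_INT addr = (POINTER_SIZE_INT)ip - (is_ip_past ? 1 : 0);
    return (POINTER_SIZE_INT)_start_ip <= addr
        && addr < (POINTER_SIZE_INT)_end_ip;
}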