void si_fill_from_native(StackIterator* si, VM_thread* thread) {
    memset(si, 0, sizeof(StackIterator));

    si->cci = NULL;                            // no compiled code info: the walk starts in native code
    si->jit_frame_context.p_rip = &si->ip;     // the context reads the IP through the iterator itself
    si->m2n_frame = m2n_get_last_frame(thread);
    si->ip = 0;
}
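The two-argument form above anchors the iterator at a given thread's last M2N frame; later examples on this page use a one-argument variant for the current thread. A minimal sketch of the setup from native code, using only calls that appear in these examples:

// Hedged sketch: allocate the iterator on the native stack and point
// it at the most recent M2N frame before walking or propagating.
StackIterator* si = (StackIterator*) STD_ALLOCA(si_size());
si_fill_from_native(si);
if (!si_is_past_end(si)) {
    M2nFrame* m2n = si_get_m2n(si);   // the frame the walk starts from
    // ... unwind or propagate an exception from here ...
}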
Example #2
static void m2n_free_local_handles() {
    assert(!hythread_is_suspend_enabled());

    // A pending exception takes priority: rethrow it before the
    // handles that may reference its object are released.
    if (exn_raised()) {
        exn_rethrow();
    }

    M2nFrame* m2n = m2n_get_last_frame();
    free_local_object_handles3(m2n->local_object_handles);
}
Example #3
static IDATA rt_jthread_monitor_enter(ManagedObject* monitor) {
    // Reserve a small stack-allocated frame of local object handles
    // (room for 4) and attach it to the last M2N frame.
    const unsigned handles_size = (unsigned)(sizeof(ObjectHandlesNew) + sizeof(ManagedObject*) * 4);
    ObjectHandlesNew* handles = (ObjectHandlesNew*) STD_ALLOCA(handles_size);
    handles->capacity = 4;
    handles->size = 0;
    handles->next = NULL;

    m2n_set_local_handles(m2n_get_last_frame(), (ObjectHandles*) handles);

    // Wrap the raw object in a handle so the GC can see and update it
    // while the thread may block on the monitor.
    ObjectHandle monitorJavaObj = oh_allocate_local_handle();
    monitorJavaObj->object = monitor;

    IDATA result = jthread_monitor_enter(monitorJavaObj);

    free_local_object_handles2(m2n_get_local_handles(m2n_get_last_frame()));
    m2n_set_local_handles(m2n_get_last_frame(), NULL);

    return result;
}
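A follow-up note on the pattern: the handle frame is carved out of the caller's stack with STD_ALLOCA, attached to the last M2N frame for the duration of the blocking call, and detached again before return, so no heap allocation or explicit cleanup path is needed.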
Example #4
// FIXME move to exception_impl.cpp
static void check_pop_frame(ManagedObject *exn) {
    if (exn == VM_Global_State::loader_env->popFrameException->object) {
        exn_clear();
        frame_type type = m2n_get_frame_type(m2n_get_last_frame());

        if (FRAME_POP_NOW == (FRAME_POP_MASK & type)) {
            jvmti_jit_do_pop_frame();
        }
    }
}
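The pop request is encoded in bits of the frame type, which is why the code masks with FRAME_POP_MASK and compares against FRAME_POP_NOW instead of testing the whole value.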
Example #5
void m2n_push_suspended_frame(VM_thread* thread, M2nFrame* m2nf, Registers* regs) 
{
    assert(m2nf);
    m2nf->p_lm2nf = (M2nFrame**)1;   // dummy non-NULL sentinel: no live location to keep in sync
    m2nf->method = NULL;
    m2nf->local_object_handles = NULL;
    m2nf->current_frame_type = FRAME_UNKNOWN;

    m2nf->rip  = (POINTER_SIZE_INT)regs->get_ip();
    m2nf->regs = regs;

    m2nf->prev_m2nf = m2n_get_last_frame(thread);
    m2n_set_last_frame(thread, m2nf);
}
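Two details worth noting: the captured Registers supply the instruction pointer for a thread that was stopped asynchronously, and the frame is linked into the thread's chain only after every field is filled in, so a concurrent stack walker presumably never observes a half-initialized frame.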
Example #6
static void rth_throw_lazy(Method * exn_constr)
{
#if defined(_IPF_) || defined(_EM64T_)
    LDIE(61, "Lazy exceptions are not supported on this platform");
#else
    U_8 *args = (U_8 *) (m2n_get_args(m2n_get_last_frame()) + 1);   // +1 to skip constructor
    if (NULL != exn_constr) {
        args += exn_constr->get_num_arg_slots() * 4 - 4;
    } else {
        args += 1*4 /*default constructor*/ - 4;
    }
    exn_athrow(NULL, *(Class_Handle *) args, exn_constr, args);
#endif
}   //rth_throw_lazy
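A quick check of the arithmetic in the else branch: a default constructor occupies one 4-byte argument slot on ia32, so the adjustment is 1*4 - 4 = 0 and args stays put, pointing at the slot that exn_athrow reads as a Class_Handle. With an explicit constructor, get_num_arg_slots() * 4 - 4 likewise advances args past every argument slot but the last.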
Example #7
// Alexei
// Check whether we can proceed with destructive stack unwinding,
// i.e. whether the last GC frame was created before the last M2N frame.
// We rely on the fact that newer stack objects have smaller addresses
// on the ia32 and em64t architectures.
static bool UNUSED is_gc_frame_before_m2n_frame()
{
    if (p_TLS_vmthread->gc_frames) {
        POINTER_SIZE_INT m2n_address =
            (POINTER_SIZE_INT) m2n_get_last_frame();
        POINTER_SIZE_INT gc_frame_address =
            (POINTER_SIZE_INT) p_TLS_vmthread->gc_frames;
        // gc frame is created before the last m2n frame
        return m2n_address < gc_frame_address;
    }
    else {
        return true;    // no GC frames, so nothing can be broken
    }
}
Example #8
bool set_unwindable(bool unwindable)
{
    M2nFrame* lastFrame = m2n_get_last_frame();

    if (interpreter_enabled() || (!lastFrame)) {
        assert(!unwindable);
        return false;
    }

    int lastFrameType = m2n_get_frame_type(lastFrame);
    bool previousValue = !(lastFrameType & FRAME_NON_UNWINDABLE);

    if (unwindable) {
        lastFrameType &= ~FRAME_NON_UNWINDABLE;
    } else {
        lastFrameType |= FRAME_NON_UNWINDABLE;
    }
    m2n_set_frame_type(lastFrame, (frame_type) lastFrameType);
    return previousValue;
}
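Because the previous state is returned, callers can bracket a region that must not be unwound with the usual save/restore idiom. A minimal sketch; do_sensitive_native_work is a hypothetical placeholder, not part of the VM API:

bool prev = set_unwindable(false);   // forbid destructive unwinding here
do_sensitive_native_work();          // hypothetical blocking/raising work
set_unwindable(prev);                // restore the caller's setting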
Example #9
Vector_Handle rth_multianewarrayhelper()
{
    ASSERT_THROW_AREA;
    M2nFrame* m2nf = m2n_get_last_frame();
    const unsigned max_dim = 255;
    int lens[max_dim];

#ifdef VM_STATS
    VM_Statistics::get_vm_stats().num_multianewarray++;  
#endif
    // The helper's arguments sit just above the saved return address
    // in the M2N frame; +1 skips the saved rip.
    uint64* args = (uint64*) m2n_get_frame_base(m2nf) + 1;
    Class* c = (Class*) args[0];
    unsigned dims = (unsigned)(args[1] & 0xFFFFffff);
    assert(dims <= max_dim);
    // The dimension lengths follow the first two arguments in reverse
    // order; copy them into lens[] in source order.
    uint64* lens_base = (uint64*)(args + 2);
    for (unsigned i = 0; i < dims; i++) {
        lens[i] = (int) lens_base[dims - i - 1];
    }
    return vm_rt_multianewarray_recursive(c, lens, dims);
}
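To make the reversal concrete, a hedged worked example (the slot values are illustrative, not taken from a real frame):

// Suppose dims == 2 and the JIT pushed the lengths so that
// lens_base[dims-1] holds the first (outermost) dimension:
//   lens_base[0] == 3, lens_base[1] == 2
// The copy loop restores source order:
//   lens[0] = lens_base[1] = 2;  lens[1] = lens_base[0] = 3;
// vm_rt_multianewarray_recursive(c, lens, 2) then builds a 2x3 array.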
Example #10
bool is_unwindable()
{
    M2nFrame* lastFrame = m2n_get_last_frame();
    return !(interpreter_enabled() || (!lastFrame)
         || (m2n_get_frame_type(lastFrame) & FRAME_NON_UNWINDABLE));
}
Example #11
void
interp_ti_enumerate_root_set_single_thread_on_stack(jvmtiEnv* ti_env, VM_thread *thread) {
    TRACE2("enumeration", "interp_enumerate_root_set_single_thread_on_stack()");
    StackIterator_interp* si;
    si = interp_si_create_from_native(thread);
    
    int i;
    int depth;
    DEBUG_GC("\n\nGC enumeration in interpreter stack:\n");
    for (depth = 0; !interp_si_is_past_end(si); depth++) {
        Method* method = (Method*)interp_si_get_method(si);
        jmethodID method_id = (jmethodID)method;
        int slot = 0;

        if (si->This) {
            vm_ti_enumerate_stack_root(ti_env,
                    (void**)&si->This, si->This,
                    JVMTI_HEAP_ROOT_STACK_LOCAL,
                    depth, method_id, slot++);
            DEBUG_GC("  [THIS]: " << si->This);
        }

        if (si->exc) {
            vm_ti_enumerate_stack_root(ti_env,
                (void**)&si->exc, si->exc,
                JVMTI_HEAP_ROOT_STACK_LOCAL,
                depth, method_id, slot++);
            DEBUG_GC("  [EXCEPTION]: " << si->exc);
        }

        if (method->is_native()) {
            DEBUG_GC("[METHOD <native>]: " << method);
            interp_si_goto_previous(si);
            continue;
        }

        DEBUG_GC("[METHOD "<< si->stack.size << " " << (int)si->locals.varNum << "]: "
                << method);

        if (si->stack.size)
            for(i = 0; i <= si->stack.index; i++) {
                if (si->stack.refs[i] == FLAG_OBJECT) {
                    DEBUG_GC("  Stack[" << i << "] ");
                    REF* ref = &si->stack.data[i].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        DEBUG_GC("NULL");
                    } else {
                        DEBUG_GC(obj);
                        vm_ti_enumerate_stack_root(ti_env,
                            ref, (Managed_Object_Handle)obj, 
                            JVMTI_HEAP_ROOT_STACK_LOCAL,
                            depth, method_id, slot++);
                    }
                }
            }

        unsigned j;
        if (si->locals.varNum)
            for(j = 0; j < si->locals.varNum; j++) {
                if (si->locals.refs[j] == FLAG_OBJECT) {
                    DEBUG_GC("  Locals[" << j << "] ");
                    REF* ref = &si->locals.vars[j].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        DEBUG_GC("NULL\n");
                    } else {
                        DEBUG_GC(obj);
                        vm_ti_enumerate_stack_root(ti_env,
                            ref, (Managed_Object_Handle)obj, 
                            JVMTI_HEAP_ROOT_STACK_LOCAL,
                            depth, method_id, slot++);
                    }
                }
            }
        MonitorList *ml = si->locked_monitors;
        while(ml) {
            vm_ti_enumerate_stack_root(ti_env,
                    &ml->monitor, ml->monitor,
                    JVMTI_HEAP_ROOT_MONITOR,
                    depth, method_id, slot++);
            ml = ml->next;
        }
        interp_si_goto_previous(si);
    }

    // enumerate m2n frames
    M2nFrame *m2n = m2n_get_last_frame(thread);
    while(m2n) {
        oh_enumerate_handles(m2n_get_local_handles(m2n));
        m2n = m2n_get_previous_frame(m2n);
    }
}
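This walk and interp_enumerate_root_set_single_thread_on_stack below differ mainly in the reporting sink: here every root goes to the JVMTI heap callback together with its depth, method, and slot number, while the GC variant hands the same locations to vm_enumerate_root_reference and vm_enumerate. The trailing M2N loop is shared by both: references held by native frames live in local handle frames and are enumerated separately from the interpreter stack.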
Example #12
void
interp_enumerate_root_set_single_thread_on_stack(VM_thread *thread) {
    TRACE2("enumeration", "interp_enumerate_root_set_single_thread_on_stack()");
    StackIterator_interp* si;
    si = interp_si_create_from_native(thread);
    
    int i;
    DEBUG_GC("\n\nGC enumeration in interpreter stack:\n");
    while(!interp_si_is_past_end(si)) {
        Method* method = (Method*)interp_si_get_method(si);
        method = method;    // self-assignment keeps "method" used when DEBUG_GC compiles to nothing

        if (si->This) {
            vm_enumerate_root_reference((void**)&si->This, FALSE);
            DEBUG_GC("  [THIS]: " << si->This);
        }

        if (si->exc) {
            vm_enumerate_root_reference((void**)&si->exc, FALSE);
            DEBUG_GC("  [EXCEPTION]: " << si->exc);
        }

        if (method->is_native()) {
            DEBUG_GC("[METHOD <native>]: " << method);
            interp_si_goto_previous(si);
            continue;
        }

        DEBUG_GC("[METHOD "<< si->stack.size << " " << (int)si->locals.varNum << "]: "
                << method);

        if (si->stack.size)
            for(i = 0; i <= si->stack.index; i++) {
                if (si->stack.refs[i] == FLAG_OBJECT) {
                    DEBUG_GC("  Stack[" << i << "] ");
                    REF* ref = &si->stack.data[i].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        DEBUG_GC("NULL");
                    } else {
                        DEBUG_GC(obj);
                        vm_enumerate(ref, FALSE); // CHECK!!! can we enumerate uncompressed ref in compressed mode
                    }
                }
            }

        unsigned j;
        if (si->locals.varNum)
            for(j = 0; j < si->locals.varNum; j++) {
                if (si->locals.refs[j] == FLAG_OBJECT) {
                    DEBUG_GC("  Locals[" << j << "] ");
                    REF* ref = &si->locals.vars[j].ref;
                    ManagedObject *obj = UNCOMPRESS_INTERP(*ref);
                    if (obj == 0) {
                        DEBUG_GC("NULL\n");
                    } else {
                        DEBUG_GC(obj);
                        vm_enumerate(ref, FALSE); // CHECK!!! can we enumerate uncompressed ref in compressed mode
                    }
                }
            }
        MonitorList *ml = si->locked_monitors;
        while(ml) {
            vm_enumerate_root_reference((void**)&ml->monitor, FALSE);
            ml = ml->next;
        }
        interp_si_goto_previous(si);
    }

    // enumerate m2n frames
    M2nFrame *m2n = m2n_get_last_frame(thread);
    while(m2n) {
        oh_enumerate_handles(m2n_get_local_handles(m2n));
        m2n = m2n_get_previous_frame(m2n);
    }
}
Example #13
// This function can be a safe point and should be called with disable recursion = 1
void exn_athrow_regs(Registers * regs, Class_Handle exn_class, bool java_code, bool transfer_control)
{
    assert(!hythread_is_suspend_enabled());
    assert(exn_class);

#ifndef _IPF_
    M2nFrame *cur_m2nf = (M2nFrame *) STD_ALLOCA(m2n_get_size());
    M2nFrame *unw_m2nf;
    ManagedObject *exn_obj = NULL;
    StackIterator *si;
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    VM_thread* vmthread = p_TLS_vmthread;

    if (java_code)
        m2n_push_suspended_frame(vmthread, cur_m2nf, regs);
    else
        // Gregory -
        // Initialize the cur_m2nf pointer in case we have crashed in
        // native code that is unwindable, e.g. in the code that sets
        // the non-unwindable state for the native code area.
        cur_m2nf = m2n_get_last_frame();

    BEGIN_RAISE_AREA;

    si = (StackIterator*) STD_ALLOCA(si_size());
    si_fill_from_native(si);
    ManagedObject *local_exn_obj = NULL;
    exn_obj = exn_propagate_exception(si, &local_exn_obj, exn_class, NULL, NULL, NULL);

    //free local handles
    ObjectHandles* last_m2n_frame_handles = m2n_get_local_handles(cur_m2nf);

    if (last_m2n_frame_handles) {
        free_local_object_handles2(last_m2n_frame_handles);
    }

    if (ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_EXCEPTION_EVENT)) {
        VM_thread *thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr)
                jvmti_exception_catch_callback;

        si_copy_to_registers(si, regs);
        vm_set_exception_registers(thread, *regs);
        si_set_callback(si, &callback);
    } else if (p_TLS_vmthread->restore_guard_page) {
        VM_thread *thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr)
                exception_catch_callback;
        si_copy_to_registers(si, regs);
        vm_set_exception_registers(thread, *regs);
        si_set_callback(si, &callback);
    }

    si_copy_to_registers(si, regs);

    if (transfer_control) {
        // Let NCAI continue single-stepping in the exception handler
        ncai_setup_signal_step(&vmthread->jvmti_thread, (NativeCodePtr)regs->get_ip());

        set_exception_object_internal(exn_obj);
        si_transfer_control(si);
        assert(!"si_transfer_control should not return");
    } 

    unw_m2nf = si_get_m2n(si);
    //si_free(si);

    END_RAISE_AREA;

    set_exception_object_internal(exn_obj);
    m2n_set_last_frame(unw_m2nf);
#endif
}   //exn_athrow_regs
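Note the two exits from this function: when transfer_control is set, si_transfer_control jumps straight into the handler and never returns (hence the assert after it); otherwise the function returns normally after rewinding the thread's last M2N frame to the frame the exception propagated to, leaving the updated context in regs for the caller.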
Example #14
// This function can be a safe point and should be called with disable recursion = 1
void exn_throw_for_JIT(ManagedObject* exn_obj, Class_Handle exn_class,
    Method_Handle exn_constr, U_8* jit_exn_constr_args, jvalue* vm_exn_constr_args)
{
/*
 * !!!! NO LOGGER IS ALLOWED IN THIS FUNCTION !!!
 * !!!! RELEASE BUILD WILL BE BROKEN          !!!
 * !!!! NO TRACE2, INFO, WARN, ECHO, ASSERT, ...
 */
    assert(!hythread_is_suspend_enabled());

    if(exn_raised()) {
        return;
    }

    ASSERT_NO_INTERPRETER
    ASSERT_RAISE_AREA;

    if ((exn_obj == NULL) && (exn_class == NULL)) {
        exn_class = VM_Global_State::loader_env->java_lang_NullPointerException_Class;
    }
    ManagedObject* local_exn_obj = exn_obj;
    StackIterator* si = (StackIterator*) STD_ALLOCA(si_size());
    si_fill_from_native(si);

    if (exn_raised()) {
        return;
    }

#ifndef _IPF_
    assert(is_gc_frame_before_m2n_frame());
#endif // _IPF_

    assert(!exn_raised());

    if (si_is_past_end(si)) {
        //FIXME LAZY EXCEPTION (2006.05.12)
        // should be replaced by lazy version
        set_exception_object_internal(local_exn_obj);
        return;
    }

    si_transfer_all_preserved_registers(si);
    assert(!exn_raised());

    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    exn_obj = exn_propagate_exception(si, &local_exn_obj, exn_class, exn_constr,
        jit_exn_constr_args, vm_exn_constr_args);

    if (exn_raised()) {
        //si_free(si);
        return;
    }

    M2nFrame* m2nFrame = m2n_get_last_frame();
    ObjectHandles* last_m2n_frame_handles = m2n_get_local_handles(m2nFrame);

    if (last_m2n_frame_handles) {
        free_local_object_handles2(last_m2n_frame_handles);
    }
    set_exception_object_internal(exn_obj);

    if (ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_EXCEPTION_EVENT)) {
        Registers regs = {0};
        VM_thread *thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr)
                jvmti_exception_catch_callback;

        si_copy_to_registers(si, &regs);
        vm_set_exception_registers(thread, regs);
        si_set_callback(si, &callback);
    } else if (p_TLS_vmthread->restore_guard_page) {
        Registers regs = {0};
        VM_thread *thread = p_TLS_vmthread;
        NativeCodePtr callback = (NativeCodePtr)
                exception_catch_callback;
        si_copy_to_registers(si, &regs);
        vm_set_exception_registers(thread, regs);
        si_set_callback(si, &callback);
    }

    // don't put any call here
    si_transfer_control(si);
}   //exn_throw_for_JIT
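The closing comment is load-bearing: si_copy_to_registers and si_set_callback have already fixed the context the thread will resume with, and any call between them and si_transfer_control risks disturbing that state; the blanket logger ban at the top of the function reflects the same fragility (per the original warning, a release build would break).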