Code Example #1
static transfer_control_stub_type gen_transfer_control_stub()
{
    static transfer_control_stub_type addr = NULL;

    if (addr) {
        return addr;
    }

    const int STUB_SIZE = 255;
    char * stub = (char *)malloc_fixed_code_for_jit(STUB_SIZE,
        DEFAULT_CODE_ALIGNMENT, CODE_BLOCK_HEAT_COLD, CAA_Allocate);
    char * ss = stub;
#ifndef NDEBUG
    memset(stub, 0xcc /*int 3*/, STUB_SIZE);
#endif

    //
    // ************* LOW LEVEL DEPENDENCY! ***************
    // This code sequence must be atomic.  The "atomicity" effect is achieved by
    // changing the rsp at the very end of the sequence.

    // rdx holds the pointer to the stack iterator
#if defined (PLATFORM_POSIX) // RDI holds 1st parameter on Linux
    ss = mov(ss, rdx_opnd, rdi_opnd);
#else // RCX holds 1st parameter on Windows
    ss = mov(ss, rdx_opnd, rcx_opnd);
#endif

    // Restore general registers
    ss = get_reg(ss, rbp_opnd, rdx_reg, CONTEXT_OFFSET(p_rbp), false);
    ss = get_reg(ss, rbx_opnd, rdx_reg, CONTEXT_OFFSET(p_rbx), true);
    ss = get_reg(ss, r12_opnd, rdx_reg, CONTEXT_OFFSET(p_r12), true);
    ss = get_reg(ss, r13_opnd, rdx_reg, CONTEXT_OFFSET(p_r13), true);
    ss = get_reg(ss, r14_opnd, rdx_reg, CONTEXT_OFFSET(p_r14), true);
    ss = get_reg(ss, r15_opnd, rdx_reg, CONTEXT_OFFSET(p_r15), true);
    ss = get_reg(ss, rsi_opnd, rdx_reg, CONTEXT_OFFSET(p_rsi), true);
    ss = get_reg(ss, rdi_opnd, rdx_reg, CONTEXT_OFFSET(p_rdi), true);
    ss = get_reg(ss, r8_opnd,  rdx_reg, CONTEXT_OFFSET(p_r8),  true);
    ss = get_reg(ss, r9_opnd,  rdx_reg, CONTEXT_OFFSET(p_r9),  true);
    ss = get_reg(ss, r10_opnd, rdx_reg, CONTEXT_OFFSET(p_r10), true);
    ss = get_reg(ss, r11_opnd, rdx_reg, CONTEXT_OFFSET(p_r11), true);

    // Get the new RSP
    M_Base_Opnd saved_rsp(rdx_reg, CONTEXT_OFFSET(rsp));
    ss = mov(ss, rax_opnd, saved_rsp);
    // Store it over return address for future use
    ss = mov(ss, M_Base_Opnd(rsp_reg, 0), rax_opnd);
    // Get the new RIP
    ss = get_reg(ss, rcx_opnd, rdx_reg, CONTEXT_OFFSET(p_rip), false);
    // Store RIP at [<new RSP> - 136] so that the 128 bytes below RSP
    // (the red zone 'reserved' by the Linux ABI) are preserved
    ss = mov(ss,  M_Base_Opnd(rax_reg, -136), rcx_opnd);

    ss = get_reg(ss, rax_opnd, rdx_reg, CONTEXT_OFFSET(p_rax), true);

    // Restore processor flags
    ss = movzx(ss, rcx_opnd,  M_Base_Opnd(rdx_reg, CONTEXT_OFFSET(eflags)), size_16);
    ss = test(ss, rcx_opnd, rcx_opnd);
    ss = branch8(ss, Condition_Z,  Imm_Opnd(size_8, 0));
    char* patch_offset = ((char*)ss) - 1; // Store location for jump patch
    *ss++ = (char)0x9C; // PUSHFQ
    M_Base_Opnd sflags(rsp_reg, 0);
    ss = alu(ss, and_opc, sflags, Imm_Opnd(size_32,FLG_CLEAR_MASK), size_32);
    ss = alu(ss, and_opc, rcx_opnd, Imm_Opnd(size_32,FLG_SET_MASK), size_32);
    ss = alu(ss, or_opc, sflags, rcx_opnd, size_32);
    *ss++ = (char)0x9D; // POPFQ
    // Patch conditional jump
    POINTER_SIZE_SINT offset =
        (POINTER_SIZE_SINT)ss - (POINTER_SIZE_SINT)patch_offset - 1;
    *patch_offset = (char)offset;

    ss = get_reg(ss, rcx_opnd, rdx_reg, CONTEXT_OFFSET(p_rcx), true, true);
    ss = get_reg(ss, rdx_opnd, rdx_reg, CONTEXT_OFFSET(p_rdx), true, true);

    // Setup stack pointer to previously saved value
    ss = mov(ss,  rsp_opnd,  M_Base_Opnd(rsp_reg, 0));

    // Jump to address stored to [<new RSP> - 136]
    ss = jump(ss,  M_Base_Opnd(rsp_reg, -136));

    addr = (transfer_control_stub_type)stub;
    assert(ss-stub <= STUB_SIZE);

    /*
       The following code will be generated:

        mov         rdx,rcx
        mov         rbp,qword ptr [rdx+10h]
        mov         rbp,qword ptr [rbp]
        mov         rbx,qword ptr [rdx+20h]
        test        rbx,rbx
        je          __label1__
        mov         rbx,qword ptr [rbx]
__label1__
        ; .... The same for r12,r13,r14,r15,rsi,rdi,r8,r9,r10
        mov         r11,qword ptr [rdx+88h]
        test        r11,r11
        je          __label11__
        mov         r11,qword ptr [r11]
__label11__
        mov         rax,qword ptr [rdx+8]
        mov         qword ptr [rsp],rax
        mov         rcx,qword ptr [rdx+18h]
        mov         rcx,qword ptr [rcx]
        mov         qword ptr [rax-88h],rcx
        mov         rax,qword ptr [rdx+48h]
        test        rax,rax
        je          __label12__
        mov         rax,qword ptr [rax]
__label12__
        movzx       rcx,word ptr [rdx+90h]
        test        rcx,rcx
        je          __label13__
        pushfq
        and         dword ptr [rsp], 0x003F7202
        and         ecx, 0x00000CD5
        or          dword ptr [rsp], ecx
        popfq
__label13__
        mov         rcx,qword ptr [rdx+50h]
        pushfq
        test        rcx,rcx
        je          __label14__
        mov         rcx,qword ptr [rcx]
__label14__
        popfq
        mov         rdx,qword ptr [rdx+58h]
        pushfq
        test        rdx,rdx
        je          __label15__
        mov         rdx,qword ptr [rdx]
__label15__
        popfq
        mov         rsp,qword ptr [rsp]
        jmp         qword ptr [rsp-88h]
    */

    DUMP_STUB(stub, "getaddress__transfer_control", ss-stub);

    return addr;
}
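
gen_transfer_control_stub() lazily emits and caches a tiny machine-code stub that reloads the general-purpose registers, RFLAGS, RSP and finally RIP from a saved context, so a call into it never returns to the caller. The sketch below is a minimal, illustrative usage pattern only: it assumes transfer_control_stub_type is a non-returning function pointer whose single argument is the pointer to the stack-iterator context (the stub's own comments state that RDI/RCX carries exactly that), and StackIterator is used here purely as a stand-in name for that context type.

static void transfer_control_example(StackIterator* si)
{
    // Obtain the cached stub (generated on first use).
    transfer_control_stub_type stub = gen_transfer_control_stub();

    // The stub restores the registers recorded in the context and jumps to the
    // saved RIP, so this call does not return here.
    stub(si);
}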
Code Example #2
File: compile.cpp  Project: dacut/juliet
// Implementation note: don't use l2 (use l3 or l4 instead if required), since its
// space may be used for a 64-bit return value.
NativeCodePtr compile_create_lil_jni_stub(Method_Handle method, void* func, NativeStubOverride nso)
{
    ASSERT_NO_INTERPRETER;
    const Class_Handle clss = method->get_class();
    bool is_static = method->is_static();
    bool is_synchronised = method->is_synchronized();
    Method_Signature_Handle msh = method_get_signature(method);
    unsigned num_args = method->get_num_args();
    Type_Info_Handle ret_tih = method_ret_type_get_type_info(msh);
    VM_Data_Type ret_type = type_info_get_type(ret_tih);
    unsigned i;

    unsigned num_ref_args = 0; // among original args, does not include jclass for static methods
    for(i=0; i<num_args; i++)
        if (is_reference(method_args_get_type_info(msh, i))) num_ref_args++;

    //***** Part 1: Entry, Stats, Override, push m2n, allocate space for handles
    LilCodeStub* cs = lil_parse_code_stub("entry 0:managed:%0m;",
                                          method);
    assert(cs);

    // Increment stats (total number of calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs,
                            "inc [%0i:pint];",
                            &((Method*)method)->num_accesses);
    assert(cs);
#endif //VM_STATS

    // Do stub override here
    if (nso) cs = nso(cs, method);
    assert(cs);

    // Increment stats (number of nonoverridden calls)
#ifdef VM_STATS
    cs = lil_parse_onto_end(cs,
                            "inc [%0i:pint];",
                            &((Method*)method)->num_slow_accesses);
    assert(cs);
#endif

    // Push M2nFrame
    cs = lil_parse_onto_end(cs, "push_m2n %0i, %1i, handles; locals 3;",
                            method, (POINTER_SIZE_INT)FRAME_JNI);
    assert(cs);

    // Allocate space for handles
    unsigned number_of_object_handles = num_ref_args + (is_static ? 1 : 0);
    cs = oh_gen_allocate_handles(cs, number_of_object_handles, "l0", "l1");
    assert(cs);

    //***** Part 2: Initialize object handles

    if (is_static) {
        void *jlc = clss->get_class_handle();
        cs = lil_parse_onto_end(cs,
                                //"ld l1,[%0i:pint];"
                                "ld l1,[%0i:ref];",
                                jlc);
        assert(cs);
        cs = oh_gen_init_handle(cs, "l0", 0, "l1", false);
        assert(cs);
    } else {
        cs = oh_gen_init_handle(cs, "l0", 0, "i0", true);
    }

    // The remaining handles are for the proper arguments (not including this).
    // Loop over the arguments, skipping the 0th argument for instance methods;
    // for each reference argument, generate code that initializes its handle.
    unsigned hn = 1;
    for(i=(is_static?0:1); i<num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            char buf[20];
            sprintf(buf, "i%d", i);
            cs = oh_gen_init_handle(cs, "l0", hn, buf, true);
            assert(cs);
            hn++;
        }
    }

    //***** Part 3: Synchronize
    if (is_synchronised) {
        if (is_static) {
            cs = lil_parse_onto_end(cs,
                                    "out stdcall:pint:pint;"
                                    "o0=%0i;"
                                    "call %1i;"
                                    "out stdcall:pint:void;"
                                    "o0=r;"
                                    "call %2i;",
                                    clss,
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        } else {
            cs = lil_parse_onto_end(cs,
                                    "out stdcall:ref:void;"
                                    "o0=i0;"
                                    "call %0i;",
                                    lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_ENTER)));
            assert(cs);
        }
    }

    //***** Call JVMTI MethodEntry
    DebugUtilsTI* ti = VM_Global_State::loader_env->TI;
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_ENTRY))
    {
        cs = lil_parse_onto_end(cs,
                                "out platform:pint:void;"
                                "o0=%0i:pint;"
                                "call %1i;",
                                (jmethodID)method,
                                jvmti_process_method_entry_event);
        assert(cs);
    }

    //***** Part 4: Enable GC
    cs = lil_parse_onto_end(cs,
                            "out platform::void;"
                            "call %0i;",
                            hythread_suspend_enable);
    assert(cs);

    //***** Part 5: Set up arguments

    // Setup outputs, set JNIEnv, set class/this handle
    cs = lil_parse_onto_end(cs,
                            "out jni:%0j;"
                            "l1=ts;"
                            "ld o0,[l1 + %1i:pint];"
                            "o1=l0+%2i;",
                            method,
                            (POINTER_SIZE_INT)APR_OFFSETOF(VM_thread, jni_env),
                            oh_get_handle_offset(0));
    assert(cs);

    // Loop over arguments proper, setting rest of outputs
    unsigned int arg_base = 1 + (is_static ? 1 : 0);
    hn = 1;
    for(i=(is_static?0:1); i<num_args; i++) {
        if (is_reference(method_args_get_type_info(msh, i))) {
            POINTER_SIZE_INT handle_offset = oh_get_handle_offset(hn);
            REFS_RUNTIME_SWITCH_IF
#ifdef REFS_RUNTIME_OR_COMPRESSED
                cs = lil_parse_onto_end(cs,
                                        "jc i%0i=%1i:ref,%n;"
                                        "o%2i=l0+%3i;"
                                        "j %o;"
                                        ":%g;"
                                        "o%4i=0;"
                                        ":%g;",
                                        i,
                                        VM_Global_State::loader_env->managed_null,
                                        arg_base+i, handle_offset, arg_base+i);
#endif // REFS_RUNTIME_OR_COMPRESSED
            REFS_RUNTIME_SWITCH_ELSE
#ifdef REFS_RUNTIME_OR_UNCOMPRESSED
                cs = lil_parse_onto_end(cs,
                                        "jc i%0i=0:ref,%n;"
                                        "o%1i=l0+%2i;"
                                        "j %o;"
                                        ":%g;"
                                        "o%3i=0;"
                                        ":%g;",
                                        i,
                                        arg_base+i, handle_offset,
                                        arg_base+i);
#endif // REFS_RUNTIME_OR_UNCOMPRESSED
            REFS_RUNTIME_SWITCH_ENDIF
            hn++;
        } else {
            cs = lil_parse_onto_end(cs, "o%0i=i%1i;", arg_base+i, i);
        }
        assert(cs);
    }

    //***** Part 6: Call
    cs = lil_parse_onto_end(cs,
                            "call %0i;",
                            func);
    assert(cs);

    //***** Part 7: Save return, widening if necessary
    switch (ret_type) {
    case VM_DATA_TYPE_VOID:
        break;
    case VM_DATA_TYPE_INT32:
        cs = lil_parse_onto_end(cs, "l1=r;");
        break;
    case VM_DATA_TYPE_BOOLEAN:
        cs = lil_parse_onto_end(cs, "l1=zx1 r;");
        break;
    case VM_DATA_TYPE_INT16:
        cs = lil_parse_onto_end(cs, "l1=sx2 r;");
        break;
    case VM_DATA_TYPE_INT8:
        cs = lil_parse_onto_end(cs, "l1=sx1 r;");
        break;
    case VM_DATA_TYPE_CHAR:
        cs = lil_parse_onto_end(cs, "l1=zx2 r;");
        break;
    default:
        cs = lil_parse_onto_end(cs, "l1=r;");
        break;
    }
    assert(cs);

    //***** Part 8: Disable GC
    cs = lil_parse_onto_end(cs,
                            "out platform::void;"
                            "call %0i;",
                            hythread_suspend_disable);
    assert(cs);

    // Exception offsets
    POINTER_SIZE_INT eoo = (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_object;
    POINTER_SIZE_INT eco = (POINTER_SIZE_INT)&((VM_thread*)0)->thread_exception.exc_class;

    //***** Call JVMTI MethodExit
    if (ti->isEnabled() &&
        ti->get_global_capability(DebugUtilsTI::TI_GC_ENABLE_METHOD_EXIT))
    {
        cs = lil_parse_onto_end(cs,
                                "out platform:pint,g1,g8:void;"
                                "l2=ts;"
                                "ld l2,[l2+%0i:ref];"
                                "jc l2!=0,_mex_exn_raised;"
                                "l2=ts;"
                                "ld l2,[l2+%1i:ref];"
                                "jc l2!=0,_mex_exn_raised;"
                                "o1=%2i:g1;"
                                "o2=l1:g8;"
                                "j _mex_exn_cont;"
                                ":_mex_exn_raised;"
                                "o1=%3i:g1;"
                                "o2=0:g8;"
                                ":_mex_exn_cont;"
                                "o0=%4i:pint;"
                                "call %5i;",
                                eoo,
                                eco,
                                (POINTER_SIZE_INT)JNI_FALSE,
                                (POINTER_SIZE_INT)JNI_TRUE,
                                (jmethodID)method,
                                jvmti_process_method_exit_event);
        assert(cs);
    }

    //***** Part 9: Synchronize
    if (is_synchronised) {
        if (is_static) {
            cs = lil_parse_onto_end(cs,
                "out stdcall:pint:pint;"
                "o0=%0i;"
                "call %1i;"
                "out stdcall:pint:void;"
                "o0=r;"
                "call %2i;",
                clss,
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_CLASS_2_JLC)),
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        } else {
            cs = lil_parse_onto_end(cs,
                "ld l0,[l0+%0i:ref];"
                "out stdcall:ref:void; o0=l0; call %1i;",
                oh_get_handle_offset(0),
                lil_npc_to_fp(vm_helper_get_addr(VM_RT_MONITOR_EXIT)));
        }
        assert(cs);
    }

    //***** Part 10: Unhandle the return if it is a reference
    if (is_reference(ret_tih)) {
        cs = lil_parse_onto_end(cs,
                                "jc l1=0,ret_done;"
                                "ld l1,[l1+0:ref];"
                                ":ret_done;");
#ifdef REFS_RUNTIME_OR_COMPRESSED
        REFS_RUNTIME_SWITCH_IF
            cs = lil_parse_onto_end(cs,
                                    "jc l1!=0,done_translating_ret;"
                                    "l1=%0i:ref;"
                                    ":done_translating_ret;",
                                    VM_Global_State::loader_env->managed_null);
        REFS_RUNTIME_SWITCH_ENDIF
#endif // REFS_RUNTIME_OR_COMPRESSED
        assert(cs);
    }

    //***** Part 11: Rethrow exception
    cs = lil_parse_onto_end(cs,
                            "l0=ts;"
                            "ld l2,[l0+%0i:ref];"
                            "jc l2!=0,_exn_raised;"
                            "ld l2,[l0+%1i:ref];"
                            "jc l2=0,_no_exn;"
                            ":_exn_raised;"
                            "m2n_save_all;"
                            "out platform::void;"
                            "call.noret %2i;"
                            ":_no_exn;",
                            eoo, eco, exn_rethrow);
    assert(cs);

    //***** Part 12: Restore return variable, pop_m2n, return
    if (ret_type != VM_DATA_TYPE_VOID) {
        cs = lil_parse_onto_end(cs, "r=l1;");
        assert(cs);
    }
    cs = lil_parse_onto_end(cs,
                            "pop_m2n;"
                            "ret;");
    assert(cs);

    //***** Now generate code

    assert(lil_is_valid(cs));
    NativeCodePtr addr = LilCodeGenerator::get_platform()->compile(cs, clss->get_class_loader()->GetCodePool());

#ifndef NDEBUG
    char buf[100];
    apr_snprintf(buf, sizeof(buf)-1, "jni_stub.%s::%s", clss->get_name()->bytes,
        method->get_name()->bytes);
    DUMP_STUB(addr, buf, lil_cs_get_code_size(cs));
#endif

#ifdef VM_STATS
    VM_Statistics::get_vm_stats().jni_stub_bytes += lil_cs_get_code_size(cs);
#endif

    lil_free_code_stub(cs);
    return addr;
} // compile_create_lil_jni_stub
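
compile_create_lil_jni_stub() assembles a managed-to-native bridge for a JNI method: it pushes an M2N frame, wraps the reference arguments (and, for static methods, the class) in object handles, enables GC around the native call, saves and widens the return value, releases the monitor for synchronized methods, and rethrows any pending exception before returning. The sketch below only illustrates how a VM might wire the returned stub in when a native method is bound; find_native_entry() and method_set_code_addr() are hypothetical helpers, not functions from the code above.

static bool bind_native_method(Method_Handle method, NativeStubOverride nso)
{
    // Locate the JNI implementation, e.g. via RegisterNatives or a library
    // lookup (hypothetical helper).
    void* func = find_native_entry(method);
    if (!func) return false;

    // Build the managed-to-native bridge shown above.
    NativeCodePtr stub = compile_create_lil_jni_stub(method, func, nso);
    if (!stub) return false;

    // Publish the stub as the method's executable entry point
    // (hypothetical setter).
    method_set_code_addr(method, stub);
    return true;
}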