static char* get_reg(char* ss, const R_Opnd & dst, Reg_No base, int64 offset, bool check_null = false, bool preserve_flags = false) { char* patch_offset = NULL; ss = mov(ss, dst, M_Base_Opnd(base, (I_32)offset)); if (check_null) { if (preserve_flags) *ss++ = (char)0x9C; // PUSHFD ss = test(ss, dst, dst); ss = branch8(ss, Condition_Z, Imm_Opnd(size_8, 0)); patch_offset = ((char*)ss) - 1; // Store location for jump patch } ss = mov(ss, dst, M_Base_Opnd(dst.reg_no(), 0)); if (check_null) { // Patch conditional jump POINTER_SIZE_SINT offset = (POINTER_SIZE_SINT)ss - (POINTER_SIZE_SINT)patch_offset - 1; assert(offset >= -128 && offset < 127); *patch_offset = (char)offset; if (preserve_flags) *ss++ = (char)0x9D; // POPFD } return ss; }
// Returns (generating lazily on first use) a stub that pops the M2N frame
// set up by getaddress__setup_java_to_native_frame and returns to the
// relocated return address. NOTE(review): lazy init of 'addr' is not
// thread-safe — presumably first call happens single-threaded; confirm.
static void* getaddress__pop_java_to_native_frame()
{
    // Cached stub address; generated once.
    static void *addr = 0;
    if (addr) {
        return addr;
    }

    const int stub_size = 32 + m2n_pop_m2n_size(false, 1, 0);
    char *stub = (char *)malloc_fixed_code_for_jit(stub_size,
        DEFAULT_CODE_ALIGNMENT, CODE_BLOCK_HEAT_MAX/2, CAA_Allocate);
#ifdef _DEBUG
    memset(stub, 0xcc /*int 3*/, stub_size);
#endif
    char *ss = stub;

    // Stash the return value (rax) in r12 across the m2n pop sequence,
    // then restore it.
    ss = mov(ss, r12_opnd, rax_opnd);
    ss = m2n_gen_pop_m2n(ss, false, 1, 8, 0);
    ss = mov(ss, rax_opnd, r12_opnd);

    // Swap the return address at [rsp] with the r12 value saved at the
    // frame bottom (mirrors the swap done by the setup stub), then cut the
    // frame off the stack so only the return address remains.
    ss = mov(ss, r11_opnd, M_Base_Opnd(rsp_reg, 0));
    ss = mov(ss, r12_opnd, M_Base_Opnd(rsp_reg, m2n_sizeof_m2n_frame - 8));
    ss = mov(ss, M_Base_Opnd(rsp_reg, m2n_sizeof_m2n_frame - 8), r11_opnd);
    ss = alu(ss, add_opc, rsp_opnd, Imm_Opnd(m2n_sizeof_m2n_frame - 8));
    ss = ret(ss);

    assert((ss - stub) <= stub_size);

    addr = stub;

    compile_add_dynamic_generated_code_chunk("pop_java_to_native_frame", false, stub, stub_size);

    // Put TI support here.
    DUMP_STUB(stub, "getaddress__pop_java_to_native_frame", ss - stub);

    return addr;
} //getaddress__pop_java_to_native_frame
/** * Generates monitor exit. * The code should not contain safepoints. * * @param[in] ss buffer to put the assembly code to * @param[in] input_param1 register should point to the lockword in object header. * If input_param1 == ecx it reduce one register mov. * The code use and do not restore eax, ecx registers. * @return 0 if success in eax register */ char* gen_monitor_exit_helper(char *ss, const R_Opnd & input_param1) { if (&input_param1 != &ecx_opnd) { ss = mov(ss, ecx_opnd, input_param1); } #ifdef ASM_MONITOR_HELPER ss = mov(ss, eax_opnd, M_Base_Opnd(ecx_reg, 0)); // mov eax,dword[ecx] ss = test(ss, eax_opnd, Imm_Opnd(0x80000000), size_32); // test eax,0x80000000 ss = branch8(ss, Condition_NZ, Imm_Opnd(size_8, 0)); // jnz fat char *fat = ((char *)ss) - 1; ss = mov(ss, eax_opnd, M_Base_Opnd(ecx_reg, 1), size_8); // mov al, byte[ecx+1] ss = alu(ss, sub_opc, eax_opnd, Imm_Opnd(size_8,0x8),size_8); // sub al, 0x8 ss = branch8(ss, Condition_C, Imm_Opnd(size_8, 0)); // jc zero_rec char *zero_rec = ((char *)ss) - 1; ss = mov(ss, M_Base_Opnd(ecx_reg, 1), eax_opnd, size_8); // mov byte[ecx+1],al ss = ret(ss, Imm_Opnd(4)); // ret 4 signed offset = (signed)ss - (signed)zero_rec - 1; //zero_rec: *zero_rec = (char)offset; ss = mov(ss, M_Base_Opnd(ecx_reg, 2), Imm_Opnd(size_16, 0), size_16);// mov word[ecx+2],0 ss = ret(ss, Imm_Opnd(4)); // ret 4 offset = (signed)ss - (signed)fat - 1; //fat: *fat = (char)offset; #endif ss = push(ss, ecx_opnd); ss = call(ss, (char *)hythread_thin_monitor_exit); ss = alu(ss, add_opc, esp_opnd, Imm_Opnd(4)); // pop parameters return ss; }
/**
 * Generates code that pops an M2N frame off the stack.
 *
 * @param buf                 buffer to emit the code into
 * @param handles             true if local object handles sit on the stack
 *                            (they are popped); otherwise they are freed
 * @param num_callee_saves    number of callee-saved registers NOT to restore
 *                            (the frame stores MAX_GR_LOCALS - num_callee_saves)
 * @param bytes_to_m2n_bottom offset from the current rsp to the frame bottom
 * @param num_preserve_ret    if > 0, rax/rdx are preserved across the
 *                            handle-cleanup call
 * @return updated emission pointer
 */
char * m2n_gen_pop_m2n(char * buf, bool handles, unsigned num_callee_saves,
                       I_32 bytes_to_m2n_bottom, unsigned num_preserve_ret)
{
    assert (num_preserve_ret <= 2);
    assert(LcgEM64TContext::GR_SIZE == 8);

    if (num_preserve_ret > 0) {
        // Save return value
        // NOTE: don't break stack alignment by pushing only one register.
        buf = push(buf, rax_opnd, size_64);
        buf = push(buf, rdx_opnd, size_64);
    }

    // Load the address of the handle-cleanup routine into rax.
    if (handles) {
        // There are handles located on the stack
        buf = mov(buf, rax_opnd, Imm_Opnd(size_64, (uint64)m2n_pop_local_handles), size_64);
    } else {
        buf = mov(buf, rax_opnd, Imm_Opnd(size_64, (uint64)m2n_free_local_handles), size_64);
    }

    // NOTE: the following should be true before the call ($rsp % 8 == 0 && $rsp % 16 != 0)!
    // Call m2n_pop_local_handles or m2n_free_local_handles
#ifdef _WIN64
    // Allocate/release the Win64 shadow space around the call.
    buf = alu(buf, add_opc, rsp_opnd, Imm_Opnd(-SHADOW));
#endif
    buf = call(buf, rax_opnd, size_64);
#ifdef _WIN64
    buf = alu(buf, add_opc, rsp_opnd, Imm_Opnd(SHADOW));
#endif

    if (num_preserve_ret > 0) {
        // Restore return value
        buf = pop(buf, rdx_opnd, size_64);
        buf = pop(buf, rax_opnd, size_64);
    }

    // pop prev_m2nf
    buf = mov(buf, r10_opnd, M_Base_Opnd(rsp_reg, bytes_to_m2n_bottom), size_64);
    bytes_to_m2n_bottom += LcgEM64TContext::GR_SIZE;
    // pop p_lm2nf
    buf = mov(buf, r11_opnd, M_Base_Opnd(rsp_reg, bytes_to_m2n_bottom), size_64);
    bytes_to_m2n_bottom += LcgEM64TContext::GR_SIZE;
    // Unlink this frame: *p_lm2nf = prev_m2nf.
    buf = mov(buf, M_Base_Opnd(r11_reg, 0), r10_opnd, size_64);
    // skip local_object_handles, method, current_frame_type, pop_regs
    bytes_to_m2n_bottom += 4 * LcgEM64TContext::GR_SIZE;

    // restore part of callee-saves registers
    for (int i = LcgEM64TContext::MAX_GR_LOCALS - 1; i >= (int)num_callee_saves; i--) {
        buf = mov(buf,
                  LcgEM64TContext::get_reg_from_map(LcgEM64TContext::GR_LOCALS_OFFSET + i),
                  M_Base_Opnd(rsp_reg, bytes_to_m2n_bottom), size_64);
        bytes_to_m2n_bottom += LcgEM64TContext::GR_SIZE;
    }
    return buf;
}//m2n_gen_pop_m2n
// Returns (generating lazily on first use) a stub that pushes an M2N frame
// below the caller's return address. Inverse of
// getaddress__pop_java_to_native_frame. NOTE(review): lazy init of 'addr'
// is not thread-safe — presumably first call happens single-threaded; confirm.
static void* getaddress__setup_java_to_native_frame()
{
    // Cached stub address; generated once.
    static void *addr = 0;
    if (addr) {
        return addr;
    }

    const int stub_size = 32 + m2n_push_m2n_size(1, 0);
    char *stub = (char *)malloc_fixed_code_for_jit(stub_size,
        DEFAULT_CODE_ALIGNMENT, CODE_BLOCK_HEAT_MAX/2, CAA_Allocate);
#ifdef _DEBUG
    memset(stub, 0xcc /*int 3*/, stub_size);
#endif
    char *ss = stub;

    // Stack changes
    //    prev        new
    //
    //    ...         ...
    //  --------    -------- ------------
    //                ret
    //  --------    --------
    //    ret         r12
    //  --------    -------- m2n frame
    //
    //                ...
    //
    //              -------- ------------
    //                ret
    //              --------

    // Reserve the m2n frame area, then swap the return address (old top of
    // stack) with r12 so the return address ends up at the new stack top.
    ss = alu(ss, sub_opc, rsp_opnd, Imm_Opnd(m2n_sizeof_m2n_frame - 8));
    ss = mov(ss, r11_opnd, M_Base_Opnd(rsp_reg, m2n_sizeof_m2n_frame - 8));
    ss = mov(ss, M_Base_Opnd(rsp_reg, m2n_sizeof_m2n_frame - 8), r12_opnd);
    ss = mov(ss, M_Base_Opnd(rsp_reg, 0), r11_opnd);

    // Stash rdi in r12 across the m2n push sequence, then restore it.
    ss = mov(ss, r12_opnd, rdi_opnd);
    ss = m2n_gen_push_m2n(ss, NULL, FRAME_UNKNOWN, false, 1, 0, m2n_sizeof_m2n_frame);
    ss = mov(ss, rdi_opnd, r12_opnd);
    ss = ret(ss);

    assert((ss - stub) <= stub_size);

    addr = stub;

    compile_add_dynamic_generated_code_chunk("setup_java_to_native_frame", false, stub, stub_size);

    // Put TI support here.
    DUMP_STUB(stub, "getaddress__setup_java_to_native_frame", ss - stub);

    return addr;
} //getaddress__setup_java_to_native_frame
/**
 * Generates a hythread_self() lookup (original comment said "tmn_self").
 * The generated code must not contain safepoints.
 * The code uses and does not restore the eax register.
 *
 * @return current hythread in eax register
 */
char* gen_hythread_self_helper(char *ss)
{
#ifdef HYTHREAD_FAST_TLS
#   ifdef FS14_TLS_USE
    // Hand-encoded since the commented mov below needs a segment prefix:
    //ss = mov(ss, eax_opnd, M_Base_Opnd(fs_reg, 0x14));
    // 64 a1 14 00 00 00  =  mov eax, fs:[0x14]
    *ss++ = (char)0x64;
    *ss++ = (char)0xa1;
    *ss++ = (char)0x14;
    *ss++ = (char)0x00;
    *ss++ = (char)0x00;
    *ss++ = (char)0x00;
#   else
    unsigned offset = hythread_get_hythread_offset_in_tls();
    // gs register is used for TLS access on linux x86-32
    //ss = mov(ss, eax_opnd, M_Base_Opnd(gs_reg, 0x00));
    // 65 8b 05 00 00 00 00  =  mov eax, gs:[0x0]  (TLS block base)
    *ss++ = (char)0x65;
    *ss++ = (char)0x8b;
    *ss++ = (char)0x05;
    *ss++ = (char)0x00;
    *ss++ = (char)0x00;
    *ss++ = (char)0x00;
    *ss++ = (char)0x00;
    // eax = *(eax + offset): load the hythread slot out of the TLS block.
    ss = mov(ss, eax_opnd, M_Base_Opnd(eax_reg, offset));
#   endif
#else
    // Slow path: call hythread_self() out of line.
    ss = push(ss, ecx_opnd); // Preserve caller-saved ECX
    ss = call(ss, (char *)hythread_self);
    ss = pop (ss, ecx_opnd);
#endif
    return ss;
}
/**
 * Generates fast accessor to the TLS for the given key.<br>
 * Example:
 * <pre><code>
 * get_thread_ptr = get_tls_helper(vm_thread_block_key);
 * ...
 * self = get_thread_ptr();
 * </code></pre>
 *
 * @param[in] key TLS key
 * @return fast accessor to key, if one exists; NULL on allocation failure
 */
fast_tls_func* get_tls_helper(hythread_tls_key_t key)
{
    // Equivalent of: return tm_self_tls->thread_local_storage[key];
    // Byte offset of thread_local_storage[key] within HyThread_public.
    unsigned key_offset =
        (unsigned)&(((HyThread_public *)(0))->thread_local_storage[key]);

    const int stub_size = 128;
    char *stub = (char *)malloc(stub_size);
    if (stub == NULL) {
        // Out of memory: fail cleanly instead of passing NULL to memset (UB).
        return NULL;
    }
    memset(stub, 0xcc /*int 3*/, stub_size);

    char *ss = stub;

    ss = gen_hythread_self_helper(ss);                        // eax = current hythread
    ss = mov(ss, eax_opnd, M_Base_Opnd(eax_reg, key_offset)); // eax = self->tls[key]
    ss = ret(ss, Imm_Opnd(0));

    assert((ss - stub) < stub_size);

    return (fast_tls_func*) stub;
}
/**
 * Generates a stub that calls a managed function, copying its arguments from
 * an array onto a re-aligned stack first.
 *
 * Incoming stack (relative to ebp after the prologue):
 *   [ebp + 8]  - args (pointer to an array of 4-byte argument words)
 *   [ebp + 12] - size (number of argument words)
 *   [ebp + 16] - func (address of the managed function to call)
 */
static char* gen_invoke_common_managed_func(char* stub)
{
    // Defines stack alignment on managed function enter.
    const I_32 STACK_ALIGNMENT = MANAGED_STACK_ALIGNMENT;
    const I_32 STACK_ALIGNMENT_MASK = ~(STACK_ALIGNMENT - 1);
    const char * LOOP_BEGIN = "loop_begin";
    const char * LOOP_END = "loop_end";

    // [ebp + 8] - args
    // [ebp + 12] - size
    // [ebp + 16] - func
    const I_32 STACK_ARGS_OFFSET = 8;
    const I_32 STACK_NARGS_OFFSET = 12;
    const I_32 STACK_FUNC_OFFSET = 16;
    const I_32 STACK_CALLEE_SAVED_OFFSET = -12;

    tl::MemoryPool pool;
    LilCguLabelAddresses labels(&pool, stub);

    // Initialize ebp-based stack frame.
    stub = push(stub, ebp_opnd);
    stub = mov(stub, ebp_opnd, esp_opnd);

    // Preserve callee-saved registers.
    stub = push(stub, ebx_opnd);
    stub = push(stub, esi_opnd);
    stub = push(stub, edi_opnd);

    // Load an array of arguments ('args') and its size from the stack.
    stub = mov(stub, eax_opnd, M_Base_Opnd(ebp_reg, STACK_ARGS_OFFSET));
    stub = mov(stub, ecx_opnd, M_Base_Opnd(ebp_reg, STACK_NARGS_OFFSET));

    // Align memory stack.
    // ebx = size*4 + 4; esp = (esp - ebx) & MASK + size*4 + 4, so that the
    // stack is aligned once the arguments have been pushed.
    stub = lea(stub, ebx_opnd, M_Index_Opnd(n_reg, ecx_reg, 4, 4));
    stub = mov(stub, esi_opnd, ebx_opnd);
    stub = neg(stub, esi_opnd);
    stub = alu(stub, add_opc, esi_opnd, esp_opnd);
    stub = alu(stub, and_opc, esi_opnd, Imm_Opnd(size_32, STACK_ALIGNMENT_MASK));
    stub = alu(stub, add_opc, ebx_opnd, esi_opnd);
    stub = mov(stub, esp_opnd, ebx_opnd);

    // Load a pointer to the last argument of 'args' array.
    stub = lea(stub, eax_opnd, M_Index_Opnd(eax_reg, ecx_reg, -4, 4));
    stub = alu(stub, sub_opc, eax_opnd, esp_opnd);
    // Skip the copy loop entirely when there are no arguments.
    stub = alu(stub, or_opc, ecx_opnd, ecx_opnd);
    stub = branch8(stub, Condition_Z, Imm_Opnd(size_8, 0));
    labels.add_patch_to_label(LOOP_END, stub - 1, LPT_Rel8);

    // LOOP_BEGIN:
    // Push inputs on the stack.
    labels.define_label(LOOP_BEGIN, stub, false);
    stub = push(stub, M_Index_Opnd(esp_reg, eax_reg, 0, 1));
    stub = loop(stub, Imm_Opnd(size_8, 0)); // dec ecx; loop while ecx != 0
    labels.add_patch_to_label(LOOP_BEGIN, stub - 1, LPT_Rel8);

    // LOOP_END:
    labels.define_label(LOOP_END, stub, false);

    // Call target function.
    stub = mov(stub, eax_opnd, M_Base_Opnd(ebp_reg, STACK_FUNC_OFFSET));
    stub = call(stub, eax_opnd);

    // Restore callee-saved registers from the stack.
    stub = lea(stub, esp_opnd, M_Base_Opnd(ebp_reg, STACK_CALLEE_SAVED_OFFSET));
    stub = pop(stub, edi_opnd);
    stub = pop(stub, esi_opnd);
    stub = pop(stub, ebx_opnd);

    // Leave current frame.
    stub = pop(stub, ebp_opnd);

    return stub;
}
// Returns (generating lazily on first use) a stub that transfers control to
// the register context referenced through rdx. Each p_<reg> context field
// holds a POINTER to the saved register value; get_reg NULL-checks most of
// them before dereferencing.
static transfer_control_stub_type gen_transfer_control_stub()
{
    // Cached stub address; generated once.
    static transfer_control_stub_type addr = NULL;

    if (addr) {
        return addr;
    }

    const int STUB_SIZE = 255;
    char * stub = (char *)malloc_fixed_code_for_jit(STUB_SIZE,
        DEFAULT_CODE_ALIGNMENT, CODE_BLOCK_HEAT_COLD, CAA_Allocate);
    char * ss = stub;
#ifndef NDEBUG
    memset(stub, 0xcc /*int 3*/, STUB_SIZE);
#endif

    //
    // ************* LOW LEVEL DEPENDENCY! ***************
    // This code sequence must be atomic. The "atomicity" effect is achieved by
    // changing the rsp at the very end of the sequence.

    // rdx holds the pointer to the stack iterator
#if defined (PLATFORM_POSIX) // RDI holds 1st parameter on Linux
    ss = mov(ss, rdx_opnd, rdi_opnd);
#else // RCX holds 1st parameter on Windows
    ss = mov(ss, rdx_opnd, rcx_opnd);
#endif

    // Restore general registers
    ss = get_reg(ss, rbp_opnd, rdx_reg, CONTEXT_OFFSET(p_rbp), false);
    ss = get_reg(ss, rbx_opnd, rdx_reg, CONTEXT_OFFSET(p_rbx), true);
    ss = get_reg(ss, r12_opnd, rdx_reg, CONTEXT_OFFSET(p_r12), true);
    ss = get_reg(ss, r13_opnd, rdx_reg, CONTEXT_OFFSET(p_r13), true);
    ss = get_reg(ss, r14_opnd, rdx_reg, CONTEXT_OFFSET(p_r14), true);
    ss = get_reg(ss, r15_opnd, rdx_reg, CONTEXT_OFFSET(p_r15), true);
    ss = get_reg(ss, rsi_opnd, rdx_reg, CONTEXT_OFFSET(p_rsi), true);
    ss = get_reg(ss, rdi_opnd, rdx_reg, CONTEXT_OFFSET(p_rdi), true);
    ss = get_reg(ss, r8_opnd, rdx_reg, CONTEXT_OFFSET(p_r8), true);
    ss = get_reg(ss, r9_opnd, rdx_reg, CONTEXT_OFFSET(p_r9), true);
    ss = get_reg(ss, r10_opnd, rdx_reg, CONTEXT_OFFSET(p_r10), true);
    ss = get_reg(ss, r11_opnd, rdx_reg, CONTEXT_OFFSET(p_r11), true);

    // Get the new RSP
    M_Base_Opnd saved_rsp(rdx_reg, CONTEXT_OFFSET(rsp));
    ss = mov(ss, rax_opnd, saved_rsp);
    // Store it over return address for future use
    ss = mov(ss, M_Base_Opnd(rsp_reg, 0), rax_opnd);
    // Get the new RIP
    ss = get_reg(ss, rcx_opnd, rdx_reg, CONTEXT_OFFSET(p_rip), false);
    // Store RIP to [<new RSP> - 136] to preserve 128 bytes under RSP
    // which are 'reserved' on Linux
    ss = mov(ss, M_Base_Opnd(rax_reg, -136), rcx_opnd);

    ss = get_reg(ss, rax_opnd, rdx_reg, CONTEXT_OFFSET(p_rax), true);

    // Restore processor flags
    ss = movzx(ss, rcx_opnd, M_Base_Opnd(rdx_reg, CONTEXT_OFFSET(eflags)), size_16);
    ss = test(ss, rcx_opnd, rcx_opnd);
    // Skip the merge below when the saved eflags word is zero.
    ss = branch8(ss, Condition_Z, Imm_Opnd(size_8, 0));
    char* patch_offset = ((char*)ss) - 1; // Store location for jump patch
    *ss++ = (char)0x9C; // PUSHFQ
    // Merge saved flags into the current ones on the stack:
    // [rsp] = ([rsp] & FLG_CLEAR_MASK) | (saved & FLG_SET_MASK)
    M_Base_Opnd sflags(rsp_reg, 0);
    ss = alu(ss, and_opc, sflags, Imm_Opnd(size_32,FLG_CLEAR_MASK), size_32);
    ss = alu(ss, and_opc, rcx_opnd, Imm_Opnd(size_32,FLG_SET_MASK), size_32);
    ss = alu(ss, or_opc, sflags, rcx_opnd, size_32);
    *ss++ = (char)0x9D; // POPFQ
    // Patch conditional jump
    POINTER_SIZE_SINT offset =
        (POINTER_SIZE_SINT)ss - (POINTER_SIZE_SINT)patch_offset - 1;
    *patch_offset = (char)offset;

    // rcx/rdx are restored last with flags preserved, since rdx is still
    // the context pointer until this point.
    ss = get_reg(ss, rcx_opnd, rdx_reg, CONTEXT_OFFSET(p_rcx), true, true);
    ss = get_reg(ss, rdx_opnd, rdx_reg, CONTEXT_OFFSET(p_rdx), true, true);

    // Setup stack pointer to previously saved value
    ss = mov(ss, rsp_opnd, M_Base_Opnd(rsp_reg, 0));

    // Jump to address stored to [<new RSP> - 136]
    ss = jump(ss, M_Base_Opnd(rsp_reg, -136));

    addr = (transfer_control_stub_type)stub;
    assert(ss-stub <= STUB_SIZE);

    /*
    The following code will be generated:

    mov         rdx,rcx
    mov         rbp,qword ptr [rdx+10h]
    mov         rbp,qword ptr [rbp]
    mov         rbx,qword ptr [rdx+20h]
    test        rbx,rbx
    je          __label1__
    mov         rbx,qword ptr [rbx]
    __label1__
    ; ....
    The same for r12,r13,r14,r15,rsi,rdi,r8,r9,r10
    mov         r11,qword ptr [rdx+88h]
    test        r11,r11
    je          __label11__
    mov         r11,qword ptr [r11]
    __label11__
    mov         rax,qword ptr [rdx+8]
    mov         qword ptr [rsp],rax
    mov         rcx,qword ptr [rdx+18h]
    mov         rcx,qword ptr [rcx]
    mov         qword ptr [rax-88h],rcx
    mov         rax,qword ptr [rdx+48h]
    test        rax,rax
    je          __label12__
    mov         rax,qword ptr [rax]
    __label12__
    movzx       rcx,word ptr [rdx+90h]
    test        rcx,rcx
    je          __label13__
    pushfq
    and         dword ptr [rsp], 0x003F7202
    and         ecx, 0x00000CD5
    or          dword ptr [esp], ecx
    popfq
    __label13__
    mov         rcx,qword ptr [rdx+50h]
    pushfq
    test        rcx,rcx
    je          __label14__
    mov         rcx,qword ptr [rcx]
    __label14__
    popfq
    mov         rdx,qword ptr [rdx+58h]
    pushfq
    test        rdx,rdx
    je          __label15__
    mov         rdx,qword ptr [rdx]
    __label15__
    popfq
    mov         rsp,qword ptr [rsp]
    jmp         qword ptr [rsp-88h]
    */

    DUMP_STUB(stub, "getaddress__transfer_control", ss-stub);

    return addr;
}
/** * Generates fast path of monitor enter * the code should not contains safepoint. * * @param[in] ss buffer to put the assembly code to * @param[in] input_param1 register which should point to the object lockword. * If input_param1 == ecx it reduces one register mov. * the code use and do not restore ecx, edx, eax registers * * @return 0 if success in eax register */ char* gen_monitorenter_fast_path_helper(char *ss, const R_Opnd & input_param1) { if (&input_param1 != &ecx_opnd) { ss = mov(ss, ecx_opnd, input_param1); } #ifdef ASM_MONITOR_HELPER //get self_id ss = gen_hythread_self_helper(ss); ss = mov(ss, edx_opnd, M_Base_Opnd(eax_reg, hythread_get_thread_id_offset())); // mov edx,dword [eax+off] ss = mov(ss, eax_opnd, M_Base_Opnd(ecx_reg, 2), size_16); // mov ax,word[ecx+2] ss = alu(ss, cmp_opc, edx_opnd, eax_opnd, size_16); // cmp dx,ax ss = branch8(ss, Condition_NZ, Imm_Opnd(size_8, 0)); // jnz check_zero char *check_zero = ((char *)ss) - 1; //; ax==dx it's safe to do inc ss = mov(ss, eax_opnd, M_Base_Opnd(ecx_reg, 1), size_8); // mov al, byte[ecx+1] //rec_inc: ss = alu(ss, add_opc, eax_opnd, Imm_Opnd(size_8, 0x8), size_8); // add al,0x8 ss = branch8(ss, Condition_C, Imm_Opnd(size_8, 0)); // jc failed char *failed1 = ((char *)ss) - 1; ss = mov(ss, M_Base_Opnd(ecx_reg, 1), eax_opnd, size_8); // mov byte[ecx+1],al ss = ret(ss, Imm_Opnd(4)); // ret 4 signed offset = (signed)ss - (signed)check_zero - 1; *check_zero = (char)offset; //check_zero: ss = test(ss, eax_opnd, eax_opnd, size_16); // test ax,ax ss = branch8(ss, Condition_NZ, Imm_Opnd(size_8, 0)); // jnz failed char *failed2 = ((char *)ss) - 1; ss = prefix(ss, lock_prefix); //; here ax==0. 
ss = cmpxchg(ss, M_Base_Opnd(ecx_reg, 2), edx_opnd, size_16); // lock cmpxchg16 [ecx+2],dx ss = branch8(ss, Condition_NZ, Imm_Opnd(size_8, 0)); // jnz failed char *failed3 = ((char *)ss) - 1; #ifdef LOCK_RESERVATION ss = mov(ss, eax_opnd, M_Base_Opnd(ecx_reg, 1), size_8); // mov al, byte[ecx+1] ss = test(ss, eax_opnd, Imm_Opnd(size_8, 0x4), size_8); // test al,0x4 ss = branch8(ss, Condition_NZ, Imm_Opnd(size_8, 0)); // jnz finish char *finish = ((char *)ss) - 1; ss = alu(ss, add_opc, eax_opnd, Imm_Opnd(size_8, 0x8), size_8); // add al,0x8 ss = mov(ss, M_Base_Opnd(ecx_reg, 1), eax_opnd, size_8); // mov byte[ecx+1],al offset = (signed)ss - (signed)finish - 1; *finish = (char)offset; //finish: #endif ss = ret(ss, Imm_Opnd(4)); // ret 4 offset = (signed)ss - (signed)failed1 - 1; *failed1 = (char)offset; //failed: offset = (signed)ss - (signed)failed2 - 1; *failed2 = (char)offset; offset = (signed)ss - (signed)failed3 - 1; *failed3 = (char)offset; #endif //ASM_MONITOR_HELPER // the second attempt to lock monitor ss = push(ss, ecx_opnd); ss = call(ss, (char *)hythread_thin_monitor_try_enter); ss = alu(ss, add_opc, esp_opnd, Imm_Opnd(4)); // pop parameters return ss; }
// inputs should be preserved outside if required since we do a call
// num_std_need_to_save registers will be preserved
/**
 * Generates code that builds an M2N frame on the stack, filling its fields
 * top-down and linking it into the current thread's m2n frame list.
 *
 * @param buf                  buffer to emit the code into
 * @param method               method handle stored in the frame (may be NULL)
 * @param current_frame_type   frame type tag stored in the frame
 * @param handles              unused here; see the matching pop routine
 * @param num_callee_saves     number of callee-saved registers NOT stored
 * @param num_std_need_to_save passed through to m2n_gen_ts_to_register
 * @param bytes_to_m2n_top     offset from the current rsp to the frame top
 * @return updated emission pointer
 */
char * m2n_gen_push_m2n(char * buf, Method_Handle method, frame_type current_frame_type,
                        bool handles, unsigned num_callee_saves,
                        unsigned num_std_need_to_save, I_32 bytes_to_m2n_top)
{
    // skip callee-saves registers
    bytes_to_m2n_top -= num_callee_saves * LcgEM64TContext::GR_SIZE;

    // TODO: check if it makes sense to save all callee-saves registers here
    // store rest of callee-saves registers
    for (unsigned i = num_callee_saves; i < LcgEM64TContext::MAX_GR_LOCALS; i++) {
        bytes_to_m2n_top -= LcgEM64TContext::GR_SIZE;
        buf = mov(buf, M_Base_Opnd(rsp_reg, bytes_to_m2n_top),
                  LcgEM64TContext::get_reg_from_map(LcgEM64TContext::GR_LOCALS_OFFSET + i),
                  size_64);
    }

    // init pop_regs to null
    bytes_to_m2n_top -= LcgEM64TContext::GR_SIZE;
    buf = mov(buf, M_Base_Opnd(rsp_reg, bytes_to_m2n_top), Imm_Opnd(size_32, 0), size_64);

    // store current_frame_type
    bytes_to_m2n_top -= LcgEM64TContext::GR_SIZE;
    assert(fit32(current_frame_type));
    buf = mov(buf, M_Base_Opnd(rsp_reg, bytes_to_m2n_top),
              Imm_Opnd(size_32, current_frame_type), size_64);

    // store a method associated with the current m2n frame
    bytes_to_m2n_top -= LcgEM64TContext::GR_SIZE;
    if (fit32((int64)method)) {
        buf = mov(buf, M_Base_Opnd(rsp_reg, bytes_to_m2n_top),
                  Imm_Opnd(size_32, (int64)method), size_64);
    } else {
        // Method pointer does not fit into imm32; stage it through rax.
        buf = mov(buf, rax_opnd, Imm_Opnd(size_64, (int64)method), size_64);
        buf = mov(buf, M_Base_Opnd(rsp_reg, bytes_to_m2n_top), rax_opnd);
    }

    // store local object handles
    bytes_to_m2n_top -= LcgEM64TContext::GR_SIZE;
    buf = mov(buf, M_Base_Opnd(rsp_reg, bytes_to_m2n_top), Imm_Opnd(size_64, (int64)0), size_64);

    // move pointer to the current VM_Thread structure to rax
    buf = m2n_gen_ts_to_register(buf, &rax_opnd, num_callee_saves,
                                 LcgEM64TContext::MAX_GR_LOCALS, num_std_need_to_save, 0);

    // shift to the last_m2n_frame field
    I_32 last_m2n_frame_offset = (I_32)(int64)&((VM_thread*)0)->last_m2n_frame;
    buf = alu(buf, add_opc, rax_opnd, Imm_Opnd(size_32, last_m2n_frame_offset), size_64);

    // store pointer to pointer to last m2n frame
    bytes_to_m2n_top -= LcgEM64TContext::GR_SIZE;
    buf = mov(buf, M_Base_Opnd(rsp_reg, bytes_to_m2n_top), rax_opnd, size_64);

    // save pointer to the previous m2n frame
    bytes_to_m2n_top -= LcgEM64TContext::GR_SIZE;
    buf = mov(buf, r9_opnd, M_Base_Opnd(rax_reg, 0));
    buf = mov(buf, M_Base_Opnd(rsp_reg, bytes_to_m2n_top), r9_opnd, size_64);

    // update last m2n frame of the current thread
    buf = lea(buf, r9_opnd, M_Base_Opnd(rsp_reg, bytes_to_m2n_top));
    buf = mov(buf, M_Base_Opnd(rax_reg, 0), r9_opnd, size_64);

    return buf;
}//m2n_gen_push_m2n
char * m2n_gen_set_local_handles_imm(char * buf, unsigned bytes_to_m2n, const Imm_Opnd * imm) { unsigned offset_local_handles = (unsigned)(uint64)&((M2nFrame*)0)->local_object_handles; buf = mov(buf, M_Base_Opnd(rsp_reg, bytes_to_m2n + offset_local_handles), *imm, size_64); return buf; }