// Builds the OSR (on-stack replacement) adapter blob: a small stub that
// empties the x87 FPU stack, pops the interpreter frame (restoring the
// sender's sp) and returns.  Two entry points are recorded: offset 0 for
// non-FP returns, and returning_fp_entry_offset for float/double returns
// (which must keep FPR0 alive).
OSRAdapter* SharedRuntime::generate_osr_blob(int frame_size) {
  ResourceMark rm;

  // setup code generation tools
  CodeBuffer* cb = new CodeBuffer(128, 128, 0, 0, 0, false);
  MacroAssembler* masm = new MacroAssembler(cb);

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map  = new OopMap(frame_size, 0 );
  OopMap* map2 = new OopMap(frame_size, 0 );

#ifdef COMPILER2
  // Create oopmap for osr adapter. All it contains is where to find the
  // link offset (ebp) on windows.
  int link_offset = ((frame_size - frame::sender_sp_offset) + frame::link_offset);
  map->set_callee_saved(OptoReg::Name(SharedInfo::stack0 + link_offset), frame_size, 0, OptoReg::Name(EBP_num));
  map2->set_callee_saved(OptoReg::Name(SharedInfo::stack0 + link_offset), frame_size, 0, OptoReg::Name(EBP_num));
#endif
  // GC map for the offset-0 (non-FP-return) entry.
  oop_maps->add_gc_map(0, true, map);

  // Empty all except FPR0 in case of float/double returns
  __ ffree(0);                      // only the offset-0 entry frees st(0)
  int returning_fp_entry_offset = __ offset();  // FP-return entry starts here
  oop_maps->add_gc_map(returning_fp_entry_offset, true, map2);
  for (int i = 1; i<8; i++ ) __ ffree(i);       // both entries free st(1)..st(7)

  __ movl(ecx, Address(ebp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
  __ leave();                       // remove frame anchor
  __ popl(esi);                     // get return address
  __ movl(esp, ecx);                // set sp to sender sp
  __ jmp(esi);

  __ flush();
  return OSRAdapter::new_osr_adapter(cb, oop_maps, frame_size, returning_fp_entry_offset);
}
// Stub that stores a pending exception (in eax) into the current thread,
// records the throwing file/line for diagnostics, and then jumps to the
// call stub's return address to complete the return to the VM.
address generate_catch_exception() {
  StubCodeMark mark(this, "StubRoutines", "catch_exception");
  // Frame-layout constants shared with the call stub; esp_after_call is
  // declared only to document the layout (it is not referenced below).
  const Address esp_after_call(ebp, -4 * wordSize); // same as in generate_call_stub()!
  const Address thread        (ebp,  9 * wordSize); // same as in generate_call_stub()!
  address start = __ pc();

  // get thread directly
  __ movl(ecx, thread);
#ifdef ASSERT
  // verify that threads correspond
  { Label L;
    __ get_thread(ebx);
    __ cmpl(ebx, ecx);
    __ jcc(Assembler::equal, L);
    __ stop("StubRoutines::catch_exception: threads must correspond");
    __ bind(L);
  }
#endif

  // set pending exception
  __ verify_oop(eax);
  __ movl(Address(ecx, Thread::pending_exception_offset()), eax );
  __ movl(Address(ecx, Thread::exception_file_offset ()), (int)__FILE__);
  __ movl(Address(ecx, Thread::exception_line_offset ()), __LINE__);

  // complete return to VM
  assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
  __ jmp(StubRoutines::_call_stub_return_address, relocInfo::none);

  return start;
}
/* End-to-end test of the simulated call stack: builds two nested frames,
 * stores two 32-bit locals in each, sums them into eax, and checks the
 * result after each frame is torn down (5+3 == 8 inner, 24+18 == 42 outer). */
void stack_tests() {
  stack_init();
  //printf("%p, %p\n", ebp, esp);
  enter(16);                       /* outer frame */
  //printf("%p, %p\n", ebp, esp);
  setl(24, ebp+0);                 /* outer locals: 24 and 18 */
  setl(18, ebp+4);

  /* simulate another function call */
  enter(16);                       /* inner frame */
  //printf("%p, %p\n", ebp, esp);
  setl(5, ebp+0);                  /* inner locals: 5 and 3 */
  setl(3, ebp+4);
  movl(ebp+4, eax);
  addl(ebp+0, eax);
  leave();
  //printf("%p, %p\n", ebp, esp);
  assert(*(int32_t *)eax == 8);    /* 5 + 3 */
  /* end inner function */

  /* after leave(), ebp addresses the outer frame's locals again */
  movl(ebp+4, eax);
  addl(ebp+0, eax);
  leave();
  //printf("%p, %p\n", ebp, esp);
  stack_end();
  assert(*(int32_t *)eax == 42);   /* 24 + 18 */
}
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 *
 * Fast path: no frame is built; arguments are read straight off the
 * expression stack, the CRC32 stub is called, and control returns to the
 * sender.  Falls back to the vanilla native entry when a safepoint is
 * pending.
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx,: Method*
    // rsi: senderSP must preserved for slow path, set SP to it on fast path
    // rdx: scratch
    // rdi: scratch

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;  // crc
    const Register buf = rdx;  // source java byte array address
    const Register len = rdi;  // length
    //                               value              x86_32
    //                 interp. arg ptr                  ESP + 4
    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
    //                                         3           2      1        0
    // int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
    //                                              4         2,3      1        0
    // Arguments are reversed on java expression stack
    __ movl(len, Address(rsp, 4 + 0)); // Length
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long buf
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 4 * wordSize));   // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 3 * wordSize));   // Initial CRC
    }

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, rsi);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
// Initializes the header of a freshly allocated object or array:
// first the mark word, then the klass field, then (for arrays) the
// length field; with compressed class pointers the klass gap is zeroed.
//   obj:    register holding the new object's address
//   klass:  register holding the object's Klass*
//   len:    array length register, or an invalid register for plain objects
//   t1, t2: temporaries (t1 is used in the biased-locking and compressed-
//           klass paths; t2 only participates in the register assert)
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
  assert_different_registers(obj, klass, len);
  if (UseBiasedLocking && !len->is_valid()) {
    // Biased locking: the initial mark word is the klass's prototype
    // header, so it must be loaded rather than taken from the static
    // prototype below.
    assert_different_registers(obj, klass, len, t1, t2);
    movptr(t1, Address(klass, Klass::prototype_header_offset()));
    movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
  } else {
    // This assumes that all prototype bits fit in an int32_t
    movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
  }
#ifdef _LP64
  if (UseCompressedClassPointers) { // Take care not to kill klass
    movptr(t1, klass);
    encode_klass_not_null(t1);
    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
  } else
#endif
  {
    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  }

  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
  }
#ifdef _LP64
  else if (UseCompressedClassPointers) {
    // Plain object with narrow klass: zero the pad word next to the
    // compressed klass so the whole header is initialized.
    xorptr(t1, t1);
    store_klass_gap(obj, t1);
  }
#endif
}
// Boxes one argument for a native call: stores the address of the
// argument slot into the outgoing argument area, or 0 (NULL) when the
// slot itself contains 0.
void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_offset) {
  __ leal(temp(), Address(from(), from_offset * wordSize));     // temp = &slot
  __ cmpl(Address(from(), from_offset * wordSize), 0); // do not use temp() to avoid AGI
  Label L;
  __ jcc(Assembler::notZero, L);
  __ movl(temp(), 0);                                  // slot was 0 -> pass NULL
  __ bind(L);
  __ movl(Address(to(), to_offset * wordSize), temp());
}
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 *
 * x86_64 fast path: no frame, arguments read from the expression stack,
 * CRC32 stub called, return directly to the sender.  Falls back to the
 * vanilla native entry when a safepoint is pending.
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx,: Method*
    // r13: senderSP must preserved for slow path, set SP to it on fast path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    // 'off' aliases 'len': their live ranges are disjoint (len is loaded
    // only after off has been consumed below).
    const Register off = len;      // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3*wordSize));   // long buf
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off);                          // + offset
      __ movl(crc, Address(rsp, 5*wordSize));     // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3*wordSize));   // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off);                          // + offset
      __ movl(crc, Address(rsp, 4*wordSize));     // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ movl(len, Address(rsp, wordSize)); // Length

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, r13);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
// Emits the "compiler_new_object" stub: bump-pointer allocation from the
// _inline_allocation_top/_inline_allocation_end region, with field
// zeroing; falls back to the VM's "newobject" runtime on exhaustion.
// On entry edx holds the instance size, ebx the prototypical near of the
// class; on success the new object is returned in eax.
void CompilerStubs::generate_compiler_new_object() {
  comment_section("Compiler new object (any size)");
  comment("Register edx holds the instance size, register ebx holds the prototypical near of the instance class");

  Label slow_case;
  entry("compiler_new_object");

  comment("Get _inline_allocation_top");
  movl(eax, Address(Constant("_inline_allocation_top")));

  comment("Compute new top");
  leal(ecx, Address(eax, edx, times_1));

  if (GenerateDebugAssembly) {
    comment("Check ExcessiveGC");
    testl(Address(Constant("ExcessiveGC")), Constant(0));
    jcc(not_zero, Constant(slow_case));
  }

  comment("Compare against _inline_allocation_end");
  cmpl(ecx, Address(Constant("_inline_allocation_end")));
  jcc(above, Constant(slow_case));

  comment("Allocation succeeded, set _inline_allocation_top");
  movl(Address(Constant("_inline_allocation_top")), ecx);

  comment("Set prototypical near in object; no need for write barrier");
  movl(Address(eax), ebx);

  comment("Compute remaining size");
  decrement(edx, oopSize);

  comment("One-word object?");
  Label init_done;
  jcc(zero, Constant(init_done));

  comment("Zero object fields");
  // Walk edx from (size - oopSize) down to 0, storing zero words.
  xorl(ecx, ecx);
  Label init_loop;
  bind(init_loop);
  movl(Address(eax, edx, times_1), ecx);
  decrement(edx, oopSize);
  jcc(not_zero, Constant(init_loop));
  bind(init_done);

  comment("The newly allocated object is in register eax");
  ret();

  comment("Slow case - call the VM runtime system");
  bind(slow_case);
  leal(eax, Address(Constant("newobject")));
  goto_shared_call_vm(T_OBJECT);

  entry_end(); // compiler_new_object
}
// Stub executed when a runtime call returns with a pending exception:
// computes the exception handler for the return address, clears the
// pending exception, and continues at the handler with eax = exception
// and edx = throwing pc.
address generate_forward_exception() {
  StubCodeMark mark(this, "StubRoutines", "forward exception");
  address start = __ pc();

  // Upon entry, the sp points to the return address returning into Java
  // (interpreted or compiled) code; i.e., the return address becomes the
  // throwing pc.
  //
  // Arguments pushed before the runtime call are still on the stack but
  // the exception handler will reset the stack pointer -> ignore them.
  // A potential result in registers can be ignored as well.

#ifdef ASSERT
  // make sure this code is only executed if there is a pending exception
  { Label L;
    __ get_thread(ecx);
    __ cmpl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);
    __ jcc(Assembler::notEqual, L);
    __ stop("StubRoutines::forward exception: no pending exception (1)");
    __ bind(L);
  }
#endif

  // compute exception handler into ebx
  __ movl(eax, Address(esp));       // return address == throwing pc
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), eax);
  __ movl(ebx, eax);

  // setup eax & edx, remove return address & clear pending exception
  __ get_thread(ecx);
  __ popl(edx);                     // throwing pc into edx
  __ movl(eax, Address(ecx, Thread::pending_exception_offset()));
  __ movl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);

#ifdef ASSERT
  // make sure exception is set
  { Label L;
    __ testl(eax, eax);
    __ jcc(Assembler::notEqual, L);
    __ stop("StubRoutines::forward exception: no pending exception (2)");
    __ bind(L);
  }
#endif

  // continue at exception handler (return address removed)
  // eax: exception
  // ebx: exception handler
  // edx: throwing pc
  __ verify_oop(eax);
  __ jmp(ebx);

  return start;
}
// Debug-support stub: returns (in eax) the frame pointer two links up
// from this stub's own frame, i.e. the frame for ps().
address generate_get_previous_fp() {
  StubCodeMark mark(this, "StubRoutines", "get_previous_fp");
  address start = __ pc();

  __ enter();                      // build a frame; ebp -> saved caller ebp
  __ movl(eax, Address(ebp, 0));   // callers fp
  __ movl(eax, Address(eax, 0));   // the frame for ps()
  __ popl(ebp);                    // undo the enter (esp is unchanged)
  __ ret(0);

  return start;
}
// Stub implementing an atomic 32-bit exchange: swaps exchange_value with
// *dest and returns the previous contents in eax.
address generate_atomic_xchg() {
  StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
  address start = __ pc();

  __ pushl(edx);                             // edx is used as scratch; preserve it
  // After the push the C arguments sit one extra slot above esp:
  //   esp + 2*wordSize : exchange_value
  //   esp + 3*wordSize : dest
  __ movl(eax, Address(esp, 2 * wordSize));
  __ movl(edx, Address(esp, 3 * wordSize));
  __ xchg(eax, Address(edx, 0));             // xchg with a memory operand
  __ popl(edx);
  __ ret(0);

  return start;
}
// call (Thread*)TlsGetValue(thread_index()); void MacroAssembler::get_thread(Register thread) { if (thread != rax) { push(rax); } push(rdi); push(rsi); push(rdx); push(rcx); push(r8); push(r9); push(r10); // XXX mov(r10, rsp); andq(rsp, -16); push(r10); push(r11); movl(c_rarg0, ThreadLocalStorage::thread_index()); call(RuntimeAddress((address)TlsGetValue)); pop(r11); pop(rsp); pop(r10); pop(r9); pop(r8); pop(rcx); pop(rdx); pop(rsi); pop(rdi); if (thread != rax) { mov(thread, rax); pop(rax); } }
// Emits the stub that saves the primordial (C) stack pointer and switches
// execution onto the current thread's stack.  Also emits the x86 stub for
// start_lightweight_thread_asm, which must never be reached here.
void InterpreterStubs::generate_primordial_to_current_thread() {
  entry("primordial_to_current_thread");
  pushal();                                            // save all general registers
  pushl(ebp);
  movl(Address(Constant("_primordial_sp")), esp);      // remember where to switch back to
  get_thread(ecx);
  movl(esp, Address(ecx, Constant(Thread::stack_pointer_offset())));
  popl(ebp);
  ret();
  entry_end(); // primordial_to_current_thread

  entry("start_lightweight_thread_asm");
  // Should never reach here on x86
  int3();
  entry_end(); // start_lightweight_thread_asm
}
// Entry used when execution continues in the interpreter after
// deoptimization: re-loads the bytecode (esi) and locals (edi) pointers
// from the frame and dispatches to the next bytecode.
void InterpreterStubs::generate_interpreter_deoptimization_entry() {
  comment_section("Interpreter deoptimization entry");
  entry("interpreter_deoptimization_entry");

  // Define an interpreter call info.
  define_call_info();

  comment("Restore bytecode and locals pointers");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

  // Dispatch to the next bytecode.
  dispatch_next();

  entry_end(); // interpreter_deoptimization_entry
}
/* Smoke tests for the simulated instructions: each setl/op sequence
 * verifies the 32-bit value left in the destination register. */
void instructions_tests() {
  /* 32 bit copy */
  setl(42, eax);
  assert(*(int32_t *)eax == 42);
  setl(1000000, eax);
  assert(*(int32_t *)eax == 1000000);
  movl(eax, edx);
  assert(*(int32_t *)edx == 1000000);

  /* addition */
  setl(3, eax);
  setl(5, edx);
  addl(edx, eax);                  /* eax += edx */
  assert(*(int32_t *)eax == 8);

  /* division */
  setl(15, eax);
  setl(5, edx);
  divl(edx, eax);                  /* eax /= edx */
  assert(*(int32_t *)eax == 3);

  /* multiplication */
  setl(3, eax);
  setl(5, edx);
  mull(edx, eax);                  /* eax *= edx */
  assert(*(int32_t *)eax == 15);

  /* subtraction */
  setl(3, eax);
  setl(5, edx);
  subl(edx, eax);                  /* eax -= edx */
  assert(*(int32_t *)eax == -2);
}
/*
 * Driver: builds a five-node list (values 0..4), then demonstrates
 * moving the split point between lists l and r six times to the right
 * and six times back to the left, printing both lists after each move.
 *
 * Fix: the label for the fourth printf in the "move left" loop said
 * "move keft" — a typo for "move left" (now consistent with the other
 * three labels).
 */
int main() {
  listPointer list1 = NULL, temp = list1;
  for (int i = 0; i < 5; i++) {
    temp = add(&list1, temp, i);   /* append value i; temp tracks the tail */
  }

  listPointer l = NULL, r = list1; /* split point starts at the head */
  printf("원래 list : ");
  printList(list1);
  printf("-----------------------------------------------------------------\n");

  for (int i = 1; i < 7; i++) {
    movr(&l, &r);                  /* shift one node from r to l */
    printf("%d번 move right후 list l : ", i);
    printList(l);
    printf("%d번 move right후 list r : ", i);
    printList(r);
  }
  printf("-----------------------------------------------------------------\n");

  for (int i = 1; i < 7; i++) {
    movl(&l, &r);                  /* shift one node from l back to r */
    printf("%d번 move left후 list l : ", i);
    printList(l);
    printf("%d번 move left후 list r : ", i);
    printList(r);
  }
  return 0;
}
/*
 * Pushes the 4-byte value at src onto the simulated stack and advances
 * esp.  Aborts the process when the push would overflow the stack.
 *
 * Fix: the overflow check was off by one — the original rejected a push
 * when (offset + 4) == scap, even though that push writes exactly the
 * last four bytes ss[scap-4..scap-1] and is legal (the matching popl
 * only requires offset >= 4).  Assumes scap is the capacity of ss in
 * bytes — TODO confirm against stack_init().
 */
void pushl(const char *src) {
  if ((esp - &ss[0]) + 4 > scap) {
    fprintf(stderr, "Stack overflow!\n");
    exit(EXIT_FAILURE);
  }
  movl(src, esp);
  esp += 4;
}
/*
 * Pops 4 bytes off the simulated stack into dest.
 * Aborts the process on underflow (fewer than 4 bytes pushed).
 */
void popl(char *dest) {
  /* underflow when fewer than 4 bytes are on the stack */
  if (esp - &ss[0] < 4) {
    fprintf(stderr, "Stack underflow!\n");
    exit(EXIT_FAILURE);
  }
  esp -= 4;
  movl(esp, dest);
}
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 *
 * Fast path: updates the CRC for a single byte inline (table lookup via
 * update_byte_crc32) without building a frame.  Falls back to the vanilla
 * native entry when a safepoint is pending.
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx,: Method*
    // r13: senderSP must preserved for slow path, set SP to it on fast path
    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;      // crc
    const Register val = c_rarg0;  // source java byte value
    const Register tbl = c_rarg1;  // scratch

    // Arguments are reversed on java expression stack
    __ movl(val, Address(rsp,   wordSize)); // byte value
    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC

    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
    // CRC32 is defined over the complemented value.
    __ notl(crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ notl(crc); // ~crc
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, r13);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
 *
 * x86_64 fast path: arguments are read from the expression stack, the
 * Java-level 'end' index is converted to a length (end - off), and the
 * CRC32C stub is called before returning directly to the sender.
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32CIntrinsics) {
    address entry = __ pc();

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;
    const Register off = c_rarg3;  // offset
    const Register end = len;      // aliases len: length is computed in place

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ movptr(buf, Address(rsp, 3 * wordSize));   // long buf
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off);                            // + offset
      __ movl(crc, Address(rsp, 5 * wordSize));     // Initial CRC
      // Note on 5 * wordSize vs. 4 * wordSize:
      // *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
      //                                                   4         2,3          1        0
      // end starts at SP + 8
      // The Java(R) Virtual Machine Specification Java SE 7 Edition
      // 4.10.2.3. Values of Types long and double
      //    "When calculating operand stack length, values of type long and double have length two."
    } else {
      __ movptr(buf, Address(rsp, 3 * wordSize));   // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off);                            // + offset
      __ movl(crc, Address(rsp, 4 * wordSize));     // Initial CRC
    }
    // The stub wants a length, the Java method passes an end index.
    __ movl(end, Address(rsp, wordSize)); // end
    __ subl(end, off);                    // end - off

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, r13);           // set sp to sender sp
    __ jmp(rdi);

    return entry;
  }
  return NULL;
}
// Emits the complete signature handler for this method: first the
// per-argument marshalling code driven by the fingerprint, then code
// leaving the result handler's address in eax before returning.
void InterpreterRuntime::SignatureHandlerGenerator::generate( uint64_t fingerprint) {
  // Emit code that moves every argument into place.
  iterate(fingerprint);

  // Leave the result handler for the method's return type in eax ...
  address result_handler = AbstractInterpreter::result_handler(method()->result_type());
  __ movl(eax, (int)result_handler);

  // ... and return to the caller of the signature handler.
  __ ret(0);
  __ flush();
}
// The current scheme to accelerate access to the thread // pointer is to store the current thread in the os_exception_wrapper // and reference the current thread from stubs and compiled code // via the FS register. FS[0] contains a pointer to the structured // exception block which is actually a stack address. The first time // we call the os exception wrapper, we calculate and store the // offset from this exception block and use that offset here. // // The last mechanism we used was problematic in that the // the offset we had hard coded in the VM kept changing as Microsoft // evolved the OS. // // Warning: This mechanism assumes that we only attempt to get the // thread when we are nested below a call wrapper. // // movl reg, fs:[0] Get exeception pointer // movl reg, [reg + thread_ptr_offset] Load thread // void MacroAssembler::get_thread(Register thread) { // can't use ExternalAddress because it can't take NULL AddressLiteral null(0, relocInfo::none); prefix(FS_segment); movptr(thread, null); assert(ThreadLocalStorage::get_thread_ptr_offset() != 0, "Thread Pointer Offset has not been initialized"); movl(thread, Address(thread, ThreadLocalStorage::get_thread_ptr_offset())); }
// Loads the current thread by indexing a global stack-page map with the
// current stack pointer: thread = sp_map[rsp >> PAGE_SHIFT].
void MacroAssembler::get_thread(Register thread) {
  movl(thread, rsp);
  shrl(thread, PAGE_SHIFT);            // page index of the current stack page
  ExternalAddress tls_base((address)ThreadLocalStorage::sp_map_addr());
  Address index(noreg, thread, Address::times_4);
  ArrayAddress tls(tls_base, index);
  movptr(thread, tls);                 // table lookup yields the Thread*
}
// Entry for (re)throwing the exception in eax while the interpreter
// state is not in registers: restores esi/edi, clears the expression
// stack, asks the VM for a handler bci, and dispatches to it — or jumps
// to interpreter_unwind_activation when no handler exists in this frame.
void InterpreterStubs::generate_interpreter_rethrow_exception() {
  comment_section("Interpreter rethrow exception");
  comment("Register eax holds the exception; Interpreter state is not in registers");
  entry("interpreter_rethrow_exception");

  comment("Restore bytecode and locals pointers");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

  comment("Mark the bytecode pointer as being inside an exception");
  addl(esi, Constant(JavaFrame::exception_frame_flag));

  comment("Clear the expression stack");
  movl(esp, Address(ebp, Constant(JavaFrame::stack_bottom_pointer_offset())));

  comment("Push the exception on the expression stack");
  push_obj(eax);

  comment("Get exception handler bci for exception");
  interpreter_call_vm(Constant("exception_handler_bci_for_exception"), T_INT);

  comment("Check if we got a bci - otherwise unwind the activation");
  cmpl(eax, Constant(-1));
  jcc(equal, Constant("interpreter_unwind_activation"));

#if ENABLE_JAVA_DEBUGGER
  // Notify an attached debugger and re-query the handler bci, since the
  // debugger callback may have changed it.
  Label skip;
  cmpb(Address(Constant("_debugger_active")), Constant(0));
  jcc(equal, Constant(skip));
  movl(edx, eax);
  interpreter_call_vm(Constant("handle_caught_exception"), T_VOID);
  comment("Re-get exception handler bci for exception");
  interpreter_call_vm(Constant("exception_handler_bci_for_exception"), T_INT);
  bind(skip);
#endif

  comment("Convert the bytecode index into a bytecode pointer");
  movl(ecx, Address(ebp, Constant(JavaFrame::method_offset())));
  leal(esi, Address(ecx, eax, times_1, Constant(Method::base_offset())));

  // Dispatch to the exception handler.
  dispatch_next();

  entry_end(); // interpreter_rethrow_exception
}
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateByteBuffer(int crc, long address, int off, int end)
 *
 * x86_32 fast path: converts the Java-level 'end' index into a length,
 * computes the source address, and calls the CRC32C stub before
 * returning directly to the sender (whose SP is preserved in rsi).
 */
address InterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32CIntrinsics) {
    address entry = __ pc();

    // Load parameters
    const Register crc = rax;  // crc
    const Register buf = rcx;  // source java byte array address
    const Register len = rdx;  // length
    const Register end = len;  // aliases len: length is computed in place
    //                               value              x86_32
    //                 interp. arg ptr                  ESP + 4
    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int end)
    //                                         3           2      1        0
    // int java.util.zip.CRC32.updateByteBuffer(int crc, long address, int off, int end)
    //                                              4         2,3          1        0
    // Arguments are reversed on java expression stack
    __ movl(end, Address(rsp, 4 + 0));            // end
    __ subl(len, Address(rsp, 4 + 1 * wordSize)); // end - offset == length
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long address
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 4 * wordSize));   // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 3 * wordSize));   // Initial CRC
    }

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32C()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, rsi);           // set sp to sender sp
    __ jmp(rdi);

    return entry;
  }
  return NULL;
}
// Interpreter runtime entry: performs a VM call and then dispatches to
// the bytecode whose number the VM leaves in eax (indexing the global
// interpreter dispatch table), instead of continuing at esi.
void InterpreterStubs::generate_interpreter_call_vm_dispatch() {
  comment_section("Interpreter call VM - and dispatch to the bytecode returned by the VM upon termination");
  entry("interpreter_call_vm_dispatch");

  comment("Save bytecode pointer");
  movl(Address(ebp, Constant(JavaFrame::bcp_store_offset())), esi);

  comment("Call the shared call vm and disregard any return value");
  call_shared_call_vm(T_INT);

  comment("Restore bytecode pointer");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));
  comment("Restore locals pointer");
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

  comment("Dispatch to next byte code");
  jmp(Address(no_reg, eax, times_4, Constant("interpreter_dispatch_table")));

  entry_end(); // interpreter_call_vm_dispatch
}
// Passes one float argument of the Java method to the native call:
// into the next free XMM argument register when one remains, otherwise
// onto the outgoing stack area at _stack_offset (via rax as scratch).
void InterpreterRuntime::SignatureHandlerGenerator::pass_float() {
  const Address src(from(), Interpreter::local_offset_in_bytes(offset()));

#ifdef _WIN64
  // Win64 uses one combined counter (_num_args) for integer and float
  // register slots, so the float goes into XMM<next combined slot>.
  if (_num_args < Argument::n_float_register_parameters_c-1) {
    __ movflt(as_XMMRegister(++_num_args), src);
  } else {
    __ movl(rax, src);
    __ movl(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
  }
#else
  // System V: floats have their own register counter (_num_fp_args).
  if (_num_fp_args < Argument::n_float_register_parameters_c) {
    __ movflt(as_XMMRegister(_num_fp_args++), src);
  } else {
    __ movl(rax, src);
    __ movl(Address(to(), _stack_offset), rax);
    _stack_offset += wordSize;
  }
#endif
}
// Interpreter runtime entry: performs a VM call and then re-executes the
// current bytecode.  With the debugger enabled, a bytecode that is still
// displayed as _breakpoint is dispatched via the opcode the VM returned
// in eax rather than via the bytecode pointer.
void InterpreterStubs::generate_interpreter_call_vm_redo() {
#if ENABLE_JAVA_DEBUGGER
  Label check_breakpoint, no_breakpoint;
#endif
  comment_section("Interpreter call VM - and repeat current bytecode upon termination");
  entry("interpreter_call_vm_redo");

  comment("Save bytecode pointer");
  movl(Address(ebp, Constant(JavaFrame::bcp_store_offset())), esi);

  comment("Call the shared call vm and disregard any return value");
  call_shared_call_vm(T_VOID);

  comment("Restore bytecode pointer");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));
  comment("Restore locals pointer");
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

#if ENABLE_JAVA_DEBUGGER
  comment("Check to see if we are connected to a debugger");
  cmpb(Address(Constant("_debugger_active")), Constant(0));
  jcc(not_zero, Constant(check_breakpoint));

  bind(no_breakpoint);
  comment("Not debugging, so just dispatch");
  dispatch_next(0);

  bind(check_breakpoint);
  comment("We are debugging, so let's see if we just replaced a breakpoint opcode");
  cmpb(Address(esi), Constant(Bytecodes::_breakpoint));
  jcc(not_zero, Constant(no_breakpoint));
  comment("There is a breakpoint in the code, so that means that eax has the correct opcode");
  comment("So just jmp directly without using esi");
  andl(eax, Constant(0xFF));
  movl(ebx, eax);
  jmp(Address(no_reg, ebx, times_4, Constant("interpreter_dispatch_table")));
#else
  dispatch_next(0);
#endif

  entry_end(); // interpreter_call_vm_redo
}
// Emits the complete signature handler for this method: a function
// descriptor (emit_fd), the per-argument marshalling code, and finally
// code leaving the result handler's address in GR_RET.
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
  __ emit_fd();

  // generate code to handle arguments
  iterate(fingerprint);
  pass_prev(-BytesPerWord);  // NOTE(review): appears to flush the last
                             // pending argument slot — confirm semantics

  // return result handler
  __ movl(GR_RET, (uint64_t)AbstractInterpreter::result_handler(method()->result_type()));
  __ ret();

  __ flush();
}
// Emits the stub that abandons the current Java thread's stack and
// switches back onto the primordial (C) stack saved in _primordial_sp,
// restoring the registers that primordial_to_current_thread pushed.
void InterpreterStubs::generate_current_thread_to_primordial() {
  entry("current_thread_to_primordial");

  // We're never going to return to this thread, so it doesn't matter if
  // it doesn't look like a stopped Java thread anymore.
  // pushl(ebp);
  // get_thread(ecx);
  // movl(Address(ecx, Constant(Thread::stack_pointer_offset())), esp);

  movl(esp, Address(Constant("_primordial_sp")));
  popl(ebp);
  popal();      // matches the pushal() done when the stack was left
  ret();

  entry_end(); // current_thread_to_primordial
}