//---------------------------------------------------------------------------------------------------- // Support for jint Atomic::add(jint inc, volatile jint* dest). // // Arguments: // // inc - GR_I0 (e.g., +1 or -1) // dest - GR_I1 // // Results: // // GR_RET - the new value stored in dest // // address generate_atomic_add() { StubCodeMark mark(this, "StubRoutines", "atomic_add"); const Register inc = GR_I0; const Register dest = GR_I1; address start = __ emit_fd(); __ mf(); // increment or decrement __ cmp4(PR6, PR7, 1, inc, Assembler::equal); __ fetchadd4(PR6, GR_RET, dest, 1, Assembler::acquire); __ fetchadd4(PR7, GR_RET, dest, -1, Assembler::acquire); // GR_RET contains result of the fetch, not the add __ sxt4(GR_RET, GR_RET); __ adds(PR6, GR_RET, 1, GR_RET); __ adds(PR7, GR_RET, -1, GR_RET); __ ret(); return start; }
// Support for jdouble StubRoutine::ia64::ldffill()( address fresult ) // Arguments : // ret : FR_RET, returned // fresult : I0, argument // address generate_ldffill() { StubCodeMark mark(this, "StubRoutines", "ldffill"); address start = __ emit_fd(); __ ldffill(FR_RET, GR_I0); __ ret(); return start; }
//------------------------------------------------------------------------------------------------------------------------ // Flush the register stack. // address generate_get_backing_store_pointer() { StubCodeMark mark(this, "StubRoutines", "get_backing_store_pointer"); address start = __ emit_fd(); __ mov(GR_RET, AR_BSP); __ ret(); return start; }
//---------------------------------------------------------------------------------------------------- // Support for void OrderAccess::acquire(). Windows only until compiler supports inline asm. // address generate_acquire() { StubCodeMark mark(this, "StubRoutines", "acquire"); address start = __ emit_fd(); // Issue a dummy ld8.acq __ ld8(GR31, SP, Assembler::acquired); __ ret(); return start; }
//---------------------------------------------------------------------------------------------------- // Support for void OrderAccess::fence(). // address generate_fence() { StubCodeMark mark(this, "StubRoutines", "fence"); address start = __ emit_fd(); // severe overkill __ mf(); __ ret(); return start; }
// Generate a native-call signature handler for the signature encoded in
// fingerprint, and leave the matching result handler in GR_RET.
void InterpreterRuntime::SignatureHandlerGenerator::generate(uint64_t fingerprint) {
  // Emit the IA64 function descriptor for the handler being generated.
  __ emit_fd();

  // generate code to handle arguments: iterate() walks the fingerprint and
  // emits per-argument passing code.
  iterate(fingerprint);
  // NOTE(review): appears to finish/flush the final (previous) argument slot —
  // confirm against the iterate()/pass_* implementations.
  pass_prev(-BytesPerWord);

  // return result handler for the method's result type in GR_RET
  __ movl(GR_RET, (uint64_t)AbstractInterpreter::result_handler(method()->result_type()));
  __ ret();

  __ flush();
}
//---------------------------------------------------------------------------------------------------- // Support for intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest). // // Arguments: // // exchange_value - GR_I0 // dest - GR_I1 // // Results: // // GR_RET - the value previously stored in dest // address generate_atomic_xchg_ptr() { StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr"); const Register exchange_value = GR_I0; const Register dest = GR_I1; address start = __ emit_fd(); __ mf(); __ xchg8(GR_RET, dest, exchange_value); __ ret(); return start; }
// Support for intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) // // Arguments: // // exchange_value - GR_I0 // dest - GR_I1 // compare_value - GR_I2 // // Results: // // GR_RET - the value previously stored in dest // address generate_atomic_cmpxchg_ptr() { StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_ptr"); const Register exchange_value = GR_I0; const Register dest = GR_I1; const Register compare_value = GR_I2; address start = __ emit_fd(); __ mf(); __ mov(AR_CCV, compare_value); __ cmpxchg8(GR_RET, dest, exchange_value, Assembler::acquire); __ ret(); return start; }
//------------------------------------------------------------------------------------------------------------------------ // Flush the register stack. // address generate_flush_register_stack() { StubCodeMark mark(this, "StubRoutines", "flush_register_stack"); address start = __ emit_fd(); const Register orig_RSC = GR2_SCRATCH; const Register mod_RSC = GR3_SCRATCH; __ mov(orig_RSC, AR_RSC); __ movl(mod_RSC, CONST64(0xFFFFFFFFC000FFFC)); // mask tear point to zero, rse to lazy __ flushrs(); __ and3(mod_RSC, mod_RSC, orig_RSC); __ mov(AR_RSC, mod_RSC); __ loadrs(); // Invalidate lower frames __ mov(AR_RSC, orig_RSC); // restore tear point to original __ ret(); return start; }
// Emit the IA64 function descriptor for the code that follows and return its
// address, which serves as the function's entry point.
inline address MacroAssembler::function_entry() { return emit_fd(); }
//------------------------------------------------------------------------------------------------------------------------
// Call stubs are used to call Java from C
//
//      GR_I0 - call wrapper address     : address
//      GR_I1 - result                   : intptr_t*
//      GR_I2 - result type              : BasicType
//      GR_I3 - method                   : methodOop
//      GR_I4 - interpreter entry point  : address
//      GR_I5 - parameter block          : intptr_t*
//      GR_I6 - parameter count in words : int
//      GR_I7 - thread                   : Thread*
//
address generate_call_stub(address& return_address) {
  StubCodeMark mark(this, "StubRoutines", "call_stub");

  // Incoming arguments (see table above).
  const Register result     = GR_I1;
  const Register type       = GR_I2;
  const Register method     = GR_I3;
  const Register entry_ptr  = GR_I4;
  const Register parms      = GR_I5;
  const Register parm_count = GR_I6;
  const Register thread     = GR_I7;

  // Scratch registers used while building the outgoing frame.
  const Register parm_size  = GR31_SCRATCH;
  const Register entry      = GR30_SCRATCH;
  const Register arg        = GR29_SCRATCH;

  const Register out_tos    = GR49; // Equivalent of GR_Otos
  const Register out_parms  = GR50; // Equivalent of GR_Olocals (unused)

  const BranchRegister    entry_br = BR6_SCRATCH;
  const PredicateRegister no_args  = PR6_SCRATCH;

  address start = __ emit_fd();

  // Must allocate 8 output registers in case we go thru an i2c
  // and the callee needs 8 input registers
  __ alloc(GR_Lsave_PFS, 8, 9, 8, 0);                    // save AR_PFS

  __ sxt4(parm_count, parm_count);                       // # of parms
  __ mov(GR_Lsave_SP, SP);                               // save caller's SP

  // Save the caller's values of the registers this stub repurposes below.
  __ mov(GR_entry_frame_GR5, GR5_poll_page_addr);
  __ mov(GR_entry_frame_GR6, GR6_caller_BSP);
  __ mov(GR_entry_frame_GR7, GR7_reg_stack_limit);

  // We can not tolerate an eager RSE cpu.  Itanium-1 & 2 do not support
  // this feature but we turn it off anyway.
  const Register RSC = GR2_SCRATCH;
  __ mov(RSC, AR_RSC);
  __ and3(RSC, -4, RSC);                                 // Turn off two low bits
  __ mov(AR_RSC, RSC);                                   // enforced lazy mode

  __ shladd(parm_size, parm_count, Interpreter::logStackElementSize(), GR0);
                                                         // size of stack space for the parms
  __ mov(GR_Lsave_RP, RP);                               // save return address
  __ add(parm_size, parm_size, 15);                      // round up to multiple of 16 bytes.  we use
                                                         // caller's 16-byte scratch area for params,
                                                         // so no need to add 16 to the current frame size.
  __ mov(GR_Lsave_LC, AR_LC);                             // save AR_LC
  __ add(out_parms, SP, Interpreter::stackElementSize()); // caller's SP+8 is 1st parm addr == target method locals addr
  __ and3(parm_size, parm_size, -16);
  __ cmp4(PR0, no_args, 0, parm_count, Assembler::less); // any parms?

  __ mov(GR_entry_frame_GR4, GR4_thread);                // save GR4_thread: it's a preserved register
  __ sub(SP, SP, parm_size);                             // allocate the space for args + scratch
  __ mov(entry_br, entry_ptr);

  __ mov(GR27_method, method);                           // load method
  __ mov(GR4_thread, thread);                            // load thread

  if (TaggedStackInterpreter) __ shl(parm_count, parm_count, 1);  // 2x tags
  __ sub(parm_count, parm_count, 1);                     // cloop counts down to zero

  // Initialize the register and memory stack limits for stack checking in compiled code
  __ add(GR7_reg_stack_limit, thread_(register_stack_limit));
  __ mov(GR6_caller_BSP, AR_BSP);                        // load register SP
  __ movl(GR5_poll_page_addr, (intptr_t) os::get_polling_page() );
  __ ld8(GR7_reg_stack_limit, GR7_reg_stack_limit);      // load register stack limit

  Label exit;

  __ mov(AR_LC, parm_count);                             // counted loop over the parameters
  __ mov(out_tos, out_parms);                            // out_tos = &out_parms[0]
  __ br(no_args, exit, Assembler::dpnt);                 // skip the copy loop when no parms

  // Reverse argument list and set up sender tos
  Label copy_word;
  __ bind(copy_word);
  __ ld8(arg, parms, BytesPerWord);                      // load *parms++
  __ st8(out_tos, arg, -BytesPerWord);                   // store *out_tos--
  __ cloop(copy_word, Assembler::sptk, Assembler::few);

  // Bias stack for tags.
  if (TaggedStackInterpreter) __ st8(out_tos, GR0, -BytesPerWord);

  __ bind(exit);
  __ mov(GR_entry_frame_TOS, out_tos);                   // so entry_frame_argument_at can find TOS

  // call interpreter frame manager
  // Remember the senderSP so the interpreter can pop c2i arguments off of
  // the stack when called via a c2i.
  __ mov(GR28_sender_SP, SP);
  __ call(entry_br);

  return_address = __ pc();

  // Store result depending on type.  Everything that is not
  // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
  const PredicateRegister is_obj = PR6_SCRATCH;
  const PredicateRegister is_flt = PR7_SCRATCH;
  const PredicateRegister is_dbl = PR8_SCRATCH;
  const PredicateRegister is_lng = PR9_SCRATCH;

  __ cmp4(is_obj, PR0, T_OBJECT, type, Assembler::equal);
  __ cmp4(is_flt, PR0, T_FLOAT, type, Assembler::equal);

  // Unconditional st4 handles the default T_INT case; the predicated stores
  // below overwrite with the wider/typed value when the type matches.
  __ st4( result, GR_RET);
  __ st8( is_obj, result, GR_RET);
  __ stfs(is_flt, result, FR_RET);

  __ cmp4(is_dbl, PR0, T_DOUBLE, type, Assembler::equal);
  __ stfd(is_dbl, result, FR_RET);

  __ cmp4(is_lng, PR0, T_LONG, type, Assembler::equal);
  __ mov(RP, GR_Lsave_RP);                               // restore return address
  __ st8( is_lng, result, GR_RET);

  // Restore the caller's preserved registers and unwind the frame.
  __ mov(GR4_thread, GR_entry_frame_GR4);
  __ mov(GR6_caller_BSP, GR_entry_frame_GR6);
  __ mov(GR7_reg_stack_limit, GR_entry_frame_GR7);
  __ mov(GR5_poll_page_addr, GR_entry_frame_GR5);

  __ mov(AR_PFS, GR_Lsave_PFS);
  __ mov(AR_LC, GR_Lsave_LC);
  __ mov(SP, GR_Lsave_SP);
  __ ret();

  return start;
}