//---------------------------------------------------------------------------------------------------- // Support for jint Atomic::add(jint inc, volatile jint* dest). // // Arguments: // // inc - GR_I0 (e.g., +1 or -1) // dest - GR_I1 // // Results: // // GR_RET - the new value stored in dest // // address generate_atomic_add() { StubCodeMark mark(this, "StubRoutines", "atomic_add"); const Register inc = GR_I0; const Register dest = GR_I1; address start = __ emit_fd(); __ mf(); // increment or decrement __ cmp4(PR6, PR7, 1, inc, Assembler::equal); __ fetchadd4(PR6, GR_RET, dest, 1, Assembler::acquire); __ fetchadd4(PR7, GR_RET, dest, -1, Assembler::acquire); // GR_RET contains result of the fetch, not the add __ sxt4(GR_RET, GR_RET); __ adds(PR6, GR_RET, 1, GR_RET); __ adds(PR7, GR_RET, -1, GR_RET); __ ret(); return start; }
//---------------------------------------------------------------------------------------------------- // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest) // // Arguments: // // exchange_value - GR_I0 // dest - GR_I1 // // Results: // // GR_RET - the value previously stored in dest // address generate_atomic_xchg() { StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); const Register exchange_value = GR_I0; const Register dest = GR_I1; address start = __ emit_fd(); __ mf(); __ xchg4(GR_RET, dest, exchange_value); __ sxt4(GR_RET, GR_RET); __ ret(); return start; }
//---------------------------------------------------------------------------------------------------- // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) // // Arguments: // // exchange_value - GR_I0 // dest - GR_I1 // compare_value - GR_I2 // // Results: // // GR_RET - the value previously stored in dest // address generate_atomic_cmpxchg() { StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); const Register exchange_value = GR_I0; const Register dest = GR_I1; const Register compare_value = GR_I2; address start = __ emit_fd(); __ mf(); __ zxt4(compare_value, compare_value); __ mov(AR_CCV, compare_value); __ cmpxchg4(GR_RET, dest, exchange_value, Assembler::acquire); __ sxt4(GR_RET, GR_RET); __ ret(); return start; }
// Emit code to pass the previously-recorded Java argument (_prev_sig at
// _prev_jni_offset) to its JNI location: either an outgoing register or a
// slot in the native memory parameter list on the stack.
//
// slot_offset - offset of the NEXT java stack slot; bumped by one word here
//               to address the previous argument's slot.
//
// NOTE(review): GR_I0 appears to hold the base address of the Java argument
// area (loads are at GR_I0 - slot_offset) — confirm against the handler's
// entry convention, which is outside this view.
void InterpreterRuntime::SignatureHandlerGenerator::pass_prev(int slot_offset) {
  Argument jni_arg(_prev_jni_offset);
  Argument::Sig sig = _prev_sig;

  // Nothing recorded yet (first call) — nothing to pass.
  if (sig == Argument::no_sig) {
    return;
  }

  // Step back one word to the previous argument's slot.
  slot_offset += BytesPerWord;

  if (Argument::is_integral(sig)) {
    // Integral argument
    // Load either the output register or a very-local temp from the java stack
    // Bump java stack offset address if requested.
    const Register tmp = jni_arg.is_register() ? jni_arg.as_register() : GR2_SCRATCH;
    if (slot_offset == 0) {
      __ ld8(tmp, GR_I0);
    } else {
      __ ld8(tmp, GR_I0, -slot_offset);
    }

    // 32-bit integral types travel sign-extended in a 64-bit register.
    if (Argument::is_4byte(sig)) {
      __ sxt4(tmp, tmp);
    }

    if (Argument::is_obj(sig)) {
      // Object, box if not null: JNI receives the ADDRESS of the oop slot,
      // or NULL (left in tmp) when the oop itself is null.
      const PredicateRegister box = PR15_SCRATCH;
      __ cmp(box, PR0, 0, tmp, Assembler::notEqual);
      __ add(box, tmp, GR_I0, slot_offset);
    }

    if (!jni_arg.is_register()) {
      // Store into native memory parameter list
      __ add(GR3_SCRATCH, SP, jni_arg.jni_offset_in_frame());
      __ st8(GR3_SCRATCH, tmp);
    }
  } else {
    // Floating point argument
    // Register-passed floats go in consecutive FP input registers
    // (FR_I0 + running float count); FR6 is a scratch fallback and is
    // unused on the memory path below.
    const FloatRegister tmp = jni_arg.is_register() ? as_FloatRegister(FR_I0->encoding() + _prev_float_reg_offset) : FR6;

    if (jni_arg.is_register()) {
      if (Argument::is_4byte(sig)) {
        // Single precision float
        if (slot_offset == 0) {
          __ ldfs(tmp, GR_I0);
        } else {
          __ ldfs(tmp, GR_I0, -slot_offset);
        }
      } else {
        // Double precision float
        if (slot_offset == 0) {
          __ ldfd(tmp, GR_I0);
        } else {
          __ ldfd(tmp, GR_I0, -slot_offset);
        }
      }
    } else {
      // Memory-passed float: copy the raw 64-bit slot through an integer
      // scratch register into the native parameter list — no FP conversion
      // is needed for a bit-exact copy.
      if (slot_offset == 0) {
        __ ld8(GR2_SCRATCH, GR_I0);
      } else {
        __ ld8(GR2_SCRATCH, GR_I0, -slot_offset);
      }
      __ add(GR3_SCRATCH, SP, jni_arg.jni_offset_in_frame());
      __ st8(GR3_SCRATCH, GR2_SCRATCH);
    }
  }
}
//------------------------------------------------------------------------------------------------------------------------
// Call stubs are used to call Java from C
//
//      GR_I0 - call wrapper address     : address
//      GR_I1 - result                   : intptr_t*
//      GR_I2 - result type              : BasicType
//      GR_I3 - method                   : methodOop
//      GR_I4 - interpreter entry point  : address
//      GR_I5 - parameter block          : intptr_t*
//      GR_I6 - parameter count in words : int
//      GR_I7 - thread                   : Thread*
//
address generate_call_stub(address& return_address) {
  StubCodeMark mark(this, "StubRoutines", "call_stub");

  // Incoming arguments (see table above).
  const Register result     = GR_I1;
  const Register type       = GR_I2;
  const Register method     = GR_I3;
  const Register entry_ptr  = GR_I4;
  const Register parms      = GR_I5;
  const Register parm_count = GR_I6;
  const Register thread     = GR_I7;

  // Scratch registers used while copying arguments.
  const Register parm_size  = GR31_SCRATCH;
  const Register entry      = GR30_SCRATCH;
  const Register arg        = GR29_SCRATCH;

  const Register out_tos    = GR49;       // Equivalent of GR_Otos
  const Register out_parms  = GR50;       // Equivalent of GR_Olocals (unused)

  const BranchRegister    entry_br = BR6_SCRATCH;
  const PredicateRegister no_args  = PR6_SCRATCH;

  // Emit the function descriptor; 'start' is the address C callers use.
  address start = __ emit_fd();

  // Must allocate 8 output registers in case we go thru an i2c
  // and the callee needs 8 input registers
  __ alloc(GR_Lsave_PFS, 8, 9, 8, 0);               // save AR_PFS

  __ sxt4(parm_count, parm_count);                  // # of parms
  __ mov(GR_Lsave_SP, SP);                          // save caller's SP

  // Preserve the JVM-global registers we are about to load for Java code.
  __ mov(GR_entry_frame_GR5, GR5_poll_page_addr);
  __ mov(GR_entry_frame_GR6, GR6_caller_BSP);
  __ mov(GR_entry_frame_GR7, GR7_reg_stack_limit);

  // We can not tolerate an eager RSE cpu. Itanium-1 & 2 do not support
  // this feature but we turn it off anyway
  const Register RSC = GR2_SCRATCH;
  __ mov(RSC, AR_RSC);
  __ and3(RSC, -4, RSC);                            // Turn off two low bits
  __ mov(AR_RSC, RSC);                              // enforced lazy mode

  __ shladd(parm_size, parm_count, Interpreter::logStackElementSize(), GR0);
                                                    // size of stack space for the parms
  __ mov(GR_Lsave_RP, RP);                          // save return address

  __ add(parm_size, parm_size, 15);                 // round up to multiple of 16 bytes.  we use
                                                    // caller's 16-byte scratch area for params,
                                                    // so no need to add 16 to the current frame size.
  __ mov(GR_Lsave_LC, AR_LC);                       // save AR_LC

  __ add(out_parms, SP, Interpreter::stackElementSize());
                                                    // caller's SP+8 is 1st parm addr == target method locals addr
  __ and3(parm_size, parm_size, -16);

  __ cmp4(PR0, no_args, 0, parm_count, Assembler::less);  // any parms?

  __ mov(GR_entry_frame_GR4, GR4_thread);           // save GR4_thread: it's a preserved register
  __ sub(SP, SP, parm_size);                        // allocate the space for args + scratch

  __ mov(entry_br, entry_ptr);                      // interpreter entry for the indirect call below

  __ mov(GR27_method, method);                      // load method
  __ mov(GR4_thread, thread);                       // load thread

  // Tagged stacks keep a tag word beside each value, doubling the slot count.
  if (TaggedStackInterpreter) __ shl(parm_count, parm_count, 1);  // 2x tags

  __ sub(parm_count, parm_count, 1);                // cloop counts down to zero

  // Initialize the register and memory stack limits for stack checking in compiled code
  __ add(GR7_reg_stack_limit, thread_(register_stack_limit));
  __ mov(GR6_caller_BSP, AR_BSP);                   // load register SP
  __ movl(GR5_poll_page_addr, (intptr_t) os::get_polling_page() );
  __ ld8(GR7_reg_stack_limit, GR7_reg_stack_limit); // load register stack limit

  Label exit;

  __ mov(AR_LC, parm_count);                        // counted-loop trip count
  __ mov(out_tos, out_parms);                       // out_tos = &out_parms[0]
  __ br(no_args, exit, Assembler::dpnt);            // skip the copy loop when no parms

  // Reverse argument list and set up sender tos
  // parms grows up; the expression stack grows down, so walk out_tos downward.
  Label copy_word;
  __ bind(copy_word);

  __ ld8(arg, parms, BytesPerWord);                 // load *parms++
  __ st8(out_tos, arg, -BytesPerWord);              // store *out_tos--
  __ cloop(copy_word, Assembler::sptk, Assembler::few);

  // Bias stack for tags.
  if (TaggedStackInterpreter) __ st8(out_tos, GR0, -BytesPerWord);

  __ bind(exit);
  __ mov(GR_entry_frame_TOS, out_tos);              // so entry_frame_argument_at can find TOS

  // call interpreter frame manager
  // Remember the senderSP so we interpreter can pop c2i arguments off of the stack
  // when called via a c2i.
  __ mov(GR28_sender_SP, SP);
  __ call(entry_br);

  // Java may resume here after deopt/exception; record it for the runtime.
  return_address = __ pc();

  // Store result depending on type.  Everything that is not
  // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
  // The unconditional st4 stores the int view first; a predicated wider
  // store then overwrites it for the 8-byte/FP cases.
  const PredicateRegister is_obj = PR6_SCRATCH;
  const PredicateRegister is_flt = PR7_SCRATCH;
  const PredicateRegister is_dbl = PR8_SCRATCH;
  const PredicateRegister is_lng = PR9_SCRATCH;

  __ cmp4(is_obj, PR0, T_OBJECT, type, Assembler::equal);
  __ cmp4(is_flt, PR0, T_FLOAT,  type, Assembler::equal);

  __ st4( result, GR_RET);                          // default: T_INT
  __ st8( is_obj, result, GR_RET);
  __ stfs(is_flt, result, FR_RET);

  __ cmp4(is_dbl, PR0, T_DOUBLE, type, Assembler::equal);
  __ stfd(is_dbl, result, FR_RET);

  __ cmp4(is_lng, PR0, T_LONG, type, Assembler::equal);
  __ mov(RP, GR_Lsave_RP);                          // restore return address
  __ st8( is_lng, result, GR_RET);

  // Restore the caller's preserved registers and frame state.
  __ mov(GR4_thread, GR_entry_frame_GR4);
  __ mov(GR6_caller_BSP, GR_entry_frame_GR6);
  __ mov(GR7_reg_stack_limit, GR_entry_frame_GR7);
  __ mov(GR5_poll_page_addr, GR_entry_frame_GR5);

  __ mov(AR_PFS, GR_Lsave_PFS);
  __ mov(AR_LC, GR_Lsave_LC);
  __ mov(SP, GR_Lsave_SP);

  __ ret();

  return start;
}