address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // rmethod: Method*
  // r13: sender sp
  // esp: args

  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  // These don't need a safepoint check because they aren't virtually
  // callable. We won't enter these intrinsics from compiled code.
  // If in the future we added an intrinsic which was virtually callable
  // we'd have to worry about how to safepoint so that this code is used.

  // mathematical functions inlined by compiler
  // (interpreter must provide identical implementation
  // in order to avoid monotonicity bugs when switching
  // from interpreter to compiler in the middle of some
  // computation)
  //
  // stack:
  //        [ arg ] <-- esp
  //        [ arg ]
  // retaddr in lr

  address entry_point = NULL;
  Register continuation = lr;
  switch (kind) {
  case Interpreter::java_lang_math_abs:
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ fabsd(v0, v0);
    __ mov(sp, r13); // Restore caller's SP
    break;
  case Interpreter::java_lang_math_sqrt:
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ fsqrtd(v0, v0);
    __ mov(sp, r13);
    break;
  case Interpreter::java_lang_math_sin :
  case Interpreter::java_lang_math_cos :
  case Interpreter::java_lang_math_tan :
  case Interpreter::java_lang_math_log :
  case Interpreter::java_lang_math_log10 :
  case Interpreter::java_lang_math_exp :
    entry_point = __ pc();
    __ ldrd(v0, Address(esp));
    __ mov(sp, r13);
    __ mov(r19, lr);
    continuation = r19;  // The first callee-saved register
    generate_transcendental_entry(kind, 1);
    break;
  case Interpreter::java_lang_math_pow :
    entry_point = __ pc();
    __ mov(r19, lr);
    continuation = r19;
    __ ldrd(v0, Address(esp, 2 * Interpreter::stackElementSize));
    __ ldrd(v1, Address(esp));
    __ mov(sp, r13);
    generate_transcendental_entry(kind, 2);
    break;
  default:
    ;
  }
  if (entry_point) {
    __ br(continuation);
  }

  return entry_point;
}
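// For context, the transcendental cases above hand off to a companion
// routine that calls into the shared runtime math stubs. A minimal sketch
// of that routine, assuming SharedRuntime exports dsin/dcos/dtan/dlog/
// dlog10/dexp/dpow entry points (an assumption; the real routine may
// differ in how it performs the runtime call):

void InterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
  address fn;
  switch (kind) {
  case Interpreter::java_lang_math_sin :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
    break;
  case Interpreter::java_lang_math_cos :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
    break;
  case Interpreter::java_lang_math_tan :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
    break;
  case Interpreter::java_lang_math_log :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
    break;
  case Interpreter::java_lang_math_log10 :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
    break;
  case Interpreter::java_lang_math_exp :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
    break;
  case Interpreter::java_lang_math_pow :
    fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
    break;
  default:
    ShouldNotReachHere();
    fn = NULL;  // unreachable
  }
  // The argument(s) are already in v0 (and v1 for pow), and the caller's
  // LR has been preserved in r19, so a plain indirect call suffices here.
  __ mov(rscratch1, fn);
  __ blr(rscratch1);
}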
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andr(esp, esp, -16);
  __ mov(c_rarg3, esp);
  // rmethod
  // rlocals
  // c_rarg3: first stack arg - wordSize

  // adjust sp
  __ sub(sp, c_rarg3, 18 * wordSize);
  __ str(lr, Address(__ pre(sp, -2 * wordSize)));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rmethod, rlocals, c_rarg3);

  // r0: result handler

  // Stack layout:
  // rsp: return address           <- sp
  //      1 garbage
  //      8 integer args (if static first is unused)
  //      1 float/double identifiers
  //      8 double args
  //        stack args              <- esp
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Restore LR
  __ ldr(lr, Address(__ post(sp, 2 * wordSize)));

  // Do FP first so we can use c_rarg3 as temp
  __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = as_FloatRegister(i);

    Label d, done;

    __ tbnz(c_rarg3, i, d);
    __ ldrs(r, Address(sp, (10 + i) * wordSize));
    __ b(done);
    __ bind(d);
    __ ldrd(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here. It will be loaded with the JNIEnv* later.
  __ ldr(c_rarg1, Address(sp, 1 * wordSize));
  for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
    Register rm = as_Register(i), rn = as_Register(i+1);
    __ ldp(rm, rn, Address(sp, i * wordSize));
  }

  __ add(sp, sp, 18 * wordSize);
  __ ret(lr);

  return entry;
}
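// A host-side sketch (not VM code) of the tag-word convention the FP loop
// above decodes: bit i of the identifier word set means FP argument i is a
// double occupying its full 64-bit slot, bit clear means the low 32 bits of
// the slot hold a float. The names below are illustrative only; the return
// value is widened to double solely so one function can return either kind.

#include <cstdint>
#include <cstring>

static double load_fp_arg(const uint64_t* slots, uint32_t fp_tags, int i) {
  if (fp_tags & (1u << i)) {   // bit set: slot holds a full 64-bit double
    double d;
    std::memcpy(&d, &slots[i], sizeof d);
    return d;
  } else {                     // bit clear: low 32 bits hold a float
    float f;
    std::memcpy(&f, &slots[i], sizeof f);
    return (double)f;
  }
}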
void MacroAssembler::atomic_cas64(Register memval_lo, Register memval_hi,
                                  Register result,
                                  Register oldval_lo, Register oldval_hi,
                                  Register newval_lo, Register newval_hi,
                                  Register base, int offset) {
  if (VM_Version::supports_ldrexd()) {
    Label loop;
    assert_different_registers(memval_lo, memval_hi, result, oldval_lo, oldval_hi,
                               newval_lo, newval_hi, base);
    assert(memval_hi == memval_lo + 1 && memval_lo < R9, "cmpxchg_long: illegal registers");
    assert(oldval_hi == oldval_lo + 1 && oldval_lo < R9, "cmpxchg_long: illegal registers");
    assert(newval_hi == newval_lo + 1 && newval_lo < R9, "cmpxchg_long: illegal registers");
    assert(result != R10, "cmpxchg_long: illegal registers");
    assert(base != R10, "cmpxchg_long: illegal registers");

    mov(result, 0);
    bind(loop);
    ldrexd(memval_lo, Address(base, offset));
    cmp(memval_lo, oldval_lo);
    cmp(memval_hi, oldval_hi, eq);
    strexd(result, newval_lo, Address(base, offset), eq);
    rsbs(result, result, 1, eq);
    b(loop, eq);
  } else if (VM_Version::supports_kuser_cmpxchg64()) {
    // On armv5 platforms we must use the Linux kernel helper
    // function for atomic cas64 operations since ldrexd/strexd is
    // not supported.
    //
    // This is a special routine at a fixed address 0xffff0f60
    //
    // input:
    //    r0 = (long long *)oldval, r1 = (long long *)newval,
    //    r2 = ptr, lr = return address
    // output:
    //    r0 = 0 carry set on success
    //    r0 != 0 carry clear on failure
    //
    // r3, and flags are clobbered
    //
    Label done;
    Label loop;

    if (result != R12) {
      push(R12);
    }
    push(RegisterSet(R10) | RegisterSet(LR));
    mov(R10, SP);                            // Save SP
    bic(SP, SP, StackAlignmentInBytes - 1);  // align stack
    push(RegisterSet(oldval_lo, oldval_hi));
    push(RegisterSet(newval_lo, newval_hi));

    if ((offset != 0) || (base != R12)) {
      add(R12, base, offset);
    }
    push(RegisterSet(R0, R3));
    bind(loop);
    ldrd(memval_lo, Address(R12));           // current
    ldrd(oldval_lo, Address(SP, 24));
    cmp(memval_lo, oldval_lo);
    cmp(memval_hi, oldval_hi, eq);
    pop(RegisterSet(R0, R3), ne);
    mov(result, 0, ne);
    b(done, ne);
    // Setup for kernel call
    mov(R2, R12);
    add(R0, SP, 24);            // R0 == &oldval_lo
    add(R1, SP, 16);            // R1 == &newval_lo
    mvn(R3, 0xf000);            // call kernel helper at 0xffff0f60
    mov(LR, PC);
    sub(PC, R3, 0x9f);
    b(loop, cc);                // if Carry clear then oldval != current
                                // try again. Otherwise, return oldval
    // Here on success
    pop(RegisterSet(R0, R3));
    mov(result, 1);
    ldrd(memval_lo, Address(SP, 8));
    bind(done);
    pop(RegisterSet(newval_lo, newval_hi));
    pop(RegisterSet(oldval_lo, oldval_hi));
    mov(SP, R10);               // restore SP
    pop(RegisterSet(R10) | RegisterSet(LR));
    if (result != R12) {
      pop(R12);
    }
  } else {
    stop("Atomic cmpxchg64 unsupported on this platform");
  }
}
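// A host-side sketch (not VM code) of the kernel user helper the armv5 path
// above calls, following the calling convention documented in the Linux
// source tree under Documentation/arm/kernel_user_helpers.txt; the wrapper
// function and its name are illustrative only.

#include <stdint.h>

typedef int (__kuser_cmpxchg64_t)(const int64_t* oldval,
                                  const int64_t* newval,
                                  volatile int64_t* ptr);
#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t*)0xffff0f60)

static bool cas64_via_kernel(volatile int64_t* ptr,
                             int64_t expected, int64_t desired) {
  // The helper returns 0 and sets the C flag on success; the generated
  // code above loops on the C flag rather than on the return value.
  return __kuser_cmpxchg64(&expected, &desired, ptr) == 0;
}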