void generate_all() {
  // Generates all stubs and initializes the entry points

  // These entry points require SharedInfo::stack0 to be set up in
  // non-core builds and need to be relocatable, so they each
  // fabricate a RuntimeStub internally.
  StubRoutines::_throw_AbstractMethodError_entry          = ShouldNotCallThisStub();
  StubRoutines::_throw_NullPointerException_at_call_entry = ShouldNotCallThisStub();
  StubRoutines::_throw_StackOverflowError_entry           = ShouldNotCallThisStub();

  // support for verify_oop (must happen after universe_init)
  StubRoutines::_verify_oop_subroutine_entry = ShouldNotCallThisStub();

  // arraycopy stubs used by compilers
  generate_arraycopy_stubs();

  // Safefetch stubs.
  pthread_key_create(&g_jmpbuf_key, NULL);
  StubRoutines::_safefetch32_entry           = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetch32);
  StubRoutines::_safefetch32_fault_pc        = NULL;
  StubRoutines::_safefetch32_continuation_pc = NULL;
  StubRoutines::_safefetchN_entry            = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetchN);
  StubRoutines::_safefetchN_fault_pc         = NULL;
  StubRoutines::_safefetchN_continuation_pc  = NULL;
}
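// A minimal, self-contained sketch (not HotSpot code) of the SafeFetch idea
// registered above, assuming POSIX signals: each thread parks a jmp_buf under
// the pthread key; a fault inside the guarded load longjmps back so the caller
// gets `errval` instead of a crash. All names here (safe_fetch_init,
// safe_fetch32, g_key) are hypothetical.
#include <setjmp.h>
#include <signal.h>
#include <stdint.h>
#include <pthread.h>

static pthread_key_t g_key;

static void fault_handler(int sig) {
  sigjmp_buf* jb = (sigjmp_buf*) pthread_getspecific(g_key);
  if (jb != NULL) siglongjmp(*jb, 1);  // resume inside safe_fetch32
  signal(sig, SIG_DFL);                // not ours: re-raise with default action
  raise(sig);
}

void safe_fetch_init() {
  pthread_key_create(&g_key, NULL);
  struct sigaction sa;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  sa.sa_handler = fault_handler;
  sigaction(SIGSEGV, &sa, NULL);
  sigaction(SIGBUS,  &sa, NULL);
}

int32_t safe_fetch32(const int32_t* adr, int32_t errval) {
  sigjmp_buf jb;
  if (sigsetjmp(jb, 1) != 0) {         // fault path: the handler jumped here
    pthread_setspecific(g_key, NULL);
    return errval;
  }
  pthread_setspecific(g_key, &jb);
  int32_t v = *adr;                    // the potentially faulting load
  pthread_setspecific(g_key, NULL);
  return v;
}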
static void save_signal(int idx, int sig) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);
  resettedSigflags[idx]   = sa.sa_flags;
  resettedSighandler[idx] = (sa.sa_flags & SA_SIGINFO)
                              ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
                              : CAST_FROM_FN_PTR(address, sa.sa_handler);
}
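// A runnable sketch (not HotSpot code) of the query pattern save_signal()
// relies on: passing NULL as the new action makes sigaction() read-only, and
// SA_SIGINFO decides which member of the handler union is the live one.
#include <signal.h>
#include <stdio.h>

int main() {
  struct sigaction sa;
  if (sigaction(SIGINT, NULL, &sa) == 0) {
    void* handler = (sa.sa_flags & SA_SIGINFO)
                      ? (void*) sa.sa_sigaction   // 3-argument form
                      : (void*) sa.sa_handler;    // classic 1-argument form
    printf("SIGINT: flags=0x%x handler=%p\n", (unsigned) sa.sa_flags, handler);
  }
  return 0;
}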
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
        case vmIntrinsics::_dsqrt: {
          __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_dabs: {
          __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        default:
          ShouldNotReachHere();
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dcos: {
      assert(x->number_of_arguments() == 1, "wrong type");
      address runtime_entry = NULL;
      switch (x->id()) {
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        default:
          ShouldNotReachHere();
      }
      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint *fingerprint) {
  return AdapterHandlerLibrary::new_entry(fingerprint,
                                          CAST_FROM_FN_PTR(address, zero_null_code_stub),
                                          CAST_FROM_FN_PTR(address, zero_null_code_stub),
                                          CAST_FROM_FN_PTR(address, zero_null_code_stub));
}
// for _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  switch (x->op()) {
    case Bytecodes::_lrem:
    case Bytecodes::_lmul:
    case Bytecodes::_ldiv: {
      if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
        LIRItem right(x->y(), this);
        right.load_item();

        CodeEmitInfo* info = state_for(x);
        LIR_Opr item = right.result();
        assert(item->is_register(), "must be");
        __ cmp(lir_cond_equal, item, LIR_OprFact::longConst(0));
        __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
      }

      address entry;
      switch (x->op()) {
        case Bytecodes::_lrem:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
          break; // the divisor-is-zero check is done above
        case Bytecodes::_ldiv:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
          break; // the divisor-is-zero check is done above
        case Bytecodes::_lmul:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
          break;
        default:
          ShouldNotReachHere();
      }

      // order of arguments to runtime call is reversed.
      LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case Bytecodes::_ladd:
    case Bytecodes::_lsub: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);

      arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
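// A hedged sketch (not HotSpot code) of what SharedRuntime::ldiv and ::lrem
// must compute once the zero check above has passed. The one case that plain
// C++ '/' leaves undefined is INT64_MIN / -1, which Java defines as INT64_MIN
// (and INT64_MIN % -1 as 0).
#include <cstdint>

int64_t java_ldiv(int64_t dividend, int64_t divisor) {
  if (dividend == INT64_MIN && divisor == -1) return dividend;  // overflow case
  return dividend / divisor;
}

int64_t java_lrem(int64_t dividend, int64_t divisor) {
  if (dividend == INT64_MIN && divisor == -1) return 0;
  return dividend % divisor;
}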
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // rsi: senderSP must be preserved for slow path, set SP to it on fast path
    // rdx: scratch
    // rdi: scratch

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;  // crc
    const Register buf = rdx;  // source java byte array address
    const Register len = rdi;  // length

    // value                                                      x86_32
    // interp. arg ptr                                            ESP + 4
    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
    //                                         3         2        1        0
    // int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
    //                                              4       2,3      1        0

    // Arguments are reversed on java expression stack
    __ movl(len, Address(rsp, 4 + 0)); // Length
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long buf
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 4 * wordSize));   // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc, Address(rsp, 4 + 3 * wordSize));   // Initial CRC
    }

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, rsi); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
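// A reference sketch (not HotSpot code) of the function the updateBytesCRC32
// stub accelerates: java.util.zip.CRC32 is the standard reflected CRC-32
// with polynomial 0xEDB88320. A table-free, bitwise version:
#include <cstddef>
#include <cstdint>

uint32_t crc32_update(uint32_t crc, const uint8_t* buf, size_t len) {
  crc = ~crc;                                   // work in the inverted form
  for (size_t i = 0; i < len; i++) {
    crc ^= buf[i];
    for (int k = 0; k < 8; k++) {
      crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
    }
  }
  return ~crc;
}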
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles) return;

  BLOCK_COMMENT("trace_method_handle {");

  int nbytes_save = 10 * 8;             // 10 volatile gprs
  __ save_LR_CR(R0);
  __ mr(R0, R1_SP);                     // saved_sp
  assert(Assembler::is_simm(-nbytes_save, 16), "Overwriting R0");
  // push_frame_reg_args only uses R0 if nbytes_save is wider than 16 bits.
  __ push_frame_reg_args(nbytes_save, R0);
  __ save_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0.

  __ load_const(R3_ARG1, (address)adaptername);
  __ mr(R4_ARG2, R23_method_handle);
  __ mr(R5_ARG3, R0);                   // saved_sp
  __ mr(R6_ARG4, R1_SP);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ restore_volatile_gprs(R1_SP, frame::abi_reg_args_size); // Except R0.
  __ pop_frame();
  __ restore_LR_CR(R0);

  BLOCK_COMMENT("} trace_method_handle");
}
void MacroAssembler::breakpoint(AsmCondition cond) {
  if (cond == al) {
    emit_int32(0xe7f001f0);  // undefined instruction used as a breakpoint trap
  } else {
    call(CAST_FROM_FN_PTR(address, os::breakpoint), relocInfo::runtime_call_type, cond);
  }
}
//------------------------------profile_virtual_call---------------------------
void Parse::profile_virtual_call(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) return;

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_VirtualCallData(), "need VirtualCallData at call site");
  ciVirtualCallData* call_data = (ciVirtualCallData*)data->as_VirtualCallData();

  Node* method_data = method_data_addressing(md, call_data, in_ByteSize(0));

  // The following construction of the CallLeafNode is almost identical to
  // make_slow_call(). However, with make_slow_call(), the merge mem
  // characteristics were causing incorrect anti-deps to be added.
  CallRuntimeNode* call =
      new CallLeafNode(OptoRuntime::profile_virtual_call_Type(),
                       CAST_FROM_FN_PTR(address, OptoRuntime::profile_virtual_call_C),
                       "profile_virtual_call_C");
  set_predefined_input_for_runtime_call(call);
  call->set_req(TypeFunc::Parms + 0, method_data);
  call->set_req(TypeFunc::Parms + 1, receiver);
  Node* c = _gvn.transform(call);
  set_predefined_output_for_runtime_call(c);
}
static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
  // slow call to thr_getspecific
  // int thr_getspecific(thread_key_t key, void** value);
  // Consider using pthread_getspecific instead.
  __ push(0);                      // allocate space for return value
  if (thread != rax) __ push(rax); // save rax, if caller still wants it
  __ push(rcx);                    // save caller save
  __ push(rdx);                    // save caller save
  if (thread != rax) {
    __ lea(thread, Address(rsp, 3 * sizeof(int))); // address of return value
  } else {
    __ lea(thread, Address(rsp, 2 * sizeof(int))); // address of return value
  }
  __ push(thread);                 // and pass the address
  __ push(ThreadLocalStorage::thread_index()); // the key
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
  __ increment(rsp, 2 * wordSize);
  __ pop(rdx);
  __ pop(rcx);
  if (thread != rax) __ pop(rax);
  __ pop(thread);
}
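// A runnable sketch (not HotSpot code) of the pthread_getspecific alternative
// the comment above suggests, in plain C++:
#include <pthread.h>
#include <cstdio>

static pthread_key_t thread_key;

int main() {
  pthread_key_create(&thread_key, NULL);
  int marker = 42;
  pthread_setspecific(thread_key, &marker);       // store this thread's value
  void* value = pthread_getspecific(thread_key);  // what the stub fetches
  std::printf("thread-specific value: %p\n", value);
  return 0;
}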
void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::O0_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::O0_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, rlock_callee_saved(T_INT), LIR_OprFact::illegalOpr, args);
}
//-----------------------------profile_receiver_type---------------------------
void Parse::profile_receiver_type(Node* receiver) {
  assert(method_data_update(), "must be generating profile code");

  ciMethodData* md = method()->method_data();
  assert(md != NULL, "expected valid ciMethodData");
  ciProfileData* data = md->bci_to_data(bci());
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData here");

  // Skip if we aren't tracking receivers
  if (TypeProfileWidth < 1) {
    increment_md_counter_at(md, data, CounterData::count_offset());
    return;
  }
  ciReceiverTypeData* rdata = (ciReceiverTypeData*)data->as_ReceiverTypeData();

  Node* method_data = method_data_addressing(md, rdata, in_ByteSize(0));

  // Using an adr_type of TypePtr::BOTTOM to work around anti-dep problems.
  // A better solution might be to use TypeRawPtr::BOTTOM with RC_NARROW_MEM.
  make_runtime_call(RC_LEAF, OptoRuntime::profile_receiver_type_Type(),
                    CAST_FROM_FN_PTR(address, OptoRuntime::profile_receiver_type_C),
                    "profile_receiver_type_C",
                    TypePtr::BOTTOM,
                    method_data, receiver);
}
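// A hedged sketch (not HotSpot code) of the kind of row update that
// profile_receiver_type_C plausibly performs on the method-data cell passed
// above: bump a matching (klass, count) row, claim an empty one, or count the
// overflow. Row layout and all names here are hypothetical.
#include <cstddef>
#include <cstdint>

struct ReceiverRow { const void* klass; uint64_t count; };

void profile_receiver(ReceiverRow* rows, size_t type_profile_width,
                      uint64_t* polymorphic_count, const void* receiver_klass) {
  for (size_t i = 0; i < type_profile_width; i++) {
    if (rows[i].klass == receiver_klass) { rows[i].count++; return; }  // hit
  }
  for (size_t i = 0; i < type_profile_width; i++) {
    if (rows[i].klass == NULL) {        // claim a free row
      rows[i].klass = receiver_klass;
      rows[i].count = 1;
      return;
    }
  }
  (*polymorphic_count)++;               // table full: megamorphic call site
}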
//----------------------------------------------------------------------------------------------------
// Non-destructive plausibility checks for oops
//
// Arguments:
//   GR_I0 - oop to verify
//
address generate_verify_oop() {
  StubCodeMark mark(this, "StubRoutines", "verify_oop");
  address start = CAST_FROM_FN_PTR(address, ia_64_verify_oop);
  return start;
}
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame
  // corresponding to the outer interpreter frame.
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();

  // load the register arguments (the C code packed them as varargs)
  for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
    __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
  }
  __ ret();
  __ delayed()->restore(O0, 0, Lscratch); // caller's Lscratch gets the result handler
  return entry;
}
void InterpreterGenerator::generate_transcendental_entry(AbstractInterpreter::MethodKind kind, int fpargs) {
  address fn;
  switch (kind) {
    case Interpreter::java_lang_math_sin:
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
      break;
    case Interpreter::java_lang_math_cos:
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
      break;
    case Interpreter::java_lang_math_tan:
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
      break;
    case Interpreter::java_lang_math_log:
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
      break;
    case Interpreter::java_lang_math_log10:
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
      break;
    case Interpreter::java_lang_math_exp:
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
      break;
    case Interpreter::java_lang_math_pow:
      fpargs = 2;
      fn = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      break;
    default:
      ShouldNotReachHere();
      fn = NULL; // unreachable; silences uninitialized-variable warnings
  }
  const int gpargs = 0, rtype = 3;
  __ mov(rscratch1, fn);
  __ blrt(rscratch1, gpargs, fpargs, rtype);
}
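// The switch above is a MethodKind -> entry-point map; the SharedRuntime::d*
// entries ultimately compute the corresponding libm functions. A plain-C++
// rendering of the same dispatch (sketch, not HotSpot code):
#include <math.h>

enum MathKind { kSin, kCos, kTan, kLog, kLog10, kExp };

typedef double (*UnaryFn)(double);

UnaryFn entry_for(MathKind kind) {
  switch (kind) {
    case kSin:   return sin;
    case kCos:   return cos;
    case kTan:   return tan;
    case kLog:   return log;
    case kLog10: return log10;
    case kExp:   return exp;
  }
  return NULL; // unreachable with a valid kind
}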
void ICache::initialize() {
  ResourceMark rm;

  // Making this stub must be the FIRST use of the assembler.
  CodeBuffer* c = new CodeBuffer(address(stubCode), sizeof(stubCode));
  ICacheStubGenerator g(c);
  flush_icache_stub = CAST_TO_FN_PTR(_flush_icache_stub_t, g.generate_icache_flush());

  // The first use of flush_icache_stub must apply it to itself:
  ICache::invalidate_range(CAST_FROM_FN_PTR(address, flush_icache_stub), c->code_size());
}
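// A portable analogue (sketch, not HotSpot code) of the flush being
// bootstrapped here: GCC/Clang expose __builtin___clear_cache to synchronize
// the instruction cache with freshly written code over a byte range (a no-op
// on x86, mandatory on ARM/PPC).
#include <cstddef>

void invalidate_range(void* start, size_t nbytes) {
  char* p = static_cast<char*>(start);
  __builtin___clear_cache(p, p + nbytes);
}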
bool CppInterpreter::contains(address pc) {
#ifdef PPC
  return pc == CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation) ||
         _code->contains(pc);
#else
  Unimplemented();
  return false; // unreachable: Unimplemented() does not return
#endif // PPC
}
void MacroAssembler::int3() {
  push(rax);
  push(rdx);
  push(rcx);
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
  pop(rcx);
  pop(rdx);
  pop(rax);
}
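// A hedged sketch (not HotSpot code) of what os::breakpoint() bottoms out in;
// the push/pop pairs above exist so generated code can call it without
// clobbering caller-saved registers.
inline void debug_break() {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ volatile("int3"); // same trap an emitted 0xCC byte produces
#else
  __builtin_trap();         // portable fallback
#endif
}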
// Abstract method entry.
// Attempts to execute an abstract method; throws AbstractMethodError.
//
address InterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();

  // abstract method entry
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry;
}
static address lookup_special_native(char* jni_name) {
  int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod);
  for (int i = 0; i < count; i++) {
    // NB: To ignore the jni prefix and jni postfix, strstr is used for matching.
    if (strstr(jni_name, lookup_special_native_methods[i].name) != NULL) {
      return CAST_FROM_FN_PTR(address, lookup_special_native_methods[i].fnPtr);
    }
  }
  return NULL;
}
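// A self-contained sketch (not HotSpot code) of the shape of the table this
// lookup walks. The strstr match is what lets a decorated symbol still hit
// the undecorated table entry. The local struct mirrors JNINativeMethod's
// layout; table contents and all names here are hypothetical.
#include <cstring>

struct NativeMethodEntry {
  const char* name;
  const char* signature;
  void*       fnPtr;
};

static void demo_impl() {}

static NativeMethodEntry special_native_methods[] = {
  { "Java_demo_Hook_ping", "()V", (void*) &demo_impl },
};

void* lookup(const char* jni_name) {
  const int count = sizeof(special_native_methods) / sizeof(special_native_methods[0]);
  for (int i = 0; i < count; i++) {
    if (std::strstr(jni_name, special_native_methods[i].name) != NULL) {
      return special_native_methods[i].fnPtr;
    }
  }
  return NULL;
}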
// Abstract method entry.
//
address InterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();

  //
  // Registers alive
  //   R16_thread - JavaThread*
  //   R19_method - callee's method (method to be invoked)
  //   R1_SP      - SP prepared such that caller's outgoing args are near top
  //   LR         - return address to caller
  //
  // Stack layout at this point:
  //
  //   0       [TOP_IJAVA_FRAME_ABI]         <-- R1_SP
  //           alignment (optional)
  //           [outgoing Java arguments]
  //           ...
  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
  //           ...
  //

  // Can't use call_VM here because we have not set up a new
  // interpreter state. Make the call to the vm and make it look like
  // our caller set up the JavaFrameAnchor.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R12_scratch2 /*tmp*/);

  // Push a new C frame and save LR.
  __ save_LR_CR(R0);
  __ push_frame_reg_args(0, R11_scratch1);

  // This is not a leaf but we have a JavaFrameAnchor now and we will
  // check (create) exceptions afterward so this is ok.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError), R16_thread);

  // Pop the C frame and restore LR.
  __ pop_frame();
  __ restore_LR_CR(R0);

  // Reset JavaFrameAnchor from call_VM_leaf above.
  __ reset_last_Java_frame();

#ifdef CC_INTERP
  // Return to frame manager, it will handle the pending exception.
  __ blr();
#else
  // We don't know our caller, so jump to the general forward exception stub,
  // which will also pop our full frame off. Satisfy the interface of
  // SharedRuntime::generate_forward_exception().
  __ load_const_optimized(R11_scratch1, StubRoutines::forward_exception_entry(), R0);
  __ mtctr(R11_scratch1);
  __ bctr();
#endif

  return entry;
}
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP must be preserved for slow path, set SP to it on fast path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0; // crc
    const Register buf = c_rarg1; // source java byte array address
    const Register len = c_rarg2; // length
    const Register off = len;     // offset; reuses 'len' (their live ranges never overlap)

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3 * wordSize));   // long buf
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off);                            // + offset
      __ movl(crc, Address(rsp, 5 * wordSize));     // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3 * wordSize));   // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2 * wordSize)); // offset
      __ addq(buf, off);                            // + offset
      __ movl(crc, Address(rsp, 4 * wordSize));     // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ movl(len, Address(rsp, wordSize)); // Length

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);      // get return address
    __ mov(rsp, r13); // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
frame os::current_frame() {
  intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
  frame myframe(sp, frame::unpatchable,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame(NULL, frame::unpatchable, NULL);
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // rbx: method
  // rcx: temporary
  // rdi: pointer to locals
  // rsp: end of copied parameters area
  __ mov(rcx, rsp);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx);
  __ ret(0);
  return entry;
}
/**
 * Special native methods: five table entries apply before JDK 1.4, three afterwards.
 */
static address lookup_special_native(char* jni_name) {
  int i = !JDK_Version::is_gte_jdk14x_version() ? 0 : 2; // see comment in lookup_special_native_methods
  int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod);
  for (; i < count; i++) {
    // NB: To ignore the jni prefix and jni postfix, strstr is used for matching.
    if (strstr(jni_name, lookup_special_native_methods[i].name) != NULL) {
      return CAST_FROM_FN_PTR(address, lookup_special_native_methods[i].fnPtr);
    }
  }
  return NULL;
}
address generate_forward_exception() {
  StubCodeMark mark(this, "StubRoutines", "forward exception");
  address start = __ pc();

  // Upon entry, the sp points to the return address returning into Java
  // (interpreted or compiled) code; i.e., the return address becomes the
  // throwing pc.
  //
  // Arguments pushed before the runtime call are still on the stack but
  // the exception handler will reset the stack pointer -> ignore them.
  // A potential result in registers can be ignored as well.

#ifdef ASSERT
  // make sure this code is only executed if there is a pending exception
  {
    Label L;
    __ get_thread(ecx);
    __ cmpl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);
    __ jcc(Assembler::notEqual, L);
    __ stop("StubRoutines::forward exception: no pending exception (1)");
    __ bind(L);
  }
#endif

  // compute exception handler into ebx
  __ movl(eax, Address(esp));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), eax);
  __ movl(ebx, eax);

  // setup eax & edx, remove return address & clear pending exception
  __ get_thread(ecx);
  __ popl(edx);
  __ movl(eax, Address(ecx, Thread::pending_exception_offset()));
  __ movl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);

#ifdef ASSERT
  // make sure exception is set
  {
    Label L;
    __ testl(eax, eax);
    __ jcc(Assembler::notEqual, L);
    __ stop("StubRoutines::forward exception: no pending exception (2)");
    __ bind(L);
  }
#endif

  // continue at exception handler (return address removed)
  // eax: exception
  // ebx: exception handler
  // edx: throwing pc
  __ verify_oop(eax);
  __ jmp(ebx);

  return start;
}
frame os::current_frame() {
  intptr_t* fp = _get_previous_fp();
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}
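// A hedged sketch (not HotSpot code) of the frame-pointer walk behind
// current_frame()/get_sender_for_C_frame(), assuming the conventional x86
// layout (saved FP at [fp], return address one word above) and code compiled
// with frame pointers (-fno-omit-frame-pointer).
#include <cstdio>
#include <cstdint>

struct CFrame { intptr_t* fp; void* pc; };

CFrame sender_of(const CFrame& f) {
  CFrame s;
  s.fp = (intptr_t*) f.fp[0]; // caller's saved frame pointer
  s.pc = (void*) f.fp[1];     // return address into the caller
  return s;
}

int main() {
  CFrame me;
  me.fp = (intptr_t*) __builtin_frame_address(0);
  me.pc = (void*) &sender_of; // placeholder pc, as in the snippet above
  CFrame caller = sender_of(me);
  std::printf("caller fp=%p pc=%p\n", (void*) caller.fp, caller.pc);
  return 0;
}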
// for _fadd, _fmul, _fsub, _fdiv, _frem
//     _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  switch (x->op()) {
    case Bytecodes::_fadd:
    case Bytecodes::_fmul:
    case Bytecodes::_fsub:
    case Bytecodes::_fdiv:
    case Bytecodes::_dadd:
    case Bytecodes::_dmul:
    case Bytecodes::_dsub:
    case Bytecodes::_ddiv: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
      break;
    }
    case Bytecodes::_frem:
    case Bytecodes::_drem: {
      address entry;
      switch (x->op()) {
        case Bytecodes::_frem:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
          break;
        case Bytecodes::_drem:
          entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
          break;
        default:
          ShouldNotReachHere();
      }
      LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
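// A hedged sketch (not HotSpot code) of what the frem/drem runtime entries
// compute: Java's floating-point % has C fmod semantics (the result takes the
// sign of the dividend), not IEEE-754 remainder().
#include <cmath>

float  java_frem(float  x, float  y) { return std::fmod(x, y); }
double java_drem(double x, double y) { return std::fmod(x, y); }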
//------------------------------make_dtrace_method_entry_exit ----------------
// Dtrace -- record entry or exit of a method if compiled with dtrace support
void GraphKit::make_dtrace_method_entry_exit(ciMethod* method, bool is_entry) {
  const TypeFunc* call_type = OptoRuntime::dtrace_method_entry_exit_Type();
  address call_address = is_entry
      ? CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry)
      : CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit);
  const char* call_name = is_entry ? "dtrace_method_entry" : "dtrace_method_exit";

  // Get base of thread-local storage area
  Node* thread = _gvn.transform(new (C) ThreadLocalNode());

  // Get method
  const TypePtr* method_type = TypeMetadataPtr::make(method);
  Node* method_node = _gvn.transform(ConNode::make(C, method_type));

  kill_dead_locals();

  // For some reason, this call reads only raw memory.
  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
  make_runtime_call(RC_LEAF | RC_NARROW_MEM,
                    call_type, call_address, call_name, raw_adr_type,
                    thread, method_node);
}
//---------------------------------------------------------------------------
// The following routine generates a subroutine to throw an asynchronous
// UnknownError when an unsafe access gets a fault that could not be
// reasonably prevented by the programmer. (Example: SIGBUS/OBJERR.)
address generate_handler_for_unsafe_access() {
  StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
  address start = __ pc();

  __ pushl(0);            // hole for return address-to-be
  __ pushad();            // push registers
  Address next_pc(esp, RegisterImpl::number_of_registers * BytesPerWord);
  __ call(CAST_FROM_FN_PTR(address, handle_unsafe_access), relocInfo::runtime_call_type);
  __ movl(next_pc, eax);  // stuff next address
  __ popad();
  __ ret(0);              // jump to next address
  return start;
}