// x86: materialize the cached oop in rax, then jump to the entry point.
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, oop cached_oop, address entry_point) {
  ResourceMark rm;
  CodeBuffer      code(code_begin, ic_stub_code_size());
  MacroAssembler* masm = new MacroAssembler(&code);
  // note: even though the code contains an embedded oop, we do not need reloc info
  // because
  // (1) the oop is old (i.e., doesn't matter for scavenges)
  // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
  assert(cached_oop == NULL || cached_oop->is_perm(), "must be perm oop");
  masm->lea(rax, OopAddress((address) cached_oop));
  masm->jump(ExternalAddress(entry_point));
}
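// For context: the stub laid out above must be parseable again later, which is why the
// platform code pins down a fixed instruction sequence. Below is a minimal sketch of the
// matching x86 accessors, assuming the usual HotSpot nativeInst helpers
// (nativeMovConstReg_at, nativeJump_at); illustrative, not necessarily the exact
// upstream code.
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
  // creation of the wrappers also verifies the instruction sequence
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
  NativeJump*        jump = nativeJump_at(move->next_instruction_address());
  return jump->jump_destination();
}

oop InlineCacheBuffer::ic_buffer_cached_oop(address code_begin) {
  // walk the same two-instruction sequence and read back the embedded oop
  NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
  // verifies the jump that follows the move
  NativeJump*        jump = nativeJump_at(move->next_instruction_address());
  return (oop)move->data();
}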
// ARM: load the cached value from a PC-relative literal into Ricklass, then jump.
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
  ResourceMark rm;
  CodeBuffer code(code_begin, ic_stub_code_size());
  MacroAssembler* masm = new MacroAssembler(&code);

  InlinedAddress oop_literal((address) cached_value);
  __ ldr_literal(Ricklass, oop_literal);
  // FIXME: OK to remove reloc here?
  __ patchable_jump(entry_point, relocInfo::runtime_call_type, Rtemp);
  __ bind_literal(oop_literal);
  __ flush();
}
// SPARC (metadata-based inline caches): set the cached value in G5_inline_cache_reg
// with a fixed-length patchable sequence, then jump through G3_scratch.
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
  ResourceMark rm;
  CodeBuffer      code(code_begin, ic_stub_code_size());
  MacroAssembler* masm = new MacroAssembler(&code);
  // note: even though the code contains an embedded metadata, we do not need reloc info
  // because
  // (1) the metadata is old (i.e., doesn't matter for scavenges)
  // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
  AddressLiteral cached_value_addrlit((address)cached_value, relocInfo::none);
  // Force the set to generate the fixed sequence so next_instruction_address works
  masm->patchable_set(cached_value_addrlit, G5_inline_cache_reg);
  assert(G3_scratch != G5_method, "Do not clobber the method oop in the transition stub");
  assert(G3_scratch != G5_inline_cache_reg, "Do not clobber the inline cache register in the transition stub");
  AddressLiteral entry(entry_point);
  masm->JUMP(entry, G3_scratch, 0);
  masm->delayed()->nop();
  masm->flush();
}
// SPARC (older, perm-gen oop-based variant): build the cached oop with an explicit
// sethi/add pair so the sequence length is fixed, then jump through G3_scratch.
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, oop cached_oop, address entry_point) {
  ResourceMark rm;
  CodeBuffer*     code = new CodeBuffer(code_begin, ic_stub_code_size());
  MacroAssembler* masm = new MacroAssembler(code);
  // note: even though the code contains an embedded oop, we do not need reloc info
  // because
  // (1) the oop is old (i.e., doesn't matter for scavenges)
  // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
  assert(cached_oop == NULL || cached_oop->is_perm(), "must be old oop");
  Address cached_oop_addr(G5_inline_cache_reg, address(cached_oop));
  // Force the sethi to generate the fixed sequence so next_instruction_address works
  masm->sethi(cached_oop_addr, true /* ForceRelocatable */);
  masm->add(cached_oop_addr, G5_inline_cache_reg);
  assert(G3_scratch != G5_method, "Do not clobber the method oop in the transition stub");
  assert(G3_scratch != G5_inline_cache_reg, "Do not clobber the inline cache register in the transition stub");
  // masm->jump_to(Address(G3_scratch, entry_point));
  masm->JUMP(Address(G3_scratch, entry_point), 0);
  masm->delayed()->nop();
  masm->flush();
}
ICStub* InlineCacheBuffer::new_ic_stub() {
  while (true) {
    ICStub* ic_stub = (ICStub*)buffer()->request_committed(ic_stub_code_size());
    if (ic_stub != NULL) {
      return ic_stub;
    }
    // we ran out of inline cache buffer space; must enter safepoint.
    // We do this by forcing a safepoint
    EXCEPTION_MARK;

    VM_ForceSafepoint vfs;
    VMThread::execute(&vfs);
    // We could potentially get an async. exception at this point.
    // In that case we will rethrow it to ourselves.
    if (HAS_PENDING_EXCEPTION) {
      oop exception = PENDING_EXCEPTION;
      CLEAR_PENDING_EXCEPTION;
      Thread::send_async_exception(JavaThread::current()->threadObj(), exception);
    }
  }
  ShouldNotReachHere();
  return NULL;
}
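// Why forcing a safepoint helps: at a safepoint the VM resolves the pending inline-cache
// transitions and releases their stubs, emptying the buffer so the allocation loop above
// can succeed on retry. A sketch of that cleanup hook, modeled on HotSpot's
// InlineCacheBuffer::update_inline_caches(); the exact body varies by release.
void InlineCacheBuffer::update_inline_caches() {
  if (buffer()->number_of_stubs() > 1) {
    // reclaim every stub (finalizing their inline caches), then re-establish
    // the single pre-allocated "next" stub
    buffer()->remove_all();
    init_next_stub();
  }
}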
void InlineCacheBuffer::init_next_stub() {
  ICStub* ic_stub = (ICStub*)buffer()->request_committed(ic_stub_code_size());
  assert(ic_stub != NULL, "no room for a single stub");
  set_next_stub(ic_stub);
}
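// How the two allocation paths fit together: init_next_stub() pre-allocates one stub up
// front so a stub is always on hand, while new_ic_stub() replenishes it and may block in
// a safepoint. A sketch of the caller, loosely modeled on HotSpot's
// InlineCacheBuffer::create_transition_stub(); treat the exact calls (get_next_stub,
// set_stub, set_ic_destination) as assumptions about the surrounding API.
void InlineCacheBuffer::create_transition_stub(CompiledIC* ic, void* cached_value, address entry) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be called during a safepoint");
  // hand out the pre-allocated stub and wire it to this inline cache
  ICStub* ic_stub = get_next_stub();
  ic_stub->set_stub(ic, cached_value, entry);
  // redirect the call site into the stub, then pre-allocate the next one
  // (new_ic_stub() can trigger a safepoint when the buffer is full)
  ic->set_ic_destination(ic_stub);
  set_next_stub(new_ic_stub());
}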