// Allocate storage for a VtableStub plus its generated code.  Stubs are
// carved out of shared BufferBlobs to amortize per-blob header overhead.
// Returns NULL when the code cache cannot supply a new blob.
void* VtableStub::operator new(size_t size, int code_size) throw() {
  assert(size == sizeof(VtableStub), "mismatched size");
  // Total footprint of header + code, rounded up to a word boundary.
  const int needed = round_to(code_size + sizeof(VtableStub), wordSize);
  // Carve stubs out of shared blobs, 32 at a time.
  const int stubs_per_blob = 32;
  const bool have_room = (_chunk != NULL) && (_chunk + needed <= _chunk_end);
  if (!have_room) {
    // Current chunk exhausted (or none yet) -- grab a fresh blob.
    const int blob_bytes = stubs_per_blob * needed + pd_code_alignment();
    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
    // If changing the name, update the other file accordingly.
    BufferBlob* blob = BufferBlob::create("vtable chunks", blob_bytes);
    if (blob == NULL) {
      return NULL;  // caller must handle allocation failure
    }
    _chunk     = blob->content_begin();
    _chunk_end = _chunk + blob_bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + needed <= _chunk_end, "bad allocation");
  void* stub_mem = _chunk;
  _chunk += needed;
  align_chunk();
  return stub_mem;
}
// Allocates a VtableStub plus 'code_size' bytes of trailing code space.
// Stubs are sub-allocated from shared BufferBlobs ("vtable chunks") to
// minimize per-blob header overhead; exits the VM on code-cache exhaustion.
void* VtableStub::operator new(size_t size, int code_size) {
  assert(size == sizeof(VtableStub), "mismatched size");
  num_vtable_chunks++;
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    // Current chunk is exhausted (or none allocated yet): allocate a blob
    // big enough for chunk_factor stubs plus alignment slack.
    const int bytes = chunk_factor * real_size + pd_code_alignment();
    BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
    if (blob == NULL) {
      // Fatal: no point continuing without vtable stubs.
      vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks");
    }
    _chunk = blob->instructions_begin();
    _chunk_end = _chunk + bytes;
    Forte::register_stub("vtable stub", _chunk, _chunk_end);
    // Notify JVMTI about this stub. The event will be recorded by the enclosing
    // JvmtiDynamicCodeEventCollector and posted when this thread has released
    // all locks.
    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
    }
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  // Bump-pointer allocate from the current chunk.
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}
// Generates the SPARC fast-path JNI accessor stub for float/double fields.
// The stub reads the safepoint counter, speculatively loads the field, then
// re-reads the counter; if the counter's low bit is set or the counter
// changed, it falls back to the slow (fully-checked) JNI accessor.
// Register use (from the code below): O1 = obj handle, O2 = jfieldID,
// G4 = counter snapshot, F0 = result.
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
  const char *name;
  switch (type) {
    case T_FLOAT:  name = "jni_fast_GetFloatField";  break;
    case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
    default:       ShouldNotReachHere();
  }
  ResourceMark rm;
  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
  address fast_entry = b->instructions_begin();
  CodeBuffer cbuf(fast_entry, b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  Label label1, label2;
  // Materialize the safepoint counter address (sethi + low10 pattern).
  AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
  __ sethi (cnt_addrlit, O3);
  Address cnt_addr(O3, cnt_addrlit.low10());
  __ ld (cnt_addr, G4);
  // Low bit set => take the slow path (counter appears to be odd while a
  // safepoint is in progress -- TODO confirm against safepoint code).
  __ andcc (G4, 1, G0);
  __ br (Assembler::notZero, false, Assembler::pn, label1);
  __ delayed()->srl (O2, 2, O4);       // delay slot: jfieldID >> 2 = field offset
  __ ld_ptr (O1, 0, O5);               // dereference the object handle
  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  // Record the PC of the speculative load so a fault here can be routed to
  // the matching slowcase_entry_pclist entry recorded below.
  speculative_load_pclist[count] = __ pc();
  switch (type) {
    case T_FLOAT:  __ ldf (FloatRegisterImpl::S, O5, O4, F0); break;
    case T_DOUBLE: __ ldf (FloatRegisterImpl::D, O5, O4, F0); break;
    default:       ShouldNotReachHere();
  }
  // Re-read the counter; mismatch means a safepoint may have intervened.
  __ ld (cnt_addr, O5);
  __ cmp (O5, G4);
  __ br (Assembler::notEqual, false, Assembler::pn, label2);
  __ delayed()->mov (O7, G1);          // save return address for slow call
  __ retl ();
  __ delayed()-> nop ();
  slowcase_entry_pclist[count++] = __ pc();
  __ bind (label1);
  __ mov (O7, G1);
  address slow_case_addr;
  switch (type) {
    case T_FLOAT:  slow_case_addr = jni_GetFloatField_addr();  break;
    case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
    default:       ShouldNotReachHere();
  }
  __ bind (label2);
  // Tail-call the slow-case accessor, restoring the original return address.
  __ call (slow_case_addr, relocInfo::none);
  __ delayed()->mov (G1, O7);
  __ flush ();
  return fast_entry;
}
// Constructs a StubQueue backed by a code-cache BufferBlob of (at least)
// 'buffer_size' bytes, rounded up to a double-word multiple.  Aborts the VM
// if the code cache cannot supply the blob.
StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size, Mutex* lock, const char* name) : _mutex(lock) {
  const int rounded_size = round_to(buffer_size, 2*BytesPerWord);
  BufferBlob* blob = BufferBlob::create(rounded_size, name);
  if (blob == NULL) {
    fatal1( "CodeCache: no room for %s", name);
  }
  _stub_interface  = stub_interface;
  // Queue storage lives inside the blob's instruction area.
  _stub_buffer     = blob->instructions_begin();
  _buffer_size     = blob->instructions_size();
  _buffer_limit    = blob->instructions_size();
  // Queue starts out empty.
  _queue_begin     = 0;
  _queue_end       = 0;
  _number_of_stubs = 0;
  register_queue(this);
}
// Constructs a StubQueue backed by a code-cache BufferBlob of (at least)
// 'buffer_size' bytes, rounded up to a double-word multiple.  Exits the VM
// with an out-of-memory error if the blob cannot be allocated.
StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size, Mutex* lock, const char* name) : _mutex(lock) {
  intptr_t size = round_to(buffer_size, 2*BytesPerWord);
  BufferBlob* blob = BufferBlob::create(name, size);
  if( blob == NULL) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", name);
  }
  _stub_interface  = stub_interface;
  // Queue storage is the blob's content area; size and limit start equal.
  _buffer_size     = blob->content_size();
  _buffer_limit    = blob->content_size();
  _stub_buffer     = blob->content_begin();
  // Queue starts out empty.
  _queue_begin     = 0;
  _queue_end       = 0;
  _number_of_stubs = 0;
  register_queue(this);
}
void AbstractICache::initialize() { // Making this stub must be FIRST use of assembler ResourceMark rm; BufferBlob* b = BufferBlob::create("flush_icache_stub", ICache::stub_size); CodeBuffer c(b->instructions_begin(), b->instructions_size()); ICacheStubGenerator g(&c); g.generate_icache_flush(&_flush_icache_stub); // The first use of flush_icache_stub must apply it to itself. // The StubCodeMark destructor in generate_icache_flush will // call Assembler::flush, which in turn will call invalidate_range, // which will in turn call the flush stub. Thus we don't need an // explicit call to invalidate_range here. This assumption is // checked in invalidate_range. }
// Allocates a VtableStub plus 'code_size' bytes of trailing code space.
// Stubs are sub-allocated from shared BufferBlobs ("vtable chunks") to
// minimize per-blob header overhead; fatal error on code-cache exhaustion.
void* VtableStub::operator new(size_t size, int code_size) {
  assert(size == sizeof(VtableStub), "mismatched size");
  num_vtable_chunks++;
  // compute real VtableStub size (rounded to nearest word)
  const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
  // malloc them in chunks to minimize header overhead
  const int chunk_factor = 32;
  if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    // Current chunk exhausted (or none yet): allocate a blob big enough for
    // chunk_factor stubs plus alignment slack.
    const int bytes = chunk_factor * real_size + pd_code_alignment();
    BufferBlob* blob = BufferBlob::create(bytes, "vtable chunks");
    if( blob == NULL ) fatal1( "CodeCache: no room for %s", "vtable chunks");
    _chunk = blob->instructions_begin();
    _chunk_end = _chunk + bytes;
    // Register the new code region with VTune for profiler symbolization.
    VTune::register_stub("vtable stub", _chunk, _chunk_end);
    align_chunk();
  }
  assert(_chunk + real_size <= _chunk_end, "bad allocation");
  // Bump-pointer allocate from the current chunk.
  void* res = _chunk;
  _chunk += real_size;
  align_chunk();
  return res;
}
void VM_Version::initialize() { ResourceMark rm; // Create the processor info stub BufferBlob* blob = BufferBlob::create("getPsrInfo stub", 160); if (blob == NULL) { vm_exit_during_initialization("Unable to allocate getPsrInfo stub"); } CodeBuffer* buffer = new CodeBuffer(blob->instructions_begin(), blob->instructions_size()); VM_Version_StubGenerator g(buffer); getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t, g.generate_getPsrInfo()); // Get raw processor info getPsrInfo_stub(&_cpuid_info); // We know 64-bit x86's support cmpxchg8 _supports_cx8 = true; // Prefetch settings PrefetchCopyIntervalInBytes = prefetch_copy_interval_in_bytes(); PrefetchCopyIntervalInBytes = prefetch_scan_interval_in_bytes(); PrefetchFieldsAhead = prefetch_fields_ahead(); }
// Returns true iff 'addr' lies inside either of the two generated code blobs
// (skipping any blob that has not been created).
static bool contains(address addr) {
  if (_code1 != NULL && _code1->blob_contains(addr)) {
    return true;
  }
  if (_code2 != NULL && _code2->blob_contains(addr)) {
    return true;
  }
  return false;
}
// Generates the x86_32 fast-path JNI accessor for integral fields
// (boolean/byte/char/short/int).  The stub snapshots the safepoint counter,
// speculatively loads the field, then re-checks the counter; on any mismatch
// (or if the counter's low bit is set) it tail-calls the slow JNI accessor.
// On MP systems artificial data dependencies (xor tricks, zeroed index)
// enforce load ordering without fences -- do not reorder these instructions.
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
  const char *name;
  switch (type) {
    case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
    case T_BYTE:    name = "jni_fast_GetByteField";    break;
    case T_CHAR:    name = "jni_fast_GetCharField";    break;
    case T_SHORT:   name = "jni_fast_GetShortField";   break;
    case T_INT:     name = "jni_fast_GetIntField";     break;
    default:        ShouldNotReachHere();
  }
  ResourceMark rm;
  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
  address fast_entry = b->instructions_begin();
  CodeBuffer cbuf(fast_entry, b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  Label slow;
  // stack layout: offset from rsp (in words):
  //   return pc  0
  //   jni env    1
  //   obj        2
  //   jfieldID   3
  ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
  __ mov32 (rcx, counter);
  // Low bit set => go slow (counter appears odd while a safepoint is in
  // progress -- TODO confirm against safepoint code).
  __ testb (rcx, 1);
  __ jcc (Assembler::notZero, slow);
  if (os::is_MP()) {
    __ mov(rax, rcx);
    __ andptr(rax, 1);  // rax, must end up 0
    __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
                        // obj, notice rax, is 0.
                        // rdx is data dependent on rcx.
  } else {
    __ movptr (rdx, Address(rsp, 2*wordSize));  // obj
  }
  __ movptr(rax, Address(rsp, 3*wordSize));  // jfieldID
  __ movptr(rdx, Address(rdx, 0));           // *obj (dereference handle)
  __ shrptr (rax, 2);                        // offset = jfieldID >> 2
  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  // Record the speculative-load PC so a fault here can be redirected to the
  // matching slowcase_entry_pclist entry recorded below.
  speculative_load_pclist[count] = __ pc();
  switch (type) {
    case T_BOOLEAN: __ movzbl (rax, Address(rdx, rax, Address::times_1)); break;
    case T_BYTE:    __ movsbl (rax, Address(rdx, rax, Address::times_1)); break;
    case T_CHAR:    __ movzwl (rax, Address(rdx, rax, Address::times_1)); break;
    case T_SHORT:   __ movswl (rax, Address(rdx, rax, Address::times_1)); break;
    case T_INT:     __ movl   (rax, Address(rdx, rax, Address::times_1)); break;
    default:        ShouldNotReachHere();
  }
  Address ca1;
  if (os::is_MP()) {
    // Re-check the counter through an address made data-dependent on the
    // loaded value (self-cancelling xor), so the recheck cannot be hoisted
    // above the field load.
    __ lea(rdx, counter);
    __ xorptr(rdx, rax);
    __ xorptr(rdx, rax);
    __ cmp32(rcx, Address(rdx, 0));
    // ca1 is the same as ca because
    // rax, ^ counter_addr ^ rax, = address
    // ca1 is data dependent on rax,.
  } else {
    __ cmp32(rcx, counter);
  }
  __ jcc (Assembler::notEqual, slow);
#ifndef _WINDOWS
  __ ret (0);
#else
  // __stdcall calling convention: callee pops the three argument words.
  __ ret (3*wordSize);
#endif
  slowcase_entry_pclist[count++] = __ pc();
  __ bind (slow);
  address slow_case_addr;
  switch (type) {
    case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
    case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
    case T_CHAR:    slow_case_addr = jni_GetCharField_addr();    break;
    case T_SHORT:   slow_case_addr = jni_GetShortField_addr();   break;
    case T_INT:     slow_case_addr = jni_GetIntField_addr();
  }
  // tail call to the fully-checked slow-case accessor
  __ jump (ExternalAddress(slow_case_addr));
  __ flush ();
#ifndef _WINDOWS
  return fast_entry;
#else
  // On Windows, publish the fast entry and return a wrapper instead.
  switch (type) {
    case T_BOOLEAN: jni_fast_GetBooleanField_fp = (GetBooleanField_t)fast_entry; break;
    case T_BYTE:    jni_fast_GetByteField_fp    = (GetByteField_t)fast_entry;    break;
    case T_CHAR:    jni_fast_GetCharField_fp    = (GetCharField_t)fast_entry;    break;
    case T_SHORT:   jni_fast_GetShortField_fp   = (GetShortField_t)fast_entry;   break;
    case T_INT:     jni_fast_GetIntField_fp     = (GetIntField_t)fast_entry;
  }
  return os::win32::fast_jni_accessor_wrapper(type);
#endif
}
// Generates the x86_32 fast-path JNI accessor for float/double fields.
// Same safepoint-counter snapshot / speculative load / recheck protocol as
// the integral-field generator; additionally maintains the x87 FPU stack
// (result left in st(0) on 32-bit, popped on the invalid-load path).
// MP ordering relies on artificial data dependencies -- do not reorder.
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
  const char *name;
  switch (type) {
    case T_FLOAT:  name = "jni_fast_GetFloatField";  break;
    case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
    default:       ShouldNotReachHere();
  }
  ResourceMark rm;
  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
  address fast_entry = b->instructions_begin();
  CodeBuffer cbuf(fast_entry, b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  Label slow_with_pop, slow;
  // stack layout: offset from rsp (in words):
  //   return pc  0
  //   jni env    1
  //   obj        2
  //   jfieldID   3
  ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
  __ mov32 (rcx, counter);
  // Low bit set => go slow (counter appears odd while a safepoint is in
  // progress -- TODO confirm against safepoint code).
  __ testb (rcx, 1);
  __ jcc (Assembler::notZero, slow);
  if (os::is_MP()) {
    __ mov(rax, rcx);
    __ andptr(rax, 1);  // rax, must end up 0
    __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
                        // obj, notice rax, is 0.
                        // rdx is data dependent on rcx.
  } else {
    __ movptr(rdx, Address(rsp, 2*wordSize));  // obj
  }
  __ movptr(rax, Address(rsp, 3*wordSize));  // jfieldID
  __ movptr(rdx, Address(rdx, 0));           // *obj (dereference handle)
  __ shrptr(rax, 2);                         // offset = jfieldID >> 2
  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  // Record the speculative-load PC for fault redirection to the slow case.
  speculative_load_pclist[count] = __ pc();
  switch (type) {
#ifndef _LP64
    case T_FLOAT:  __ fld_s (Address(rdx, rax, Address::times_1)); break;
    case T_DOUBLE: __ fld_d (Address(rdx, rax, Address::times_1)); break;
#else
    case T_FLOAT:  __ movflt (xmm0, Address(robj, roffset, Address::times_1)); break;
    case T_DOUBLE: __ movdbl (xmm0, Address(robj, roffset, Address::times_1)); break;
#endif // _LP64
    default:       ShouldNotReachHere();
  }
  Address ca1;
  if (os::is_MP()) {
    // Spill the loaded value to make the counter recheck data-dependent on
    // the field access (self-cancelling xor through rdx).
    __ fst_s (Address(rsp, -4));
    __ lea(rdx, counter);
    __ movl (rax, Address(rsp, -4));
    // garbage hi-order bits on 64bit are harmless.
    __ xorptr(rdx, rax);
    __ xorptr(rdx, rax);
    __ cmp32(rcx, Address(rdx, 0));
    // rax, ^ counter_addr ^ rax, = address
    // ca1 is data dependent on the field access.
  } else {
    __ cmp32(rcx, counter);
  }
  __ jcc (Assembler::notEqual, slow_with_pop);
#ifndef _WINDOWS
  __ ret (0);
#else
  // __stdcall calling convention: callee pops the three argument words.
  __ ret (3*wordSize);
#endif
  __ bind (slow_with_pop);
  // invalid load. pop FPU stack.
  __ fstp_d (0);
  slowcase_entry_pclist[count++] = __ pc();
  __ bind (slow);
  address slow_case_addr;
  switch (type) {
    case T_FLOAT:  slow_case_addr = jni_GetFloatField_addr();  break;
    case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
    default:       ShouldNotReachHere();
  }
  // tail call to the fully-checked slow-case accessor
  __ jump (ExternalAddress(slow_case_addr));
  __ flush ();
#ifndef _WINDOWS
  return fast_entry;
#else
  // On Windows, publish the fast entry and return a wrapper instead.
  switch (type) {
    case T_FLOAT:  jni_fast_GetFloatField_fp  = (GetFloatField_t)fast_entry; break;
    case T_DOUBLE: jni_fast_GetDoubleField_fp = (GetDoubleField_t)fast_entry;
  }
  return os::win32::fast_jni_accessor_wrapper(type);
#endif
}
// Generates the x86_32 fast-path JNI accessor for long fields.  On 32-bit
// the 64-bit value needs two loads (lo in rax, hi in rdx), so TWO entries
// are recorded in the speculative/slow-case PC lists.  rsi is preserved
// across the stub (pushed on entry, popped on both exits).
address JNI_FastGetField::generate_fast_get_long_field() {
  const char *name = "jni_fast_GetLongField";
  ResourceMark rm;
  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
  address fast_entry = b->instructions_begin();
  CodeBuffer cbuf(fast_entry, b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  Label slow;
  // stack layout: offset from rsp (in words):
  //   old rsi    0
  //   return pc  1
  //   jni env    2
  //   obj        3
  //   jfieldID   4
  ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
  __ push (rsi);
  __ mov32 (rcx, counter);
  // Low bit set => go slow (counter appears odd while a safepoint is in
  // progress -- TODO confirm against safepoint code).
  __ testb (rcx, 1);
  __ jcc (Assembler::notZero, slow);
  if (os::is_MP()) {
    __ mov(rax, rcx);
    __ andptr(rax, 1);  // rax, must end up 0
    __ movptr(rdx, Address(rsp, rax, Address::times_1, 3*wordSize));
                        // obj, notice rax, is 0.
                        // rdx is data dependent on rcx.
  } else {
    __ movptr(rdx, Address(rsp, 3*wordSize));  // obj
  }
  __ movptr(rsi, Address(rsp, 4*wordSize));  // jfieldID
  __ movptr(rdx, Address(rdx, 0));           // *obj (dereference handle)
  __ shrptr(rsi, 2);                         // offset = jfieldID >> 2
  // Two speculative loads on 32-bit => need two pclist slots.
  assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
  speculative_load_pclist[count++] = __ pc();
  __ movptr(rax, Address(rdx, rsi, Address::times_1));       // low word
#ifndef _LP64
  speculative_load_pclist[count] = __ pc();
  __ movl(rdx, Address(rdx, rsi, Address::times_1, 4));      // high word
#endif // _LP64
  if (os::is_MP()) {
    // Counter recheck made data-dependent on BOTH loaded halves via
    // self-cancelling xors.
    __ lea(rsi, counter);
    __ xorptr(rsi, rdx);
    __ xorptr(rsi, rax);
    __ xorptr(rsi, rdx);
    __ xorptr(rsi, rax);
    __ cmp32(rcx, Address(rsi, 0));
    // ca1 is the same as ca because
    // rax, ^ rdx ^ counter_addr ^ rax, ^ rdx = address
    // ca1 is data dependent on both rax, and rdx.
  } else {
    __ cmp32(rcx, counter);
  }
  __ jcc (Assembler::notEqual, slow);
  __ pop (rsi);
#ifndef _WINDOWS
  __ ret (0);
#else
  // __stdcall calling convention: callee pops the three argument words.
  __ ret (3*wordSize);
#endif
  // Both speculative loads share the same slow-case re-entry point.
  slowcase_entry_pclist[count-1] = __ pc();
  slowcase_entry_pclist[count++] = __ pc();
  __ bind (slow);
  __ pop (rsi);
  address slow_case_addr = jni_GetLongField_addr();;
  // tail call to the fully-checked slow-case accessor
  __ jump (ExternalAddress(slow_case_addr));
  __ flush ();
#ifndef _WINDOWS
  return fast_entry;
#else
  // On Windows, publish the fast entry and return a wrapper instead.
  jni_fast_GetLongField_fp = (GetLongField_t)fast_entry;
  return os::win32::fast_jni_accessor_wrapper(T_LONG);
#endif
}
// Generates the amd64 fast-path JNI accessor for integral fields (including
// long -- all fit in rax on 64-bit).  Arguments arrive in registers
// (rarg1 = obj handle, rarg2 = jfieldID).  Same safepoint-counter snapshot /
// speculative load / recheck protocol as the 32-bit variant; MP ordering is
// enforced with self-cancelling xor data dependencies -- do not reorder.
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
  const char *name;
  switch (type) {
    case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
    case T_BYTE:    name = "jni_fast_GetByteField";    break;
    case T_CHAR:    name = "jni_fast_GetCharField";    break;
    case T_SHORT:   name = "jni_fast_GetShortField";   break;
    case T_INT:     name = "jni_fast_GetIntField";     break;
    case T_LONG:    name = "jni_fast_GetLongField";    break;
    default:        ShouldNotReachHere();
  }
  ResourceMark rm;
  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE);
  address fast_entry = b->instructions_begin();
  CodeBuffer* cbuf = new CodeBuffer(fast_entry, b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(cbuf);
  Label slow;
  address counter_addr = SafepointSynchronize::safepoint_counter_addr();
  Address ca(counter_addr, relocInfo::none);
  __ movl (rcounter, ca);
  __ movq (robj, rarg1);
  // Low bit set => go slow (counter appears odd while a safepoint is in
  // progress -- TODO confirm against safepoint code).
  __ testb (rcounter, 1);
  __ jcc (Assembler::notZero, slow);
  if (os::is_MP()) {
    // Self-cancelling xor makes robj data dependent on the counter read,
    // ordering the handle dereference after it.
    __ xorq (robj, rcounter);
    __ xorq (robj, rcounter);
    // obj, since robj ^ rcounter ^ rcounter == robj
    // robj is data dependent on rcounter.
  }
  __ movq (robj, Address(robj));     // *obj (dereference handle)
  __ movq (roffset, rarg2);
  __ shrq (roffset, 2);              // offset = jfieldID >> 2
  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  // Record the speculative-load PC for fault redirection to the slow case.
  speculative_load_pclist[count] = __ pc();
  switch (type) {
    case T_BOOLEAN: __ movzbl (rax, Address(robj, roffset, Address::times_1)); break;
    case T_BYTE:    __ movsbl (rax, Address(robj, roffset, Address::times_1)); break;
    case T_CHAR:    __ movzwl (rax, Address(robj, roffset, Address::times_1)); break;
    case T_SHORT:   __ movswl (rax, Address(robj, roffset, Address::times_1)); break;
    case T_INT:     __ movl   (rax, Address(robj, roffset, Address::times_1)); break;
    case T_LONG:    __ movq   (rax, Address(robj, roffset, Address::times_1)); break;
    default:        ShouldNotReachHere();
  }
  // Recheck the counter through an address data-dependent on the result.
  __ movq (rcounter_addr, (int64_t)counter_addr);
  ca = Address(rcounter_addr);
  if (os::is_MP()) {
    __ xorq (rcounter_addr, rax);
    __ xorq (rcounter_addr, rax);
    // ca is data dependent on rax.
  }
  __ cmpl (rcounter, ca);
  __ jcc (Assembler::notEqual, slow);
  __ ret (0);
  slowcase_entry_pclist[count++] = __ pc();
  __ bind (slow);
  address slow_case_addr;
  switch (type) {
    case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
    case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
    case T_CHAR:    slow_case_addr = jni_GetCharField_addr();    break;
    case T_SHORT:   slow_case_addr = jni_GetShortField_addr();   break;
    case T_INT:     slow_case_addr = jni_GetIntField_addr();     break;
    case T_LONG:    slow_case_addr = jni_GetLongField_addr();
  }
  // tail call to the fully-checked slow-case accessor
  __ jmp (slow_case_addr, relocInfo::none);
  __ flush ();
  return fast_entry;
}
// Generates the x86_64 fast-path JNI accessor for float/double fields.
// Arguments arrive in registers (c_rarg1 = obj handle, c_rarg2 = jfieldID);
// result is returned in xmm0.  Same safepoint-counter snapshot / speculative
// load / recheck protocol as the integral variant; MP ordering is enforced
// with self-cancelling xor data dependencies -- do not reorder.
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
  const char *name;
  switch (type) {
    case T_FLOAT:  name = "jni_fast_GetFloatField";  break;
    case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
    default:       ShouldNotReachHere();
  }
  ResourceMark rm;
  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE);
  address fast_entry = b->instructions_begin();
  CodeBuffer cbuf(fast_entry, b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  Label slow;
  ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
  __ mov32 (rcounter, counter);
  __ mov (robj, c_rarg1);
  // Low bit set => go slow (counter appears odd while a safepoint is in
  // progress -- TODO confirm against safepoint code).
  __ testb (rcounter, 1);
  __ jcc (Assembler::notZero, slow);
  if (os::is_MP()) {
    // Self-cancelling xor makes robj data dependent on the counter read.
    __ xorptr(robj, rcounter);
    __ xorptr(robj, rcounter);
    // obj, since robj ^ rcounter ^ rcounter == robj
    // robj is data dependent on rcounter.
  }
  __ movptr(robj, Address(robj, 0));  // *obj (dereference handle)
  __ mov (roffset, c_rarg2);
  __ shrptr(roffset, 2);              // offset = jfieldID >> 2
  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  // Record the speculative-load PC for fault redirection to the slow case.
  speculative_load_pclist[count] = __ pc();
  switch (type) {
    case T_FLOAT:  __ movflt (xmm0, Address(robj, roffset, Address::times_1)); break;
    case T_DOUBLE: __ movdbl (xmm0, Address(robj, roffset, Address::times_1)); break;
    default:       ShouldNotReachHere();
  }
  if (os::is_MP()) {
    __ lea(rcounter_addr, counter);
    __ movdq (rax, xmm0);
    // counter address is data dependent on xmm0.
    __ xorptr(rcounter_addr, rax);
    __ xorptr(rcounter_addr, rax);
    __ cmpl (rcounter, Address(rcounter_addr, 0));
  } else {
    __ cmp32 (rcounter, counter);
  }
  __ jcc (Assembler::notEqual, slow);
  __ ret (0);
  slowcase_entry_pclist[count++] = __ pc();
  __ bind (slow);
  address slow_case_addr;
  switch (type) {
    case T_FLOAT:  slow_case_addr = jni_GetFloatField_addr();  break;
    case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr();
  }
  // tail call to the fully-checked slow-case accessor
  __ jump (ExternalAddress(slow_case_addr));
  __ flush ();
  return fast_entry;
}
// Generates the SPARC fast-path JNI accessor for long fields.  On 32-bit
// SPARC the 64-bit value needs two loads (hi in G2, lo in O3) and thus two
// speculative/slow-case pclist slots; on 64-bit a single ldx suffices.
// Register use (from the code below): O1 = obj handle, O2 = jfieldID,
// G4 = counter snapshot.
address JNI_FastGetField::generate_fast_get_long_field() {
  const char *name = "jni_fast_GetLongField";
  ResourceMark rm;
  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize);
  address fast_entry = b->instructions_begin();
  CodeBuffer cbuf(fast_entry, b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  Label label1, label2;
  // Materialize the safepoint counter address (sethi + low10 pattern).
  AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
  __ sethi (cnt_addrlit, G3);
  Address cnt_addr(G3, cnt_addrlit.low10());
  __ ld (cnt_addr, G4);
  // Low bit set => take the slow path (counter appears odd while a safepoint
  // is in progress -- TODO confirm against safepoint code).
  __ andcc (G4, 1, G0);
  __ br (Assembler::notZero, false, Assembler::pn, label1);
  __ delayed()->srl (O2, 2, O4);   // delay slot: jfieldID >> 2 = field offset
  __ ld_ptr (O1, 0, O5);           // dereference the object handle
  __ add (O5, O4, O5);             // field address
#ifndef _LP64
  // Two speculative loads on 32-bit => need two pclist slots.
  assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
  speculative_load_pclist[count++] = __ pc();
  __ ld (O5, 0, G2);               // high word
  speculative_load_pclist[count] = __ pc();
  __ ld (O5, 4, O3);               // low word
#else
  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  speculative_load_pclist[count] = __ pc();
  __ ldx (O5, 0, O3);              // single 64-bit load
#endif
  // Re-read the counter; mismatch means a safepoint may have intervened.
  __ ld (cnt_addr, G1);
  __ cmp (G1, G4);
  __ br (Assembler::notEqual, false, Assembler::pn, label2);
  __ delayed()->mov (O7, G1);      // save return address for slow call
#ifndef _LP64
  __ mov (G2, O0);                 // return hi:lo in O0:O1
  __ retl ();
  __ delayed()->mov (O3, O1);
#else
  __ retl ();
  __ delayed()->mov (O3, O0);      // return value in O0
#endif
#ifndef _LP64
  // Both speculative loads share the same slow-case re-entry point.
  slowcase_entry_pclist[count-1] = __ pc();
  slowcase_entry_pclist[count++] = __ pc() ;
#else
  slowcase_entry_pclist[count++] = __ pc();
#endif
  __ bind (label1);
  __ mov (O7, G1);
  address slow_case_addr = jni_GetLongField_addr();
  __ bind (label2);
  // Tail-call the slow-case accessor, restoring the original return address.
  __ call (slow_case_addr, relocInfo::none);
  __ delayed()->mov (G1, O7);
  __ flush ();
  return fast_entry;
}