void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);

  assert(pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
  } else {
    __ cmp(pre_val_reg, G0);
    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
  }
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
  __ delayed()->mov(pre_val_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}
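// For orientation, a hedged sketch of the SATB pre-barrier this slow path
// implements (pseudocode, not HotSpot source; satb_enqueue is a stand-in
// name for the runtime code behind g1_pre_barrier_slow_id):
//
//   if (marking_active) {         // established before branching to _entry
//     pre_val = *addr;            // the mem2reg above, when do_load()
//     if (pre_val != NULL)
//       satb_enqueue(pre_val);    // record the old value for SATB marking
//   }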
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame
  // corresponding to the outer interpreter frame.
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();

  // load the register arguments (the C code packed them as varargs)
  for (Argument ldarg = argv.successor(); ldarg.is_register(); ldarg = ldarg.successor()) {
    __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
  }
  __ ret();
  __ delayed()->restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler
  return entry;
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mov(src()->as_register(),     O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(),     O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(),  O4);

  ce->emit_static_call_stub();

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

#ifndef PRODUCT
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}
OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  // call the runtime patching routine; returns non-zero if the nmethod got deopted
  int call_offset = __ call_RT(noreg, noreg, target);
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  // re-execute the patched instruction or, if the nmethod was deoptimized,
  // return to the deoptimization handler entry that will cause re-execution
  // of the current bytecode
  DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
  assert(deopt_blob != NULL, "deoptimization blob must have been created");

  Label no_deopt;
  __ br_null_short(O0, Assembler::pt, no_deopt);

  // return to the deoptimization handler entry for unpacking and re-execution;
  // if we simply returned then we'd deopt as if any call we patched had just
  // returned.
  restore_live_registers(sasm);

  AddressLiteral dest(deopt_blob->unpack_with_reexecution());
  __ jump_to(dest, O0);
  __ delayed()->restore();

  __ bind(no_deopt);
  restore_live_registers(sasm);
  __ ret();
  __ delayed()->restore();

  return oop_maps;
}
void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mov(src().as_register(),     O0);
  __ mov(src_pos().as_register(), O1);
  __ mov(dst().as_register(),     O2);
  __ mov(dst_pos().as_register(), O3);
  __ mov(length().as_register(),  O4);

  address call_pc = __ pc();
  ce->emit_code_stub(_call_stub);

  __ call(Runtime1::entry_for(Runtime1::resolve_invokestatic_id), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);

#ifndef PRODUCT
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();

  _call_stub->set_code_pc(call_pc);
}
void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(a, relocInfo::runtime_call_type);
    __ delayed()->nop();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg.as_register(), G5);
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
  const char *name;
  switch (type) {
    case T_FLOAT:  name = "jni_fast_GetFloatField";  break;
    case T_DOUBLE: name = "jni_fast_GetDoubleField"; break;
    default:       ShouldNotReachHere();
  }
  ResourceMark rm;
  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize);
  CodeBuffer cbuf(blob);
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  address fast_entry = __ pc();

  Label label1, label2;

  AddressLiteral cnt_addrlit(SafepointSynchronize::safepoint_counter_addr());
  __ sethi (cnt_addrlit, O3);
  Address cnt_addr(O3, cnt_addrlit.low10());
  __ ld (cnt_addr, G4);
  __ andcc (G4, 1, G0);
  __ br (Assembler::notZero, false, Assembler::pn, label1);
  __ delayed()->srl (O2, 2, O4);
  __ ld_ptr (O1, 0, O5);

  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  speculative_load_pclist[count] = __ pc();
  switch (type) {
    case T_FLOAT:  __ ldf (FloatRegisterImpl::S, O5, O4, F0); break;
    case T_DOUBLE: __ ldf (FloatRegisterImpl::D, O5, O4, F0); break;
    default:       ShouldNotReachHere();
  }

  __ ld (cnt_addr, O5);
  __ cmp (O5, G4);
  __ br (Assembler::notEqual, false, Assembler::pn, label2);
  __ delayed()->mov (O7, G1);

  __ retl ();
  __ delayed()->nop ();

  slowcase_entry_pclist[count++] = __ pc();
  __ bind (label1);
  __ mov (O7, G1);

  address slow_case_addr;
  switch (type) {
    case T_FLOAT:  slow_case_addr = jni_GetFloatField_addr();  break;
    case T_DOUBLE: slow_case_addr = jni_GetDoubleField_addr(); break;
    default:       ShouldNotReachHere();
  }
  __ bind (label2);
  __ call (slow_case_addr, relocInfo::none);
  __ delayed()->mov (G1, O7);

  __ flush ();

  return fast_entry;
}
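// The fast path above is the classic counter-validated speculative read; a
// hedged sketch of its logic (pseudocode, not HotSpot source):
//
//   c1 = safepoint_counter;
//   if (c1 & 1) goto slow;        // odd counter: safepoint in progress
//   v = *(obj + offset);          // speculative load; pc recorded so a fault
//                                 // here can be rerouted to the slow case
//   c2 = safepoint_counter;
//   if (c2 != c1) goto slow;      // a safepoint intervened; don't trust v
//   return v;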
void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov(_length.as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg.as_register(), G5);
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result.as_register());
}
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj.is_valid()) {
    __ delayed()->mov(_obj.as_register(), G4);  // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info_for_exception);
}
void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_lock_reg.as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}
void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_bci, G4);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}
void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}
// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4);  // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}
//
// Generate the on-stack replacement stub, that is used to replace the
// interpreter frame
//
OSRAdapter* SharedRuntime::generate_osr_blob(int frame_size) {
  ResourceMark rm;

  // setup code generation tools
  CodeBuffer* cb = new CodeBuffer(128, 128, 0, 0, 0, false);
  MacroAssembler* masm = new MacroAssembler(cb);

  OopMapSet* oop_maps = new OopMapSet();
  // frame_size is in words; OopMaps want slots
  OopMap* map = new OopMap(frame_size * (wordSize / sizeof(jint)), 0);
  oop_maps->add_gc_map(0, true, map);

  // Continuation point after returning from the osr compiled method.
  // Position a potential integer result for returning from the original
  // interpreted activation.
  __ mov(O0, I0);
  __ mov(O1, I1);

  const Register Gtmp1 = G3_scratch;

  // Return from the current method.
  // The caller's SP was adjusted upon method entry to accommodate
  // the callee's non-argument locals. Undo that adjustment.
  __ ret();
  __ delayed()->restore(IsavedSP, G0, SP);

  return OSRAdapter::new_osr_adapter(cb, oop_maps, frame_size, 0);
}
OopMapSet* Runtime1::generate_stub_call(StubAssembler* sasm, Register result, address target,
                                        Register arg1, Register arg2, Register arg3) {
  // make a frame and preserve the caller's caller-save registers
  OopMap* oop_map = save_live_registers(sasm);

  int call_offset;
  if (arg1 == noreg) {
    call_offset = __ call_RT(result, noreg, target);
  } else if (arg2 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1);
  } else if (arg3 == noreg) {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2);
  } else {
    call_offset = __ call_RT(result, noreg, target, arg1, arg2, arg3);
  }
  OopMapSet* oop_maps = new OopMapSet();
  oop_maps->add_gc_map(call_offset, oop_map);

  restore_live_registers(sasm);
  __ ret();
  __ delayed()->restore();

  return oop_maps;
}
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
#ifdef COMPILER2
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  //   set (empty), G5
  //   jmp -1

  address mark = cbuf.insts_mark();  // Get mark within main instrs section.

  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(to_interp_stub_size()*2);
  if (base == NULL) return;  // CodeBuffer::expand failed.

  // Static stub relocation stores the instruction address of the call.
  __ relocate(static_stub_Relocation::spec(mark));

  __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode()));

  __ set_inst_mark();
  AddressLiteral addrlit(-1);
  __ JUMP(addrlit, G3, 0);

  __ delayed()->nop();

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
#else
  ShouldNotReachHere();
#endif
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
  Argument jni_arg(jni_offset(), false);
  Register Rtmp = O0;

#ifdef ASSERT
  if (TaggedStackInterpreter) {
    // check at least one tag is okay
    Label ok;
    __ ld_ptr(Llocals, Interpreter::local_tag_offset_in_bytes(offset() + 1), Rtmp);
    __ cmp(Rtmp, G0);
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("Native object has bad tag value");
    __ bind(ok);
  }
#endif // ASSERT

#ifdef _LP64
  __ ldx(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
  __ store_long_argument(Rtmp, jni_arg);
#else
  __ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 1), Rtmp);
  __ store_argument(Rtmp, jni_arg);
  __ ld(Llocals, Interpreter::local_offset_in_bytes(offset() + 0), Rtmp);
  Argument successor(jni_arg.successor());
  __ store_argument(Rtmp, successor);
#endif
}
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  //   set (empty), G5
  //   jmp -1

  if (mark == NULL) {
    mark = cbuf.insts_mark();  // Get mark within main instrs section.
  }

  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(to_interp_stub_size());
  if (base == NULL) {
    return NULL;  // CodeBuffer::expand failed.
  }

  // Static stub relocation stores the instruction address of the call.
  __ relocate(static_stub_Relocation::spec(mark));

  __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode()));

  __ set_inst_mark();
  AddressLiteral addrlit(-1);
  __ JUMP(addrlit, G3, 0);

  __ delayed()->nop();

  assert(__ pc() - base <= to_interp_stub_size(), "wrong stub size");

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
  return base;
}
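// Both emit_to_interp_stub variants above lay down the same patchable
// skeleton; a hedged sketch of the emitted instructions (operands are
// placeholders until the call site is switched over to the interpreter):
//
//   set_metadata NULL, G5    ! later patched with the callee Method*
//   jmp          -1          ! later patched with the interpreter entry
//   nop                      ! delay slot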
void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
  assert_different_registers(Rmark, Roop, Rbox);

  Label done;

  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
  assert(mark_addr.disp() == 0, "cas must take a zero displacement");

  if (UseBiasedLocking) {
    // load the object out of the BasicObjectLock
    ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
    verify_oop(Roop);
    biased_locking_exit(mark_addr, Rmark, done);
  }
  // Test first if it is a fast recursive unlock.
  ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
  br_null_short(Rmark, Assembler::pt, done);

  if (!UseBiasedLocking) {
    // load object
    ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
    verify_oop(Roop);
  }

  // Check if it is still a lightweight lock; this is true if we see
  // the stack address of the basicLock in the markOop of the object.
  cas_ptr(mark_addr.base(), Rbox, Rmark);
  cmp(Rbox, Rmark);

  brx(Assembler::notEqual, false, Assembler::pn, slow_case);
  delayed()->nop();
  // Done
  bind(done);
}
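// A hedged sketch of the unlock fast path above (pseudocode, not HotSpot
// source):
//
//   displaced = box->displaced_header;
//   if (displaced == NULL) goto done;       // recursive unlock: nothing to undo
//   if (CAS(&obj->mark, box, displaced) != box)
//     goto slow_case;                       // inflated or contended -> runtime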
void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(SharedRuntime::deopt_blob()->unpack_with_reexecution());
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}
void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
  Argument jni_arg(jni_offset(), false);
  Argument java_arg(offset(), true);
  Register Rtmp1 = O0;
  Register Rtmp2 = jni_arg.is_register() ? jni_arg.as_register() : O0;
  Register Rtmp3 = G3_scratch;

  // the handle for a receiver will never be null
  bool do_NULL_check = offset() != 0 || is_static();

  Address h_arg = Address(Llocals, Interpreter::local_offset_in_bytes(offset()));
  __ ld_ptr(h_arg, Rtmp1);
  if (!do_NULL_check) {
    __ add(h_arg.base(), h_arg.disp(), Rtmp2);
  } else {
    if (Rtmp1 == Rtmp2)
      __ tst(Rtmp1);
    else
      __ addcc(G0, Rtmp1, Rtmp2);  // optimize mov/test pair
    Label L;
    __ brx(Assembler::notZero, true, Assembler::pt, L);
    __ delayed()->add(h_arg.base(), h_arg.disp(), Rtmp2);
    __ bind(L);
  }
  __ store_ptr_argument(Rtmp2, jni_arg);  // this is often a no-op
}
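// What the emitted code computes, as hedged pseudocode: JNI object arguments
// are passed as handles, i.e. the address of the interpreter local slot, and
// a null reference is passed through as NULL rather than as a handle:
//
//   jni_arg = (*local_slot != NULL) ? local_slot : NULL;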
void C1_MacroAssembler::allocate_object(
  Register obj,        // result: pointer to object after successful allocation
  Register t1,         // temp register
  Register t2,         // temp register, must be a global register for try_allocate
  Register t3,         // temp register
  int      hdr_size,   // object header size in words
  int      obj_size,   // object size in words
  Register klass,      // object klass
  Label&   slow_case   // continuation point if fast allocation fails
) {
  assert_different_registers(obj, t1, t2, t3, klass);
  assert(klass == G5, "must be G5");

  // allocate space & initialize header
  if (!is_simm13(obj_size * wordSize)) {
    // would need to use extra register to load object size
    // => go the slow case for now
    ba(slow_case);
    delayed()->nop();
    return;
  }
  try_allocate(obj, noreg, obj_size * wordSize, t2, t3, slow_case);

  initialize_object(obj, klass, noreg, obj_size * HeapWordSize, t1, t2);
}
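// Why the is_simm13 guard above exists: SPARC arithmetic immediates are
// signed 13-bit, so a byte size outside [-4096, 4095] cannot be encoded
// directly and would cost an extra register; such (rare, large) objects are
// simply sent to the slow path. Hypothetical example with a 64-bit word size:
//
//   is_simm13( 100 * wordSize)  //  800 bytes -> encodable, fast path
//   is_simm13(1000 * wordSize)  // 8000 bytes -> not encodable, slow path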
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register addr_reg = addr()->as_pointer_register();
  Register new_val_reg = new_val()->as_register();
  __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
                    new_val_reg, _continuation);
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id));
  __ delayed()->mov(addr_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}
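// A hedged sketch of the G1 post-barrier this slow path backs (pseudocode,
// not HotSpot source; card_mark_enqueue is a stand-in name for the runtime
// code behind g1_post_barrier_slow_id):
//
//   if (new_val != NULL)          // the br_on_reg_cond above
//     card_mark_enqueue(addr);    // dirty the card / record for refinement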
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->null_check_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
}
void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) {
  Label no_handler;

  __ verify_not_null_oop(Oexception);

  // save the exception and issuing pc in the thread
  __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));

  // save the real return address and use the throwing pc as the return
  // address to lookup (has bci & oop map)
  __ mov(I7, L0);
  __ mov(Oissuing_pc, I7);
  __ sub(I7, frame::pc_return_offset, I7);
  int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc));

  // Note: if the nmethod has been deoptimized then regardless of whether it
  // had a handler or not we will deoptimize by entering the deopt blob with
  // a pending exception.
  __ tst(O0);
  __ br(Assembler::zero, false, Assembler::pn, no_handler);
  __ delayed()->nop();

  // restore the registers that were saved at the beginning and jump to the exception handler.
  restore_live_registers(sasm);

  __ jmp(O0, 0);
  __ delayed()->restore();

  __ bind(no_handler);
  __ mov(L0, I7);  // restore return address

  // restore exception oop
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception->after_save());
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));

  __ restore();

  AddressLiteral exc(Runtime1::entry_for(Runtime1::unwind_exception_id));
  __ jump_to(exc, G4);
  __ delayed()->nop();

  oop_maps->add_gc_map(call_offset, oop_map);
}
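// Control flow of the stub above, sketched as hedged pseudocode:
//
//   handler = exception_handler_for_pc(thread);  // returned in O0
//   if (handler != NULL) { restore registers; jump handler; }
//   else                 { reload exception oop; jump unwind_exception stub; }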
/*
 * implementation for context_switchback
 */
void context_switchback() {
  if (top_context != NULL) {
    if (current_context != top_context && delayed(top_context)) {
      context_switch(top_context);
    }
  }
}
void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox,
                                    Register Rscratch, Label& slow_case) {
  assert_different_registers(Rmark, Roop, Rbox, Rscratch);

  Label done;

  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

  // The following move must be the first instruction emitted since debug
  // information may be generated for it.
  // Load object header
  ld_ptr(mark_addr, Rmark);

  verify_oop(Roop);

  // save object being locked into the BasicObjectLock
  st_ptr(Roop, Rbox, BasicObjectLock::obj_offset_in_bytes());

  if (UseBiasedLocking) {
    biased_locking_enter(Roop, Rmark, Rscratch, done, &slow_case);
  }

  // Save Rbox in Rscratch to be used for the cas operation
  mov(Rbox, Rscratch);

  // and mark it unlocked
  or3(Rmark, markOopDesc::unlocked_value, Rmark);

  // save unlocked object header into the displaced header location on the stack
  st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());

  // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
  assert(mark_addr.disp() == 0, "cas must take a zero displacement");
  cas_ptr(mark_addr.base(), Rmark, Rscratch);
  // if compare/exchange succeeded we found an unlocked object and we now have locked it
  // hence we are done
  cmp(Rmark, Rscratch);
  brx(Assembler::equal, false, Assembler::pt, done);
  delayed()->sub(Rscratch, SP, Rscratch);  // pull next instruction into delay slot
  // we did not find an unlocked object so see if this is a recursive case
  // sub(Rscratch, SP, Rscratch);
  assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
  andcc(Rscratch, 0xfffff003, Rscratch);
  brx(Assembler::notZero, false, Assembler::pn, slow_case);
  delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
  bind(done);
}
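// A hedged sketch of the stack-locking fast path emitted above (pseudocode,
// not HotSpot source):
//
//   displaced = obj->mark | unlocked_value;   // the or3 above
//   box->displaced_header = displaced;
//   old_mark = CAS(&obj->mark, displaced, box);
//   if (old_mark == displaced)
//     goto done;                              // lock acquired
//   // CAS failed: recursive iff the old mark points into our own stack page
//   if (((old_mark - SP) & 0xfffff003) != 0)
//     goto slow_case;                         // contended -> runtime
//   box->displaced_header = 0;                // flag a recursive stack lock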
void C1_MacroAssembler::initialize_body(Register base, Register index) {
  assert_different_registers(base, index);
  Label loop;
  bind(loop);
  subcc(index, HeapWordSize, index);
  brx(Assembler::greaterEqual, true, Assembler::pt, loop);
  delayed()->st_ptr(G0, base, index);
}
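// A sketch of the loop emitted above (assumption: index holds the body size
// in bytes on entry; base points at the first word to clear):
//
//   loop: subcc index, HeapWordSize, index  ! step down one word, set icc
//         bge,a,pt loop                     ! loop while index >= 0
//         st_ptr  G0, [base + index]        ! delay slot: zero one word;
//                                           ! annulled on loop exit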