// Generate LIR for a multi-dimensional array allocation.
// Spills live values, evaluates all dimension sizes, pushes them as stack
// parameters and calls the runtime multianewarray entry.
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  // make sure registers are spilled
  spill_values_on_stack(x->state());
  if (x->state_before() != NULL) {
    spill_values_on_stack(x->state_before());
  }
  Values* dims = x->dims();
  int i = dims->length();
  // BUG FIX: the list must be pre-sized to dims->length() — at_put() below
  // indexes the list, and a default-constructed LIRItemList is empty, so
  // at_put() on it is out of bounds.  (The other do_NewMultiArray variants
  // already pre-size the list this way.)
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
    // values were spilled above, so no dimension should still sit in a register
    assert(!size->is_register() || x->state_before() == NULL, "shouldn't be since it was spilled above");
  }
  // need to get the info before, as the items may become invalid through item_free
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();
    // pass dimension i as the i-th outgoing stack parameter
    emit()->store_stack_parameter(size->result(), i);
  }
  RInfo reg = set_with_result_register(x)->rinfo();
  CodeEmitInfo* info = state_for(x, x->state());
  emit()->new_multi_array(reg, x->klass(), x->rank(), norinfo, info, patching_info);
}
// Generate LIR for an object (reference) array allocation on x86.
// All temps are fixed registers because allocation may end in a runtime call.
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  CodeEmitInfo* info = state_for(x, x->state());
  // fixed result and temp registers required by the allocation fast path
  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_oop_opr;
  // length must be in rbx for the slow-path stub's calling convention
  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();
  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    // array klass could not be created (e.g. out of memory in the CI) — bail out of compilation
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  // materialize the array klass, patching later if it is not yet loaded
  jobject2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// Generate LIR for a checkcast on x86: emits a type check that throws
// ClassCastException (or IncompatibleClassChangeError) via a slow-path stub.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  // info for exceptions
  CodeEmitInfo* info_for_exception = state_for(x);
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    // interface-check variant used by invokeinterface; cannot require patching
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // an extra temp is needed when the klass must be materialized/patched or decoded
  if (!x->klass()->is_loaded() || UseCompressedOops) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}
// Generate LIR for an object array allocation (older C1 back-end).
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // make sure registers are spilled
  spill_values_on_stack(x->state());
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  length.load_item();
  RInfo reg = set_with_result_register(x)->rinfo();
  // temps for the allocation; LIRHideReg presumably reserves a register hidden
  // from the allocator for the duration of its scope and releases it at the
  // closing brace — TODO(review): confirm against LIRHideReg's definition
  RInfo tmp1; RInfo tmp2; RInfo tmp3; RInfo tmp4;
  {
    LIRHideReg hr1(this, objectType); tmp1 = hr1.reg();
    LIRHideReg hr2(this, objectType); tmp2 = hr2.reg();
    LIRHideReg hr3(this, objectType); tmp3 = hr3.reg();
    LIRHideReg hr4(this, objectType); tmp4 = hr4.reg();
  }
  CodeEmitInfo* info = state_for(x, x->state());
  emit()->new_object_array(reg, x->klass(), length.result(), tmp1, tmp2, tmp3, tmp4, norinfo, info, patching_info);
}
// Generate LIR for monitorenter on x86.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();
  set_no_result(x);
  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }
  // debug info for the implicit null check on the locked object, if needed
  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch, x->monitor_no(), info_for_exception, info);
}
// Generate LIR for a checkcast on SPARC; fixed G registers serve as temps.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (so x->obj()->item() is valid for creating a debug info location)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  // copy_locks(): exception state keeps the lock stack but not the expression stack
  CodeEmitInfo* info_for_exception = state_for(x, x->state()->copy_locks());
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  // fixed temporaries used by the type-check fast path
  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(), info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}
// Generate LIR for a checkcast (older C1 back-end).
void LIRGenerator::do_CheckCast(CheckCast* x) {
  // all values are spilled
  spill_values_on_stack(x->state());
  LIRItem obj(x->obj(), this);
  // the check clobbers the register holding the object
  obj.set_destroys_register();
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  RInfo in_reg = obj.result()->rinfo();
  // info for exceptions
  CodeEmitInfo* info_for_exception = state_for(x, x->state()->copy_locks());
  RInfo reg = rlock_result(x)->rinfo();
  RInfo tmp1 = new_register(objectType)->rinfo();
  RInfo tmp2 = new_register(objectType)->rinfo();
  if (patching_info != NULL) {
    // the object register must be visible in the oop map of the patching site
    patching_info->add_register_oop(in_reg);
  }
  emit()->checkcast_op(LIR_OprFact::rinfo(reg, T_OBJECT), obj.result(), x->klass(), tmp1, tmp2, x->direct_compare(), info_for_exception, patching_info);
}
// Generate LIR for a multi-dimensional array allocation on SPARC: the
// dimension sizes are stored into the frame's memory-parameter area and the
// runtime multianewarray entry is called with (klass, rank, &dims).
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }
  // need to get the info before, as the items may become invalid through item_free
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    // if a patching_info was generated above then debug information for the state before
    // the call is going to be emitted.  The LIRGenerator calls above may have left some values
    // in registers and that's been recorded in the CodeEmitInfo.  In that case the items
    // for those values can't simply be freed if they are registers because the values
    // might be destroyed by store_stack_parameter.  So in the case of patching, delay the
    // freeing of the items that already were in registers
    size->load_item();
    // SPARC: biased SP plus the memory-parameter word offset; each dimension is a jint slot
    store_stack_parameter (size->result(),
                           in_ByteSize(STACK_BIAS +
                                       frame::memory_parameter_word_sp_offset * wordSize +
                                       i * sizeof(jint)));
  }
  // This instruction can be deoptimized in the slow path : use
  // O0 as result register.
  const LIR_Opr reg = result_register_for(x->type());
  CodeEmitInfo* info = state_for(x, x->state());
  jobject2reg_with_patching(reg, x->klass(), patching_info);
  LIR_Opr rank = FrameMap::O1_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  // O2 gets the address of the dimension array just stored in the frame
  LIR_Opr varargs = FrameMap::as_pointer_opr(O2);
  int offset_from_sp = (frame::memory_parameter_word_sp_offset * wordSize) + STACK_BIAS;
  __ add(FrameMap::SP_opr, LIR_OprFact::intptrConst(offset_from_sp), varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(reg);
  args->append(rank);
  args->append(varargs);
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id), LIR_OprFact::illegalOpr, reg, args, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// Generate LIR for a two-way branch on x86.
// Canonicalizes long comparisons, loads operands, optionally emits a
// safepoint on loop back-edges, then emits compare + profiled branch.
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  // NOTE: the unused local `bool is_safepoint = x->is_safepoint();` was
  // removed; the code below queries x->is_safepoint() directly.
  If::Condition cond = x->cond();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // longs cannot handle constants at right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }
  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);
  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();
  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    // float compares need the extra "unordered" successor
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}
// Generate LIR for a multi-dimensional array allocation on x86: dimension
// sizes are stored as jints below rsp and the runtime entry is called with
// (klass, rank, &dims).
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers.  This is handled transparently in other
    // places by the CodeEmitInfo cloning logic but is handled
    // specially here because a stub isn't being used.
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();
    // each dimension occupies one 4-byte stack slot
    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }
  LIR_Opr reg = result_register_for(x->type());
  jobject2reg_with_patching(reg, x->klass(), patching_info);
  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  // rcx points at the dimension array just written at the stack top
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(reg);
  args->append(rank);
  args->append(varargs);
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id), LIR_OprFact::illegalOpr, reg, args, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// Bind the result of an Invoke and emit the call itself, including the
// strictfp rounding of FP results when required.
void LIRGenerator::invoke_do_result(Invoke* x, bool needs_null_check, LIR_Opr receiver) {
  // setup result register
  if (x->type()->is_void()) {
    set_no_result(x);
  } else {
    // FIX: the local `RInfo reg = ...->rinfo();` was never used — keep only
    // the side effect of binding the platform result register to x.
    set_with_result_register(x);
  }
  // emit invoke code
  CodeEmitInfo* info = state_for(x, x->state());
  bool optimized = x->target_is_loaded() && x->target_is_final();
  // The current backend doesn't support vtable calls, so pass -1
  // instead of x->vtable_index();
  emit()->call_op(x->code(), NULL, -1, info, optimized, needs_null_check, receiverRInfo(), x->operand()); // does add_safepoint
  if (x->type()->is_float() || x->type()->is_double()) {
    emit()->set_fpu_result(x->operand()->rinfo());
    // Force rounding of results from non-strictfp when in strictfp
    // scope (or when we don't know the strictness of the callee, to
    // be safe.)
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        round_item(x->operand());
      }
    }
  }
}
// Generate LIR for the System.arraycopy intrinsic on SPARC.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");
  // Note: spill caller save before setting the item
  LIRItem src     (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst     (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length  (x->argument_at(4), this);
  // load all values in callee_save_registers, as this makes the
  // parameter passing to the fast case simpler
  src.load_item_force     (rlock_callee_saved(T_OBJECT));
  src_pos.load_item_force (rlock_callee_saved(T_INT));
  dst.load_item_force     (rlock_callee_saved(T_OBJECT));
  dst_pos.load_item_force (rlock_callee_saved(T_INT));
  length.load_item_force  (rlock_callee_saved(T_INT));
  // classify the copy (primitive vs. object, statically known type, ...)
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  CodeEmitInfo* info = state_for(x, x->state());
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), rlock_callee_saved(T_INT), expected_type, flags, info);
  set_no_result(x);
}
// Generate LIR for monitorenter (older C1 back-end).
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  spill_values_on_stack(x->state());
  assert(x->is_root(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();
  set_no_result(x);
  // register holding the address of the lock slot
  RInfo lock = new_register(T_OBJECT)->rinfo();
  // debug info for the implicit null check on the locked object, if needed
  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x, x->lock_stack_before());
  }
  CodeEmitInfo* info = state_for(x, x->state());
  emit()->monitor_enter(obj.get_register(), lock, syncTempRInfo(), norinfo, x->monitor_no(), info_for_exception, info);
}
// Generate LIR for a two-way branch (older C1 back-end).  The branch is
// emitted in two parts (if_op pass 1 and 2) around the phi moves.
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  // NOTE(review): is_safepoint is unused below; x->is_safepoint() is re-queried directly
  bool is_safepoint = x->is_safepoint();
  If::Condition cond = x->cond();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  if (tag == floatTag || tag == doubleTag) {
    // FP compares clobber both operand registers on this back-end
    xin->set_destroys_register();
    yin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // longs cannot handle constants at right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }
  // note that the condition test must happen before the
  // moves into Phi area happen, and that the control flow
  // jump must happen after the moves into the phi area
  set_no_result(x);
  if (x->is_safepoint()) {
    CodeEmitInfo* info_before = state_for(x, x->state_before());
    emit()->safepoint_nop(info_before);
  }
  emit()->if_op(1, cond, xin->result(), yin->result(), x->tsux(), x->fsux(), x->usux());
  move_to_phi(x->state());
  emit()->if_op(2, cond, xin->result(), yin->result(), x->tsux(), x->fsux(), x->usux());
  goto_default_successor(x);
}
// Generate LIR for monitorenter on SPARC.  The locking fast path uses the
// fixed global registers G1 (lock slot address), G3 (scratch) and G4
// (displaced header).
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_root(),"");
  LIRItem receiver(x->obj(), this);
  receiver.load_item();
  set_no_result(x);
  // fixed temporaries required by the locking code
  LIR_Opr lock_opr    = FrameMap::G1_opr;
  LIR_Opr scratch_opr = FrameMap::G3_opr;
  LIR_Opr hdr_opr     = FrameMap::G4_opr;
  // debug info for the implicit null check, only when one is needed
  CodeEmitInfo* null_check_info = x->needs_null_check()
      ? state_for(x, x->lock_stack_before())
      : NULL;
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expects object to be unlocked)
  CodeEmitInfo* lock_info = state_for(x, x->state(), true);
  monitor_enter(receiver.result(), lock_opr, hdr_opr, scratch_opr,
                x->monitor_no(), null_check_info, lock_info);
}
// for _ladd, _lmul, _lsub, _ldiv, _lrem void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { switch (x->op()) { case Bytecodes::_lrem: case Bytecodes::_lmul: case Bytecodes::_ldiv: { if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) { LIRItem right(x->y(), this); right.load_item(); CodeEmitInfo* info = state_for(x); LIR_Opr item = right.result(); assert(item->is_register(), "must be"); __ cmp(lir_cond_equal, item, LIR_OprFact::longConst(0)); __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info)); } address entry; switch (x->op()) { case Bytecodes::_lrem: entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem); break; // check if dividend is 0 is done elsewhere case Bytecodes::_ldiv: entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv); break; // check if dividend is 0 is done elsewhere case Bytecodes::_lmul: entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul); break; default: ShouldNotReachHere(); } // order of arguments to runtime call is reversed. LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL); set_result(x, result); break; } case Bytecodes::_ladd: case Bytecodes::_lsub: { LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.load_item(); right.load_item(); rlock_result(x); arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); break; } default: ShouldNotReachHere(); } }
// Generate LIR for an instanceof test on SPARC; G1/G3/G4 serve as the
// fixed temporaries of the type-check fast path.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem value(x->obj(), this);
  // if the klass may need patching, capture the state before the test
  CodeEmitInfo* patch_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patch_info = state_for(x, x->state_before());
  }
  // ensure the result register is not the input register because the result is initialized before the patching safepoint
  value.load_item();
  LIR_Opr result_opr = rlock_result(x);
  __ instanceof(result_opr, value.result(), x->klass(),
                FrameMap::G1_oop_opr,   // tmp1
                FrameMap::G3_oop_opr,   // tmp2
                FrameMap::G4_oop_opr,   // tmp3
                x->direct_compare(), patch_info);
}
// Generate LIR for the System.arraycopy intrinsic on x86 (32- and 64-bit).
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");
  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);
  // operands for arraycopy must use fixed registers, otherwise
  // LinearScan will fail allocation (because arraycopy always needs a
  // call)
#ifndef _LP64
  src.load_item_force     (FrameMap::rcx_oop_opr);
  src_pos.load_item_force (FrameMap::rdx_opr);
  dst.load_item_force     (FrameMap::rax_oop_opr);
  dst_pos.load_item_force (FrameMap::rbx_opr);
  length.load_item_force  (FrameMap::rdi_opr);
  LIR_Opr tmp =           (FrameMap::rsi_opr);
#else
  // The java calling convention will give us enough registers
  // so that on the stub side the args will be perfect already.
  // On the other slow/special case side we call C and the arg
  // positions are not similar enough to pick one as the best.
  // Also because the java calling convention is a "shifted" version
  // of the C convention we can process the java args trivially into C
  // args without worry of overwriting during the xfer
  src.load_item_force     (FrameMap::as_oop_opr(j_rarg0));
  src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
  dst.load_item_force     (FrameMap::as_oop_opr(j_rarg2));
  dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
  length.load_item_force  (FrameMap::as_opr(j_rarg4));
  LIR_Opr tmp =            FrameMap::as_opr(j_rarg5);
#endif // LP64
  set_no_result(x);
  // classify the copy (primitive vs. object, statically known type, ...)
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
}
// for _ladd, _lmul, _lsub, _ldiv, _lrem void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem ) { // this is a call LIRItem left(x->x(), this); LIRItem right(x->y(), this); right.set_destroys_register(); // actually: only the div_by_zero destroys register left.load_item(); emit()->push_item(left.result()); right.load_item(); emit()->push_item(right.result()); CodeEmitInfo* info = state_for(x); emit()->explicit_div_by_zero_check(right.result(), info); set_with_result_register(x); emit()->arithmetic_call_op(x->op(), norinfo); } else if (x->op() == Bytecodes::_lmul) { // missing test if instr is commutative and if we should swap LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.set_destroys_register(); left.load_item(); right.load_item(); // WARNING: for longs, we must guarantee (algorithmically) that the locked lo result // register is not the same as the left-HI register , otherwise we // would overwrite the results. // set_result(x, FrameMap::_eax_edxRInfo); emit()->arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); } else { // missing test if instr is commutative and if we should swap LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.set_destroys_register(); left.load_item(); right.load_item(); // WARNING: for longs, we must guarantee (algorithmically) that the locked lo result // register is not the same as the left-HI register , otherwise we // would overwrite the results. // RInfo reg = rlock_result(x)->rinfo(); emit()->arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); } }
// Generate LIR for a plain object allocation on x86.
void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  // FIX: removed the unused local `LIR_Opr klass_reg = new_register(objectType);`
  // — the call below passes the fixed klass register (rdx) directly.
  new_instance(reg, x->klass(),
               FrameMap::rcx_oop_opr,       // tmp1
               FrameMap::rdi_oop_opr,       // tmp2
               FrameMap::rsi_oop_opr,       // tmp3
               LIR_OprFact::illegalOpr,     // tmp4 (unused on x86)
               FrameMap::rdx_oop_opr,       // klass register
               info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// Generate LIR for a plain object allocation on SPARC.
void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path : use
  // O0 as result register.
  const LIR_Opr result_reg = result_register_for(x->type());
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->bci());
  }
  CodeEmitInfo* info = state_for(x, x->state());
  // fixed temporaries and klass register required by the allocation fast path
  new_instance(result_reg, x->klass(),
               FrameMap::G1_oop_opr,   // tmp1
               FrameMap::G3_oop_opr,   // tmp2
               FrameMap::G4_oop_opr,   // tmp3
               FrameMap::O1_oop_opr,   // tmp4
               FrameMap::G5_oop_opr,   // klass register
               info);
  __ move(result_reg, rlock_result(x));
}
// Generate LIR for an instanceof test on x86.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // an extra temp is needed when the klass must be materialized/patched or decoded
  if (!x->klass()->is_loaded() || UseCompressedOops) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
// Generate LIR for the System.arraycopy intrinsic (older C1 back-end).
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  spill_values_on_stack(x->state());
  assert(x->number_of_arguments() == 5, "wrong type");
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);
  src.load_item();
  src_pos.load_item();
  dst.load_item();
  dst_pos.load_item();
  length.load_item();
  RInfo tmp = new_register(T_INT)->rinfo();
  set_no_result(x);
  CodeEmitInfo* info = state_for(x, x->state()); // we may want to have stack (deoptimization?)
  emit()->arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, false, NULL, info); // does add_safepoint
}
// Generate LIR for a primitive-type array allocation (older C1 back-end).
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // make sure registers are spilled
  spill_values_on_stack(x->state());
  LIRItem length(x->length(), this);
  length.load_item();
  RInfo reg = set_with_result_register(x)->rinfo();
  // temps for the allocation; LIRHideReg presumably reserves a register hidden
  // from the allocator for the duration of its scope — TODO(review): confirm
  RInfo tmp1; RInfo tmp2; RInfo tmp3; RInfo klassR;
  {
    LIRHideReg hr1(this, objectType); tmp1 = hr1.reg();
    LIRHideReg hr2(this, objectType); tmp2 = hr2.reg();
    LIRHideReg hr3(this, objectType); tmp3 = hr3.reg();
    LIRHideReg hr4(this, objectType); klassR = hr4.reg();
  }
  CodeEmitInfo* info = state_for(x, x->state());
  emit()->new_type_array(reg, x->elt_type(), length.result(), tmp1, tmp2, tmp3, norinfo, klassR, info);
}
// Generate LIR for an instanceof test (older C1 back-end).
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  spill_values_on_stack(x->state());
  LIRItem obj(x->obj(), this);
  // the type check clobbers the register holding the object
  obj.set_destroys_register();
  // result and test object may not be in same register
  RInfo reg = rlock_result(x)->rinfo();
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  // do not include tmp in the oop map
  if (patching_info != NULL) {
    // object must be part of the oop map
    patching_info->add_register_oop(obj.result()->rinfo());
  }
  RInfo tmp = new_register(objectType)->rinfo();
  emit()->instanceof_op(LIR_OprFact::rinfo(reg, T_OBJECT), obj.result(), x->klass(), tmp, obj.result()->rinfo(), x->direct_compare(), patching_info);
}
// for: _iadd, _imul, _isub, _idiv, _irem void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) { bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem; LIRItem left(x->x(), this); LIRItem right(x->y(), this); // missing test if instr is commutative and if we should swap right.load_nonconstant(); assert(right.is_constant() || right.is_register(), "wrong state of right"); left.load_item(); rlock_result(x); if (is_div_rem) { CodeEmitInfo* info = state_for(x); LIR_Opr tmp = FrameMap::G1_opr; if (x->op() == Bytecodes::_irem) { __ irem(left.result(), right.result(), x->operand(), tmp, info); } else if (x->op() == Bytecodes::_idiv) { __ idiv(left.result(), right.result(), x->operand(), tmp, info); } } else { arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::G1_opr); } }
// Generate LIR for a plain object allocation (older C1 back-end).
void LIRGenerator::do_NewInstance(NewInstance* x) {
  // make sure registers are spilled
  spill_values_on_stack(x->state());
  RInfo reg = set_with_result_register(x)->rinfo();
  // temps for the allocation; LIRHideReg presumably reserves a register hidden
  // from the allocator for the duration of its scope — TODO(review): confirm
  RInfo tmp1; RInfo tmp2; RInfo tmp3; RInfo klassR;
  {
    LIRHideReg hr1(this, objectType); tmp1 = hr1.reg();
    LIRHideReg hr2(this, objectType); tmp2 = hr2.reg();
    LIRHideReg hr3(this, objectType); tmp3 = hr3.reg();
    LIRHideReg hr4(this, objectType); klassR = hr4.reg();
  }
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->bci());
  }
  CodeEmitInfo* info = state_for(x, x->state());
  emit()->new_instance(reg, x->klass(), tmp1, tmp2, tmp3, klassR, info);
}
// Generate LIR for a primitive-type array allocation on SPARC.  The element
// klass is a compile-time constant, so it is loaded directly (no patching).
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  LIRItem array_length(x->length(), this);
  array_length.load_item();
  const LIR_Opr result_reg = result_register_for(x->type());
  // fixed temporaries and klass register required by the allocation fast path
  LIR_Opr tmp1      = FrameMap::G1_oop_opr;
  LIR_Opr tmp2      = FrameMap::G3_oop_opr;
  LIR_Opr tmp3      = FrameMap::G4_oop_opr;
  LIR_Opr tmp4      = FrameMap::O1_oop_opr;
  LIR_Opr klass_reg = FrameMap::G5_oop_opr;
  LIR_Opr len       = array_length.result();
  BasicType elem_type = x->elt_type();
  // the type-array klass is always loaded — materialize it as a constant
  __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg);
  CodeEmitInfo* info = state_for(x, x->state());
  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, result_reg, info);
  __ allocate_array(result_reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
  __ move(result_reg, rlock_result(x));
}
// for: _iadd, _imul, _isub, _idiv, _irem
// x86: idiv/irem require the rax/rdx fixed-register convention; imul by a
// suitable constant is strength-reduced to shifts in arithmetic_op_int.
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem) {
    // The requirements for division and modulo
    // input : rax,: dividend                         min_int
    //         reg: divisor   (may not be rax,/rdx)   -1
    //
    // output: rax,: quotient  (= rax, idiv reg)       min_int
    //         rdx: remainder (= rax, irem reg)       0
    // rax, and rdx will be destroyed
    // Note: does this invalidate the spec ???
    LIRItem right(x->y(), this);
    LIRItem left(x->x() , this);   // visit left second, so that the is_register test is valid
    // call state_for before load_item_force because state_for may
    // force the evaluation of other instructions that are needed for
    // correct debug info.  Otherwise the live range of the fix
    // register might be too long.
    CodeEmitInfo* info = state_for(x);
    left.load_item_force(divInOpr());
    right.load_item();
    LIR_Opr result = rlock_result(x);
    LIR_Opr result_reg;
    if (x->op() == Bytecodes::_idiv) {
      result_reg = divOutOpr();
    } else {
      result_reg = remOutOpr();
    }
    if (!ImplicitDiv0Checks) {
      // explicit divide-by-zero check when implicit (fault-based) checks are off
      __ cmp(lir_cond_equal, right.result(), LIR_OprFact::intConst(0));
      __ branch(lir_cond_equal, T_INT, new DivByZeroStub(info));
    }
    LIR_Opr tmp = FrameMap::rdx_opr; // idiv and irem use rdx in their implementation
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), result_reg, tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), result_reg, tmp, info);
    } else {
      ShouldNotReachHere();
    }
    __ move(result_reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(),  this);
    LIRItem right(x->y(), this);
    LIRItem* left_arg = &left;
    LIRItem* right_arg = &right;
    if (x->is_commutative() && left.is_stack() && right.is_register()) {
      // swap them if left is real stack (or cached) and right is real register(not cached)
      left_arg = &right;
      right_arg = &left;
    }
    left_arg->load_item();
    // do not need to load right, as we can handle stack and constants
    if (x->op() == Bytecodes::_imul ) {
      // check if we can use shift instead
      bool use_constant = false;
      bool use_tmp = false;
      if (right_arg->is_constant()) {
        int iconst = right_arg->get_jint_constant();
        if (iconst > 0) {
          // multiplies by 2^k, 2^k+1 and 2^k-1 can be strength-reduced
          if (is_power_of_2(iconst)) {
            use_constant = true;
          } else if (is_power_of_2(iconst - 1) || is_power_of_2(iconst + 1)) {
            use_constant = true;
            use_tmp = true;   // extra register needed for the add/sub correction
          }
        }
      }
      if (use_constant) {
        right_arg->dont_load_item();
      } else {
        right_arg->load_item();
      }
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      if (use_tmp) {
        tmp = new_register(T_INT);
      }
      rlock_result(x);
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    } else {
      right_arg->dont_load_item();
      rlock_result(x);
      LIR_Opr tmp = LIR_OprFact::illegalOpr;
      arithmetic_op_int(x->op(), x->operand(), left_arg->result(), right_arg->result(), tmp);
    }
  }
}
// for _ladd, _lmul, _lsub, _ldiv, _lrem
// x86: ldiv/lrem call into SharedRuntime (with an explicit zero check first);
// lmul uses the fixed long result pair; add/sub are emitted inline.
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem ) {
    // long division is implemented as a direct call into the runtime
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    // the check for division by zero destroys the right operand
    right.set_destroys_register();
    // C calling convention with two long arguments
    BasicTypeList signature(2);
    signature.append(T_LONG);
    signature.append(T_LONG);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    // check for division by zero (destroys registers of right operand!)
    CodeEmitInfo* info = state_for(x);
    const LIR_Opr result_reg = result_register_for(x->type());
    // note: argument order to the runtime call is reversed (divisor first)
    left.load_item_force(cc->at(1));
    right.load_item();
    __ move(right.result(), cc->at(0));
    __ cmp(lir_cond_equal, right.result(), LIR_OprFact::longConst(0));
    __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
    address entry;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // check if dividend is 0 is done elsewhere
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // check if dividend is 0 is done elsewhere
    case Bytecodes::_lmul:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
      break;
    default:
      ShouldNotReachHere();
    }
    LIR_Opr result = rlock_result(x);
    __ call_runtime_leaf(entry, getThreadTemp(), result_reg, cc->args());
    __ move(result_reg, result);
  } else if (x->op() == Bytecodes::_lmul) {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    // right register is destroyed by the long mul, so it must be
    // copied to a new register.
    right.set_destroys_register();
    left.load_item();
    right.load_item();
    // lmul produces its result in the fixed long register pair
    LIR_Opr reg = FrameMap::long0_opr;
    arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
    LIR_Opr result = rlock_result(x);
    __ move(reg, result);
  } else {
    // missing test if instr is commutative and if we should swap
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    // don't load constants to save register
    right.load_nonconstant();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
  }
}