// Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval() void LIRGenerator::do_IfOp(IfOp* x) { #ifdef ASSERT { ValueTag xtag = x->x()->type()->tag(); ValueTag ttag = x->tval()->type()->tag(); assert(xtag == intTag || xtag == objectTag, "cannot handle others"); assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others"); assert(ttag == x->fval()->type()->tag(), "cannot handle others"); } #endif LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.load_item(); right.load_item(); emit()->ifop_phase1(x->cond(), left.result(), right.result()); LIRItem t_val(x->tval(), this); LIRItem f_val(x->fval(), this); t_val.dont_load_item(); f_val.dont_load_item(); RInfo reg; if (x->fval()->type()->tag() == longTag) { // must lock before releasing reg = rlock_result(x)->rinfo(); } if (x->fval()->type()->tag() != longTag) { reg = rlock_result(x)->rinfo(); } emit()->ifop_phase2(reg, t_val.result(), f_val.result(), x->cond()); }
// for _fadd, _fmul, _fsub, _fdiv, _frem // _dadd, _dmul, _dsub, _ddiv, _drem void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) { // Because of the way we simulate FPU register location in code emitter, we must // mark both items as being destroyed(i.e., they are going to be removed from the FPU stack) LIRItem left(x->x(), this); LIRItem right(x->y(), this); right.set_destroys_register(); left.set_destroys_register(); LIRItem* left_arg = &left; LIRItem* right_arg = &right; if (x->is_commutative() && left.is_stack() && right.is_register()) { // swap them if left is real stack (or cached) and right is real register(not cached) left_arg = &right; right_arg = &left; } left_arg->load_item(); // do not need to load right, as we can handle stack and constants if ( x->y()->type()->is_constant() || x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) { // cannot handle inlined constants right_arg->load_item(); } else { right_arg->dont_load_item(); } RInfo reg = rlock_result(x)->rinfo(); emit()->arithmetic_op_fpu(x->op(), x->operand(), left_arg->result(), right_arg->result(), x->is_strictfp()); round_item(x->operand()); }
void LIRGenerator::do_CheckCast(CheckCast* x) {
  // all values are spilled to the stack at this instruction's state
  spill_values_on_stack(x->state());

  LIRItem obj(x->obj(), this);
  obj.set_destroys_register();

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  RInfo in_reg = obj.result()->rinfo();

  // info for exceptions: capture state (with lock info copied) for the
  // exception path of the type check
  CodeEmitInfo* info_for_exception = state_for(x, x->state()->copy_locks());

  RInfo reg  = rlock_result(x)->rinfo();
  RInfo tmp1 = new_register(objectType)->rinfo();
  RInfo tmp2 = new_register(objectType)->rinfo();
  if (patching_info != NULL) {
    // the input oop must be visible in the oop map used while patching
    patching_info->add_register_oop(in_reg);
  }
  emit()->checkcast_op(LIR_OprFact::rinfo(reg, T_OBJECT), obj.result(), x->klass(), tmp1, tmp2, x->direct_compare(), info_for_exception, patching_info);
}
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f // _i2b, _i2c, _i2s void LIRGenerator::do_Convert(Convert* x) { if (x->op() == Bytecodes::_f2i || x->op() == Bytecodes::_d2i) { LIRItem value(x->value(), this); value.set_destroys_register(); value.load_item(); RInfo reg = set_with_result_register(x)->rinfo(); emit()->convert_op(x->op(), value.result(), reg, is_32bit_mode()); } else { LIRItem value(x->value(), this); value.handle_float_kind(); if (value.is_constant()) { value.load_item(); } else if (x->op() != Bytecodes::_i2f && x->op() != Bytecodes::_i2d && x->op() != Bytecodes::_l2f && x->op() != Bytecodes::_l2d) { value.load_item(); } else { value.dont_load_item(); } RInfo reg; if (x->op() == Bytecodes::_f2l || x->op() == Bytecodes::_d2l) { reg = FrameMap::_eax_edxRInfo; set_result(x, reg); } else { reg = rlock_result(x)->rinfo(); } emit()->convert_op(x->op(), value.result(), reg); round_item(x->operand()); } }
void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  LIRItem length(x->length(), this);
  // in case of patching (i.e., object class is not yet loaded), we need to
  // reexecute the instruction and therefore provide the state before the
  // parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  CodeEmitInfo* info = state_for(x, x->state());

  // fixed registers used by the allocation fast path and the slow-path stub
  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::rcx_oop_opr;
  LIR_Opr tmp2 = FrameMap::rsi_oop_opr;
  LIR_Opr tmp3 = FrameMap::rdi_oop_opr;
  LIR_Opr tmp4 = reg;
  LIR_Opr klass_reg = FrameMap::rdx_oop_opr;

  // length is forced into rbx — NOTE(review): presumably the calling
  // convention of the slow-path stub requires it there; confirm
  length.load_item_force(FrameMap::rbx_opr);
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  jobject2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  // copy out of the fixed result register into a freshly locked operand
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception = state_for(x);

  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    // failure of this check throws IncompatibleClassChangeError rather than
    // ClassCastException (see the stub ids below)
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedOops) {
    // an extra temp is needed when the klass must be patched or compressed
    // oops are in use
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr void LIRGenerator::do_ShiftOp(ShiftOp* x) { // count must always be in ecx LIRItem value(x->x(), this); LIRItem count(x->y(), this); value.set_destroys_register(); ValueTag elemType = x->type()->tag(); bool must_load_count = !count.is_constant() || elemType == longTag; if (must_load_count) { // count for long must be in register count.load_item(); } else { count.dont_load_item(); } value.load_item(); RInfo tmp; if (elemType == intTag && count.is_register()) { // in case we cache the count, we want always to have a register free // (without caching, the count may be loaded in ecx; with the caching count, // the count register may not be ecx tmp = new_register(T_INT)->rinfo(); } RInfo reg = rlock_result(x)->rinfo(); emit()->shift_op(x->op(), reg, value.result(), count.result(), tmp); }
void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
  // Intrinsic for sun.misc.AtomicLongCSImpl: atomically replace the long
  // field with new_value iff it currently equals cmp_value.
  assert(x->number_of_arguments() == 3, "wrong type");
  LIRItem obj       (x->argument_at(0), this);  // AtomicLong object
  LIRItem cmp_value (x->argument_at(1), this);  // value to compare with field
  LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value

  // compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
  cmp_value.load_item_force(FrameMap::long0_opr);

  // new value must be in rcx,ebx (hi,lo)
  new_value.load_item_force(FrameMap::long1_opr);

  // object pointer register is overwritten with field address
  obj.load_item();

  // generate compare-and-swap; produces zero condition if swap occurs
  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
  LIR_Opr addr = new_pointer_register();
  __ leal(LIR_OprFact::address(new LIR_Address(obj.result(), value_offset, T_LONG)), addr);
  LIR_Opr t1 = LIR_OprFact::illegalOpr;  // no temp needed
  LIR_Opr t2 = LIR_OprFact::illegalOpr;  // no temp needed
  __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);

  // generate conditional move of boolean result
  // NOTE(review): the cmove selects int constants 0/1 yet is tagged T_LONG —
  // confirm this type tag is intended for the boolean result.
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
}
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (so x->obj()->item() is valid for creating
    // a debug info location)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  // capture state (with lock info copied) for the exception path of the check
  CodeEmitInfo* info_for_exception = state_for(x, x->state()->copy_locks());

  if (x->is_incompatible_class_change_check()) {
    // failure throws IncompatibleClassChangeError (see stub id)
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  // fixed global registers used as temps by the type-check code
  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(), info_for_exception, patching_info, stub, x->profiled_method(), x->profiled_bci());
}
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  // wrap each dimension expression in a LIRItem, preserving index order
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // need to get the info before, as the items may become invalid through item_free
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }

  // pass the dimensions as memory parameters in the caller's frame area
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    // if a patching_info was generated above then debug information for the state before
    // the call is going to be emitted.  The LIRGenerator calls above may have left some values
    // in registers and that's been recorded in the CodeEmitInfo.  In that case the items
    // for those values can't simply be freed if they are registers because the values
    // might be destroyed by store_stack_parameter.  So in the case of patching, delay the
    // freeing of the items that already were in registers
    size->load_item();
    store_stack_parameter (size->result(),
                           in_ByteSize(STACK_BIAS +
                                       frame::memory_parameter_word_sp_offset * wordSize +
                                       i * sizeof(jint)));
  }

  // This instruction can be deoptimized in the slow path : use
  // O0 as result register.
  const LIR_Opr reg = result_register_for(x->type());
  CodeEmitInfo* info = state_for(x, x->state());

  jobject2reg_with_patching(reg, x->klass(), patching_info);
  // runtime call arguments: klass (reg), rank, and a pointer to the
  // dimension words stored on the stack above
  LIR_Opr rank = FrameMap::O1_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::as_pointer_opr(O2);
  int offset_from_sp = (frame::memory_parameter_word_sp_offset * wordSize) + STACK_BIAS;
  __ add(FrameMap::SP_opr, LIR_OprFact::intptrConst(offset_from_sp), varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(reg);
  args->append(rank);
  args->append(varargs);
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // copy out of the fixed result register into a freshly locked operand
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// _ineg, _lneg, _fneg, _dneg void LIRGenerator::do_NegateOp(NegateOp* x) { LIRItem value(x->x(), this); value.set_destroys_register(); value.load_item(); RInfo reg = rlock_result(x)->rinfo(); emit()->negate(reg, value.result()); round_item(x->operand()); }
// _iand, _land, _ior, _lor, _ixor, _lxor void LIRGenerator::do_LogicOp(LogicOp* x) { LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.load_item(); right.load_nonconstant(); LIR_Opr reg = rlock_result(x); logic_op(x->op(), reg, left.result(), right.result()); }
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  // All math intrinsics handled here are unary.
  assert(x->number_of_arguments() == 1, "wrong type");

  // The intrinsic consumes its argument register.
  LIRItem arg(x->argument_at(0), this);
  arg.set_destroys_register();
  arg.load_item();

  // Lock a result register and dispatch on the intrinsic id.
  RInfo dst = rlock_result(x)->rinfo();
  emit()->math_intrinsic(x->id(), dst, arg.result());

  // presumably narrows extended FPU precision in the result — see round_item
  round_item(x->operand());
}
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    // abs and sqrt are emitted inline
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    }
    // the transcendental functions are routed through SharedRuntime calls
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
    }
  }
}
// _iand, _land, _ior, _lor, _ixor, _lxor void LIRGenerator::do_LogicOp(LogicOp* x) { // missing test if instr is commutative and if we should swap LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.set_destroys_register(); left.load_item(); right.load_nonconstant(); RInfo reg = rlock_result(x)->rinfo(); emit()->logic_op(x->op(), reg, left.result(), right.result()); }
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem value(x->argument_at(0), this);

  // With UseSSE >= 2, sin/cos/tan/log/log10 are still computed on the FPU
  // stack (see the fpu0 shuffle below), so the input must be routed there.
  bool use_fpu = false;
  if (UseSSE >= 2) {
    switch(x->id()) {
      case vmIntrinsics::_dsin:
      case vmIntrinsics::_dcos:
      case vmIntrinsics::_dtan:
      case vmIntrinsics::_dlog:
      case vmIntrinsics::_dlog10:
        use_fpu = true;
    }
  } else {
    // without SSE2 the operation consumes its input register
    value.set_destroys_register();
  }
  value.load_item();

  LIR_Opr calc_input = value.result();
  LIR_Opr calc_result = rlock_result(x);

  // sin and cos need two free fpu stack slots, so register two temporary operands
  LIR_Opr tmp1 = FrameMap::caller_save_fpu_reg_at(0);
  LIR_Opr tmp2 = FrameMap::caller_save_fpu_reg_at(1);

  if (use_fpu) {
    // compute in fpu0 and shift the temps up by one slot
    LIR_Opr tmp = FrameMap::fpu0_double_opr;
    __ move(calc_input, tmp);

    calc_input = tmp;
    calc_result = tmp;
    tmp1 = FrameMap::caller_save_fpu_reg_at(1);
    tmp2 = FrameMap::caller_save_fpu_reg_at(2);
  }

  switch(x->id()) {
    case vmIntrinsics::_dabs:   __ abs  (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    case vmIntrinsics::_dsqrt:  __ sqrt (calc_input, calc_result, LIR_OprFact::illegalOpr); break;
    case vmIntrinsics::_dsin:   __ sin  (calc_input, calc_result, tmp1, tmp2);              break;
    case vmIntrinsics::_dcos:   __ cos  (calc_input, calc_result, tmp1, tmp2);              break;
    case vmIntrinsics::_dtan:   __ tan  (calc_input, calc_result, tmp1, tmp2);              break;
    case vmIntrinsics::_dlog:   __ log  (calc_input, calc_result, tmp1);                    break;
    case vmIntrinsics::_dlog10: __ log10(calc_input, calc_result, tmp1);                    break;
    default:                    ShouldNotReachHere();
  }

  if (use_fpu) {
    // copy the fpu-stack result back into the locked result operand
    __ move(calc_result, x->operand());
  }
}
// for _ladd, _lmul, _lsub, _ldiv, _lrem void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { switch (x->op()) { case Bytecodes::_lrem: case Bytecodes::_lmul: case Bytecodes::_ldiv: { if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) { LIRItem right(x->y(), this); right.load_item(); CodeEmitInfo* info = state_for(x); LIR_Opr item = right.result(); assert(item->is_register(), "must be"); __ cmp(lir_cond_equal, item, LIR_OprFact::longConst(0)); __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info)); } address entry; switch (x->op()) { case Bytecodes::_lrem: entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem); break; // check if dividend is 0 is done elsewhere case Bytecodes::_ldiv: entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv); break; // check if dividend is 0 is done elsewhere case Bytecodes::_lmul: entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul); break; default: ShouldNotReachHere(); } // order of arguments to runtime call is reversed. LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL); set_result(x, result); break; } case Bytecodes::_ladd: case Bytecodes::_lsub: { LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.load_item(); right.load_item(); rlock_result(x); arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); break; } default: ShouldNotReachHere(); } }
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  // ensure the result register is not the input register because the result
  // is initialized before the patching safepoint
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  // fixed global registers used as temps by the type-check code
  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3, x->direct_compare(), patching_info);
}
void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  // wrap each dimension expression in a LIRItem, preserving index order
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers.  This is handled transparently in other
    // places by the CodeEmitInfo cloning logic but is handled
    // specially here because a stub isn't being used.
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  // store the dimensions as outgoing stack parameters for the runtime call
  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_nonconstant();

    store_stack_parameter(size->result(), in_ByteSize(i*4));
  }

  LIR_Opr reg = result_register_for(x->type());
  jobject2reg_with_patching(reg, x->klass(), patching_info);

  // runtime call arguments: klass (reg), rank, and a pointer (varargs) to
  // the dimension words just stored at the top of the stack
  LIR_Opr rank = FrameMap::rbx_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::rcx_opr;
  __ move(FrameMap::rsp_opr, varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(reg);
  args->append(rank);
  args->append(varargs);
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  // copy out of the fixed result register into a freshly locked operand
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// _iand, _land, _ior, _lor, _ixor, _lxor void LIRGenerator::do_LogicOp(LogicOp* x) { // when an operand with use count 1 is the left operand, then it is // likely that no move for 2-operand-LIR-form is necessary if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) { x->swap_operands(); } LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.load_item(); right.load_nonconstant(); LIR_Opr reg = rlock_result(x); logic_op(x->op(), reg, left.result(), right.result()); }
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg void LIRGenerator::do_CompareOp(CompareOp* x) { LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.load_item(); right.load_item(); LIR_Opr reg = rlock_result(x); if (x->x()->type()->is_float_kind()) { Bytecodes::Code code = x->op(); __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl)); } else if (x->x()->type()->tag() == longTag) { __ lcmp2int(left.result(), right.result(), reg); } else { Unimplemented(); } }
// for _ladd, _lmul, _lsub, _ldiv, _lrem void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) { if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem ) { // this is a call LIRItem left(x->x(), this); LIRItem right(x->y(), this); right.set_destroys_register(); // actually: only the div_by_zero destroys register left.load_item(); emit()->push_item(left.result()); right.load_item(); emit()->push_item(right.result()); CodeEmitInfo* info = state_for(x); emit()->explicit_div_by_zero_check(right.result(), info); set_with_result_register(x); emit()->arithmetic_call_op(x->op(), norinfo); } else if (x->op() == Bytecodes::_lmul) { // missing test if instr is commutative and if we should swap LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.set_destroys_register(); left.load_item(); right.load_item(); // WARNING: for longs, we must guarantee (algorithmically) that the locked lo result // register is not the same as the left-HI register , otherwise we // would overwrite the results. // set_result(x, FrameMap::_eax_edxRInfo); emit()->arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); } else { // missing test if instr is commutative and if we should swap LIRItem left(x->x(), this); LIRItem right(x->y(), this); left.set_destroys_register(); left.load_item(); right.load_item(); // WARNING: for longs, we must guarantee (algorithmically) that the locked lo result // register is not the same as the left-HI register , otherwise we // would overwrite the results. // RInfo reg = rlock_result(x)->rinfo(); emit()->arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL); } }
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg void LIRGenerator::do_CompareOp(CompareOp* x) { LIRItem left(x->x(), this); LIRItem right(x->y(), this); ValueTag tag = x->x()->type()->tag(); if (tag == floatTag || tag == doubleTag) { left.set_destroys_register(); right.set_destroys_register(); } else { if (tag == longTag) { left.set_destroys_register(); } } left.load_item(); right.load_item(); RInfo reg = rlock_result(x)->rinfo(); emit()->compare_op(x->op(), reg, left.result(), right.result()); }
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  // Use temps to avoid kills
  LIR_Opr t1 = FrameMap::G1_opr;
  LIR_Opr t2 = FrameMap::G3_opr;
  LIR_Opr addr = new_pointer_register();

  // get address of field
  obj.load_item();
  offset.load_item();
  cmp.load_item();
  val.load_item();

  __ add(obj.result(), offset.result(), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    // pre-barrier must run before the store performed by the CAS
    pre_barrier(obj.result(), false, NULL);
  }

  // dispatch on the field type; the CAS sets the equal condition on success
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), t1, t2);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), t1, t2);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), t1, t2);
  else {
    ShouldNotReachHere();
  }

  // generate conditional move of boolean result
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);

  if (type == objectType) {  // Write-barrier needed for Object fields.
#ifdef PRECISE_CARDMARK
    // precise mark: dirty the card for the exact field address
    post_barrier(addr, val.result());
#else
    post_barrier(obj.result(), val.result());
#endif // PRECISE_CARDMARK
  }
}
void LIRGenerator::do_NewInstance(NewInstance* x) {
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr reg = result_register_for(x->type());
  // NOTE(review): klass_reg is allocated but new_instance below is passed
  // FrameMap::rdx_oop_opr instead — confirm whether this local is vestigial.
  LIR_Opr klass_reg = new_register(objectType);
  // allocation with fixed scratch registers; info covers the slow path
  new_instance(reg, x->klass(),
               FrameMap::rcx_oop_opr,
               FrameMap::rdi_oop_opr,
               FrameMap::rsi_oop_opr,
               LIR_OprFact::illegalOpr,
               FrameMap::rdx_oop_opr, info);
  // copy out of the fixed result register into a freshly locked operand
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr void LIRGenerator::do_ShiftOp(ShiftOp* x) { // count must always be in rcx LIRItem value(x->x(), this); LIRItem count(x->y(), this); ValueTag elemType = x->type()->tag(); bool must_load_count = !count.is_constant() || elemType == longTag; if (must_load_count) { // count for long must be in register count.load_item_force(shiftCountOpr()); } else { count.dont_load_item(); } value.load_item(); LIR_Opr reg = rlock_result(x); shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr); }
void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path : use
  // O0 as result register.
  const LIR_Opr reg = result_register_for(x->type());

  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr(" ###class not loaded at new bci %d", x->bci());
  }
  CodeEmitInfo* info = state_for(x, x->state());
  // fixed global/output registers used by the allocation path
  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  LIR_Opr tmp4 = FrameMap::O1_oop_opr;
  LIR_Opr klass_reg = FrameMap::G5_oop_opr;
  new_instance(reg, x->klass(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);
  // copy out of the fixed result register into a freshly locked operand
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    // NOTE(review): the result register was already locked above, which seems
    // to contradict this comment — confirm the intended ordering.
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedOops) {
    // extra temp needed when the klass must be patched or compressed oops
    // are in use
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(), new_register(objectType), new_register(objectType), tmp3, x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr void LIRGenerator::do_ShiftOp(ShiftOp* x) { LIRItem value(x->x(), this); LIRItem count(x->y(), this); // Long shift destroys count register if (value.type()->is_long()) { count.set_destroys_register(); } value.load_item(); // the old backend doesn't support this if (count.is_constant() && count.type()->as_IntConstant() != NULL && value.type()->is_int()) { jint c = count.get_jint_constant() & 0x1f; assert(c >= 0 && c < 32, "should be small"); count.dont_load_item(); } else { count.load_item(); } LIR_Opr reg = rlock_result(x); shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr); }
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  // all values are spilled to the stack at this instruction's state
  spill_values_on_stack(x->state());

  LIRItem obj(x->obj(), this);
  obj.set_destroys_register();

  // result and test object may not be in same register
  RInfo reg = rlock_result(x)->rinfo();
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  // do not include tmp in the oop map
  if (patching_info != NULL) {
    // object must be part of the oop map
    patching_info->add_register_oop(obj.result()->rinfo());
  }
  RInfo tmp = new_register(objectType)->rinfo();
  emit()->instanceof_op(LIR_OprFact::rinfo(reg, T_OBJECT), obj.result(), x->klass(), tmp, obj.result()->rinfo(), x->direct_compare(), patching_info);
}