// Finishes an Invoke: binds the result register (if any), emits the call
// instruction, and applies strictfp result rounding when required.
//   x                - the invoke node being compiled
//   needs_null_check - whether the receiver must be null-checked at the call
//   receiver         - receiver operand (see NOTE below)
void LIRGenerator::invoke_do_result(Invoke* x, bool needs_null_check, LIR_Opr receiver) {
  // setup result register
  if (x->type()->is_void()) {
    set_no_result(x);
  } else {
    // Bind the canonical result register for the call's type. The original
    // stored the returned RInfo in an unused local; only the side effect of
    // set_with_result_register() is needed here.
    set_with_result_register(x);
  }

  // emit invoke code
  CodeEmitInfo* info = state_for(x, x->state());

  bool optimized = x->target_is_loaded() && x->target_is_final();

  // The current backend doesn't support vtable calls, so pass -1
  // instead of x->vtable_index();
  // NOTE(review): `receiverRInfo()` may be a line-joining garble of
  // `receiver->rinfo()` -- the `receiver` parameter is otherwise unused in
  // this body; confirm against the original sources before relying on it.
  emit()->call_op(x->code(), NULL, -1, info, optimized, needs_null_check, receiverRInfo(), x->operand()); // does add_safepoint

  if (x->type()->is_float() || x->type()->is_double()) {
    emit()->set_fpu_result(x->operand()->rinfo());

    // Force rounding of results from non-strictfp when in strictfp
    // scope (or when we don't know the strictness of the callee, to
    // be safe.)
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        round_item(x->operand());
      }
    }
  }
}
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f // _i2b, _i2c, _i2s void LIRGenerator::do_Convert(Convert* x) { if (x->op() == Bytecodes::_f2i || x->op() == Bytecodes::_d2i) { LIRItem value(x->value(), this); value.set_destroys_register(); value.load_item(); RInfo reg = set_with_result_register(x)->rinfo(); emit()->convert_op(x->op(), value.result(), reg, is_32bit_mode()); } else { LIRItem value(x->value(), this); value.handle_float_kind(); if (value.is_constant()) { value.load_item(); } else if (x->op() != Bytecodes::_i2f && x->op() != Bytecodes::_i2d && x->op() != Bytecodes::_l2f && x->op() != Bytecodes::_l2d) { value.load_item(); } else { value.dont_load_item(); } RInfo reg; if (x->op() == Bytecodes::_f2l || x->op() == Bytecodes::_d2l) { reg = FrameMap::_eax_edxRInfo; set_result(x, reg); } else { reg = rlock_result(x)->rinfo(); } emit()->convert_op(x->op(), value.result(), reg); round_item(x->operand()); } }
// for _fadd, _fmul, _fsub, _fdiv, _frem // _dadd, _dmul, _dsub, _ddiv, _drem void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) { // Because of the way we simulate FPU register location in code emitter, we must // mark both items as being destroyed(i.e., they are going to be removed from the FPU stack) LIRItem left(x->x(), this); LIRItem right(x->y(), this); right.set_destroys_register(); left.set_destroys_register(); LIRItem* left_arg = &left; LIRItem* right_arg = &right; if (x->is_commutative() && left.is_stack() && right.is_register()) { // swap them if left is real stack (or cached) and right is real register(not cached) left_arg = &right; right_arg = &left; } left_arg->load_item(); // do not need to load right, as we can handle stack and constants if ( x->y()->type()->is_constant() || x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem) { // cannot handle inlined constants right_arg->load_item(); } else { right_arg->dont_load_item(); } RInfo reg = rlock_result(x)->rinfo(); emit()->arithmetic_op_fpu(x->op(), x->operand(), left_arg->result(), right_arg->result(), x->is_strictfp()); round_item(x->operand()); }
// _ineg, _lneg, _fneg, _dneg void LIRGenerator::do_NegateOp(NegateOp* x) { LIRItem value(x->x(), this); value.set_destroys_register(); value.load_item(); LIR_Opr reg = rlock(x); __ negate(value.result(), reg); set_result(x, round_item(reg)); }
// _ineg, _lneg, _fneg, _dneg void LIRGenerator::do_NegateOp(NegateOp* x) { LIRItem value(x->x(), this); value.set_destroys_register(); value.load_item(); RInfo reg = rlock_result(x)->rinfo(); emit()->negate(reg, value.result()); round_item(x->operand()); }
// Emits code for a one-argument math intrinsic (dispatched on x->id()).
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  // The intrinsic instruction consumes its operand, so mark it destroyed.
  LIRItem arg(x->argument_at(0), this);
  arg.set_destroys_register();
  arg.load_item();
  RInfo result_reg = rlock_result(x)->rinfo();
  emit()->math_intrinsic(x->id(), result_reg, arg.result());
  // Round the result where FP rounding is required.
  round_item(x->operand());
}
// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
// SSE-aware variant: operands may come from SSE registers or the x87 stack
// depending on UseSSE; frem/drem always go through the x87 FPU.
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  LIRItem* left_arg = &left;
  LIRItem* right_arg = &right;
  assert(!left.is_stack() || !right.is_stack(), "can't both be memory operands");

  // frem/drem are implemented with the x87 remainder instruction, which needs
  // both operands in (fpu) registers.
  bool must_load_both = (x->op() == Bytecodes::_frem || x->op() == Bytecodes::_drem);
  if (left.is_register() || x->x()->type()->is_constant() || must_load_both) {
    left.load_item();
  } else {
    left.dont_load_item();
  }

  // do not load right operand if it is a constant.  only 0 and 1 are
  // loaded because there are special instructions for loading them
  // without memory access (not needed for SSE2 instructions)
  bool must_load_right = false;
  if (right.is_constant()) {
    LIR_Const* c = right.result()->as_constant_ptr();
    assert(c != NULL, "invalid constant");
    assert(c->type() == T_FLOAT || c->type() == T_DOUBLE, "invalid type");
    if (c->type() == T_FLOAT) {
      must_load_right = UseSSE < 1 && (c->is_one_float() || c->is_zero_float());
    } else {
      must_load_right = UseSSE < 2 && (c->is_one_double() || c->is_zero_double());
    }
  }

  if (must_load_both) {
    // frem and drem destroy also right operand, so move it to a new register
    right.set_destroys_register();
    right.load_item();
  } else if (right.is_register() || must_load_right) {
    right.load_item();
  } else {
    right.dont_load_item();
  }

  LIR_Opr reg = rlock(x);
  // strictfp dmul/ddiv need a scratch double register (presumably for the
  // extra scaling/rounding steps of strict FP -- confirm in arithmetic_op_fpu).
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  if (x->is_strictfp() && (x->op() == Bytecodes::_dmul || x->op() == Bytecodes::_ddiv)) {
    tmp = new_register(T_DOUBLE);
  }

  if ((UseSSE >= 1 && x->op() == Bytecodes::_frem) || (UseSSE >= 2 && x->op() == Bytecodes::_drem)) {
    // special handling for frem and drem: no SSE instruction, so must use FPU with temporary fpu stack slots
    LIR_Opr fpu0, fpu1;
    if (x->op() == Bytecodes::_frem) {
      fpu0 = LIR_OprFact::single_fpu(0);
      fpu1 = LIR_OprFact::single_fpu(1);
    } else {
      fpu0 = LIR_OprFact::double_fpu(0);
      fpu1 = LIR_OprFact::double_fpu(1);
    }
    __ move(right.result(), fpu1); // order of left and right operand is important!
    __ move(left.result(), fpu0);
    __ rem (fpu0, fpu1, fpu0);
    __ move(fpu0, reg);
  } else {
    arithmetic_op_fpu(x->op(), reg, left.result(), right.result(), x->is_strictfp(), tmp);
  }

  // Round the result where FP rounding is required, then publish it.
  set_result(x, round_item(reg));
}