void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
  LIR_Opr lock = new_register(T_INT);
  // Need a scratch register for biased locking on x86
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}
void LIRGenerator::invoke_do_result(Invoke* x, bool needs_null_check, LIR_Opr receiver) {
  // setup result register
  if (x->type()->is_void()) {
    set_no_result(x);
  } else {
    RInfo reg = set_with_result_register(x)->rinfo();
  }
  // emit invoke code
  CodeEmitInfo* info = state_for(x, x->state());
  bool optimized = x->target_is_loaded() && x->target_is_final();
  // The current backend doesn't support vtable calls, so pass -1
  // instead of x->vtable_index();
  emit()->call_op(x->code(), NULL, -1, info, optimized, needs_null_check,
                  receiverRInfo(), x->operand()); // does add_safepoint

  if (x->type()->is_float() || x->type()->is_double()) {
    emit()->set_fpu_result(x->operand()->rinfo());

    // Force rounding of results from non-strictfp when in strictfp
    // scope (or when we don't know the strictness of the callee, to
    // be safe.)
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        round_item(x->operand());
      }
    }
  }
}
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Note: spill caller save before setting the item
  LIRItem src     (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst     (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length  (x->argument_at(4), this);

  // load all values in callee_save_registers, as this makes the
  // parameter passing to the fast case simpler
  src.load_item_force     (rlock_callee_saved(T_OBJECT));
  src_pos.load_item_force (rlock_callee_saved(T_INT));
  dst.load_item_force     (rlock_callee_saved(T_OBJECT));
  dst_pos.load_item_force (rlock_callee_saved(T_INT));
  length.load_item_force  (rlock_callee_saved(T_INT));

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  CodeEmitInfo* info = state_for(x, x->state());
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), rlock_callee_saved(T_INT),
               expected_type, flags, info);
  set_no_result(x);
}
void LIRGenerator::do_If(If* x) { assert(x->number_of_sux() == 2, "inconsistency"); ValueTag tag = x->x()->type()->tag(); bool is_safepoint = x->is_safepoint(); If::Condition cond = x->cond(); LIRItem xitem(x->x(), this); LIRItem yitem(x->y(), this); LIRItem* xin = &xitem; LIRItem* yin = &yitem; if (tag == longTag) { // for longs, only conditions "eql", "neq", "lss", "geq" are valid; // mirror for other conditions if (cond == If::gtr || cond == If::leq) { cond = Instruction::mirror(cond); xin = &yitem; yin = &xitem; } xin->set_destroys_register(); } xin->load_item(); if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) { // inline long zero yin->dont_load_item(); } else if (tag == longTag || tag == floatTag || tag == doubleTag) { // longs cannot handle constants at right side yin->load_item(); } else { yin->dont_load_item(); } // add safepoint before generating condition code so it can be recomputed if (x->is_safepoint()) { // increment backedge counter if needed increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci()); __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before())); } set_no_result(x); LIR_Opr left = xin->result(); LIR_Opr right = yin->result(); __ cmp(lir_cond(cond), left, right); // Generate branch profiling. Profiling code doesn't kill flags. profile_branch(x, cond); move_to_phi(x->state()); if (x->x()->type()->is_float_kind()) { __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux()); } else { __ branch(lir_cond(cond), right->type(), x->tsux()); } assert(x->default_sux() == x->fsux(), "wrong destination above"); __ jump(x->default_sux()); }
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_root(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  set_no_result(x);
  LIR_Opr lock     = FrameMap::G1_opr;
  LIR_Opr hdr      = FrameMap::G3_opr;
  LIR_Opr obj_temp = FrameMap::G4_opr;
  monitor_exit(obj_temp, lock, hdr, x->monitor_no());
}
void LIRGenerator::do_If(If* x) { assert(x->number_of_sux() == 2, "inconsistency"); ValueTag tag = x->x()->type()->tag(); bool is_safepoint = x->is_safepoint(); If::Condition cond = x->cond(); LIRItem xitem(x->x(), this); LIRItem yitem(x->y(), this); LIRItem* xin = &xitem; LIRItem* yin = &yitem; if (tag == longTag) { // for longs, only conditions "eql", "neq", "lss", "geq" are valid; // mirror for other conditions if (cond == If::gtr || cond == If::leq) { cond = Instruction::mirror(cond); xin = &yitem; yin = &xitem; } xin->set_destroys_register(); } if (tag == floatTag || tag == doubleTag) { xin->set_destroys_register(); yin->set_destroys_register(); } xin->load_item(); if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) { // inline long zero yin->dont_load_item(); } else if (tag == longTag || tag == floatTag || tag == doubleTag) { // longs cannot handle constants at right side yin->load_item(); } else { yin->dont_load_item(); } // note that the condition test must happen before the // moves into Phi area happen, and that the control flow // jump must happen after the moves into the phi area set_no_result(x); if (x->is_safepoint()) { CodeEmitInfo* info_before = state_for(x, x->state_before()); emit()->safepoint_nop(info_before); } emit()->if_op(1, cond, xin->result(), yin->result(), x->tsux(), x->fsux(), x->usux()); move_to_phi(x->state()); emit()->if_op(2, cond, xin->result(), yin->result(), x->tsux(), x->fsux(), x->usux()); goto_default_successor(x); }
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  spill_values_on_stack(x->state());
  assert(x->is_root(),"");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  RInfo lock     = new_register(T_INT)->rinfo();
  RInfo obj_temp = new_register(T_INT)->rinfo();
  set_no_result(x);
  emit()->monitor_exit(obj_temp, lock, syncTempRInfo(), x->monitor_no());
}
void LIRGenerator::do_ArrayCopy(Intrinsic* x) { assert(x->number_of_arguments() == 5, "wrong type"); // Make all state_for calls early since they can emit code CodeEmitInfo* info = state_for(x, x->state()); LIRItem src(x->argument_at(0), this); LIRItem src_pos(x->argument_at(1), this); LIRItem dst(x->argument_at(2), this); LIRItem dst_pos(x->argument_at(3), this); LIRItem length(x->argument_at(4), this); // operands for arraycopy must use fixed registers, otherwise // LinearScan will fail allocation (because arraycopy always needs a // call) #ifndef _LP64 src.load_item_force (FrameMap::rcx_oop_opr); src_pos.load_item_force (FrameMap::rdx_opr); dst.load_item_force (FrameMap::rax_oop_opr); dst_pos.load_item_force (FrameMap::rbx_opr); length.load_item_force (FrameMap::rdi_opr); LIR_Opr tmp = (FrameMap::rsi_opr); #else // The java calling convention will give us enough registers // so that on the stub side the args will be perfect already. // On the other slow/special case side we call C and the arg // positions are not similar enough to pick one as the best. // Also because the java calling convention is a "shifted" version // of the C convention we can process the java args trivially into C // args without worry of overwriting during the xfer src.load_item_force (FrameMap::as_oop_opr(j_rarg0)); src_pos.load_item_force (FrameMap::as_opr(j_rarg1)); dst.load_item_force (FrameMap::as_oop_opr(j_rarg2)); dst_pos.load_item_force (FrameMap::as_opr(j_rarg3)); length.load_item_force (FrameMap::as_opr(j_rarg4)); LIR_Opr tmp = FrameMap::as_opr(j_rarg5); #endif // LP64 set_no_result(x); int flags; ciArrayKlass* expected_type; arraycopy_helper(x, &flags, &expected_type); __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint }
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  spill_values_on_stack(x->state());
  assert(x->is_root(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();
  set_no_result(x);

  RInfo lock = new_register(T_OBJECT)->rinfo();

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x, x->lock_stack_before());
  }
  CodeEmitInfo* info = state_for(x, x->state());
  emit()->monitor_enter(obj.get_register(), lock, syncTempRInfo(), norinfo,
                        x->monitor_no(), info_for_exception, info);
}
void LIRGenerator::do_Return(Return* x) {
  if (x->type()->is_void()) {
    if (x->is_synchronized()) {
      emit()->return_op_prolog(x->monitor_no());
    }
    emit()->return_op(LIR_OprFact::illegalOpr);
  } else {
    RInfo reg = result_register_for(x->type())->rinfo();
    LIRItem result(x->result(), this);
    result.handle_float_kind();
    if (x->is_synchronized()) {
      emit()->return_op_prolog(x->monitor_no());
    }
    result.load_item_force(reg);
    emit()->return_op(result.result());
  }
  set_no_result(x);
}
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  spill_values_on_stack(x->state());
  assert(x->number_of_arguments() == 5, "wrong type");
  LIRItem src(x->argument_at(0), this);
  LIRItem src_pos(x->argument_at(1), this);
  LIRItem dst(x->argument_at(2), this);
  LIRItem dst_pos(x->argument_at(3), this);
  LIRItem length(x->argument_at(4), this);

  src.load_item();
  src_pos.load_item();
  dst.load_item();
  dst_pos.load_item();
  length.load_item();

  RInfo tmp = new_register(T_INT)->rinfo();
  set_no_result(x);

  CodeEmitInfo* info = state_for(x, x->state()); // we may want to have stack (deoptimization?)
  emit()->arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
                    length.result(), tmp, false, NULL, info); // does add_safepoint
}
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_root(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  LIR_Opr lock    = FrameMap::G1_opr;
  LIR_Opr scratch = FrameMap::G3_opr;
  LIR_Opr hdr     = FrameMap::G4_opr;

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x, x->lock_stack_before());
  }

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = true;
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info);
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), false, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    __ move(value.result(), array_addr, null_check_info);
  }
}
void LIRGenerator::do_If(If* x) { assert(x->number_of_sux() == 2, "inconsistency"); ValueTag tag = x->x()->type()->tag(); LIRItem xitem(x->x(), this); LIRItem yitem(x->y(), this); LIRItem* xin = &xitem; LIRItem* yin = &yitem; If::Condition cond = x->cond(); if (tag == longTag) { // for longs, only conditions "eql", "neq", "lss", "geq" are valid; // mirror for other conditions if (cond == If::gtr || cond == If::leq) { // swap inputs cond = Instruction::mirror(cond); xin = &yitem; yin = &xitem; } xin->set_destroys_register(); } LIR_Opr left = LIR_OprFact::illegalOpr; LIR_Opr right = LIR_OprFact::illegalOpr; xin->load_item(); left = xin->result(); if (is_simm13(yin->result())) { // inline int constants which are small enough to be immediate operands right = LIR_OprFact::value_type(yin->value()->type()); } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) { // inline long zero right = LIR_OprFact::value_type(yin->value()->type()); } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) { right = LIR_OprFact::value_type(yin->value()->type()); } else { yin->load_item(); right = yin->result(); } set_no_result(x); // add safepoint before generating condition code so it can be recomputed if (x->is_safepoint()) { // increment backedge counter if needed increment_backedge_counter(state_for(x, x->state_before())); __ safepoint(new_register(T_INT), state_for(x, x->state_before())); } __ cmp(lir_cond(cond), left, right); profile_branch(x, cond); move_to_phi(x->state()); if (x->x()->type()->is_float_kind()) { __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux()); } else { __ branch(lir_cond(cond), right->type(), x->tsux()); } assert(x->default_sux() == x->fsux(), "wrong destination above"); __ jump(x->default_sux()); }
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_root(),"");
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);

  if (obj_store) {
    array.set_destroys_register();
  }
  value.handle_float_kind();
  array.load_item();
  index.load_nonconstant();

  bool must_load = true;
  if (x->elt_type() == T_SHORT || x->elt_type() == T_CHAR) {
    // there is no immediate move of word values in assembler_i486.?pp
    must_load = true;
  } else {
    if (obj_store && value.is_constant() && !get_jobject_constant(x->value())->is_loaded()) {
      // do nothing; do not load (NULL object)
      must_load = false;
    } else if (value.is_constant() && !obj_store) {
      // array store check needs a register, otherwise do not load a constant
      must_load = false;
    }
  }
  if (must_load) {
    // for T_BYTE element type, we must have a byte register free
    if (x->elt_type() == T_BYTE || x->elt_type() == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  }

  set_no_result(x);

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  bool needs_null_check = x->needs_null_check();
  if (needs_null_check) {
    null_check_info = range_check_info;
  }
  if (GenerateRangeChecks) {
    emit()->array_range_check(array.result(), index.result(), null_check_info, range_check_info);
    // range_check also does the null check
    needs_null_check = false;
  }

  if (obj_store) {
    if (value.is_constant() && !get_jobject_constant(x->value())->is_loaded()) {
      // skip store check
    } else if (GenerateArrayStoreCheck) {
      RInfo tmp1 = new_register(objectType)->rinfo();
      RInfo tmp2 = new_register(objectType)->rinfo();
      RInfo tmp3 = new_register(objectType)->rinfo();

      emit()->array_store_check(array.result(), value.result(), tmp1, tmp2, tmp3, range_check_info);
    }
  }
  emit()->indexed_store(x->elt_type(), array.result(), index.result(), value.result(), norinfo, null_check_info);
}