// Returns true if item is an int constant that can be represented as a simm13.
static bool is_simm13(LIR_Opr item) {
  if (item->is_constant() && item->type() == T_INT) {
    return Assembler::is_simm13(item->as_constant_ptr()->as_jint());
  } else {
    return false;
  }
}
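// Usage sketch (hypothetical helper, not part of the original file): a code
// generator can consult is_simm13 to keep small int constants inline as
// immediates instead of materializing them in a register. load_item() and
// dont_load_item() are the standard LIRItem protocol; the helper name is ours.
static void load_rhs_sketch(LIRItem& right) {
  if (is_simm13(right.result())) {
    right.dont_load_item();  // fits the 13-bit signed immediate field
  } else {
    right.load_item();       // too large for an immediate: force into a register
  }
}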
// Version that _does_ generate a load of the previous value from addr.
// addr (the address of the field to be read) must be a LIR_Address;
// pre_val (a temporary register) must be a register.
G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
  _addr(addr), _pre_val(pre_val), _do_load(true), _patch_code(patch_code), _info(info)
{
  assert(_pre_val->is_register(), "should be temporary register");
  assert(_addr->is_address(), "should be the address of the field");
}
FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_size) {
  assert(_init_done, "should already be completed");

  _framesize = -1;
  _num_spills = -1;

  assert(monitors >= 0, "not set");
  _num_monitors = monitors;
  assert(reserved_argument_area_size >= 0, "not set");
  _reserved_argument_area_size = MAX2(4, reserved_argument_area_size) * BytesPerWord;

  _argcount = method->arg_size();
  _argument_locations = new intArray(_argcount, -1);
  _incoming_arguments = java_calling_convention(signature_type_array_for(method), false);
  _oop_map_arg_count = _incoming_arguments->reserved_stack_slots();

  int java_index = 0;
  for (int i = 0; i < _incoming_arguments->length(); i++) {
    LIR_Opr opr = _incoming_arguments->at(i);
    if (opr->is_address()) {
      LIR_Address* address = opr->as_address_ptr();
      _argument_locations->at_put(java_index, address->disp() - STACK_BIAS);
      _incoming_arguments->args()->at_put(i, LIR_OprFact::stack(java_index, as_BasicType(as_ValueType(address->type()))));
    }
    java_index += type2size[opr->type()];
  }
}
void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");
  reg2stack(src, dest, src->type(), pop_fpu_stack);
}
bool LocalMapping::is_cache_reg(LIR_Opr opr) const {
  if (opr->is_register()) {
    return is_cache_reg(opr->rinfo());
  } else {
    return false;
  }
}
// Variant that leaves any constant operand in place and loads everything else.
void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (r->is_constant()) {
    _result = r;
  } else {
    load_item();
  }
}
void set_result(Value x, LIR_Opr opr) {
  assert(opr->is_valid(), "must set to valid value");
  assert(x->operand()->is_illegal(), "operand should never change");
  assert(!opr->is_register() || opr->is_virtual(), "should never set result to a physical register");
  x->set_operand(opr);
  assert(opr == x->operand(), "must be");
  if (opr->is_virtual()) {
    _instruction_for_operand.at_put_grow(opr->vreg_number(), x, NULL);
  }
}
void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  bool is_safepoint = x->is_safepoint();

  If::Condition cond = x->cond();

  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }
  xin->load_item();
  if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
      (cond == If::eql || cond == If::neq)) {
    // inline long zero
    yin->dont_load_item();
  } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
    // longs cannot handle constants on the right side
    yin->load_item();
  } else {
    yin->dont_load_item();
  }

  // add a safepoint before generating the condition code so it can be recomputed
  if (is_safepoint) {
    // increment the backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
  }
  set_no_result(x);

  LIR_Opr left = xin->result();
  LIR_Opr right = yin->result();
  __ cmp(lir_cond(cond), left, right);

  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}
// Variant that asks the generator whether the value can be inlined as a constant;
// if so, the (possibly rebuilt) constant operand is used directly.
void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}
LIR_Opr result() {
  assert(_destroys_register == not_destroyed || (!_result->is_register() || _result->is_virtual()),
         "shouldn't use set_destroys_register with physical registers");
  if (_destroys_register == awaiting_copy && _result->is_register()) {
    LIR_Opr new_result = _gen->new_register(type())->set_destroyed();
    gen()->lir()->move(_result, new_result);
    _destroys_register = destroyed;
    _result = new_result;
  }
  return _result;
}
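// Sketch of the assumed three-state protocol behind _destroys_register (the
// enum itself is declared elsewhere; the names are inferred from the uses above):
//   not_destroyed - the operation will not clobber its input; hand it out as-is.
//   awaiting_copy - set_destroys_register() was called; on the first result()
//                   request, copy the value into a fresh virtual register so
//                   the original operand survives the clobbering operation.
//   destroyed     - the defensive copy has been made; return the copy.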
LIR_Opr FpuStackAllocator::to_fpu_stack(LIR_Opr opr) {
  assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");

  int stack_offset = tos_offset(opr);
  if (opr->is_single_fpu()) {
    return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
  } else {
    assert(opr->is_double_fpu(), "shouldn't call this otherwise");
    return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
  }
}
LIR_Opr FpuStackAllocator::to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset) {
  assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
  assert(dont_check_offset || tos_offset(opr) == 0, "operand is not on stack top");

  int stack_offset = 0;
  if (opr->is_single_fpu()) {
    return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
  } else {
    assert(opr->is_double_fpu(), "shouldn't call this otherwise");
    return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
  }
}
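// Worked example (our illustration): if the simulated x87 stack holds three
// values and opr currently sits at offset 2, to_fpu_stack(opr) rewrites it as
// an operand at stack offset 2, while to_fpu_stack_top(opr, true) rewrites it
// as offset 0 on the assumption that an fxch has been (or will be) inserted
// to bring the value to the top of the stack first.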
// Handle fanout of fpu registers: returns false if there is no fanout.
// If there is a fanout, copy the value of the float register into a new one,
// so that the new FPU register has a ref-count of 1.
bool LIRGenerator::fpu_fanout_handled() {
  if (result()->is_register() && (value()->type()->is_float_kind())) {
    // The item is a float or double register with a use_count > 1
    LIR_Opr reg = rlock(value());
    emit()->copy_fpu_item(reg->rinfo(), result());
    set_result(value(), reg);
    return true;
  } else {
    return false;
  }
}
void FpuStackAllocator::clear_fpu_stack(LIR_Opr preserve) {
  int result_stack_size = (preserve->is_fpu_register() && !preserve->is_xmm_register() ? 1 : 0);
  while (sim()->stack_size() > result_stack_size) {
    assert(!sim()->slot_is_empty(0), "not allowed");

    if (result_stack_size == 0 || sim()->get_slot(0) != fpu_num(preserve)) {
      insert_free(0);
    } else {
      // move "preserve" to the bottom of the stack so that all other stack slots can be popped
      insert_exchange(sim()->stack_size() - 1);
    }
  }
}
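// Worked trace (our illustration): with a simulated stack [p, a, b] (p on top)
// and preserve == p, the loop first exchanges p with the bottom slot, giving
// [b, a, p], then frees b and a from the top, and terminates with only [p]
// left, i.e. stack_size() == result_stack_size == 1.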
// for _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  switch (x->op()) {
  case Bytecodes::_lrem:
  case Bytecodes::_lmul:
  case Bytecodes::_ldiv: {
    if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
      LIRItem right(x->y(), this);
      right.load_item();

      CodeEmitInfo* info = state_for(x);
      LIR_Opr item = right.result();
      assert(item->is_register(), "must be");
      __ cmp(lir_cond_equal, item, LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
    }

    address entry;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // check for divisor == 0 is done above
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // check for divisor == 0 is done above
    case Bytecodes::_lmul:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
      break;
    default:
      ShouldNotReachHere();
    }

    // order of arguments to runtime call is reversed.
    LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
    set_result(x, result);
    break;
  }
  case Bytecodes::_ladd:
  case Bytecodes::_lsub: {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    right.load_item();
    rlock_result(x);

    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
    break;
  }
  default:
    ShouldNotReachHere();
  }
}
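// Note on the reversed argument order (our reading of the code above):
// call_runtime passes x->y() first and x->x() second, which matches a runtime
// entry declared along the lines of SharedRuntime::lrem(divisor, dividend)
// computing dividend % divisor. The exact parameter names are an assumption;
// the reversal itself is what the "order of arguments" comment documents.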
// Item will be loaded into a byte register; Intel only
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();

  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);
    _result = reg;
  }
}
LIR_Opr result() {
  assert(!_destroys_register || (!_result->is_register() || _result->is_virtual()),
         "shouldn't use set_destroys_register with physical registers");
  if (_destroys_register && _result->is_register()) {
    if (_new_result->is_illegal()) {
      _new_result = _gen->new_register(type());
      gen()->lir()->move(_result, _new_result);
    }
    return _new_result;
  } else {
    return _result;
  }
}
virtual void visit(LIR_OpVisitState* visitor) {
  visitor->do_slow_case(_info);
  visitor->do_input(_klass_reg);
  visitor->do_input(_length);
  assert(_result->is_valid(), "must be valid");
  visitor->do_output(_result);
}
void LocalMapping::print() const {
  tty->print("cached");
  for (int i = 0; i < length(); i++) {
    RInfo reg = get_cache_reg(i);
    if (reg.is_valid()) {
      if (reg.is_double() || reg.is_long()) {
        tty->print(" [dbl_stack:%d]=", i);
      } else {
        tty->print(" [stack:%d]=", i);
      }
      LIR_Opr opr = LIR_OprFact::rinfo(reg);
      opr->print();
    }
  }
  tty->cr();
}
void FpuStackAllocator::handle_opCall(LIR_OpCall* opCall) {
  LIR_Opr res = opCall->result_opr();

  // clear the fpu stack before the call:
  // it may contain dead values that could not have been removed by previous operations
  clear_fpu_stack(LIR_OprFact::illegalOpr);
  assert(sim()->is_empty(), "fpu stack must be empty now");

  // compute debug information before (possible) fpu result is pushed
  compute_debug_information(opCall);

  if (res->is_fpu_register() && !res->is_xmm_register()) {
    do_push(res);
    opCall->set_result_opr(to_fpu_stack_top(res));
  }
}
LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
  case intTag:    opr = FrameMap::rax_opr;     break;
  case objectTag: opr = FrameMap::rax_oop_opr; break;
  case longTag:   opr = FrameMap::long0_opr;   break;
  case floatTag:  opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr  : FrameMap::fpu0_float_opr;  break;
  case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
  case addressTag:
  default:
    ShouldNotReachHere();
    return LIR_OprFact::illegalOpr;
  }
  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  if (is_volatile && type == T_LONG) {
    LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_DOUBLE);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(data, spill);
    __ move(spill, tmp);
    __ move(tmp, addr);
  } else {
    LIR_Address* addr = new LIR_Address(src, offset, type);
    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), false, NULL);
      __ move(data, addr);
      assert(src->is_register(), "must be register");
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    } else {
      __ move(data, addr);
    }
  }
}
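// Why the T_DOUBLE detour for volatile longs (our reading, assuming a 32-bit
// x86 target): a 64-bit integer store would otherwise be split into two
// 32-bit moves, which is not atomic. Funnelling the value through a
// memory-resident spill slot and a double register lets a single 64-bit
// FPU/SSE store satisfy the volatile-atomicity requirement for longs.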
CallingConvention* FrameMap::c_calling_convention(const BasicTypeArray* signature) {
  // compute the size of the arguments first. The signature array
  // that SharedRuntime::c_calling_convention takes includes a T_VOID
  // after double-word items but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::c_calling_convention(sig_bt, regs, NULL, sizeargs);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");

    // C calls are always outgoing
    bool outgoing = true;
    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    // they might be of different types if for instance floating point
    // values are passed in cpu registers, but the sizes must match.
    assert(type2size[opr->type()] == type2size[t], "type mismatch");
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }

  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();
  update_reserved_argument_area_size(out_preserve * BytesPerWord);
  return new CallingConvention(args, out_preserve);
}
void FpuStackAllocator::pop_if_last_use(LIR_Op* op, LIR_Opr opr) {
  assert(op->fpu_pop_count() == 0, "fpu_pop_count already set");
  assert(tos_offset(opr) == 0, "can only pop stack top");

  if (opr->is_last_use()) {
    op->set_fpu_pop_count(1);
    sim()->pop();
  }
}
CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signature, bool outgoing) {
  // compute the size of the arguments first. The signature array
  // that SharedRuntime::java_calling_convention takes includes a T_VOID
  // after double-word items but our signatures do not.
  int i;
  int sizeargs = 0;
  for (i = 0; i < signature->length(); i++) {
    sizeargs += type2size[signature->at(i)];
  }

  BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
  VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
  int sig_index = 0;
  for (i = 0; i < sizeargs; i++, sig_index++) {
    sig_bt[i] = signature->at(sig_index);
    if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
      sig_bt[i + 1] = T_VOID;
      i++;
    }
  }

  intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, outgoing);
  LIR_OprList* args = new LIR_OprList(signature->length());
  for (i = 0; i < sizeargs;) {
    BasicType t = sig_bt[i];
    assert(t != T_VOID, "should be skipping these");

    LIR_Opr opr = map_to_opr(t, regs + i, outgoing);
    args->append(opr);
    if (opr->is_address()) {
      LIR_Address* addr = opr->as_address_ptr();
      assert(addr->disp() == (int)addr->disp(), "out of range value");
      out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
    }
    i += type2size[t];
  }

  assert(args->length() == signature->length(), "size mismatch");
  out_preserve += SharedRuntime::out_preserve_stack_slots();

  if (outgoing) {
    // update the space reserved for arguments.
    update_reserved_argument_area_size(out_preserve * BytesPerWord);
  }
  return new CallingConvention(args, out_preserve);
}
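// Worked example (our illustration): for a method signature (long, int, double)
// the compact array {T_LONG, T_INT, T_DOUBLE} gives sizeargs == 5, and the
// expanded sig_bt handed to SharedRuntime becomes
//   {T_LONG, T_VOID, T_INT, T_DOUBLE, T_VOID}
// with one T_VOID filler after each two-slot (double-word) type.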
void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  if ((t == T_LONG || t == T_DOUBLE) && ((in_bytes(offset_from_sp) - STACK_BIAS) % 8 != 0)) {
    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  } else {
    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  }
}
void LIR_OopMapGenerator::clear_all(LIR_Opr cache_reg) {
  for (int i = local_mapping()->local_names_begin();
       i < local_mapping()->local_names_end();
       i++) {
    if (local_mapping()->is_local_name_cached_in_reg(i, cache_reg)) {
      clear(i);
      if (cache_reg->is_double_word()) {
        clear(i + 1);
      }
    }
  }
}
void traverse(BlockBegin* bb, LIR_OpList* inst) {
  int length = inst->length();
  for (int i = 0; i < length; i++) {
    LIR_Op* op = inst->at(i);
    _state.visit(op);
    for (LIR_OpVisitState::OprMode mode = LIR_OpVisitState::firstMode;
         mode < LIR_OpVisitState::numModes;
         mode = (LIR_OpVisitState::OprMode)(mode + 1)) {
      // use a distinct index so the operand loop doesn't shadow the instruction loop
      for (int j = 0; j < _state.opr_count(mode); j++) {
        LIR_Opr opr = _state.opr_at(mode, j);
        if (opr->is_register()) {
          _info->lock(opr->as_rinfo());
        }
      }
    }
    if (_state.has_call()) {
      _had_call = true;
    }
  }
}
void LIRGenerator::do_Convert(Convert* x) {
  // flags that vary for the different operations and different SSE-settings
  bool fixed_input, fixed_result, round_result, needs_stub;

  switch (x->op()) {
  case Bytecodes::_i2l: // fall through
  case Bytecodes::_l2i: // fall through
  case Bytecodes::_i2b: // fall through
  case Bytecodes::_i2c: // fall through
  case Bytecodes::_i2s: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;

  case Bytecodes::_f2d: fixed_input = UseSSE == 1; fixed_result = false;       round_result = false;      needs_stub = false; break;
  case Bytecodes::_d2f: fixed_input = false;       fixed_result = UseSSE == 1; round_result = UseSSE < 1; needs_stub = false; break;
  case Bytecodes::_i2f: fixed_input = false;       fixed_result = false;       round_result = UseSSE < 1; needs_stub = false; break;
  case Bytecodes::_i2d: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = false; break;
  case Bytecodes::_f2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
  case Bytecodes::_d2i: fixed_input = false;       fixed_result = false;       round_result = false;      needs_stub = true;  break;
  case Bytecodes::_l2f: fixed_input = false;       fixed_result = UseSSE >= 1; round_result = UseSSE < 1; needs_stub = false; break;
  case Bytecodes::_l2d: fixed_input = false;       fixed_result = UseSSE >= 2; round_result = UseSSE < 2; needs_stub = false; break;
  case Bytecodes::_f2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
  case Bytecodes::_d2l: fixed_input = true;        fixed_result = true;        round_result = false;      needs_stub = false; break;
  default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  if (fixed_input) {
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }

  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }

  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }

  __ convert(x->op(), conv_input, conv_result, stub);

  if (result != conv_result) {
    __ move(conv_result, result);
  }

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}
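// Note on needs_stub for _f2i/_d2i (our reading, assuming IA-32 semantics):
// the hardware truncating conversion produces the "integer indefinite" value
// 0x80000000 for NaN and out-of-range inputs, so the attached ConversionStub
// serves as the slow path that fixes the result up to the Java-specified
// behavior (0 for NaN, saturation to min/max int otherwise).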
void C1_MacroAssembler::build_frame(FrameMap* frame_map) {
  // offset from the expected fixed sp within the method
  int sp_offset = in_bytes(frame_map->framesize_in_bytes()) - 8; // call pushed the return IP
  if (frame_map->num_callee_saves() > 0) {
    int callee_save_num = frame_map->num_callee_saves() - 1;
    int callee_saves = frame_map->callee_saves(); // bitmap
    for (int i = LinearScan::nof_cpu_regs - 1; i >= 0; i--) {
      if ((callee_saves & 1 << i) != 0) {
        int wanted_sp_offset = frame_map->address_for_callee_save(callee_save_num)._disp;
        assert0(sp_offset - 8 == wanted_sp_offset);
        push((Register)i);
        sp_offset -= 8;
        callee_save_num--;
        assert0(callee_save_num >= -1);
      }
    }
#ifdef ASSERT
    for (int i = 0; i < LinearScan::nof_xmm_regs; i++) {
      int reg = LinearScan::nof_cpu_regs + i;
      assert((callee_saves & 1 << reg) == 0, "Unexpected callee save XMM register");
    }
#endif
  }
  if (sp_offset != 0) { // make sp equal expected sp for method
    if (sp_offset == 8) push(RCX); // push reg as smaller encoding than sub8i
    else                sub8i(RSP, sp_offset);
  }

  if (should_verify_oop(MacroAssembler::OopVerify_IncomingArgument)) {
    int args_len = frame_map->incoming_arguments()->length();
    for (int i = 0; i < args_len; i++) {
      LIR_Opr arg = frame_map->incoming_arguments()->at(i);
      if (arg->is_valid() && arg->is_oop()) {
        VOopReg::VR oop = frame_map->oopregname(arg);
        verify_oop(oop, MacroAssembler::OopVerify_IncomingArgument);
      }
    }
  }
}