void Compiler::gen_jsr(unsigned target) { AR gr = valloc(jobj); const JInst& jinst = *m_curr_inst; movp(gr, jinst.next, m_bbinfo->start); vpush(Val(jretAddr, gr)); gen_bb_leave(target); br(cond_none, target, m_bbinfo->start); }
void Compiler::gen_goto(unsigned target) { if (target <= m_pc) { // Back branch gen_prof_be(); gen_gc_safe_point(); } gen_bb_leave(target); br(cond_none, target, m_bbinfo->start); }
/**
 * @brief Generates code for single-operand conditional branches
 *        (IFEQ..IFLE, IFNULL, IFNONNULL).
 * @param opcod - the branch bytecode being compiled
 * @param target - bytecode offset of the branch target
 */
void Compiler::gen_if(JavaByteCodes opcod, unsigned target)
{
    // Backward branch: emit back-edge profiling and a GC safe point first.
    if (target <= m_pc) {
        gen_prof_be();
        gen_gc_safe_point();
    }
    // IFNULL/IFNONNULL are IFEQ/IFNE applied to an object reference.
    jtype cmpType = i32;
    if (opcod == OPCODE_IFNULL) {
        opcod = OPCODE_IFEQ;
        cmpType = jobj;
    }
    else if (opcod == OPCODE_IFNONNULL) {
        opcod = OPCODE_IFNE;
        cmpType = jobj;
    }
    // An immediate can not sit on the left of the comparison; with
    // compressed references the operand must be in a register so it can
    // be compared against the materialized null constant.
    OpndKind topKind = m_jframe->dip(0).kind();
    bool mustBeReg = (topKind == opnd_imm) || (cmpType == jobj && g_refs_squeeze);
    Opnd val = vstack(0, mustBeReg).as_opnd();
    vpop();
    rlock(val);
    COND cc = to_cond(opcod);
    static const Opnd zero((int)0);
    if (cmpType == jobj && g_refs_squeeze) {
        // Compressed-references mode: load the null constant into a
        // register and compare the reference against it.
        AR nullReg = valloc(jobj);
        movp(nullReg, NULL_REF);
        alu(alu_cmp, Opnd(jobj, nullReg), val);
    }
    else if ((opcod == OPCODE_IFEQ || opcod == OPCODE_IFNE) && val.is_reg()) {
        // For a plain (non-)zero check on a register, TEST does the job
        // of 'CMP reg, 0'.
        alu(alu_test, val, val);
    }
    else {
        alu(alu_cmp, val, zero);
    }
    runlock(val);
    gen_bb_leave(target);
    br(cc, target, m_bbinfo->start);
}
/**
 * @brief Generates code for two-operand compare-and-branch bytecodes
 *        (IF_ICMPxx and IF_ACMPEQ/IF_ACMPNE).
 * @param opcod - the branch bytecode being compiled
 * @param target - bytecode offset of the branch target
 */
void Compiler::gen_if_icmp(JavaByteCodes opcod, unsigned target)
{
    // Backward branch: profile the back edge and insert a GC safe point.
    if (target <= m_pc) {
        gen_prof_be();
        gen_gc_safe_point();
    }
    // Reference equality compares exactly like integer equality.
    if (opcod == OPCODE_IF_ACMPEQ) {
        opcod = OPCODE_IF_ICMPEQ;
    }
    else if (opcod == OPCODE_IF_ACMPNE) {
        opcod = OPCODE_IF_ICMPNE;
    }
    Opnd rhs = vstack(0).as_opnd();
    vpop();
    rlock(rhs);
    OpndKind lhsKind = m_jframe->dip(0).kind();
    // 'mem,mem' and 'imm,imm' combinations are not encodable as a single
    // CMP - force the first operand into a register in those cases.
    bool needReg = (rhs.is_mem() && lhsKind == opnd_mem) ||
                   (rhs.is_imm() && lhsKind == opnd_imm);
    Opnd lhs = vstack(0, needReg).as_opnd();
    vpop();
    rlock(lhs);
    COND cc = to_cond(opcod);
    if (lhs.is_imm() || (lhs.is_mem() && rhs.is_reg())) {
        // We have 'mem, reg' or 'imm, mem-or-reg': swap the operands so
        // the comparison becomes the more efficient 'reg, mem' or the
        // encodable 'mem-or-reg, imm' form, and flip the branch
        // condition accordingly.
        alu(alu_cmp, rhs, lhs);
        cc = flip(cc);
    }
    else {
        alu(alu_cmp, lhs, rhs);
    }
    runlock(lhs);
    runlock(rhs);
    gen_bb_leave(target);
    br(cc, target, m_bbinfo->start);
}
void Compiler::handle_inst(void) { // is it last instruction in basic block ? //const bool last = m_bbinfo->last_pc == jinst.pc; const JInst& jinst = m_insts[m_pc]; unsigned bc_size = m_infoBlock.get_bc_size(); bool lastInBB = jinst.next>=bc_size || (m_insts[jinst.next].flags & OPF_STARTS_BB); if (is_set(DBG_CHECK_STACK)) { gen_dbg_check_stack(true); } // First test if this is a magic. If not, then proceed with regular // code gen. if (!gen_magic()) { const InstrDesc& idesc = instrs[jinst.opcode]; switch (idesc.ik) { case ik_a: handle_ik_a(jinst); break; case ik_cf: handle_ik_cf(jinst); break; case ik_cnv: handle_ik_cnv(jinst); break; case ik_ls: handle_ik_ls(jinst); break; case ik_meth: handle_ik_meth(jinst); break; case ik_obj: handle_ik_obj(jinst); break; case ik_stack: handle_ik_stack(jinst); break; case ik_throw: gen_athrow(); break; default: assert(jinst.opcode == OPCODE_NOP); break; } // ~switch(opcodegroup) } else { // if (!gen_magic()) { // no op. Just check stack (if applicable) and do mem manipulations } if (is_set(DBG_CHECK_STACK)) { gen_dbg_check_stack(false); } if (g_jvmtiMode) { // Do not allow values to cross instruction boundaries // on a temporary registers vpark(); // We must have GC info at every bytecode instruction // to support possible enumeration at a breakpoint gen_gc_stack(-1, false); } const bool has_fall_through = !jinst.is_set(OPF_DEAD_END); if (lastInBB && has_fall_through && jinst.get_num_targets() == 0) { gen_bb_leave(jinst.next); } }
void Compiler::gen_ret(unsigned idx) { Opnd ret = vlocal(jretAddr, idx, false).as_opnd(); gen_bb_leave(NOTHING); br(ret); }
/**
 * @brief Generates code for LOOKUPSWITCH and TABLESWITCH bytecodes.
 *
 * LOOKUPSWITCH is lowered as a chain of compare-and-branch pairs, one per
 * key, falling through to the default target. TABLESWITCH is lowered as a
 * range check followed by an indirect jump through a jump table addressed
 * by (value - low).
 * @param jinst - the switch instruction being compiled
 */
void Compiler::gen_switch(const JInst & jinst)
{
    assert(jinst.opcode == OPCODE_LOOKUPSWITCH || jinst.opcode == OPCODE_TABLESWITCH);
    // The switch value must be in a register; keep it locked while we
    // emit the comparisons/table access.
    Opnd val = vstack(0, true).as_opnd();
    vpop();
    rlock(val);
    gen_bb_leave(NOTHING);
    if (jinst.opcode == OPCODE_LOOKUPSWITCH) {
        // Linear chain: compare against each key, branch on match,
        // otherwise fall through to the default target.
        unsigned n = jinst.get_num_targets();
        for (unsigned i = 0; i < n; i++) {
            Opnd key(jinst.key(i));
            unsigned pc = jinst.get_target(i);
            alu(alu_cmp, val, key);
            br(eq, pc, m_bbinfo->start);
        }
        runlock(val);
        br(cond_none, jinst.get_def_target(), m_bbinfo->start);
        return;
    }
    //
    // TABLESWITCH
    //
    // Range check: anything outside [low, high] goes to the default.
    alu(alu_cmp, val, jinst.high());
    br(gt, jinst.get_def_target(), m_bbinfo->start);
    alu(alu_cmp, val, jinst.low());
    br(lt, jinst.get_def_target(), m_bbinfo->start);
    // Load the address of the jump table generated for this instruction.
    AR gr_tabl = valloc(jobj);
    movp(gr_tabl, DATA_SWITCH_TABLE | m_curr_inst->pc, m_bbinfo->start);
#ifdef _EM64T_
    // On EM64T, we operate with I_32 value in a register, but the
    // register will be used as 64 bit in address form - have to extend
    sx(Opnd(i64, val.reg()), Opnd(i32, val.reg()));
#endif
    // Here, we need to extract 'index-=low()' - can pack this into
    // complex address form:
    //      [table + index*sizeof(void*) - low()*sizeof(void*)],
    // but only if low()*sizeof(void*) does fit into displacement ...
    int tmp = -jinst.low();
    const int LO_BOUND = INT_MIN/(int)sizeof(void*);
    const int UP_BOUND = INT_MAX/(int)sizeof(void*);
    if (LO_BOUND<=tmp && tmp<=UP_BOUND) {
        ld(jobj, gr_tabl, gr_tabl, -jinst.low()*sizeof(void*), val.reg(), sizeof(void*));
    }
    else {
        // ... otherwise subtract explicitly. If the register is still
        // referenced elsewhere, subtract on a fresh copy instead of
        // clobbering it in place.
        if (rrefs(val.reg()) !=0) {
            Opnd vtmp(i32, valloc(i32));
            mov(vtmp, val); // make a copy of val
            runlock(val);
            val = vtmp;
            rlock(val);
        }
        alu(alu_sub, val, jinst.low());
        ld(jobj, gr_tabl, gr_tabl, 0, val.reg(), sizeof(void*));
    }
    runlock(val);
    // Indirect jump through the loaded table entry.
    br(gr_tabl);
}