Example #1
0
/**
 * Provides fine-tuned implementation for IDIV/IREM operations on IA32-compatible platforms, 
 * in replacement of common arithmetic helper (see arith_rt.h).
 */
bool CodeGen::gen_a_platf(JavaByteCodes op, jtype jt)
{
    // Only 32-bit integer division/remainder is special-cased here; any
    // other type or opcode is left to the generic code paths.
    if (jt != i32) return false;
    if (op != OPCODE_IDIV && op != OPCODE_IREM) {
        return false;
    }
    //
    // The method is supposed to be platform-depended, and may not have 
    // Encoder support - leaving as-is, without implementing general 
    // support in Encoder
    //
    
    // IDIV implicitly uses EDX:EAX, so evict whatever currently lives in
    // those registers and pin them for the duration of this sequence.
    vpark(eax.reg());
    vpark(edx.reg());
    rlock(eax);
    rlock(edx);
    // v1 = dividend (stack depth 1), v2 = divisor (top of stack, forced
    // into a register so it can serve as the IDIV operand).
    Val& v1 = vstack(1, vis_imm(1));
    Val& v2 = vstack(0, true);
    // Guard the INT_MIN / -1 corner case: on IA-32 that quotient overflows
    // and IDIV faults, so it is filtered out before the real division.
    alu(alu_cmp, v2.as_opnd(), Opnd(-1));
    unsigned br_normal = br(ne, 0, 0);
    alu(alu_cmp, v1.as_opnd(), Opnd(INT_MIN));
    unsigned br_exit = NOTHING;
    if (op == OPCODE_IREM) {
        // INT_MIN % -1 == 0
        do_mov(edx, Opnd(0)); // prepare exit value for the corner case
        br_exit = br(eq, 0, 0);
    }
    else {
        // INT_MIN / -1 wraps to the dividend itself (INT_MIN).
        do_mov(eax, v1);
        br_exit = br(eq, 0, 0);
    }
    patch(br_normal, ip());
    // Normal path: dividend into EAX ...
    do_mov(eax, v1);
    //
    // The method is supposed to be platform-depended, and may not have 
    // Encoder support - leaving as-is, without implementing general 
    // support in Encoder
    //
    
    // ... sign-extend EAX into EDX:EAX, then divide by v2's register.
    //CDQ
    EncoderBase::Operands args0(RegName_EDX, RegName_EAX);
    ip(EncoderBase::encode(ip(), Mnemonic_CDQ, args0));
    //IDIV
    EncoderBase::Operands args(RegName_EDX, RegName_EAX, 
                               devirt(v2.reg(), i32));
    ip(EncoderBase::encode(ip(), Mnemonic_IDIV, args));
    patch(br_exit, ip());

    // Replace the two inputs with the result: remainder lives in EDX,
    // quotient in EAX.
    vpop();
    vpop();
    vpush(op == OPCODE_IREM ? edx : eax);
    runlock(eax);
    runlock(edx);
    return true;
}
Example #2
0
bool lr_parser::try_parse_ahead()
{
  /* simulate the parse on a virtual copy of the real parse stack */
  virtual_stack  vstack(stack);

  /* keep simulating until we either fail or consume all the lookahead */
  for (;;)
    {
      /* fetch the action for the current state and error-token symbol */
      const int act = get_action(vstack.top(), cur_err_token()->sym);

      /* zero encodes an error: the simulated parse fails */
      if (act == 0) return false;

      if (act < 0)
        {
          /* negative action: a reduce by production (-act)-1 */
          const int prod = (-act)-1;

          /* reducing by the start production means we accept */
          if (prod == start_production())
            {
              DEBUG_LOG("# Parse-ahead accepts");
              return true;
            }

          /* left-hand symbol and handle length of that production */
          short lhs      = production_tab[prod].lhs_sym;
          short rhs_size = production_tab[prod].rhs_size;

          /* remove the handle from the simulated stack */
          for (int i = 0; i < rhs_size; i++)
            vstack.pop();

          DEBUG_LOG("# Parse-ahead reduces: handle size = "
                    << rhs_size << " lhs = #" << lhs
                    << " from state #" << vstack.top());

          /* consult the goto table and enter the resulting state */
          vstack.push(get_reduce(vstack.top(), lhs));

          DEBUG_LOG("# Goto state #" << vstack.top());
        }
      else
        {
          /* positive action: shift into state act-1 */
          vstack.push(act-1);

          DEBUG_LOG("# Parse-ahead shifts symbol #"
                    <<  cur_err_token()->sym
                    << " into state #" << (act-1));

          /* consume one simulated input token; done once input runs out */
          if (!advance_lookahead())
            return true;
        }
    }
}
Example #3
0
void Compiler::gen_if_icmp(JavaByteCodes opcod, unsigned target)
{
    // Generates code for the IF_ICMP<cond>/IF_ACMP<cond> family: compares
    // the two topmost operand-stack items and conditionally branches to
    // 'target'.
    if (target <= m_pc) {
        // have back branch here
        gen_prof_be();
        gen_gc_safe_point();
    }
    // Reference comparisons reduce to the equivalent integer comparisons.
    if (opcod == OPCODE_IF_ACMPEQ) {
        opcod = OPCODE_IF_ICMPEQ;
    }
    else if (opcod == OPCODE_IF_ACMPNE) {
        opcod = OPCODE_IF_ICMPNE;
    }
    Opnd op2 = vstack(0).as_opnd();
    vpop();
    rlock(op2);
    OpndKind kind = m_jframe->dip(0).kind();
    // 'Bad' combinations are 'm,m' and 'imm,<any, but imm>' - have to force
    // an item into a register
    // NOTE(review): the second test below actually catches 'imm,imm'; the
    // 'imm,<non-imm>' case is handled by the operand swap further down.
    bool forceReg = (op2.is_mem() && kind == opnd_mem) || 
                    (op2.is_imm() && kind == opnd_imm);
    Opnd op1 = vstack(0, forceReg).as_opnd();
    vpop();
    rlock(op1);
    
    COND cond = to_cond(opcod);
    if ( (op1.is_mem() && op2.is_reg()) || op1.is_imm()) {
        // here we have 'mem, reg' or 'imm, mem-or-reg' - swap them so it
        // become 'reg, mem' (more efficient) or 'mem-or-reg, imm' (existent)
        // operations. change the branch condition appropriately.
        alu(alu_cmp, op2, op1);
        cond = flip(cond);
    }
    else {
        alu(alu_cmp, op1, op2);
    }
    
    runlock(op1);
    runlock(op2);
    gen_bb_leave(target);
    br(cond, target, m_bbinfo->start);
}
Example #4
0
void Compiler::gen_if(JavaByteCodes opcod, unsigned target)
{
    // Generates code for single-operand conditional branches
    // (IFEQ..IFLE, IFNULL/IFNONNULL): compares the top of the operand
    // stack against zero/null and branches to 'target'.
    if (target <= m_pc) {
        // have back branch here
        gen_prof_be();
        gen_gc_safe_point();
    }
    jtype jt = i32;
    // Null checks are just equality checks against the null reference.
    if (opcod == OPCODE_IFNULL) {
        opcod = OPCODE_IFEQ;
        jt = jobj;
    }
    else if (opcod == OPCODE_IFNONNULL) {
        opcod = OPCODE_IFNE;
        jt = jobj;
    }
    OpndKind kind = m_jframe->dip(0).kind();
    // Force the operand into a register when it is an immediate (there is
    // no 'cmp imm, imm' form) or when squeezed references require the
    // register-register compare performed below.
    bool forceReg = (kind == opnd_imm) || (jt == jobj && g_refs_squeeze);
    Opnd op1 = vstack(0, forceReg).as_opnd();
    vpop();
    rlock(op1);
    COND cond = to_cond(opcod);
    static const Opnd zero((int)0);
    if (jt == jobj && g_refs_squeeze) {
        // With squeezed (compressed) references the null value may not be
        // plain zero - materialize NULL_REF and compare against it.
        AR ar = valloc(jobj);
        movp(ar, NULL_REF);
        alu(alu_cmp, Opnd(jobj, ar), op1);
    }
    else if (opcod == OPCODE_IFEQ || opcod == OPCODE_IFNE) {
        if (op1.is_reg()) {
            // 'test reg, reg' sets ZF like 'cmp reg, 0' with a shorter
            // encoding.
            alu(alu_test, op1, op1);
        }
        else {
            alu(alu_cmp, op1, zero);
        }
    }
    else {
        alu(alu_cmp, op1, zero);
    }
    runlock(op1);
    gen_bb_leave(target);
    br(cond, target, m_bbinfo->start);
}
Example #5
0
void CodeGen::gen_a(JavaByteCodes op, jtype jt)
{
    // Arithmetic dispatcher. Tries the specialized inline generators
    // first; when none of them accepts the operation, falls back to
    // calling a runtime helper through a standard calling convention.
    if (gen_a_platf(op, jt)) {
        return;
    }
    
    if (gen_a_generic(op, jt)) {
        return;
    }
    
    if (is_f(jt) && gen_a_f(op, jt)) {
        return;
    }
    
    if (jt == i32 && gen_a_i32(op)) {
        return;
    }

    // Helper-call fallback: marshal the operands into outgoing arguments,
    // call the helper, then push the result and undo the stack adjustment.
    unsigned stackFix = 0;
    bool shft = op == OPCODE_ISHL || op == OPCODE_ISHR || op == OPCODE_IUSHR;
    const CallSig* rcs = NULL;
    if (is_f(jt)) {
        assert(jt == dbl64 || jt == flt32);
        char * helper = NULL;
        bool is_dbl = jt == dbl64;
        if (op == OPCODE_INEG) {
            // Unary negate: helper takes a single float/double argument.
            SYNC_FIRST(static const CallSig cs_dbl(CCONV_STDCALL, dbl64, dbl64));
            SYNC_FIRST(static const CallSig cs_flt(CCONV_STDCALL, flt32, flt32));
            rcs = is_dbl? &cs_dbl : &cs_flt;
            stackFix = gen_stack_to_args(true, *rcs, 0, 1);
            helper = is_dbl ? (char*)&rt_h_neg_dbl64 : (char*)&rt_h_neg_flt32;
            gen_call_novm(*rcs, helper, 1);
            runlock(*rcs);
        }
        else {
            // Binary FP operation: helper takes both operands plus the
            // opcode selecting the actual operation.
            //if (m_jframe->dip(1).stype == st_imm && )
            SYNC_FIRST(static const CallSig cs_dbl(CCONV_STDCALL, dbl64, dbl64, dbl64, i32));
            SYNC_FIRST(static const CallSig cs_flt(CCONV_STDCALL, flt32, flt32, flt32, i32));
            rcs = is_dbl? &cs_dbl : &cs_flt;
            stackFix = gen_stack_to_args(true, *rcs, 0, 2);
            helper = is_dbl ? (char*)&rt_h_dbl_a : (char*)&rt_h_flt_a;
            gen_call_novm(*rcs, helper, 2, op);
            runlock(*rcs);
        }
    }
    else if (jt==i64) {
        if (op == OPCODE_INEG) {
            SYNC_FIRST(static const CallSig cs(CCONV_STDCALL, i64, i64));
            rcs = &cs;
            stackFix = gen_stack_to_args(true, *rcs, 0, 1);
            gen_call_novm(*rcs, (void*)&rt_h_neg_i64, 1);
            runlock(*rcs);
        }
        else if (shft) {
            // i64 shifts take an i32 shift count as the second argument.
            SYNC_FIRST(static const CallSig cs(CCONV_STDCALL, i64, i64, i32, i32));
            rcs = &cs;
            stackFix = gen_stack_to_args(true, *rcs, 0, 2);
            gen_call_novm(*rcs, (void*)&rt_h_i64_shift, 2, op);
            runlock(*rcs);
        }
        else {
            SYNC_FIRST(static const CallSig cs(CCONV_STDCALL, i64, i64, i64, i32));
            rcs = &cs;
            stackFix = gen_stack_to_args(true, *rcs, 0, 2);
            gen_call_novm(*rcs, (void*)&rt_h_i64_a, 2, op);
            runlock(*rcs);
        }
    }
    else {
        assert(jt==i32);
        if (op == OPCODE_INEG) {
            SYNC_FIRST(static const CallSig cs(CCONV_STDCALL, i32, i32));
            rcs = &cs;
            stackFix = gen_stack_to_args(true, *rcs, 0, 1);
            gen_call_novm(*rcs, (void*)&rt_h_neg_i32, 1);
            runlock(*rcs);
        }
        else if (op == OPCODE_IADD || op == OPCODE_ISUB) {
            // IADD/ISUB are emitted inline - no helper call needed.
            const Val& op2 = vstack(0);
            vpop();
            rlock(op2);
            const Val& op1 = vstack(0);
            vpop();
            rlock(op1);
            AR ar = valloc(i32);
            Opnd reg(i32, ar);
            //TODO: may eliminate additional register allocation
            mov(reg, op1.as_opnd());
            alu(op == OPCODE_IADD ? alu_add : alu_sub, reg, op2.as_opnd());
            runlock(op1);
            runlock(op2);
            vpush(Val(i32, ar));
            return;
        }
        else {
            SYNC_FIRST(static const CallSig cs(CCONV_STDCALL, i32, i32, i32, i32));
            rcs = &cs;
            stackFix = gen_stack_to_args(true, *rcs, 0, 2);
            gen_call_novm(*rcs, (void*)&rt_h_i32_a, 2, op);
            runlock(*rcs);
        }
    }
    assert(rcs != NULL);
    // Push the helper's return value onto the operand stack, then undo
    // the native-stack adjustment reported by gen_stack_to_args().
    gen_save_ret(*rcs);
    if (stackFix != 0) {
        alu(alu_sub, sp, stackFix);
    }
}
Example #6
0
/**
 * Emits inlined code for simple arithmetic operations when the operand
 * kinds allow it.
 *
 * Accepts i32 add/sub/or/and/xor, i32 shifts by an immediate count,
 * flt32/dbl64 add/sub/mul/div, and constant-folds any accepted operation
 * when both operands are immediates.
 *
 * @return true when code was generated (or the result was folded);
 *         false when the caller must fall back to a helper call.
 */
bool CodeGen::gen_a_generic(JavaByteCodes op, jtype jt)
{
    if (op == OPCODE_INEG) {
        return false; // later
    }
    if (jt == i32) {
        bool v2_imm = vis_imm(0);
        if (v2_imm &&
            (op == OPCODE_ISHL || op == OPCODE_ISHR || op == OPCODE_IUSHR)) {
            // accept it: shift by an immediate count.
            // Fix: the condition used to test OPCODE_ISHL twice, so ISHR
            // by an immediate always fell through to the slow helper path.
        }
        else if (op == OPCODE_IADD || op == OPCODE_ISUB) {
            // accept it
        }
        else if (op == OPCODE_IOR || op == OPCODE_IAND || op == OPCODE_IXOR) {
            // accept it
        }
        else if (v2_imm && m_jframe->size()>1 && vis_imm(1)) {
            // accept it: both operands are immediates - folded below
        }
        else {
            return false;
        }
    }
    else if (is_f(jt)) {
        if (op != OPCODE_IADD && op != OPCODE_ISUB && 
            op != OPCODE_IMUL && op != OPCODE_IDIV) {
            return false;    
        }
    }
    else {
        return false;
    }
    
    bool is_dbl = jt == dbl64;
    // dbl64 values occupy two stack slots, so the first operand lives one
    // slot deeper for doubles.
    unsigned v1_depth = is_dbl?2:1;
    
    // Both operands are compile-time constants: fold the operation now
    // (reusing the runtime helpers for exact semantics) and push the
    // result instead of emitting any code.
    if (vis_imm(v1_depth) && vis_imm(0)) {
        const Val& v1 = m_jframe->dip(v1_depth);
        const Val& v2 = m_jframe->dip(0);
        Val res;
        if (jt==dbl64) {
            double d = rt_h_dbl_a(v1.dval(), v2.dval(), op);
            res = Val(d);
        }
        else if (jt==flt32) {
            float f = rt_h_flt_a(v1.fval(), v2.fval(), op);
            res = Val(f);
        }
        else {
            assert(jt==i32);
            int i = rt_h_i32_a(v1.ival(), v2.ival(), op);
            res = Val(i);
        }
        vpop();
        vpop();
        vpush(res);
        return true;
    } // if v1.is_imm() && v2.is_imm()
    

    // First operand goes to a register (destination of the ALU op).
    const Val& v1 = vstack(v1_depth, true);
    Opnd res = v1.as_opnd();
    if (rrefs(v1.reg()) > 1) {
        // v1's register has other users - operate on a private copy so
        // the destructive ALU op does not clobber them.
        rlock(v1);
        AR ar = valloc(jt);
        runlock(v1);
        Opnd reg(jt, ar);
        mov(reg, v1.as_opnd());
        res = reg;
    }
    rlock(res);
    rlock(v1);
    const Val& v2 = m_jframe->dip(0);
    if(v2.is_mem()) {
        // Everyone can do 'reg, mem' operation
        alu(to_alu(op), res, v2.as_opnd());
    }
    else if(v2.is_imm() && jt==i32) {
        // 'reg, imm' is only for i32 operations
        alu(to_alu(op), res, v2.ival());
    }
    else {
        // Force the second operand into a register.
        // (Renamed from 'v2' to avoid shadowing the Val above.)
        Opnd op2 = vstack(0, true).as_opnd();
        alu(to_alu(op), res, op2);
    }
    vpop();
    vpop();
    runlock(v1);
    runlock(res);
    vpush(res);

    return true;
}
Example #7
0
File: cmd.c — Project: 8l/inferno
/*
 * oscmd - spawn a host command described by args (optional working
 * directory dir, optional nice-ing), wiring up three pipes for the
 * child's stdin/stdout/stderr plus a fourth pipe (wfd) on which the
 * child reports an exec failure. On success returns an opaque Targ*
 * and stores the parent's pipe ends in fd[0..2]; returns nil on failure.
 */
void*
oscmd(char **args, int nice, char *dir, int *fd)
{
	Targ *t;
	int spin, *spinptr, fd0[2], fd1[2], fd2[2], wfd[2], n;
	Dir *d;

	up->genbuf[0] = 0;
	t = mallocz(sizeof(*t), 1);
	if(t == nil)
		return nil;
	t->args = args;
	t->dir = dir;
	t->nice = nice;
	/* mark all descriptors invalid so the Error path can close blindly */
	fd0[0] = fd0[1] = -1;
	fd1[0] = fd1[1] = -1;
	fd2[0] = fd2[1] = -1;
	wfd[0] = wfd[1] = -1;
	if(dir != nil){
		/* verify the working directory exists before forking */
		d = dirstat(dir);
		if(d == nil)
			goto Error;
		free(d);
	}
	if(pipe(fd0) < 0 || pipe(fd1) < 0 || pipe(fd2) < 0 || pipe(wfd) < 0)
		goto Error;

	/* busy-wait flag shared with the child; cleared in exectramp */
	spinptr = &spin;
	spin = 1;

	/* hand the child its ends of the pipes via the shared Targ */
	t->fd[0] = fd0[0];
	t->fd[1] = fd1[1];
	t->fd[2] = fd2[1];
	t->wfd = wfd[1];
	t->spin = spinptr;
	switch(rfork(RFPROC|RFMEM|RFREND|RFNOTEG|RFFDG|RFNAMEG|RFENVG)) {
	case -1:
		goto Error;
	case 0:
		/* if child returns first from rfork, its call to vstack replaces ... */
		vstack(t);
		/* ... parent's return address from rfork and parent returns here */
	default:
		/* if parent returns first from rfork, it comes here */
		/* can't call anything: on shared stack until child releases spin in exectramp */
		while(*spinptr)
			;
		break;
	}

	/* parent: close the child's ends of the pipes */
	close(fd0[0]);
	close(fd1[1]);
	close(fd2[1]);
	close(wfd[1]);

	/* anything arriving on wfd is the child's exec-failure message */
	n = read(wfd[0], up->genbuf, sizeof(up->genbuf)-1);
	close(wfd[0]);
	if(n > 0){
		close(fd0[1]);
		close(fd1[0]);
		close(fd2[0]);
		up->genbuf[n] = 0;
		errstr(up->genbuf, sizeof(up->genbuf));
		free(t);
		return nil;
	}

	/* success: hand the parent's pipe ends back to the caller */
	fd[0] = fd0[1];
	fd[1] = fd1[0];
	fd[2] = fd2[0];
	return t;

Error:
	errstr(up->genbuf, sizeof(up->genbuf));	/* save the message before close */
	close(fd0[0]);
	close(fd0[1]);
	close(fd1[0]);
	close(fd1[1]);
	close(fd2[0]);
	close(fd2[1]);
	close(wfd[0]);
	close(wfd[1]);
	free(t);
	/* errstr exchanges the error string, so this second call restores
	 * the message saved above - presumably so close() failures do not
	 * clobber it; verify against errstr(2) */
	errstr(up->genbuf, sizeof(up->genbuf));
	return nil;
}
Example #8
0
void Compiler::gen_switch(const JInst & jinst)
{
    // Generates code for LOOKUPSWITCH/TABLESWITCH. LOOKUPSWITCH becomes a
    // chain of compare-and-branch pairs; TABLESWITCH becomes an indirect
    // jump through a table of target addresses.
    assert(jinst.opcode == OPCODE_LOOKUPSWITCH 
           || jinst.opcode == OPCODE_TABLESWITCH);
    Opnd val = vstack(0, true).as_opnd();
    vpop();
    rlock(val);
    gen_bb_leave(NOTHING);

    if (jinst.opcode == OPCODE_LOOKUPSWITCH) {
        // Linear sequence: 'cmp val, key; je target' per case, then an
        // unconditional jump to the default target.
        unsigned n = jinst.get_num_targets();
        for (unsigned i = 0; i < n; i++) {
            Opnd key(jinst.key(i));
            unsigned pc = jinst.get_target(i);
            alu(alu_cmp, val, key);
            br(eq, pc, m_bbinfo->start);
        }
        runlock(val);
        br(cond_none, jinst.get_def_target(), m_bbinfo->start);
        return;
    }
    //
    // TABLESWITCH
    //
    // Range check: anything outside [low, high] goes to the default.
    alu(alu_cmp, val, jinst.high());
    br(gt, jinst.get_def_target(), m_bbinfo->start);
    
    alu(alu_cmp, val, jinst.low());
    br(lt, jinst.get_def_target(), m_bbinfo->start);
    
    // Load the address of the jump table emitted for this instruction.
    AR gr_tabl = valloc(jobj);
    movp(gr_tabl, DATA_SWITCH_TABLE | m_curr_inst->pc, m_bbinfo->start);
#ifdef _EM64T_
    // On EM64T, we operate with I_32 value in a register, but the 
    // register will be used as 64 bit in address form - have to extend
    sx(Opnd(i64, val.reg()), Opnd(i32, val.reg()));
#endif
    // Here, we need to extract 'index-=low()' - can pack this into 
    // complex address form:
    //      [table + index*sizeof(void*) - low()*sizeof(void*)],
    // but only if low()*sizeof(void*) does fit into displacement ...
    int tmp = -jinst.low();
    const int LO_BOUND = INT_MIN/(int)sizeof(void*);
    const int UP_BOUND = INT_MAX/(int)sizeof(void*);
    if (LO_BOUND<=tmp && tmp<=UP_BOUND) {
        ld(jobj, gr_tabl, gr_tabl, -jinst.low()*sizeof(void*), 
        val.reg(), sizeof(void*));
    }
    else {
        // ... otherwise subtract explicitly, but only if the register
        // is not used anywhere else
        if (rrefs(val.reg()) !=0) {
            // val's register has other users - work on a copy before
            // clobbering it with the subtraction below.
            Opnd vtmp(i32, valloc(i32));
            mov(vtmp, val); // make a copy of val
            runlock(val);
            val = vtmp;
            rlock(val);
        }
        alu(alu_sub, val, jinst.low());
        ld(jobj, gr_tabl, gr_tabl, 0, val.reg(), sizeof(void*));
    }
    runlock(val);
    // Indirect jump through the loaded table entry.
    br(gr_tabl);
}