Example 1
0
void Vm::execCode(const SCodeObject &c) {
  Vm::pushCodeObject(c);
  // TODO, if coStack_ is empty, push a NIL object
  // This is in case the user tries to assign a value to a void function call
  while (!coStack_.empty()) {
    if (*opId_ >= static_cast<int>(vecOp_->size())) {
      popCodeObject();
      continue;
    }
    SObject i;
    SObject j;

    Op op = (*vecOp_)[*opId_];
    Opc opc = op.opc_;
    switch (opc) {
    case Opc::ADD:
      DEBUG("OP::ADD");
      BIN_OP(+);
      break;
    case Opc::SUB:
      DEBUG("OP::SUB");
      BIN_OP(-);
      break;
    case Opc::UNARY_SUB:
      DEBUG("OP::UNARY_SUB");
      i = VM_POP();
      j = std::make_shared<IntegerObject>(0);
      VM_PUSH(*j - i);
      break;
    case Opc::MOD:
      DEBUG("OP::MOD");
      BIN_OP(% );
      break;
    case Opc::LT:
      DEBUG("OP::LT");
      BIN_OP(< );
      break;
    case Opc::GT:
      DEBUG("OP::GT");
      BIN_OP(> );
      break;
    case Opc::LEQ:
      DEBUG("OP::LEQ");
      BIN_OP(<= );
      break;
    case Opc::GEQ:
      DEBUG("OP::GEQ");
      BIN_OP(>= );
      break;
    case Opc::EQ:
      DEBUG("OP::EQ");
      BIN_OP(== );
      break;
    case Opc::AND:
      DEBUG("OP::AND");
      BIN_OP(&&);
      break;
    case Opc::OR:
      DEBUG("OP::OR");
      BIN_OP(|| );
      break;
    case Opc::NEQ:
      DEBUG("OP::NEQ");
      BIN_OP(!= );
      break;
    case Opc::MULT:
      DEBUG("OP::MULT");
      BIN_OP(*);
      break;
    case Opc::DIV:
      DEBUG("OP::DIV");
      BIN_OP(/ );
      break;
    case Opc::POWER: 
      DEBUG("OP::POWER"); 
      j = VM_POP();
      i = VM_POP();
      VM_PUSH(std::make_shared<DoubleObject>(pow(i->getDouble(), j->getDouble())));
      break;
    case Opc::WHILE: {
      DEBUG("OP::WHILE");
      assert(op.hasArgA());
      SCodeObject co = codeObject_->getChild(op.getArgA());
      co->setParent(codeObject_);
      pushCodeObject(co);
      continue;
    }
    /*TODO, Better to not have BREAK and CONTINUE in the vm. */
    case Opc::BREAK:
      DEBUG("OP::BREAK")
      while (codeObject_->getBlockType() != BlockType::WHILE) {
        popCodeObject();
      }
      popCodeObject();
      break;
    case Opc::CONTINUE:
      DEBUG("OP::CONTINUE")
      while (codeObject_->getBlockType() != BlockType::WHILE) {
        popCodeObject();
      }
      *opId_ = 0; // restart the loop body from its first op
      continue;
    case Opc::LOAD_CONSTANT:
      DEBUG("OP::PUSH_CONSTANT");
      assert(op.hasArgA());
      i = codeObject_->getConst(op.getArgA());
      VM_PUSH(i);
      break;
    case Opc::LOAD_VALUE:
      DEBUG("OP::LOAD_VALUE");
      assert(op.hasStr());
      i = codeObject_->getValue(op.getStr());
      assert(i != nullptr);
      VM_PUSH(i);
      break;
    case Opc::STORE_VALUE:
      DEBUG("OP::STORE_VALUE");
      assert(op.hasStr());
      i = VM_POP();
      codeObject_->storeValue(op.getStr(), i);
      break;
    case Opc::JMP_IF_ELSE: {
      DEBUG("OP::JMP_IF_ELSE");
      INCR_OP();
      assert(op.hasArgA());
      auto v = VM_POP();
      if (v->isTrue()) {
        SCodeObject ic = codeObject_->getChild(op.getArgA());
        ic->setParent(codeObject_);
        pushCodeObject(ic);
      } else {
        if (op.hasArgB()) {
          SCodeObject ec = codeObject_->getChild(op.getArgB());
          ec->setParent(codeObject_);
          pushCodeObject(ec);
        }
      }
      continue;
    }
    case Opc::INIT_INSTANCE: {
      auto classCo = codeObject_->getParent();
      auto classO = std::make_shared<ClassObject>(classCo);
      VM_PUSH(classO);
      INCR_OP();
      continue;
    }
    case Opc::CALL_METHOD: {
      DEBUG("OP::CALL_METHOD");
      INCR_OP();
      auto method = VM_POP();
      auto instance = VM_POP();
      Vm::callMethod(instance, method);
      continue;
    }
    case Opc::DOT: {
      DEBUG("OP::DOT");
      i = VM_POP();
      j = VM_POP();
      Vm::getMethodProp(j, i);
      break;
    }
    case Opc::CALL: {
      DEBUG("OP::CALL");
      assert(op.hasStr());
      auto fnob = codeObject_->getValue(op.getStr());
      assert(fnob != nullptr);
      INCR_OP();
      Vm::callFunc(fnob);
      continue;
    }
    case Opc::RETURN: {
      // TODO, clean the stack
      assert(op.hasArgA());
      DEBUG("OP::RETURN");
      while (codeObject_->getBlockType() != BlockType::FUNCTION) {
        popCodeObject();
      }
      popCodeObject();
      continue;
    }

    default:
      assert(false && "Not Implemented Yet!");
      break;
    }
    INCR_OP(); // increment the op
  }            // end while
}
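Example 1's dispatch loop depends on the BIN_OP, VM_PUSH, VM_POP, and INCR_OP macros, whose definitions are not shown above. A minimal sketch of how they might be defined, assuming the surrounding Vm class keeps an operand stack stack_, a popStack() helper, and a pointer opId_ to the current op index (these member names are assumptions, not the project's actual code):

// Hypothetical helper macros; stack_, popStack(), opId_ and the operator
// overloads on the object classes are assumed members, not the real ones.
#define VM_PUSH(obj) stack_.push(obj)

// Pop the top operand object off the stack and return it.
#define VM_POP() popStack()

// Advance the op index of the code object currently being executed.
#define INCR_OP() (++(*opId_))

// Binary operators pop the right operand first, then the left, and push
// the result of applying `op` to the two objects.
#define BIN_OP(op)          \
  do {                      \
    SObject rhs = VM_POP(); \
    SObject lhs = VM_POP(); \
    VM_PUSH(*lhs op rhs);   \
  } while (0)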
Example 2
0
void JIT::emitPutCallResult(const Op& bytecode)
{
    emitValueProfilingSite(bytecode.metadata(m_codeBlock));
    emitStore(bytecode.m_dst.offset(), regT1, regT0);
}
Example 3
0
 inline typename right_type<Op>::const_reference right(Op const &op)
 {
     return op.cast().right;
 }
Example 4
0
 inline typename arg_type<Op>::const_reference arg(Op const &op)
 {
     return op.cast().arg;
 }
Example 5
0
		Mult(Op OpOne, Op OpTwo){
			Op1.changeVal( OpOne.evaluate() );
			Op2.changeVal( OpTwo.evaluate() );
		}
Example 6
0
 inline typename left_type<Op>::const_reference left(Op const &op)
 {
     return op.cast().left;
 }
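Examples 3, 4, and 6 are free-function accessors in an expression-template style: an operator node stores its children as left, right, or arg members, trait classes (left_type, right_type, arg_type) expose the child types, and cast() downcasts the wrapper to the concrete node. A self-contained sketch of the pattern, with the cast() layer omitted and all names illustrative rather than taken from the original library:

#include <iostream>

// Illustrative node type; the real library's hierarchy is more elaborate.
template <typename L, typename R>
struct binary_node {
    L left;
    R right;
};

// Trait classes exposing the child types, mirroring left_type<Op>/right_type<Op>.
template <typename Node> struct left_type;
template <typename Node> struct right_type;

template <typename L, typename R>
struct left_type<binary_node<L, R>>  { typedef const L &const_reference; };
template <typename L, typename R>
struct right_type<binary_node<L, R>> { typedef const R &const_reference; };

// Free-function accessors in the same shape as the originals.
template <typename Op>
inline typename left_type<Op>::const_reference left(Op const &op)  { return op.left; }
template <typename Op>
inline typename right_type<Op>::const_reference right(Op const &op) { return op.right; }

int main() {
    binary_node<int, double> plus{2, 3.5};
    std::cout << left(plus) + right(plus) << "\n";  // prints 5.5
}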
Example 7
0
		Sqr(Op OpOne)
		{
			Op1.changeVal( OpOne.evaluate() );
		}
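The Mult and Sqr constructors in Examples 5 and 7 copy their operands' current values into the member operands Op1 and Op2 through changeVal()/evaluate(). A small self-contained sketch of an operand class with that interface and the two node classes built on it (the interfaces are assumptions, not the original ones):

// Hypothetical operand type with the evaluate()/changeVal() interface the
// constructors above rely on.
class Op {
    double val_ = 0.0;
public:
    Op() = default;
    explicit Op(double v) : val_(v) {}
    double evaluate() const { return val_; }     // current value of the operand
    void changeVal(double v) { val_ = v; }       // overwrite the stored value
};

// Binary node: caches the evaluated operands at construction time.
class Mult {
    Op Op1, Op2;
public:
    Mult(Op OpOne, Op OpTwo) {
        Op1.changeVal(OpOne.evaluate());
        Op2.changeVal(OpTwo.evaluate());
    }
    double evaluate() const { return Op1.evaluate() * Op2.evaluate(); }
};

// Unary node: squares its single cached operand.
class Sqr {
    Op Op1;
public:
    explicit Sqr(Op OpOne) { Op1.changeVal(OpOne.evaluate()); }
    double evaluate() const { return Op1.evaluate() * Op1.evaluate(); }
};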
Example 8
0
bool
Emitter::_jit_emit_return(Term *ast, pj_op_context context, Value *value, const PerlJIT::AST::Type *type)
{
  // TODO OPs with OPpTARGET_MY flag are in scalar context even when
  // they should be in void context, so we're emitting a useless push
  // below

  if (context == pj_context_caller) {
    set_error("Caller-determined context not implemented");
    return false;
  }

  if (context != pj_context_scalar)
    return true;

  Op *op = dynamic_cast<Op *>(ast);
  Value *res;

  switch (op->op_class()) {
  case pj_opc_binop: {
    // the assumption here is that the OPf_STACKED assignment
    // has been handled by _jit_emit below, and here we only need
    // to handle cases like '$x = $y += 7'
    if (op->get_op_type() == pj_binop_sassign) {
      if (type->equals(&SCALAR_T)) {
        res = value;
      } else {
        // TODO suboptimal, but correct, need to ask for SCALAR
        // in the call to _jit_emit() above
        res = pa.emit_sv_newmortal();
      }
    } else {
      if (!op->get_perl_op()->op_targ && type->equals(&SCALAR_T)) {
        res = value;
      } else {
        if (!op->get_perl_op()->op_targ) {
          set_error("Binary OP without target");
          return false;
        }
        res = pa.emit_OP_targ();
      }
    }
    break;
  }
  case pj_opc_unop:
    if (!op->get_perl_op()->op_targ) {
      set_error("Unary OP without target");
      return false;
    }
    res = pa.emit_OP_targ();
    break;
  default:
    res = pa.emit_sv_newmortal();
    break;
  }

  if (res != value)
    if (!_jit_assign_sv(res, value, type))
      return false;

  pa.alloc_sp();
  pa.emit_SPAGAIN();
  pa.emit_XPUSHs(res);
  pa.emit_PUTBACK();

  return true;
}
Example 9
0
PUBLIC void
RD_gen_kill (Compound_region* blk, Op* exit_op,
             Reaching_defs_solver& global_rdefa, int rd_idx)
{
   int ii=0;
   
   if (dbg (rdef, 1))
      cdbg << "*\t\t\tRD_gen_kill, block-" << blk->id()
      << " exit_op-" << exit_op->id() << endl;
   
   // da PQS:
   PQS* pqs = get_pqs (blk);
   
   Pred_jar pj(blk)  ;
   
   if (dbg (rdef, 2))
      cdbg << "\t\t\t\tPQS is:" << *pqs;
   
   // get predicate which controls the path 
   Pred_cookie cond_cq = pj.get_lub_path_guard(get_first_region_op_from_subregions(blk), exit_op) ;
   
   
   // create vectors of pred cookies for gen/kill, initialize them:
   Pred_cookie fpq = Pred_jar::get_false() ;
   Pred_cookie tpq = Pred_jar::get_true() ;
   
   if (!pq_gen_ptr) {
      pq_gen_ptr = new Vector<Pred_cookie>(global_rdefa.rdm->def_counter, fpq) ;
   }
   else {
      if ((int)pq_gen_ptr->size() != global_rdefa.rdm->def_counter) {
         delete pq_gen_ptr ;
         pq_gen_ptr = new Vector<Pred_cookie>(global_rdefa.rdm->def_counter, fpq) ;
      }
      else {
         for (int j=0; j < global_rdefa.rdm->def_counter; j++) {
            (*pq_gen_ptr)[j].reset_to_false() ;
         }
      }
   }
   
   
   //
   if (!pq_kill_ptr) {
      pq_kill_ptr = new Vector<Pred_cookie>(global_rdefa.rdm->def_counter, tpq) ;
   }
   else {
      if ((int)pq_kill_ptr->size() != global_rdefa.rdm->def_counter) {
         delete pq_kill_ptr ;
         pq_kill_ptr = new Vector<Pred_cookie>(global_rdefa.rdm->def_counter, tpq) ;
      }
      else {
         for (int j=0; j < global_rdefa.rdm->def_counter; j++) {
            (*pq_kill_ptr)[j].reset_to_true() ;
         }
      }
   }
   
   
   Vector<Pred_cookie>& pq_gen = *pq_gen_ptr ;
   Vector<Pred_cookie>& pq_kill = *pq_kill_ptr ;

   Op_complete_and_pseudo_dests dest_oper ;
   
   //
   // traverse ops in hyperblock in linear order from entry to exit_op
   //
   bool done = false;
   for (Region_ops_C0_order opi2 (blk); done != true; opi2++) {
      // there should be no next time around:
      if ((*opi2) == exit_op)
         done = true;
      
      assert ((*opi2)->is_op());
      Op* op = (Op*)(*opi2);
      
      // TODO:
      // Any additions/changes here must be replicated in Reaching_defs_solver
      // chain construction code too.
      if (is_remap (op)) {
         Pred_cookie remap_cond_cq(true) ;

         List<Operand>* remap_defs = (List<Operand>*) 
            get_generic_attribute (op, "remap_expanded_defs") ;
         
         for (List_iterator<Operand> dest_oper(*remap_defs) ;
         dest_oper != 0; dest_oper++) { 
            // process the defs only:
            Operand oper = (*dest_oper);
            
            if (global_rdefa.is_interesting(oper)) {
               int oi = global_rdefa.rdm->operand_map.value (oper);
               
               for (List_iterator<int> li (global_rdefa.rdm->operand_to_def_map[oi]);
               li != 0; li++) {
                  int idx = *li ;
                  
                  // all defs get killed:
                  pq_gen[idx].lub_diff(remap_cond_cq) ;
                  pq_kill[idx].lub_diff(remap_cond_cq) ;
                  
                  // only the def belonging to this op is genned:
                  if (global_rdefa.rdm->rev_def_map[idx].get_op() == op) {                    
                     pq_gen[idx].lub_sum(remap_cond_cq) ;
                     pq_kill[idx].lub_sum(remap_cond_cq) ;
                  }
               }
            }
         }
         // Add one more kill for vrname[min_omega]
         Operand oper = op->src(SRC1) ;
         int min_omega = oper.min_omega() ;
         Operand new_reg = new Reg(oper);
         new_reg.incr_omega(min_omega);
         int oi = global_rdefa.rdm->operand_map.value(new_reg);
         for (List_iterator<int> li2 (global_rdefa.rdm->operand_to_def_map[oi]);
         li2 != 0; li2++) {
            int idx = *li2 ;
            // all defs get killed:
            pq_gen[idx].lub_diff(remap_cond_cq) ;
            pq_kill[idx].lub_diff(remap_cond_cq) ;
            
         }      
      }
      else if (is_swp_branch (op)) {
         // Special case for SWP branch so that src and dest guards
         // are the staging predicate. This code needs to be fixed once
         // the PQS interface takes care of this internally
         //
         // TODO:(Sumedh+Richard) Sanity checks needed on this piece of code.
         // Any additions/changes here must be replicated in Reaching_defs_solver
         // chain construction code too.
         
         for (dest_oper (op); dest_oper != 0; dest_oper++) {
            Operand oper = (*dest_oper);
            
            if (global_rdefa.is_interesting(oper)) {
               int oi = global_rdefa.rdm->operand_map.value (oper);
               
               for (List_iterator<int> li (global_rdefa.rdm->operand_to_def_map[oi]);
               li != 0; li++) {
                  int idx = *li ;
                  // all defs get killed:
                  pq_gen[idx].lub_diff(cond_cq) ;
                  pq_kill[idx].lub_diff(cond_cq) ;
                  
                  // only the def belonging to this op is genned:
                  if (global_rdefa.rdm->rev_def_map[*li].get_op() == op) {
                     pq_gen[idx].lub_sum(cond_cq) ;
                     pq_kill[idx].lub_sum(cond_cq) ;
                  }
               }
            }
         }
      }
      else {
         for (dest_oper (op); dest_oper != 0; dest_oper++) {
            // process the defs only:
            Operand oper = (*dest_oper);
            El_ref cur_ref = dest_oper.get_ref() ;

            if (global_rdefa.is_interesting(oper)) {
               int oi = global_rdefa.rdm->operand_map.value (oper);
               
               Op* cur_def_op = cur_ref.get_op() ;
               Pred_cookie pq1 = pj.get_lub_guard(cur_ref) ;
               Pred_cookie pq2 = pj.get_glb_guard(cur_ref) ;
               for (List_iterator<int> li (global_rdefa.rdm->operand_to_def_map[oi]);
               li != 0; li++) {
                  int idx = *li ;
                  Op* other_def_op = (global_rdefa.rdm->rev_def_map[idx]).get_op() ;
                  // only the def belonging to this op is genned:
                  if (cur_def_op == other_def_op) {
                     pq_gen[idx].lub_sum(pq1) ;
                     pq_kill[idx].lub_sum(pq1) ;
                  }
                  else {
                     // all other defs get killed:
                     pq_gen[idx].lub_diff(pq1) ;
                     pq_kill[idx].lub_diff(pq2) ;
                  }
               }
            }
         }
      }
   }
   // expand local info into global gen Vector:
   if (dbg (rdef, 3))
      cdbg << "*\t\t\t\tIntegrate gen info:" << endl;
   for (ii=1; ii < global_rdefa.rdm->def_counter; ii++) {
      // output if def on any paths
      if (!cond_cq.is_disjoint(pq_gen[ii])) {
         El_ref& ref = global_rdefa.rdm->rev_def_map[ii];
         
         int idx = global_rdefa.rdm->def_to_i_map.value (ref);
         
         if (dbg (rdef, 3))
            cdbg << "\t\t\t\t" << "<" << idx << ", " << ref << ">, " << endl;
         
         global_rdefa.rd_info[rd_idx].rd_gen.set_bit (idx);
      }
   }
   if (dbg (rdef, 3)) cdbg << endl;
   
   // expand local info into global kill Vector:
   if (dbg (rdef, 3))
      cdbg << "*\t\t\t\tIntegrate kill info:" << endl;
   for (ii=1; ii < global_rdefa.rdm->def_counter; ii++) {
      // output if def killed on any path
      if (cond_cq.is_disjoint(pq_kill[ii])) {
         El_ref& ref = global_rdefa.rdm->rev_def_map[ii];
         int idx = global_rdefa.rdm->def_to_i_map.value (ref);
         if (dbg (rdef, 3))
            cdbg << "\t\t\t\t" << "<" << idx << ", " << ref << ">, " << endl;
         global_rdefa.rd_info[rd_idx].rd_kill.set_bit (idx);
      }
   }
   if (dbg (rdef, 3)) cdbg << endl;
}
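RD_gen_kill computes, for one block and exit op, which reaching definitions are generated or killed along the path, refined by the predicate guards obtained from the PQS. Stripped of the predication machinery, the core per-block computation is the classic gen/kill scan: each definition kills every other definition of the same operand and generates its own. A plain, unpredicated sketch of that idea with made-up container types (not the Elcor ones):

#include <vector>

// Hypothetical flattened op: each op defines at most one operand (by integer id)
// and its definition has a global index; def_of_operand[v] lists every
// definition index of operand v.
struct SimpleOp {
    int defined_operand;   // -1 if the op defines nothing
    int def_index;         // global index of this op's definition
};

// Unpredicated gen/kill for reaching definitions over one block.
// gen and kill must be sized to the total number of definitions by the caller.
void rd_gen_kill(const std::vector<SimpleOp> &block,
                 const std::vector<std::vector<int>> &def_of_operand,
                 std::vector<bool> &gen, std::vector<bool> &kill)
{
    for (const SimpleOp &op : block) {
        if (op.defined_operand < 0)
            continue;
        for (int d : def_of_operand[op.defined_operand]) {
            gen[d] = false;   // a later def of the operand shadows earlier gens
            kill[d] = true;   // every other def of that operand is killed
        }
        gen[op.def_index] = true;     // this op's own def reaches the block exit
        kill[op.def_index] = false;
    }
}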
Example 10
0
PUBLIC void
liveness_gen_kill (Compound_region* blk, Op* exit_op,
		   Liveness_solver* global_dfa, int live_idx)
{
   if (dbg(dfa))
      cdbg << "liveness_gen_kill, block " << blk->id() << endl;

   if (dbg(dfa,1)) cdbg << "." << flush;

   PQS* pqs = get_pqs(blk);
   VR_map* vr_map = get_local_vr_map (blk);

   if(pqs == NULL || vr_map == NULL) {
     create_local_analysis_info_for_all_hbs_bbs(el_get_enclosing_procedure(blk));
     pqs = get_pqs(blk);
     vr_map = get_local_vr_map (blk);
   }
   Pred_jar pj(blk);

   assert(pqs != NULL && vr_map != NULL);

   int& vr_count = vr_map->vr_count;
   Hash_map<Operand,int>& vr_to_index = vr_map->vr_to_index;
   Vector<Operand>& index_to_vr = vr_map->index_to_vr;

   // This is a hack to get around the inability to analyze modulo-scheduled
   // loops.  Dataflow information is computed before modulo scheduling and
   // saved as an attribute.  Then future dataflow queries get information from
   // this attribute. -KF 2/2005
   if (blk->parent()->is_loopbody() && blk->flag(EL_REGION_ROT_REG_ALLOCATED)) {

     Msched_attr* ms_attr = get_msched_attr(blk->parent());

     if (ms_attr == NULL) {
       El_punt("Cannot perform dataflow analysis on modulo scheduled loops.");
     }
     
     set_bits_from_operand_list(global_dfa, ms_attr->liveness_gen,
                                global_dfa->live_info[live_idx].liveness_gen);
     set_bits_from_operand_list(global_dfa, ms_attr->liveness_kill,
                                global_dfa->live_info[live_idx].liveness_kill);
     set_bits_from_operand_list(global_dfa, ms_attr->down_exposed_use_gen,
                                global_dfa->live_info[live_idx].down_exposed_use_gen);
     set_bits_from_operand_list(global_dfa, ms_attr->down_exposed_use_kill,
                                global_dfa->live_info[live_idx].down_exposed_use_kill);
     set_bits_from_operand_list(global_dfa, ms_attr->up_exposed_def_gen,
                                global_dfa->live_info[live_idx].up_exposed_def_gen);
     set_bits_from_operand_list(global_dfa, ms_attr->up_exposed_def_kill,
                                global_dfa->live_info[live_idx].up_exposed_def_kill);
     set_bits_from_operand_list(global_dfa, ms_attr->down_exposed_def_gen,
                                global_dfa->live_info[live_idx].down_exposed_def_gen);
     set_bits_from_operand_list(global_dfa, ms_attr->down_exposed_def_kill,
                                global_dfa->live_info[live_idx].down_exposed_def_kill);
     return;
   }

   Bitvector interesting(vr_count) ;
   for (int vic = 0 ; vic < vr_count ; vic++) {
      if (global_dfa->is_interesting(index_to_vr[vic])) interesting.set_bit(vic) ;
   }

   Pred_cookie cond_cq = pj.get_lub_path_guard(get_first_region_op_from_subregions(blk), exit_op) ;

   // create vectors of pred expressions for gen/kill

   Pred_cookie fpq = Pred_jar::get_false() ;
   Pred_cookie tpq = Pred_jar::get_true() ;

   if (!pq_up_use_gen_ptr) {
      pq_up_use_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
      pq_up_use_kill_ptr = new Vector<Pred_cookie>(vr_count, tpq) ;
      pq_up_def_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
      pq_up_def_kill_ptr = new Vector<Pred_cookie>(vr_count, tpq) ;
      pq_dn_use_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
      pq_dn_use_kill_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
      pq_dn_def_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
      pq_dn_def_kill_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
   }
   else {
      if ((int)pq_up_use_gen_ptr->size() < vr_count) {
         delete pq_up_use_gen_ptr ;
         delete pq_up_use_kill_ptr ;
         delete pq_up_def_gen_ptr ;
         delete pq_up_def_kill_ptr ;
         delete pq_dn_use_gen_ptr ;
         delete pq_dn_use_kill_ptr ;
         delete pq_dn_def_gen_ptr ;
         delete pq_dn_def_kill_ptr ;
         pq_up_use_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
	 pq_up_use_kill_ptr = new Vector<Pred_cookie>(vr_count, tpq) ;
	 pq_up_def_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
	 pq_up_def_kill_ptr = new Vector<Pred_cookie>(vr_count, tpq) ;
	 pq_dn_use_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
	 pq_dn_use_kill_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
	 pq_dn_def_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
	 pq_dn_def_kill_ptr = new Vector<Pred_cookie>(vr_count, fpq) ;
      }
      else {
	 int j ;
         for (j=0 ; j < vr_count ; j++) {
	    (*pq_up_use_gen_ptr)[j].reset_to_false() ;
	    (*pq_up_use_kill_ptr)[j].reset_to_true() ;
	    (*pq_up_def_gen_ptr)[j].reset_to_false() ;
	    (*pq_up_def_kill_ptr)[j].reset_to_true() ;
	    (*pq_dn_use_gen_ptr)[j].reset_to_false() ;
	    (*pq_dn_use_kill_ptr)[j].reset_to_false() ;
	    (*pq_dn_def_gen_ptr)[j].reset_to_false() ;
	    (*pq_dn_def_kill_ptr)[j].reset_to_false() ;
	 }
      }
   }
   
   Vector<Pred_cookie>& pq_up_use_gen = *pq_up_use_gen_ptr ;
   Vector<Pred_cookie>& pq_up_use_kill = *pq_up_use_kill_ptr;

   Vector<Pred_cookie>& pq_up_def_gen = *pq_up_def_gen_ptr ;
   Vector<Pred_cookie>& pq_up_def_kill = *pq_up_def_kill_ptr ;


   Operand_iter* dest_oper_ptr = NULL ;
   Operand_iter* src_oper_ptr = NULL ;
   if (global_dfa->filter_mask & ANALYZE_MEMVR) {
      dest_oper_ptr = new Op_complete_dests() ;
      src_oper_ptr = new Op_complete_inputs() ;
   }
   else {
      dest_oper_ptr = new Op_all_dests() ;
      src_oper_ptr = new Op_all_inputs() ;
   }
   

   Operand_iter& dest_oper = *dest_oper_ptr ;
   Operand_iter& src_oper = *src_oper_ptr ;   
   
   // traverse ops in hyperblock in reverse linear order from exit_op to entry
   for (Region_ops_reverse_C0_order opi(blk, exit_op); opi != 0; opi++) {
      assert ( (*opi)->is_op() );
      Op* op = (Op*) (*opi);
    
      if (is_remap(op)) {
	 // get VR_name
	 Operand& vr = op->src(SRC1);
	 assert (vr.is_vr_name());

	 // remap uses vr_min .. vr_{max-1}, defs vr_min .. vr_max
	 int idx = vr.min_omega();
	 int max_idx = vr.max_omega();
	 int i ;
	 for (i=idx; i<=max_idx; i++) { // define idx..max_idx
	    Operand def = new Reg(vr);
	    def.incr_omega(i);

	    int ii = vr_to_index.value(def);

	    pq_up_use_gen[ii].lub_diff(cond_cq) ;
	    pq_up_use_kill[ii].lub_diff(cond_cq);
	    pq_up_def_gen[ii].lub_sum(cond_cq) ;

	 }
	 for (i=idx; i<max_idx; i++) { // use idx..max_idx-1
	    Operand use = new Reg(vr);
	    use.incr_omega(i);

	    int ii = vr_to_index.value(use);

	    pq_up_use_gen[ii].lub_sum(cond_cq) ;
	    pq_up_use_kill[ii].lub_sum(cond_cq) ;
	 }

	 // take care of the high end of the evr range (high end for backward)
	 Operand k_ll = new Reg(vr);
	 k_ll.incr_omega(max_idx);
      
	 int kii = vr_to_index.value(k_ll);
      
	 pq_up_use_gen[kii] = fpq ;
	 pq_up_use_kill[kii] = fpq ;      
	 pq_up_def_gen[kii] = fpq ;
	 pq_up_def_kill[kii] = fpq;      
	 pq_up_use_kill[kii].lub_sum(cond_cq);   // live under exit predicate
	 pq_up_def_kill[kii].lub_sum(cond_cq);   // live under exit predicate
      }
      else if (is_swp_branch(op)) {
	 // Special case for SWP branch so that src and dest guards
	 // are the staging predicate. This code needs to be fixed once
	 // the PQS interface takes care of this internally

	 // process defs
	 for (dest_oper(op) ; dest_oper != 0 ; dest_oper++) {
	    Operand oper = (*dest_oper) ;
	
	    if (global_dfa->is_interesting(oper)) {
	       int ii = vr_to_index.value(oper);

               pq_up_use_gen[ii].lub_diff(cond_cq) ;
	       pq_up_use_kill[ii].lub_diff(cond_cq) ;
	       pq_up_def_gen[ii].lub_sum(cond_cq) ;
	    }
	 }
	 // process uses
	 for (src_oper(op) ; src_oper != 0 ; src_oper++) {
	    Operand& oper = (*src_oper) ;

	    if (global_dfa->is_interesting(oper)) {
	       int ii = vr_to_index.value(oper);

	       pq_up_use_gen[ii].lub_sum(cond_cq) ;
	       pq_up_use_kill[ii].lub_sum(cond_cq) ;

	    }
	 }
      }
      else {
	 // for each op, process defs, then uses

	 //process defs
	 for (dest_oper(op) ; dest_oper != 0 ; dest_oper++) {
	    Operand oper = (*dest_oper) ;
	
	    if (global_dfa->is_interesting(oper)) {
               int ii = vr_to_index.value(oper);
               El_ref tmp1 = dest_oper.get_ref() ;
               Pred_cookie pq1 = pj.get_glb_guard(tmp1) ;
               Pred_cookie pq2 = pj.get_lub_guard(tmp1) ;

               pq_up_use_gen[ii].lub_diff(pq1) ;
               pq_up_use_kill[ii].lub_diff(pq1) ;
               pq_up_def_gen[ii].lub_sum(pq2) ;
            }
	 }
	 // process uses
	 for (src_oper(op) ; src_oper != 0 ; src_oper++) {
	    Operand& oper = (*src_oper) ;

	    if (global_dfa->is_interesting(oper)) {
               int ii = vr_to_index.value(oper);
               El_ref tmp1 = src_oper.get_ref() ;
               Pred_cookie pq1 = pj.get_lub_guard(tmp1) ;

               pq_up_use_gen[ii].lub_sum(pq1) ;
               pq_up_use_kill[ii].lub_sum(pq1) ;

	    }
	 }
      }
   }


   Vector<Pred_cookie>& pq_dn_use_gen = *pq_dn_use_gen_ptr ;
   Vector<Pred_cookie>& pq_dn_use_kill = *pq_dn_use_kill_ptr ;

   Vector<Pred_cookie>& pq_dn_def_gen = *pq_dn_def_gen_ptr ;
   Vector<Pred_cookie>& pq_dn_def_kill = *pq_dn_def_kill_ptr ;

   //
   // traverse ops in hyperblock in linear order from entry to exit_op
   //
   for (Region_ops_C0_order opi2(blk); opi2 != 0 ; opi2++) {
      assert ( (*opi2)->is_op() );
      Op* op = (Op*) (*opi2);
    
      if (is_remap(op)) {
	 // get VR_name
	 Operand& vr = op->src(SRC1);
	 assert (vr.is_vr_name());

	 // remap uses vr_min .. vr_{max-1}, defs vr_min .. vr_max

	 int idx = vr.min_omega();
	 int max_idx = vr.max_omega();
	 int i;
	 for (i=idx; i<=max_idx; i++) { // use idx..max_idx-1
	    Operand use = new Reg(vr);
	    use.incr_omega(i);

	    int ii = vr_to_index.value(use);

	    pq_dn_use_gen[ii].lub_sum(cond_cq) ;
	    pq_dn_use_kill[ii].lub_sum(cond_cq) ;
	 }
	 for (i=idx+1 ; i<=max_idx; i++) { // define idx..max_idx
	    Operand def = new Reg(vr);
	    def.incr_omega(i);

	    int ii = vr_to_index.value(def);

	    pq_dn_use_gen[ii].lub_diff(cond_cq) ;
	    pq_dn_use_kill[ii].lub_diff(cond_cq) ; 
	    pq_dn_def_gen[ii].lub_sum(cond_cq) ;

	 }

	 // take care of the low end of the evr range (low end for forward)
	 Operand k_ll = new Reg(vr);
	 k_ll.incr_omega(idx);
      
	 int kii = vr_to_index.value(k_ll);

	 pq_dn_use_gen[kii] = fpq;
	 pq_dn_use_kill[kii] = fpq;
	 pq_dn_def_gen[kii] = fpq;
	 pq_dn_def_kill[kii] = fpq;
	 pq_dn_use_kill[kii].lub_sum(cond_cq);   // down_exposed under exit predicate
	 pq_dn_def_kill[kii].lub_sum(cond_cq);   // down_exposed under exit predicate

      }
      else if (is_swp_branch(op)) {
	 // Special case for SWP branch so that src and dest guards
	 // are the staging predicate. This code needs to be fixed once
	 // the PQS interface takes care of this internally

	 // process uses
	 for (src_oper(op) ; src_oper != 0 ; src_oper++) {
	    Operand& oper = (*src_oper) ;

	    // don't compute dn_use for mem_vr's
	    if (global_dfa->is_interesting(oper)) {
	       int ii = vr_to_index.value(oper);

	       pq_dn_use_gen[ii].lub_sum(cond_cq) ;
	       pq_dn_use_kill[ii].lub_sum(cond_cq) ;
	    }
	 }
	 // process defs
	 for (dest_oper(op) ; dest_oper != 0 ; dest_oper++) {
	    Operand oper = (*dest_oper) ;

	    if (global_dfa->is_interesting(oper)) {
	       int ii = vr_to_index.value(oper);

	       pq_dn_use_gen[ii].lub_diff(cond_cq) ;
	       pq_dn_use_kill[ii].lub_diff(cond_cq) ;
	       pq_dn_def_gen[ii].lub_sum(cond_cq) ;
            }
	 }
      }

      else {
	 // for each op, process uses, then defs

	 // process uses
	 for (src_oper(op) ; src_oper != 0 ; src_oper++) {
	    Operand& oper = (*src_oper) ;

	    // don't compute dn_use for mem_vr's
	    if (oper.is_reg() || oper.is_macro_reg()) {
	       int ii = vr_to_index.value(oper);
               El_ref tmp1 = src_oper.get_ref() ;
               Pred_cookie pq1 = pj.get_lub_guard(tmp1) ;

	       pq_dn_use_gen[ii].lub_sum(pq1) ;
	       pq_dn_use_kill[ii].lub_sum(pq1) ;
	    }
	 }
	 // process defs
	 for (dest_oper(op) ; dest_oper != 0 ; dest_oper++) {
	    Operand oper = (*dest_oper) ;

	    if (global_dfa->is_interesting(oper)) {
	       int ii = vr_to_index.value(oper);
               El_ref tmp1 = dest_oper.get_ref() ;
               Pred_cookie pq1 = pj.get_glb_guard(tmp1) ;
               Pred_cookie pq2 = pj.get_lub_guard(tmp1) ;

	       pq_dn_use_gen[ii].lub_diff(pq1) ;
	       pq_dn_use_kill[ii].lub_diff(pq1) ;
	       pq_dn_def_gen[ii].lub_sum(pq2) ;
	    }
	 }
      }
   }
   
   if (global_dfa == NULL) return;  // when timing pqs, global_dfa is null

   // expand local information into global gen/kill vectors
   if (dbg(dfa,5)) cdbg << "up_use_gen for blk " << blk->id();
   int ii ;
   for (ii = 0; ii<vr_count; ii++) {
      if(interesting.bit(ii)) {
	 Operand& vr = index_to_vr[ii];
	 if (!tpq.is_disjoint(pq_up_use_gen[ii])) {  // gen if live on some path
	    int idx = global_dfa->operand_map.value(vr);
	    if (dbg(dfa,5)) cdbg << " " << vr;
	    global_dfa->live_info[live_idx].liveness_gen.set_bit(idx);
	 }
      }
   }
   if (dbg(dfa,5)) cdbg << "\nup_use_kill for blk " << blk->id();
   for (ii = 0; ii<vr_count; ii++) {
      if(interesting.bit(ii)) {
	 Operand& vr = index_to_vr[ii];
	 if (cond_cq.is_disjoint(pq_up_use_kill[ii])) {  // kill if dead on all paths 
	    int idx = global_dfa->operand_map.value(vr);
	    if (dbg(dfa,5)) cdbg << " " << vr;
	    global_dfa->live_info[live_idx].liveness_kill.set_bit(idx);
	 }
      }
   }
   if (dbg(dfa,5)) cdbg << "\nup_def_gen for blk " << blk->id();
   for (ii = 0; ii<vr_count; ii++) {
      if(interesting.bit(ii)) {
	 Operand& vr = index_to_vr[ii];
         if (!tpq.is_disjoint(pq_up_def_gen[ii])) {  // output if def on any paths
	    int idx = global_dfa->operand_map.value(vr);
	    if (dbg(dfa,5)) cdbg << " " << vr;
	    global_dfa->live_info[live_idx].up_exposed_def_gen.set_bit(idx);
	 }
      }
   }
   if (dbg(dfa,5)) cdbg << "\nup_def_kill for blk " << blk->id();
   for (ii = 0; ii<vr_count; ii++) {
      if(interesting.bit(ii)) {
	 Operand& vr = index_to_vr[ii];
         if (cond_cq.is_disjoint(pq_up_def_kill[ii])) {  // kill if killed on all paths
	    int idx = global_dfa->operand_map.value(vr);
	    if (dbg(dfa,5)) cdbg << " " << vr;
	    global_dfa->live_info[live_idx].up_exposed_def_kill.set_bit(idx);
	 }
      }
   }
   if (dbg(dfa,5)) cdbg << "\ndn_use_gen for blk " << blk->id();
   for (ii = 0; ii<vr_count; ii++) {
      if(interesting.bit(ii)) {
	 Operand& vr = index_to_vr[ii];
         if (!cond_cq.is_disjoint(pq_dn_use_gen[ii])) {  // gen if live on some path
	    int idx = global_dfa->operand_map.value(vr);
	    if (dbg(dfa,5)) cdbg << " " << vr;
	    global_dfa->live_info[live_idx].down_exposed_use_gen.set_bit(idx);
	 }
      }
   }
   if (dbg(dfa,5)) cdbg << "\ndn_use_kill for blk " << blk->id();
   for (ii = 0; ii<vr_count; ii++) {
      if(interesting.bit(ii)) {
	 Operand& vr = index_to_vr[ii];
         if (cond_cq.is_disjoint(pq_dn_use_kill[ii])) {  // kill if dead on all paths
	    int idx = global_dfa->operand_map.value(vr);
            if (dbg(dfa,5)) cdbg << " " << vr;
	    global_dfa->live_info[live_idx].down_exposed_use_kill.set_bit(idx);
	 }
      }
   }
   if (dbg(dfa,5)) cdbg << "\ndn_def_gen for blk " << blk->id();
   for (ii = 0; ii<vr_count; ii++) {
      if(interesting.bit(ii)) {
	 Operand& vr = index_to_vr[ii];
         if (!cond_cq.is_disjoint(pq_dn_def_gen[ii])) {  // output if def on any paths
            int idx = global_dfa->operand_map.value(vr);
	    if (dbg(dfa,5)) cdbg << " " << vr;
	    global_dfa->live_info[live_idx].down_exposed_def_gen.set_bit(idx);
	 }
      }
   }
   if (dbg(dfa,5)) cdbg << "\ndn_def_kill for blk " << blk->id();
   for (ii = 0; ii<vr_count; ii++) {
      if(interesting.bit(ii)) {
	 Operand& vr = index_to_vr[ii];
         if (cond_cq.is_disjoint(pq_dn_def_kill[ii])) {  // kill if killed on all paths
            int idx = global_dfa->operand_map.value(vr);
	    if (dbg(dfa,5)) cdbg << " " << vr;
	    global_dfa->live_info[live_idx].down_exposed_def_kill.set_bit(idx);
         }
      }	 
   }
   if (dbg(dfa,5)) cdbg << endl;
   // ntclark 3/25/03 - fix memory leaks
   delete dest_oper_ptr;
   delete src_oper_ptr;
}
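liveness_gen_kill builds per-block use/def summaries under predicate guards so the global solver can iterate the usual backward liveness equations. Ignoring predication, the summary and the transfer function it feeds reduce to live_in = gen ∪ (live_out − kill), where gen holds the upward-exposed uses and kill the operands defined in the block. A small sketch with plain bitsets (types, sizes, and the op representation are illustrative, not Elcor's):

#include <bitset>
#include <cstddef>

constexpr std::size_t kMaxVr = 256;    // illustrative bound on virtual registers
using VrSet = std::bitset<kMaxVr>;

// Per-block summary: gen = upward-exposed uses, kill = operands defined in the block.
struct LiveInfo {
    VrSet gen;
    VrSet kill;
};

// Backward transfer function: live_in = gen | (live_out & ~kill).
VrSet live_in(const LiveInfo &blk, const VrSet &live_out)
{
    return blk.gen | (live_out & ~blk.kill);
}

// Build the summary by scanning the block's ops from exit to entry, as
// liveness_gen_kill does: a def removes later uses from gen (they are no
// longer upward-exposed), then the op's own uses are added.
struct LiveOp { VrSet uses, defs; };

LiveInfo summarize(const LiveOp *ops, std::size_t n)
{
    LiveInfo info;
    for (std::size_t i = n; i-- > 0; ) {       // reverse order: exit to entry
        info.gen  = (info.gen & ~ops[i].defs) | ops[i].uses;
        info.kill |= ops[i].defs;
    }
    return info;
}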
Example 11
0
PRIVATE VR_map*
create_vr_map_for_block (Compound_region* blk)
{
  assert (blk->is_bb() || blk->is_hb());

  if (dbg(dfa))
    cdbg << "create_vr_map_for_block " << blk->id() << endl;

  VR_map* vr_map = new VR_map;
  int& vr_count = vr_map->vr_count;
  int& vrnum_count = vr_map->vrnum_count;
  Hash_map<Operand,int>& vr_to_index = vr_map->vr_to_index;
  Vector<Operand>& index_to_vr = vr_map->index_to_vr;

  Hash_map<int,int>& vrnum_to_index = vr_map->vrnum_to_index;
  Vector<int>& index_to_vrnum = vr_map->index_to_vrnum;
  List<Operand> local_vr_names ;
  
  vr_count = 0;
  vrnum_count = 0;

  // traverse ops in hyperblock in forward linear order
  for (Region_ops_C0_order opi(blk) ; opi != 0 ; opi++) {
    assert ( (*opi)->is_op() );
    Op* op = (Op*) (*opi);

    // if op is a remap, add its SRC1 to the list of names to be expanded
    if (is_remap(op)) {
       local_vr_names.add_tail(op->src(SRC1)) ;
    }

    // process uses
    for (Op_complete_inputs src_oper(op) ; src_oper != 0 ; src_oper++) {
      Operand tmpo = (*src_oper) ;
      if (!vr_to_index.is_bound(tmpo)) {
	if (tmpo.is_reg() || tmpo.is_mem_vr() || tmpo.is_macro_reg()) {
	  if (dbg(dfa,4)) cdbg << "vr_to_index " << vr_count << " " << tmpo << endl;
	  vr_to_index.bind(tmpo, vr_count++);
	}
      }
      if (tmpo.is_reg() || tmpo.is_mem_vr() || tmpo.is_macro_reg()) {
        if (!vrnum_to_index.is_bound(tmpo.vr_num())) {
	  if (dbg(dfa,4)) cdbg << "vrnum_to_index " << vrnum_count << " " << tmpo.vr_num() << endl;
	  vrnum_to_index.bind(tmpo.vr_num(), vrnum_count++);
	}
      }

    }
    // process defs
    for (Op_complete_dests dest_oper(op) ; dest_oper != 0 ; dest_oper++) {
      Operand tmpo = (*dest_oper) ; 
      if (!vr_to_index.is_bound(tmpo)) {
	if (tmpo.is_reg() || tmpo.is_mem_vr() || tmpo.is_macro_reg()) {
	  if (dbg(dfa,4)) cdbg << "vr_to_index " << vr_count << " " << tmpo << endl;
	  vr_to_index.bind(tmpo, vr_count++);
	}
      }
      if (tmpo.is_reg() || tmpo.is_mem_vr() || tmpo.is_macro_reg()) {
        if (!vrnum_to_index.is_bound(tmpo.vr_num())) {
	  if (dbg(dfa,4)) cdbg << "vrnum_to_index " << vrnum_count << " " << tmpo.vr_num() << endl;
	  vrnum_to_index.bind(tmpo.vr_num(), vrnum_count++);
	}
      }

    }
  }

  //  expand the vr_names over the min...max range
  for (List_iterator<Operand> vri(local_vr_names) ; vri != 0 ; vri++) {
     Operand oper = *vri ;
     int min_omega = oper.min_omega() ;
     int max_omega = oper.max_omega() ;
     for (int i=min_omega ; i<=max_omega ; i++) { // expand over min_omega..max_omega
  	Operand new_reg = new Reg(oper);
	new_reg.incr_omega(i);
	if (!vr_to_index.is_bound(new_reg)) {
	   vr_to_index.bind(new_reg, vr_count++);
	}
        // mchu
	if (!vrnum_to_index.is_bound(new_reg.vr_num())) {
	   vrnum_to_index.bind(new_reg.vr_num(), vrnum_count++);
	}

     }
  }
  
  index_to_vr.resize(vr_count);
  index_to_vrnum.resize(vrnum_count);
  for (Hash_map_iterator<Operand, int> mi(vr_to_index); mi != 0; mi++) {
    index_to_vr[(*mi).second] = (*mi).first;
  }

  for (Hash_map_iterator<int, int> mi(vrnum_to_index); mi != 0; mi++) {
    index_to_vrnum[(*mi).second] = (*mi).first;
  }

  return vr_map;
}
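create_vr_map_for_block builds a bidirectional mapping so the dataflow vectors can be indexed densely: a hash map from each operand (and VR number) to a consecutive index, plus the reverse vector from index back to operand. The same pattern in a self-contained form, using standard containers instead of the Elcor Hash_map/Vector (all names here are illustrative):

#include <string>
#include <unordered_map>
#include <vector>

// Dense indexer: assigns consecutive indices to distinct keys and keeps the
// reverse table, mirroring vr_to_index / index_to_vr.
template <typename Key>
class DenseIndex {
    std::unordered_map<Key, int> to_index_;
    std::vector<Key> to_key_;
public:
    // Return the existing index for key, or assign the next free one.
    int intern(const Key &key) {
        auto it = to_index_.find(key);
        if (it != to_index_.end())
            return it->second;
        int idx = static_cast<int>(to_key_.size());
        to_index_.emplace(key, idx);
        to_key_.push_back(key);
        return idx;
    }
    const Key &key_of(int idx) const { return to_key_[idx]; }
    int size() const { return static_cast<int>(to_key_.size()); }
};

// Usage: index operands as they are encountered while scanning a block.
//   DenseIndex<std::string> vr_index;
//   int r3 = vr_index.intern("r3");   // first time seen: index 0
//   int r7 = vr_index.intern("r7");   // index 1
//   vr_index.key_of(r3);              // "r3"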