void
El_compute_maxreduce_derived_preds(Hyperblock *hb,
                                   Operand &on_pred, Operand &off_pred,
                                   Hash_set<Operand> &cpr_preds,
                                   Hash_set<Operand> &derived_on_preds,
                                   Hash_set<Operand> &derived_off_preds)
{
  Op *op;
  Operand pred, dst_pred;

  // Seed the derived sets with the ON/OFF predicates themselves.
  derived_on_preds.clear();
  derived_off_preds.clear();
  derived_on_preds += on_pred;
  derived_off_preds += off_pred;

  // Walk the cmpp ops in C0 order, propagating derived status from guard
  // predicates already known to be derived-ON or derived-OFF.
  for (Region_ops_C0_order op_i(hb); op_i != 0; op_i++) {
    op = *op_i;
    if (!is_cmpp(op))
      continue;
    pred = op->src(PRED1);

    if (derived_on_preds.is_member(pred)) {
      for (Op_explicit_dests dst_i(op); dst_i != 0; dst_i++) {
        dst_pred = *dst_i;
        if (!dst_pred.is_reg())
          continue;
        if (cpr_preds.is_member(dst_pred))
          continue;
        derived_on_preds += dst_pred;
        if (dbg(cpr, 3))
          cdbg << "Adding " << dst_pred << " to derived ON preds" << endl;
      }
    } else if (derived_off_preds.is_member(pred)) {
      for (Op_explicit_dests dst_i(op); dst_i != 0; dst_i++) {
        dst_pred = *dst_i;
        if (!dst_pred.is_reg())
          continue;
        if (cpr_preds.is_member(dst_pred))
          continue;
        derived_off_preds += dst_pred;
        if (dbg(cpr, 3))
          cdbg << "Adding " << dst_pred << " to derived OFF preds" << endl;
      }
    }
  }

  // Remove the seeds so only strictly derived predicates remain.
  derived_on_preds -= on_pred;
  derived_off_preds -= off_pred;
}
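/*
 * Usage sketch (not part of the original source; the driver below is
 * hypothetical and only assumes Elcor types already used above, e.g. a
 * default-constructible Hash_set<Operand>).  It shows the intended calling
 * pattern: seed with the CPR ON/OFF predicates, then query membership in
 * the derived sets, which exclude the seeds themselves.
 */
static bool
example_pred_is_derived_on(Hyperblock *hb, Operand on_pred, Operand off_pred,
                           Hash_set<Operand> &cpr_preds, Operand query_pred)
{
  Hash_set<Operand> derived_on_preds;
  Hash_set<Operand> derived_off_preds;

  El_compute_maxreduce_derived_preds(hb, on_pred, off_pred, cpr_preds,
                                     derived_on_preds, derived_off_preds);

  // on_pred itself is not in the set; the routine strips the seeds.
  return derived_on_preds.is_member(query_pred);
}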
/*
 * Currently only SBs are frpizable.
 */
bool
El_is_frpizable(Hyperblock *hb)
{
  Op *op;
  Operand pred;

  /* See if there are any ops guarded by a predicate. */
  /* Also, no table jumps allowed. */
  for (Region_ops_C0_order op_i(hb); op_i != 0; op_i++) {
    op = *op_i;
    if (op->predicated()) {
      pred = op->src(PRED1);
      if (!pred.is_predicate_true())
        return (false);
    }
    if (op->flag(EL_OPER_TABLE_JUMP))
      return (false);
  }
  return (true);
}
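/*
 * Usage sketch (hypothetical, not from the original source): gate an FRP
 * transformation on El_is_frpizable so hyperblocks containing predicated
 * ops or table jumps are skipped.  example_try_frpize and the omitted
 * transformation body are illustrative names only.
 */
static bool
example_try_frpize(Hyperblock *hb)
{
  if (!El_is_frpizable(hb)) {
    if (dbg(cpr, 2))
      cdbg << "Skipping non-frpizable hyperblock " << hb->id() << endl;
    return false;
  }
  // ... apply the FRP transformation here (omitted) ...
  return true;
}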
PUBLIC void
liveness_gen_kill(Compound_region* blk, Op* exit_op,
                  Liveness_solver* global_dfa, int live_idx)
{
  if (dbg(dfa)) cdbg << "liveness_gen_kill, block " << blk->id() << endl;
  if (dbg(dfa, 1)) cdbg << "." << flush;

  PQS* pqs = get_pqs(blk);
  VR_map* vr_map = get_local_vr_map(blk);
  if (pqs == NULL || vr_map == NULL) {
    create_local_analysis_info_for_all_hbs_bbs(el_get_enclosing_procedure(blk));
    pqs = get_pqs(blk);
    vr_map = get_local_vr_map(blk);
  }
  Pred_jar pj(blk);
  assert(pqs != NULL && vr_map != NULL);

  int& vr_count = vr_map->vr_count;
  Hash_map<Operand, int>& vr_to_index = vr_map->vr_to_index;
  Vector<Operand>& index_to_vr = vr_map->index_to_vr;

  // This is a hack to get around the inability to analyze modulo-scheduled
  // loops.  Dataflow information is computed before modulo scheduling and
  // saved as an attribute.  Then future dataflow queries get information
  // from this attribute.  -KF 2/2005
  if (blk->parent()->is_loopbody() && blk->flag(EL_REGION_ROT_REG_ALLOCATED)) {
    Msched_attr* ms_attr = get_msched_attr(blk->parent());
    if (ms_attr == NULL) {
      El_punt("Cannot perform dataflow analysis on modulo scheduled loops.");
    }
    set_bits_from_operand_list(global_dfa, ms_attr->liveness_gen,
                               global_dfa->live_info[live_idx].liveness_gen);
    set_bits_from_operand_list(global_dfa, ms_attr->liveness_kill,
                               global_dfa->live_info[live_idx].liveness_kill);
    set_bits_from_operand_list(global_dfa, ms_attr->down_exposed_use_gen,
                               global_dfa->live_info[live_idx].down_exposed_use_gen);
    set_bits_from_operand_list(global_dfa, ms_attr->down_exposed_use_kill,
                               global_dfa->live_info[live_idx].down_exposed_use_kill);
    set_bits_from_operand_list(global_dfa, ms_attr->up_exposed_def_gen,
                               global_dfa->live_info[live_idx].up_exposed_def_gen);
    set_bits_from_operand_list(global_dfa, ms_attr->up_exposed_def_kill,
                               global_dfa->live_info[live_idx].up_exposed_def_kill);
    set_bits_from_operand_list(global_dfa, ms_attr->down_exposed_def_gen,
                               global_dfa->live_info[live_idx].down_exposed_def_gen);
    set_bits_from_operand_list(global_dfa, ms_attr->down_exposed_def_kill,
                               global_dfa->live_info[live_idx].down_exposed_def_kill);
    return;
  }

  // Mark the VRs this solver actually cares about.
  Bitvector interesting(vr_count);
  for (int vic = 0; vic < vr_count; vic++) {
    if (global_dfa->is_interesting(index_to_vr[vic]))
      interesting.set_bit(vic);
  }

  // Predicate expression guarding the path from block entry to exit_op.
  Pred_cookie cond_cq =
    pj.get_lub_path_guard(get_first_region_op_from_subregions(blk), exit_op);

  // create vectors of pred expressions for gen/kill
  Pred_cookie fpq = Pred_jar::get_false();
  Pred_cookie tpq = Pred_jar::get_true();
  if (!pq_up_use_gen_ptr) {
    pq_up_use_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq);
    pq_up_use_kill_ptr = new Vector<Pred_cookie>(vr_count, tpq);
    pq_up_def_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq);
    pq_up_def_kill_ptr = new Vector<Pred_cookie>(vr_count, tpq);
    pq_dn_use_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq);
    pq_dn_use_kill_ptr = new Vector<Pred_cookie>(vr_count, fpq);
    pq_dn_def_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq);
    pq_dn_def_kill_ptr = new Vector<Pred_cookie>(vr_count, fpq);
  } else {
    if ((int)pq_up_use_gen_ptr->size() < vr_count) {
      delete pq_up_use_gen_ptr;
      delete pq_up_use_kill_ptr;
      delete pq_up_def_gen_ptr;
      delete pq_up_def_kill_ptr;
      delete pq_dn_use_gen_ptr;
      delete pq_dn_use_kill_ptr;
      delete pq_dn_def_gen_ptr;
      delete pq_dn_def_kill_ptr;
      pq_up_use_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq);
      pq_up_use_kill_ptr = new Vector<Pred_cookie>(vr_count, tpq);
      pq_up_def_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq);
      pq_up_def_kill_ptr = new Vector<Pred_cookie>(vr_count, tpq);
      pq_dn_use_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq);
      pq_dn_use_kill_ptr = new Vector<Pred_cookie>(vr_count, fpq);
      pq_dn_def_gen_ptr = new Vector<Pred_cookie>(vr_count, fpq);
      pq_dn_def_kill_ptr = new Vector<Pred_cookie>(vr_count, fpq);
    } else {
      int j;
      for (j = 0; j < vr_count; j++) {
        (*pq_up_use_gen_ptr)[j].reset_to_false();
        (*pq_up_use_kill_ptr)[j].reset_to_true();
        (*pq_up_def_gen_ptr)[j].reset_to_false();
        (*pq_up_def_kill_ptr)[j].reset_to_true();
        (*pq_dn_use_gen_ptr)[j].reset_to_false();
        (*pq_dn_use_kill_ptr)[j].reset_to_false();
        (*pq_dn_def_gen_ptr)[j].reset_to_false();
        (*pq_dn_def_kill_ptr)[j].reset_to_false();
      }
    }
  }

  Vector<Pred_cookie>& pq_up_use_gen = *pq_up_use_gen_ptr;
  Vector<Pred_cookie>& pq_up_use_kill = *pq_up_use_kill_ptr;
  Vector<Pred_cookie>& pq_up_def_gen = *pq_up_def_gen_ptr;
  Vector<Pred_cookie>& pq_up_def_kill = *pq_up_def_kill_ptr;

  Operand_iter* dest_oper_ptr = NULL;
  Operand_iter* src_oper_ptr = NULL;
  if (global_dfa->filter_mask & ANALYZE_MEMVR) {
    dest_oper_ptr = new Op_complete_dests();
    src_oper_ptr = new Op_complete_inputs();
  } else {
    dest_oper_ptr = new Op_all_dests();
    src_oper_ptr = new Op_all_inputs();
  }
  Operand_iter& dest_oper = *dest_oper_ptr;
  Operand_iter& src_oper = *src_oper_ptr;

  //
  // traverse ops in hyperblock in reverse linear order from exit_op to entry
  //
  for (Region_ops_reverse_C0_order opi(blk, exit_op); opi != 0; opi++) {
    assert((*opi)->is_op());
    Op* op = (Op*)(*opi);

    if (is_remap(op)) {
      // get VR_name
      Operand& vr = op->src(SRC1);
      assert(vr.is_vr_name());
      // remap uses vr_min .. vr_{max-1}, defs vr_min .. vr_max
      int idx = vr.min_omega();
      int max_idx = vr.max_omega();
      int i;
      for (i = idx; i <= max_idx; i++) {         // define idx..max_idx
        Operand def = new Reg(vr);
        def.incr_omega(i);
        int ii = vr_to_index.value(def);
        pq_up_use_gen[ii].lub_diff(cond_cq);
        pq_up_use_kill[ii].lub_diff(cond_cq);
        pq_up_def_gen[ii].lub_sum(cond_cq);
      }
      for (i = idx; i < max_idx; i++) {          // use idx..max_idx-1
        Operand use = new Reg(vr);
        use.incr_omega(i);
        int ii = vr_to_index.value(use);
        pq_up_use_gen[ii].lub_sum(cond_cq);
        pq_up_use_kill[ii].lub_sum(cond_cq);
      }
      // take care of the high end of the evr range (high end for backward)
      Operand k_ll = new Reg(vr);
      k_ll.incr_omega(max_idx);
      int kii = vr_to_index.value(k_ll);
      pq_up_use_gen[kii] = fpq;
      pq_up_use_kill[kii] = fpq;
      pq_up_def_gen[kii] = fpq;
      pq_up_def_kill[kii] = fpq;
      pq_up_use_kill[kii].lub_sum(cond_cq);      // live under exit predicate
      pq_up_def_kill[kii].lub_sum(cond_cq);      // live under exit predicate
    } else if (is_swp_branch(op)) {
      // Special case for SWP branch so that src and dest guards are the
      // staging predicate.  This code needs to be fixed once the PQS
      // interface takes care of this internally.
      // process defs
      for (dest_oper(op); dest_oper != 0; dest_oper++) {
        Operand oper = (*dest_oper);
        if (global_dfa->is_interesting(oper)) {
          int ii = vr_to_index.value(oper);
          pq_up_use_gen[ii].lub_diff(cond_cq);
          pq_up_use_kill[ii].lub_diff(cond_cq);
          pq_up_def_gen[ii].lub_sum(cond_cq);
        }
      }
      // process uses
      for (src_oper(op); src_oper != 0; src_oper++) {
        Operand& oper = (*src_oper);
        if (global_dfa->is_interesting(oper)) {
          int ii = vr_to_index.value(oper);
          pq_up_use_gen[ii].lub_sum(cond_cq);
          pq_up_use_kill[ii].lub_sum(cond_cq);
        }
      }
    } else {
      // for each op, process defs, then uses
      // process defs
      for (dest_oper(op); dest_oper != 0; dest_oper++) {
        Operand oper = (*dest_oper);
        if (global_dfa->is_interesting(oper)) {
          int ii = vr_to_index.value(oper);
          El_ref tmp1 = dest_oper.get_ref();
          Pred_cookie pq1 = pj.get_glb_guard(tmp1);
          Pred_cookie pq2 = pj.get_lub_guard(tmp1);
          pq_up_use_gen[ii].lub_diff(pq1);
          pq_up_use_kill[ii].lub_diff(pq1);
          pq_up_def_gen[ii].lub_sum(pq2);
        }
      }
      // process uses
      for (src_oper(op); src_oper != 0; src_oper++) {
        Operand& oper = (*src_oper);
        if (global_dfa->is_interesting(oper)) {
          int ii = vr_to_index.value(oper);
          El_ref tmp1 = src_oper.get_ref();
          Pred_cookie pq1 = pj.get_lub_guard(tmp1);
          pq_up_use_gen[ii].lub_sum(pq1);
          pq_up_use_kill[ii].lub_sum(pq1);
        }
      }
    }
  }

  Vector<Pred_cookie>& pq_dn_use_gen = *pq_dn_use_gen_ptr;
  Vector<Pred_cookie>& pq_dn_use_kill = *pq_dn_use_kill_ptr;
  Vector<Pred_cookie>& pq_dn_def_gen = *pq_dn_def_gen_ptr;
  Vector<Pred_cookie>& pq_dn_def_kill = *pq_dn_def_kill_ptr;

  //
  // traverse ops in hyperblock in linear order from entry to exit_op
  //
  for (Region_ops_C0_order opi2(blk); opi2 != 0; opi2++) {
    assert((*opi2)->is_op());
    Op* op = (Op*)(*opi2);

    if (is_remap(op)) {
      // get VR_name
      Operand& vr = op->src(SRC1);
      assert(vr.is_vr_name());
      // remap uses vr_min .. vr_{max-1}, defs vr_min .. vr_max
      int idx = vr.min_omega();
      int max_idx = vr.max_omega();
      int i;
      for (i = idx; i <= max_idx; i++) {         // use idx..max_idx
        Operand use = new Reg(vr);
        use.incr_omega(i);
        int ii = vr_to_index.value(use);
        pq_dn_use_gen[ii].lub_sum(cond_cq);
        pq_dn_use_kill[ii].lub_sum(cond_cq);
      }
      for (i = idx + 1; i <= max_idx; i++) {     // define idx+1..max_idx
        Operand def = new Reg(vr);
        def.incr_omega(i);
        int ii = vr_to_index.value(def);
        pq_dn_use_gen[ii].lub_diff(cond_cq);
        pq_dn_use_kill[ii].lub_diff(cond_cq);
        pq_dn_def_gen[ii].lub_sum(cond_cq);
      }
      // take care of the low end of the evr range (low end for forward)
      Operand k_ll = new Reg(vr);
      k_ll.incr_omega(idx);
      int kii = vr_to_index.value(k_ll);
      pq_dn_use_gen[kii] = fpq;
      pq_dn_use_kill[kii] = fpq;
      pq_dn_def_gen[kii] = fpq;
      pq_dn_def_kill[kii] = fpq;
      pq_dn_use_kill[kii].lub_sum(cond_cq);      // down_exposed under exit predicate
      pq_dn_def_kill[kii].lub_sum(cond_cq);      // down_exposed under exit predicate
    } else if (is_swp_branch(op)) {
      // Special case for SWP branch so that src and dest guards are the
      // staging predicate.  This code needs to be fixed once the PQS
      // interface takes care of this internally.
      // process uses
      for (src_oper(op); src_oper != 0; src_oper++) {
        Operand& oper = (*src_oper);
        // don't compute dn_use for mem_vr's
        if (global_dfa->is_interesting(oper)) {
          int ii = vr_to_index.value(oper);
          pq_dn_use_gen[ii].lub_sum(cond_cq);
          pq_dn_use_kill[ii].lub_sum(cond_cq);
        }
      }
      // process defs
      for (dest_oper(op); dest_oper != 0; dest_oper++) {
        Operand oper = (*dest_oper);
        if (global_dfa->is_interesting(oper)) {
          int ii = vr_to_index.value(oper);
          pq_dn_use_gen[ii].lub_diff(cond_cq);
          pq_dn_use_kill[ii].lub_diff(cond_cq);
          pq_dn_def_gen[ii].lub_sum(cond_cq);
        }
      }
    } else {
      // for each op, process uses, then defs
      // process uses
      for (src_oper(op); src_oper != 0; src_oper++) {
        Operand& oper = (*src_oper);
        // don't compute dn_use for mem_vr's
        if (oper.is_reg() || oper.is_macro_reg()) {
          int ii = vr_to_index.value(oper);
          El_ref tmp1 = src_oper.get_ref();
          Pred_cookie pq1 = pj.get_lub_guard(tmp1);
          pq_dn_use_gen[ii].lub_sum(pq1);
          pq_dn_use_kill[ii].lub_sum(pq1);
        }
      }
      // process defs
      for (dest_oper(op); dest_oper != 0; dest_oper++) {
        Operand oper = (*dest_oper);
        if (global_dfa->is_interesting(oper)) {
          int ii = vr_to_index.value(oper);
          El_ref tmp1 = dest_oper.get_ref();
          Pred_cookie pq1 = pj.get_glb_guard(tmp1);
          Pred_cookie pq2 = pj.get_lub_guard(tmp1);
          pq_dn_use_gen[ii].lub_diff(pq1);
          pq_dn_use_kill[ii].lub_diff(pq1);
          pq_dn_def_gen[ii].lub_sum(pq2);
        }
      }
    }
  }

  if (global_dfa == NULL) return;   // when timing pqs, global_dfa is null

  // expand local information into global gen/kill vectors
  if (dbg(dfa, 5)) cdbg << "up_use_gen for blk " << blk->id();
  int ii;
  for (ii = 0; ii < vr_count; ii++) {
    if (interesting.bit(ii)) {
      Operand& vr = index_to_vr[ii];
      if (!tpq.is_disjoint(pq_up_use_gen[ii])) {         // gen if live on some path
        int idx = global_dfa->operand_map.value(vr);
        if (dbg(dfa, 5)) cdbg << " " << vr;
        global_dfa->live_info[live_idx].liveness_gen.set_bit(idx);
      }
    }
  }
  if (dbg(dfa, 5)) cdbg << "\nup_use_kill for blk " << blk->id();
  for (ii = 0; ii < vr_count; ii++) {
    if (interesting.bit(ii)) {
      Operand& vr = index_to_vr[ii];
      if (cond_cq.is_disjoint(pq_up_use_kill[ii])) {     // kill if dead on all paths
        int idx = global_dfa->operand_map.value(vr);
        if (dbg(dfa, 5)) cdbg << " " << vr;
        global_dfa->live_info[live_idx].liveness_kill.set_bit(idx);
      }
    }
  }
  if (dbg(dfa, 5)) cdbg << "\nup_def_gen for blk " << blk->id();
  for (ii = 0; ii < vr_count; ii++) {
    if (interesting.bit(ii)) {
      Operand& vr = index_to_vr[ii];
      if (!tpq.is_disjoint(pq_up_def_gen[ii])) {         // gen if def on any path
        int idx = global_dfa->operand_map.value(vr);
        if (dbg(dfa, 5)) cdbg << " " << vr;
        global_dfa->live_info[live_idx].up_exposed_def_gen.set_bit(idx);
      }
    }
  }
  if (dbg(dfa, 5)) cdbg << "\nup_def_kill for blk " << blk->id();
  for (ii = 0; ii < vr_count; ii++) {
    if (interesting.bit(ii)) {
      Operand& vr = index_to_vr[ii];
      if (cond_cq.is_disjoint(pq_up_def_kill[ii])) {     // kill if killed on all paths
        int idx = global_dfa->operand_map.value(vr);
        if (dbg(dfa, 5)) cdbg << " " << vr;
        global_dfa->live_info[live_idx].up_exposed_def_kill.set_bit(idx);
      }
    }
  }
  if (dbg(dfa, 5)) cdbg << "\ndn_use_gen for blk " << blk->id();
  for (ii = 0; ii < vr_count; ii++) {
    if (interesting.bit(ii)) {
      Operand& vr = index_to_vr[ii];
      if (!cond_cq.is_disjoint(pq_dn_use_gen[ii])) {     // gen if live on some path
        int idx = global_dfa->operand_map.value(vr);
        if (dbg(dfa, 5)) cdbg << " " << vr;
        global_dfa->live_info[live_idx].down_exposed_use_gen.set_bit(idx);
      }
    }
  }
  if (dbg(dfa, 5)) cdbg << "\ndn_use_kill for blk " << blk->id();
  for (ii = 0; ii < vr_count; ii++) {
    if (interesting.bit(ii)) {
      Operand& vr = index_to_vr[ii];
      if (cond_cq.is_disjoint(pq_dn_use_kill[ii])) {     // kill if dead on all paths
        int idx = global_dfa->operand_map.value(vr);
        if (dbg(dfa, 5)) cdbg << " " << vr;
        global_dfa->live_info[live_idx].down_exposed_use_kill.set_bit(idx);
      }
    }
  }
  if (dbg(dfa, 5)) cdbg << "\ndn_def_gen for blk " << blk->id();
  for (ii = 0; ii < vr_count; ii++) {
    if (interesting.bit(ii)) {
      Operand& vr = index_to_vr[ii];
      if (!cond_cq.is_disjoint(pq_dn_def_gen[ii])) {     // gen if def on any path
        int idx = global_dfa->operand_map.value(vr);
        if (dbg(dfa, 5)) cdbg << " " << vr;
        global_dfa->live_info[live_idx].down_exposed_def_gen.set_bit(idx);
      }
    }
  }
  if (dbg(dfa, 5)) cdbg << "\ndn_def_kill for blk " << blk->id();
  for (ii = 0; ii < vr_count; ii++) {
    if (interesting.bit(ii)) {
      Operand& vr = index_to_vr[ii];
      if (cond_cq.is_disjoint(pq_dn_def_kill[ii])) {     // kill if killed on all paths
        int idx = global_dfa->operand_map.value(vr);
        if (dbg(dfa, 5)) cdbg << " " << vr;
        global_dfa->live_info[live_idx].down_exposed_def_kill.set_bit(idx);
      }
    }
  }
  if (dbg(dfa, 5)) cdbg << endl;

  // ntclark 3/25/03 - fix memory leaks
  delete dest_oper_ptr;
  delete src_oper_ptr;
}
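/*
 * Illustrative helper (a sketch, not part of the original source): the
 * expansion loops above all apply the same rule, which this hypothetical
 * function isolates.  A VR is GENned when its predicate expression is
 * satisfiable together with the reference guard ("live/defined on some
 * path") and KILLed when it is disjoint from the exit guard cond_cq
 * ("dead/killed on all paths to the exit").  Pred_cookie and Bitvector are
 * the types already used above; passing Pred_cookie by value mirrors the
 * copying done in liveness_gen_kill.
 */
static void
example_expand_one_vr(Pred_cookie ref_guard, Pred_cookie cond_cq,
                      Pred_cookie pq_gen, Pred_cookie pq_kill,
                      Bitvector &gen_bits, Bitvector &kill_bits, int idx)
{
  if (!ref_guard.is_disjoint(pq_gen))   // gen if live on some path
    gen_bits.set_bit(idx);
  if (cond_cq.is_disjoint(pq_kill))     // kill if dead on all paths
    kill_bits.set_bit(idx);
}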
PUBLIC void
RD_gen_kill(Compound_region* blk, Op* exit_op,
            Reaching_defs_solver& global_rdefa, int rd_idx)
{
  int ii = 0;
  if (dbg(rdef, 1))
    cdbg << "*\t\t\tRD_gen_kill, block-" << blk->id()
         << " exit_op-" << exit_op->id() << endl;

  // da PQS:
  PQS* pqs = get_pqs(blk);
  Pred_jar pj(blk);
  if (dbg(rdef, 2)) cdbg << "\t\t\t\tPQS is:" << *pqs;

  // get predicate which controls the path
  Pred_cookie cond_cq =
    pj.get_lub_path_guard(get_first_region_op_from_subregions(blk), exit_op);

  // create vectors of pred cookies for gen/kill, initialize them:
  Pred_cookie fpq = Pred_jar::get_false();
  Pred_cookie tpq = Pred_jar::get_true();
  if (!pq_gen_ptr) {
    pq_gen_ptr = new Vector<Pred_cookie>(global_rdefa.rdm->def_counter, fpq);
  } else {
    if ((int)pq_gen_ptr->size() != global_rdefa.rdm->def_counter) {
      delete pq_gen_ptr;
      pq_gen_ptr = new Vector<Pred_cookie>(global_rdefa.rdm->def_counter, fpq);
    } else {
      for (int j = 0; j < global_rdefa.rdm->def_counter; j++) {
        (*pq_gen_ptr)[j].reset_to_false();
      }
    }
  }
  //
  if (!pq_kill_ptr) {
    pq_kill_ptr = new Vector<Pred_cookie>(global_rdefa.rdm->def_counter, tpq);
  } else {
    if ((int)pq_kill_ptr->size() != global_rdefa.rdm->def_counter) {
      delete pq_kill_ptr;
      pq_kill_ptr = new Vector<Pred_cookie>(global_rdefa.rdm->def_counter, tpq);
    } else {
      for (int j = 0; j < global_rdefa.rdm->def_counter; j++) {
        (*pq_kill_ptr)[j].reset_to_true();
      }
    }
  }
  Vector<Pred_cookie>& pq_gen = *pq_gen_ptr;
  Vector<Pred_cookie>& pq_kill = *pq_kill_ptr;
  Op_complete_and_pseudo_dests dest_oper;

  //
  // traverse ops in hyperblock in linear order from entry to exit_op
  //
  bool done = false;
  for (Region_ops_C0_order opi2(blk); done != true; opi2++) {
    // there should be no next time around:
    if ((*opi2) == exit_op) done = true;
    assert((*opi2)->is_op());
    Op* op = (Op*)(*opi2);

    // TODO:
    // Any additions/changes here must be replicated in Reaching_defs_solver
    // chain construction code too.
    if (is_remap(op)) {
      Pred_cookie remap_cond_cq(true);
      List<Operand>* remap_defs =
        (List<Operand>*) get_generic_attribute(op, "remap_expanded_defs");
      for (List_iterator<Operand> dest_oper(*remap_defs); dest_oper != 0; dest_oper++) {
        // process the defs only:
        Operand oper = (*dest_oper);
        if (global_rdefa.is_interesting(oper)) {
          int oi = global_rdefa.rdm->operand_map.value(oper);
          for (List_iterator<int> li(global_rdefa.rdm->operand_to_def_map[oi]);
               li != 0; li++) {
            int idx = *li;
            // all defs get killed:
            pq_gen[idx].lub_diff(remap_cond_cq);
            pq_kill[idx].lub_diff(remap_cond_cq);
            // only the def belonging to this op is genned:
            if (global_rdefa.rdm->rev_def_map[idx].get_op() == op) {
              pq_gen[idx].lub_sum(remap_cond_cq);
              pq_kill[idx].lub_sum(remap_cond_cq);
            }
          }
        }
      }
      // Add one more kill for vrname[min_omega]
      Operand oper = op->src(SRC1);
      int min_omega = oper.min_omega();
      Operand new_reg = new Reg(oper);
      new_reg.incr_omega(min_omega);
      int oi = global_rdefa.rdm->operand_map.value(new_reg);
      for (List_iterator<int> li2(global_rdefa.rdm->operand_to_def_map[oi]);
           li2 != 0; li2++) {
        int idx = *li2;
        // all defs get killed:
        pq_gen[idx].lub_diff(remap_cond_cq);
        pq_kill[idx].lub_diff(remap_cond_cq);
      }
    } else if (is_swp_branch(op)) {
      // Special case for SWP branch so that src and dest guards are the
      // staging predicate.  This code needs to be fixed once the PQS
      // interface takes care of this internally.
      //
      // TODO: (Sumedh+Richard) Sanity checks needed on this piece of code.
      // Any additions/changes here must be replicated in Reaching_defs_solver
      // chain construction code too.
      for (dest_oper(op); dest_oper != 0; dest_oper++) {
        Operand oper = (*dest_oper);
        if (global_rdefa.is_interesting(oper)) {
          int oi = global_rdefa.rdm->operand_map.value(oper);
          for (List_iterator<int> li(global_rdefa.rdm->operand_to_def_map[oi]);
               li != 0; li++) {
            int idx = *li;
            // all defs get killed:
            pq_gen[idx].lub_diff(cond_cq);
            pq_kill[idx].lub_diff(cond_cq);
            // only the def belonging to this op is genned:
            if (global_rdefa.rdm->rev_def_map[idx].get_op() == op) {
              pq_gen[idx].lub_sum(cond_cq);
              pq_kill[idx].lub_sum(cond_cq);
            }
          }
        }
      }
    } else {
      for (dest_oper(op); dest_oper != 0; dest_oper++) {
        // process the defs only:
        Operand oper = (*dest_oper);
        El_ref cur_ref = dest_oper.get_ref();
        if (global_rdefa.is_interesting(oper)) {
          int oi = global_rdefa.rdm->operand_map.value(oper);
          Op* cur_def_op = cur_ref.get_op();
          Pred_cookie pq1 = pj.get_lub_guard(cur_ref);
          Pred_cookie pq2 = pj.get_glb_guard(cur_ref);
          for (List_iterator<int> li(global_rdefa.rdm->operand_to_def_map[oi]);
               li != 0; li++) {
            int idx = *li;
            Op* other_def_op = (global_rdefa.rdm->rev_def_map[idx]).get_op();
            // only the def belonging to this op is genned:
            if (cur_def_op == other_def_op) {
              pq_gen[idx].lub_sum(pq1);
              pq_kill[idx].lub_sum(pq1);
            } else {
              // all other defs get killed:
              pq_gen[idx].lub_diff(pq1);
              pq_kill[idx].lub_diff(pq2);
            }
          }
        }
      }
    }
  }

  // expand local info into global gen Vector:
  if (dbg(rdef, 3)) cdbg << "*\t\t\t\tIntegrate gen info:" << endl;
  for (ii = 1; ii < global_rdefa.rdm->def_counter; ii++) {
    // gen if the def reaches the exit on some path
    if (!cond_cq.is_disjoint(pq_gen[ii])) {
      El_ref& ref = global_rdefa.rdm->rev_def_map[ii];
      int idx = global_rdefa.rdm->def_to_i_map.value(ref);
      if (dbg(rdef, 3))
        cdbg << "\t\t\t\t" << "<" << idx << ", " << ref << ">, " << endl;
      global_rdefa.rd_info[rd_idx].rd_gen.set_bit(idx);
    }
  }
  if (dbg(rdef, 3)) cdbg << endl;

  // expand local info into global kill Vector:
  if (dbg(rdef, 3)) cdbg << "*\t\t\t\tIntegrate kill info:" << endl;
  for (ii = 1; ii < global_rdefa.rdm->def_counter; ii++) {
    // kill if the def is killed on all paths to the exit
    if (cond_cq.is_disjoint(pq_kill[ii])) {
      El_ref& ref = global_rdefa.rdm->rev_def_map[ii];
      int idx = global_rdefa.rdm->def_to_i_map.value(ref);
      if (dbg(rdef, 3))
        cdbg << "\t\t\t\t" << "<" << idx << ", " << ref << ">, " << endl;
      global_rdefa.rd_info[rd_idx].rd_kill.set_bit(idx);
    }
  }
  if (dbg(rdef, 3)) cdbg << endl;
}
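/*
 * Hypothetical sketch (not from the original source): enumerate the
 * definition points recorded for an operand, using the operand_map /
 * operand_to_def_map / rev_def_map tables that RD_gen_kill indexes above.
 * example_dump_defs_of is an illustrative name; it only uses accessors that
 * already appear in this file.
 */
static void
example_dump_defs_of(Reaching_defs_solver &global_rdefa, Operand oper)
{
  if (!global_rdefa.is_interesting(oper))
    return;
  int oi = global_rdefa.rdm->operand_map.value(oper);
  for (List_iterator<int> li(global_rdefa.rdm->operand_to_def_map[oi]); li != 0; li++) {
    El_ref& ref = global_rdefa.rdm->rev_def_map[*li];
    cdbg << "def " << *li << " of " << oper
         << " at op " << ref.get_op()->id() << endl;
  }
}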
PRIVATE VR_map*
create_vr_map_for_block(Compound_region* blk)
{
  assert(blk->is_bb() || blk->is_hb());
  if (dbg(dfa)) cdbg << "create_vr_map_for_block " << blk->id() << endl;

  VR_map* vr_map = new VR_map;
  int& vr_count = vr_map->vr_count;
  int& vrnum_count = vr_map->vrnum_count;
  Hash_map<Operand, int>& vr_to_index = vr_map->vr_to_index;
  Vector<Operand>& index_to_vr = vr_map->index_to_vr;
  Hash_map<int, int>& vrnum_to_index = vr_map->vrnum_to_index;
  Vector<int>& index_to_vrnum = vr_map->index_to_vrnum;
  List<Operand> local_vr_names;

  vr_count = 0;
  vrnum_count = 0;

  // traverse ops in hyperblock in forward linear order
  for (Region_ops_C0_order opi(blk); opi != 0; opi++) {
    assert((*opi)->is_op());
    Op* op = (Op*)(*opi);

    // if op is a remap, add its SRC1 to the list of names to be expanded
    if (is_remap(op)) {
      local_vr_names.add_tail(op->src(SRC1));
    }

    // process uses
    for (Op_complete_inputs src_oper(op); src_oper != 0; src_oper++) {
      Operand tmpo = (*src_oper);
      if (!vr_to_index.is_bound(tmpo)) {
        if (tmpo.is_reg() || tmpo.is_mem_vr() || tmpo.is_macro_reg()) {
          if (dbg(dfa, 4))
            cdbg << "vr_to_index " << vr_count << " " << tmpo << endl;
          vr_to_index.bind(tmpo, vr_count++);
        }
      }
      if (tmpo.is_reg() || tmpo.is_mem_vr() || tmpo.is_macro_reg()) {
        if (!vrnum_to_index.is_bound(tmpo.vr_num())) {
          if (dbg(dfa, 4))
            cdbg << "vrnum_to_index " << vrnum_count << " " << tmpo.vr_num() << endl;
          vrnum_to_index.bind(tmpo.vr_num(), vrnum_count++);
        }
      }
    }

    // process defs
    for (Op_complete_dests dest_oper(op); dest_oper != 0; dest_oper++) {
      Operand tmpo = (*dest_oper);
      if (!vr_to_index.is_bound(tmpo)) {
        if (tmpo.is_reg() || tmpo.is_mem_vr() || tmpo.is_macro_reg()) {
          if (dbg(dfa, 4))
            cdbg << "vr_to_index " << vr_count << " " << tmpo << endl;
          vr_to_index.bind(tmpo, vr_count++);
        }
      }
      if (tmpo.is_reg() || tmpo.is_mem_vr() || tmpo.is_macro_reg()) {
        if (!vrnum_to_index.is_bound(tmpo.vr_num())) {
          if (dbg(dfa, 4))
            cdbg << "vrnum_to_index " << vrnum_count << " " << tmpo.vr_num() << endl;
          vrnum_to_index.bind(tmpo.vr_num(), vrnum_count++);
        }
      }
    }
  }

  // expand the vr_names over the min...max range
  for (List_iterator<Operand> vri(local_vr_names); vri != 0; vri++) {
    Operand oper = *vri;
    int min_omega = oper.min_omega();
    int max_omega = oper.max_omega();
    for (int i = min_omega; i <= max_omega; i++) {
      Operand new_reg = new Reg(oper);
      new_reg.incr_omega(i);
      if (!vr_to_index.is_bound(new_reg)) {
        vr_to_index.bind(new_reg, vr_count++);
      }
      // mchu
      if (!vrnum_to_index.is_bound(new_reg.vr_num())) {
        vrnum_to_index.bind(new_reg.vr_num(), vrnum_count++);
      }
    }
  }

  // build the reverse (index -> operand / vrnum) tables
  index_to_vr.resize(vr_count);
  index_to_vrnum.resize(vrnum_count);
  for (Hash_map_iterator<Operand, int> mi(vr_to_index); mi != 0; mi++) {
    index_to_vr[(*mi).second] = (*mi).first;
  }
  for (Hash_map_iterator<int, int> mi(vrnum_to_index); mi != 0; mi++) {
    index_to_vrnum[(*mi).second] = (*mi).first;
  }

  return vr_map;
}
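/*
 * Usage sketch (hypothetical, not part of the original source): build the
 * per-block VR map and translate between an operand and its dense index, as
 * the liveness code above does through vr_to_index / index_to_vr.  The
 * assumption that the caller owns and frees the returned VR_map is mine;
 * the surrounding code normally obtains cached maps via get_local_vr_map.
 */
static void
example_use_vr_map(Compound_region* blk, Operand oper)
{
  VR_map* vr_map = create_vr_map_for_block(blk);

  if (vr_map->vr_to_index.is_bound(oper)) {
    int ii = vr_map->vr_to_index.value(oper);
    cdbg << oper << " maps to index " << ii
         << " (round-trips to " << vr_map->index_to_vr[ii] << ")" << endl;
  }

  delete vr_map;   // assumed ownership for this standalone example
}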