/**
 * Compute the overhang for a card node
 * @param c the leading pair of a node
 * @return the bitset of the overhang or NULL (user to free)
 */
bitset *card_node_overhang( card *c )
{
    bitset *bs = bitset_create();
    // seed with the leading pair's versions; this bitset_or returns the
    // (possibly reallocated) set, so the result must be captured
    bs = bitset_or( bs, pair_versions(c->p) );
    if ( pair_is_hint(c->right->p) )
    {
        // fold the hint's versions into the set as well
        pair *pp = c->right->p;
        bs = bitset_or( bs, pair_versions(pp) );
        // NOTE(review): this inner test is always true — pp is c->right->p,
        // which the enclosing if already verified is a hint; presumably a
        // different condition was intended, or bit 0 is always cleared here
        if ( pair_is_hint(pp) )
            bitset_clear_bit(bs,0);
        // skip past the hint so the trailing pair below is the real one
        c = c->right;
    }
    // subtract the trailing pair's versions: what remains "overhangs" the node
    bitset_and_not( bs, pair_versions(c->right->p) );
    return bs;
}
/* Depth-first, Tarjan-style traversal: after the walk F[i] has absorbed
   (via bitset_or) the F set of every node reachable from i through the
   relation R, and all members of a strongly connected component end up
   sharing the same set (bitset_copy in the pop loop).  Relies on the
   file-scope VERTICES stack, INDEX numbering, `top` and `infinity` —
   NOTE(review): assumes INDEX[] starts zeroed; confirm in the caller.  */
static void
traverse (relation_node i)
{
  relation_node j;
  relation_node height;

  /* Push i on the stack and record its DFS number.  */
  VERTICES[++top] = i;
  INDEX[i] = height = top;

  if (R[i])
    for (j = 0; R[i][j] != END_NODE; ++j)
      {
        /* Unvisited successor: recurse into it first.  */
        if (INDEX[R[i][j]] == 0)
          traverse (R[i][j]);

        /* Keep the smallest DFS number reachable (the low-link).  */
        if (INDEX[i] > INDEX[R[i][j]])
          INDEX[i] = INDEX[R[i][j]];

        /* Absorb the successor's set into ours.  */
        bitset_or (F[i], F[i], F[R[i][j]]);
      }

  /* i is the root of an SCC: pop its members, mark them done with
     `infinity`, and give each the root's completed set.  */
  if (INDEX[i] == height)
    for (;;)
      {
        j = VERTICES[top--];
        INDEX[j] = infinity;
        if (i == j)
          break;
        bitset_copy (F[j], F[i]);
      }
}
/**
 * Find a blank after the next node that we can add our blank to
 * @param c_new the new blank card
 * @param c the card to start searching from
 * @return 1 we merged c_new into a blank card already there else 0
 */
int card_merge_right( card *c_new, card *c )
{
    int merged = 0;
    if ( card_is_blank(c_new) )
    {
        bitset *cnv= pair_versions(c_new->p);
        bitset *cv= pair_versions(c->p);
        // bracket the search: r = next card sharing c_new's versions,
        // l = c itself if it already intersects, else the previous such card
        card *r = card_next(c,cnv,0);
        card *l = (bitset_intersects(cv,cnv))?c:card_prev(c,cnv);
        if ( r != NULL && l != NULL )
        {
            // scan the cards strictly between l and r
            card *temp = c->right;
            while ( temp != NULL && temp != r )
            {
                bitset *tv = pair_versions(temp->p);
                // an existing blank with the same neighbours can simply
                // absorb c_new's versions instead of inserting a new card
                if ( card_is_blank(temp) && card_prev(temp,tv)==l
                    && card_next(temp,tv,0)==r )
                {
                    // NOTE(review): bitset_or's return value is discarded;
                    // elsewhere in this code base it may reallocate (see
                    // hint_or) — confirm the pair's set cannot grow here
                    bitset_or(tv,cnv);
                    merged = 1;
                    break;
                }
                temp = temp->right;
            }
        }
    }
    return merged;
}
/**
 * Add a card AFTER a node. c->right will be displaced by one
 * card. Although this will increase the node's overhang, the increase
 * is exactly that of the displaced card's versions, which will reattach
 * via the overhang, instead of the forced rule.
 * @param c the incoming pair of a node
 * @param after the card to add into lp's node. must intersect with lp.
 * @param verify if 1 then check that the resulting node is OK
 * @return 1 if the new node is a bona fide node
 */
int card_add_at_node( card *c, card *after, int verify )
{
    int res = 0;
    int dispose = 0;
    bitset *bs1 = pair_versions(c->p);
    if ( pair_is_hint(card_pair(c->right)) )
    {
        // clone before extending: pair_versions hands back the pair's own set
        bs1 = bitset_clone(bs1);
        if ( bs1 != NULL )
        {
            // step over the hint so the insertion lands after it
            c = c->right;
            dispose = 1;
            // bs1 is the amalgam of leading pair+hint
            // NOTE(review): bitset_or's return is discarded here; if it
            // reallocates (as hint_or's comment warns) the union is lost
            bitset_or( bs1, pair_versions(c->p) );
        }
    }
    if ( bs1 != NULL )
    {
        // splice `after` into the doubly linked list immediately right of c
        after->right = c->right;
        after->left = c;
        if ( c->right != NULL )
            c->right->left = after;
        c->right = after;
        // verified success requires the node's versions to overlap the card's
        res = (verify)?bitset_intersects(bs1,pair_versions(after->p)):1;
        if ( dispose )
            bitset_dispose( bs1 );
    }
    return res;
}
/* Count the shift/reduce conflicts in state S: the tokens that this
   state can both shift on and reduce on.  */
static int
count_sr_conflicts (state *s)
{
  transitions *trans = s->transitions;
  reductions *reds = s->reductions;
  int i;

  /* A state with no transitions cannot shift, hence no S/R conflict.  */
  if (!trans)
    return 0;

  bitset_zero (shift_set);
  bitset_zero (lookahead_set);

  /* Collect every token on which this state shifts.  */
  FOR_EACH_SHIFT (trans, i)
    bitset_set (shift_set, TRANSITION_SYMBOL (trans, i));

  /* Union of the lookahead sets of every reduction in this state.  */
  for (i = 0; i < reds->num; ++i)
    bitset_or (lookahead_set, lookahead_set, reds->lookahead_tokens[i]);

  /* The conflicting tokens are exactly those in both sets.  */
  bitset_and (lookahead_set, lookahead_set, shift_set);

  return bitset_count (lookahead_set);
}
/**
 * Test computation of a node's overhang (card_node_overhang).
 * Links the file-scope fixture cards cl and cr, computes the overhang,
 * and compares it against an independently built expected set.
 * @param passed VAR param: incremented on success
 * @param failed VAR param: incremented on failure
 * @param log the log to report errors to (currently unused here)
 */
static void card_test_overhang( int *passed, int *failed, plugin_log *log )
{
    card_set_right(cl,cr);
    card_set_left(cr,cl);
    bitset *bs = card_node_overhang( cl );
    if ( bs != NULL )
    {
        // build the expected overhang by hand: bsl plus version 7,
        // minus the trailing pair's versions
        bitset *bsc = bitset_create();
        if ( bsc != NULL )
        {
            bsc = bitset_or( bsc, bsl );
            bsc = bitset_set( bsc, 7 );
            bitset_and_not( bsc, pair_versions(pr) );
            if ( !bitset_equals(bsc,bs) )
            {
                // fixed message: was "overhang text failed"
                fprintf(stderr,"card: overhang test failed\n");
                (*failed)++;
            }
            else
                (*passed)++;
        }
        else
            (*failed)++;
        if ( bsc != NULL )
            bitset_dispose( bsc );
    }
    else
    {
        fprintf(stderr,"card: failed to compute overhang\n");
        (*failed)++;
    }
    if ( bs != NULL )
        bitset_dispose( bs );
}
/* Choose the rule state S should reduce by default: the reduction with
   the most look-ahead tokens remaining after masking, or NULL if none
   qualifies.  The masking order below (shifts, then %nonassoc errors,
   then earlier reductions) is load-bearing.  */
static rule *
state_default_rule (state *s)
{
  reductions *reds = s->reductions;
  rule *default_rule = NULL;
  int cmax = 0;
  int i;

  /* No need for a look-ahead.  */
  if (s->consistent)
    return reds->rules[0];

  /* 1. Each reduction is possibly masked by the look-ahead tokens on which
     we shift (S/R conflicts)...  */
  bitset_zero (shift_set);
  {
    transitions *trans = s->transitions;
    FOR_EACH_SHIFT (trans, i)
      {
        /* If this state has a shift for the error token, don't use a
           default rule.  */
        if (TRANSITION_IS_ERROR (trans, i))
          return NULL;
        bitset_set (shift_set, TRANSITION_SYMBOL (trans, i));
      }
  }

  /* 2. Each reduction is possibly masked by the look-ahead tokens on which
     we raise an error (due to %nonassoc).  */
  {
    errs *errp = s->errs;
    for (i = 0; i < errp->num; i++)
      if (errp->symbols[i])
        bitset_set (shift_set, errp->symbols[i]->number);
  }

  for (i = 0; i < reds->num; ++i)
    {
      int count = 0;

      /* How many non-masked look-ahead tokens are there for this
         reduction?  */
      bitset_andn (look_ahead_set, reds->look_ahead_tokens[i], shift_set);
      count = bitset_count (look_ahead_set);

      /* Keep the reduction with the largest uncovered look-ahead set.  */
      if (count > cmax)
        {
          cmax = count;
          default_rule = reds->rules[i];
        }

      /* 3. And finally, each reduction is possibly masked by previous
         reductions (in R/R conflicts, we keep the first reductions).  */
      bitset_or (shift_set, shift_set, reds->look_ahead_tokens[i]);
    }

  return default_rule;
}
/**
 * Save the match's current version set so it can be restored later
 * by match_pop_versions.
 * @param m the match whose bs set is remembered in prev_bs
 */
static void match_push_versions( match *m )
{
    if ( m->prev_bs != NULL )
    {
        // reuse the existing set: empty it, then copy bs in.
        // bitset_or may reallocate its first argument (see hint_or),
        // so capture the returned pointer — previously it was discarded,
        // which would lose a grown set and leave prev_bs dangling.
        bitset_clear( m->prev_bs );
        m->prev_bs = bitset_or( m->prev_bs, m->bs );
    }
    else
        m->prev_bs = bitset_clone( m->bs );
}
/* Given a vector BSETV of N bitsets of size N, modify its contents to
   be the transitive closure of what was given.  Warshall's algorithm:
   whenever ROW can reach COL, ROW also inherits everything COL can
   reach.  */
void
bitsetv_transitive_closure (bitsetv bsetv)
{
  bitset_bindex col;
  bitset_bindex row;

  for (col = 0; bsetv[col]; col++)
    for (row = 0; bsetv[row]; row++)
      if (bitset_test (bsetv[row], col))
        bitset_or (bsetv[row], bsetv[row], bsetv[col]);
}
void build(flow_graph * graph){ flow_node * node = graph->end; bitset * live; rga_move * move; rd_instr * ins; unsigned int def_pos = 0, live_pos = 0; unsigned int p1_id = 0, p2_id = 0; while(node != 0){ ins = node->data; live = bitset_copy(node->liveout); if(ins->type == RD_SET && (ins->p1->type == RD_VAR || ins->p1->type == RD_REG) && (ins->p2->type == RD_VAR || ins->p2->type == RD_REG)){ if(ins->p1->type == RD_VAR){ p1_id = ins->p1->id; }else{ p1_id = num_nodes - num_registers + ins->p1->id; } if(ins->p2->type == RD_VAR){ p2_id = ins->p2->id; }else{ p2_id = num_nodes - num_registers + ins->p2->id; } bitset_sub(live, node->use); if(!rga_move_list_exists(worklistMoves, nodes[p1_id], nodes[p2_id])){ move = rga_move_new(nodes[p1_id], nodes[p2_id]); moveList[p1_id] = rga_move_map_add(moveList[p1_id], move); moveList[p2_id] = rga_move_map_add(moveList[p2_id], move); #ifdef DEBUG_MODE printf("Added move from v%u to v%u\n", p1_id, p2_id); #endif rga_move_list_push(worklistMoves, move); move->set = RGA_WORKLIST_MOVES; } }else if(ins->type == RD_INTERFERE){ add_edge(nodes[ins->p1->id], nodes[num_nodes - num_registers + ins->p2->id]); } bitset_or(live, node->def); #ifdef DEBUG_MODE printf("Live out:"); bitset_show(live); #endif while((def_pos = bitset_first_on(node->def, def_pos)) != ~0){ while((live_pos = bitset_first_on(live, live_pos)) != ~0){ add_edge(nodes[live_pos], nodes[def_pos]); live_pos++; } live_pos = 0; def_pos++; } def_pos = live_pos = 0; node = node->prev; } }
/* Detect and record the conflicts in state S.  Shift/reduce conflicts
   are first handed to resolve_sr_conflict (precedence/%nonassoc); any
   that remain, plus reduce/reduce conflicts, mark the state in the
   global `conflicts` table.  Also finalises any explanation text that
   resolve_sr_conflict grew on the solved-conflicts obstacks.  */
static void
set_conflicts (state *s, symbol **errors)
{
  int i;
  transitions *trans = s->transitions;
  reductions *reds = s->reductions;
  int nerrs = 0;

  /* A consistent state has a single action: nothing can conflict.  */
  if (s->consistent)
    return;

  bitset_zero (lookahead_set);

  /* Start with the tokens this state shifts on.  */
  FOR_EACH_SHIFT (trans, i)
    bitset_set (lookahead_set, TRANSITION_SYMBOL (trans, i));

  /* Loop over all rules which require lookahead in this state.  First
     check for shift-reduce conflict, and try to resolve using
     precedence.  */
  for (i = 0; i < reds->num; ++i)
    if (reds->rules[i]->prec && reds->rules[i]->prec->prec
        && !bitset_disjoint_p (reds->lookahead_tokens[i], lookahead_set))
      resolve_sr_conflict (s, i, errors, &nerrs);

  if (nerrs)
    {
      /* Some tokens have been explicitly made errors.  Allocate a
         permanent errs structure for this state, to record them.  */
      state_errs_set (s, nerrs, errors);
    }
  if (obstack_object_size (&solved_conflicts_obstack))
    {
      obstack_1grow (&solved_conflicts_obstack, '\0');
      s->solved_conflicts = obstack_finish (&solved_conflicts_obstack);
    }
  if (obstack_object_size (&solved_conflicts_xml_obstack))
    {
      obstack_1grow (&solved_conflicts_xml_obstack, '\0');
      s->solved_conflicts_xml = obstack_finish (&solved_conflicts_xml_obstack);
    }

  /* Loop over all rules which require lookahead in this state.  Check
     for conflicts not resolved above.  */
  for (i = 0; i < reds->num; ++i)
    {
      if (!bitset_disjoint_p (reds->lookahead_tokens[i], lookahead_set))
        conflicts[s->number] = 1;

      /* Earlier reductions mask later ones: accumulate as we go.  */
      bitset_or (lookahead_set, lookahead_set, reds->lookahead_tokens[i]);
    }
}
/**
 * Restore the match's version set saved by match_push_versions.
 * If nothing was pushed, the current set is simply discarded.
 * @param m the match whose bs set is restored from prev_bs
 */
static void match_pop_versions( match *m )
{
    if ( m->prev_bs == NULL )
    {
        // nothing saved: drop the current set entirely
        if ( m->bs != NULL )
        {
            bitset_dispose( m->bs );
            m->bs = NULL;
        }
    }
    else
    {
        // empty bs and copy the saved set back in.
        // bitset_or may reallocate its first argument (see hint_or),
        // so capture the returned pointer — previously it was discarded,
        // which would lose a grown set and leave bs dangling.
        bitset_clear( m->bs );
        m->bs = bitset_or( m->bs, m->prev_bs );
    }
}
/**
 * OR a set of versions onto ours
 * @param h the hint
 * @param bs the versions to add
 */
void hint_or( hint *h, bitset *bs )
{
    // bitset_or may hand back a reallocated set, so store the result
    bitset *merged = bitset_or( h->bs, bs );
    h->bs = merged;
}
/**
 * Colour memory variables so that disjoint live ranges share slots —
 * iterated-coalescing graph colouring applied to memory allocation.
 * Builds an interference graph from the flow graph of `code`, coalesces
 * move-related nodes, assigns each remaining node the lowest colour not
 * used by a coloured neighbour, then rewrites instruction operands to
 * point at the new variable list.
 * Fixes vs. previous version:
 *  - the constrained-move test checked bitset_check(adjSet[x],y) twice;
 *    the second operand is now the reverse pair (y,x) as intended
 *  - the per-node copy of the live-out set is no longer leaked
 *  - removed the unused local set_size
 * @param code the instruction list to analyse and rewrite
 * @param vlist the variable list being replaced
 * @param match_types bitmask of operand types to allocate
 * @param ret_type the type given to variables in the returned list
 * @param type_size size of each variable in the returned list
 * @return a freshly allocated variable list (caller frees — TODO confirm)
 */
rd_vlist * mem_alloc(rd_instr * code, rd_vlist * vlist, unsigned int match_types, unsigned int ret_type, unsigned int type_size){
    //start local variables
    flow_graph * graph;
    flow_node * node;
    rga_node * x, * y;
    rga_node_map * list;
    rga_move * move;
    rd_vlist * new_list;
    rd_instr * ins;
    rd_var * v;
    bitset * live;
    unsigned int def_pos, live_pos, last_id, i;
    bitset * used_colours;
    //end local variables

    //perform flow analysis
    graph = flow_generate_graph(code, vlist, 0, 0, match_types);

    //allocate space for node and move information
    initial = rga_node_list_new();
    coalescedNodes = rga_node_list_new();
    colouredNodes = rga_node_list_new();
    coalescedMoves = rga_move_list_new();
    constrainedMoves = rga_move_list_new();
    worklistMoves = rga_move_list_new();
    nodes = new rga_node *[vlist->num_vars];
    adjSet = new bitset *[vlist->num_vars];
    adjList = new rga_node_map *[vlist->num_vars];
    alias = new rga_node *[vlist->num_vars];
    colour = new unsigned char[vlist->num_vars];

    //initialise memory variables
    for(i = 0; i < vlist->num_vars; i++){
        nodes[i] = rga_node_new(0);
        rga_node_list_push(initial, nodes[i]);
        nodes[i]->set = RGA_INITIAL;
        colour[i] = 0;
        adjList[i] = 0;
        adjSet[i] = bitset_new(vlist->num_vars);
        alias[i] = 0;
    }

    //build the graph, walking the flow graph backwards
    node = graph->end;
    def_pos = live_pos = 0;
    while(node != 0){
        ins = node->data;
        //working copy of the live-out set for this node
        live = bitset_copy(node->liveout);
        if(ins->type == RD_SET && ins->p1->type & match_types && ins->p2->type & match_types){
            //for a move, the used source should not interfere with the target
            bitset_sub(live, node->use);
            if(!rga_move_list_exists(worklistMoves, nodes[ins->p1->id], nodes[ins->p2->id])){
                move = rga_move_new(nodes[ins->p1->id], nodes[ins->p2->id]);
                rga_move_list_push(worklistMoves, move);
                move->set = RGA_WORKLIST_MOVES;
            }
        }
        //defined values are live at the definition point itself
        bitset_or(live, node->def);
        //connect every defined node with everything live across it
        while((def_pos = bitset_first_on(node->def, def_pos)) != ~0){
            while((live_pos = bitset_first_on(live, live_pos)) != ~0){
                mem_add_edge(nodes[live_pos], nodes[def_pos]);
                live_pos++;
            }
            live_pos = 0;
            def_pos++;
        }
        def_pos = 0;
        //BUGFIX: the working copy was leaked on every iteration
        bitset_delete(live);
        node = node->prev;
    }

    //coalesce phase
    while(!rga_move_list_isempty(worklistMoves)){
        move = rga_move_list_pop(worklistMoves);
        x = get_alias(move->a);
        y = get_alias(move->b);
        if(x == y){
            //already the same node: trivially coalesced
            rga_move_list_push(coalescedMoves, move);
            move->set = RGA_COALESCED_MOVES;
        }else if(bitset_check(adjSet[x->id], y->id) || bitset_check(adjSet[y->id], x->id)){
            //BUGFIX: second test used to repeat (x,y); now checks (y,x)
            //the nodes interfere, so the move cannot be coalesced
            rga_move_list_push(constrainedMoves, move);
            move->set = RGA_CONSTRAINED_MOVES;
        }else{
            //merge y into x: y's neighbours become x's neighbours
            rga_move_list_push(coalescedMoves, move);
            move->set = RGA_COALESCED_MOVES;
            list = adjList[y->id];
            rga_node_list_remove(initial, y);
            rga_node_list_push(coalescedNodes, y);
            y->set = RGA_COALESCED_NODES;
            alias[y->id] = x;
            while(list != 0){
                if(list->node->set != RGA_COALESCED_NODES){
                    mem_add_edge(list->node, x);
                }
                list = list->prev;
            }
        }
    }

    //simplify and select phase
    new_list = rd_varlist(0);
    last_id = 0;
    used_colours = bitset_new(vlist->num_vars);
    while(x = rga_node_list_pop(initial)){
        //collect the colours already taken by coloured neighbours
        bitset_reset(used_colours);
        list = adjList[x->id];
        while(list != 0){
            y = get_alias(list->node);
            if(y->set == RGA_COLOURED_NODES){
                bitset_set(used_colours, colour[y->id]);
            }
            list = list->prev;
        }
        rga_node_list_push(colouredNodes, x);
        x->set = RGA_COLOURED_NODES;
        //take the lowest free colour
        colour[x->id] = bitset_first_off(used_colours, 0);
        if(rd_vlist_find(new_list, colour[x->id]) == 0){
            rd_vlist_add(new_list, colour[x->id], ret_type, 0, 0, type_size);
        }
        if(colour[x->id] > last_id){
            last_id = colour[x->id];
        }
#ifdef DEBUG_MODE
        printf("mem%u gets %u\n", x->id, colour[x->id]);
#endif
    }
    bitset_delete(used_colours);
    new_list->num_vars = last_id + 1;

    //set the colour of coalesced nodes from their representative
    x = coalescedNodes->end;
    while(x != 0){
        colour[x->id] = colour[get_alias(x)->id];
        //rd_vlist_add(new_list, colour[x->id], ret_type, 0, 0, type_size);
#ifdef DEBUG_MODE
        printf("mem%u gets %u\n", x->id, colour[x->id]);
#endif
        x = x->prev;
    }

    //rewrite instruction operands to the new (coloured) variables
    node = graph->end;
    while(node != 0){
        if(node->data->p1 != 0){
            if(node->data->p1->type & match_types){
                v = rd_vlist_find(new_list, colour[node->data->p1->id]);
                if(v != 0){
                    node->data->p1 = v;
                }
            }
        }
        if(node->data->p2 != 0){
            if(node->data->p2->type & match_types){
                v = rd_vlist_find(new_list, colour[node->data->p2->id]);
                if(v != 0){
                    node->data->p2 = v;
                }
            }
        }
        if(node->data->p3 != 0){
            if(node->data->p3->type & match_types){
                v = rd_vlist_find(new_list, colour[node->data->p3->id]);
                if(v != 0){
                    node->data->p3 = v;
                }
            }
        }
        node = node->prev;
    }

    //clean up
    delete initial;
    delete coalescedNodes;
    delete colouredNodes;
    delete coalescedMoves;
    delete constrainedMoves;
    delete worklistMoves;
    for(i = 0; i < vlist->num_vars; i++){
        delete nodes[i];
        if(adjList[i] != 0){
            delete adjList[i];
        }
        bitset_delete(adjSet[i]);
    }
    delete[] nodes;
    delete[] adjSet;
    delete[] adjList;
    delete[] alias;
    delete[] colour;
    rga_reset();
    //NOTE(review): `graph` itself is never freed here — confirm ownership
    return new_list;
}
/**
 * Decide whether LOOKAHEAD is in the lookahead set of kernel item ITEM
 * of state S, computing (and caching in ITEM_LOOKAHEAD_SETS) the set
 * just-in-time by consulting predecessor states.
 *
 * \note
 * - FIXME: It might be an interesting experiment to compare the space and
 *   time efficiency of computing \c item_lookahead_sets either:
 *   - Fully up front.
 *   - Just-in-time, as implemented below.
 *   - Not at all.  That is, just let annotations continue even when
 *     unnecessary.
 */
bool
ielr_item_has_lookahead (state *s, symbol_number lhs, size_t item,
                         symbol_number lookahead, state ***predecessors,
                         bitset **item_lookahead_sets)
{
  /* Lazily allocate the per-state array of per-item sets.  */
  if (!item_lookahead_sets[s->number])
    {
      size_t i;
      item_lookahead_sets[s->number] =
        xnmalloc (s->nitems, sizeof item_lookahead_sets[s->number][0]);
      for (i = 0; i < s->nitems; ++i)
        item_lookahead_sets[s->number][i] = NULL;
    }
  /* Lazily compute this item's set.  */
  if (!item_lookahead_sets[s->number][item])
    {
      item_lookahead_sets[s->number][item] =
        bitset_create (ntokens, BITSET_FIXED);
      /* If this kernel item is the beginning of a RHS, it must be the
         kernel item in the start state, and so its LHS has no follows
         and no goto to check.  If, instead, this kernel item is the
         successor of the start state's kernel item, there are still no
         follows and no goto.  This situation is fortunate because we
         want to avoid the - 2 below in both cases.

         Actually, IELR(1) should never invoke this function for either
         of those cases because (1) follow_kernel_items will never
         reference a kernel item for this RHS because the end token
         blocks sight of the lookahead set from the RHS's only
         nonterminal, and (2) no reduction has a lookback dependency on
         this lookahead set.  Nevertheless, I didn't change this test to
         an aver just in case the usage of this function evolves to need
         those two cases.  In both cases, the current implementation
         returns the right result.  */
      if (s->items[item] > 1)
        {
          /* If the LHS symbol of this item isn't known (because this
             is a top-level invocation), go get it.  */
          if (!lhs)
            {
              unsigned int i;
              /* Scan forward in ritem to the end of the rule to find
                 its rule number, then read the LHS off the rule.  */
              for (i = s->items[item];
                   !item_number_is_rule_number (ritem[i]);
                   ++i)
                ;
              lhs = rules[item_number_as_rule_number (ritem[i])].lhs->number;
            }
          /* If this kernel item is next to the beginning of the RHS,
             then check all predecessors' goto follows for the LHS.  */
          if (item_number_is_rule_number (ritem[s->items[item] - 2]))
            {
              state **predecessor;
              aver (lhs != accept->number);
              for (predecessor = predecessors[s->number];
                   *predecessor;
                   ++predecessor)
                bitset_or (item_lookahead_sets[s->number][item],
                           item_lookahead_sets[s->number][item],
                           goto_follows[map_goto ((*predecessor)->number,
                                                  lhs)]);
            }
          /* If this kernel item is later in the RHS, then check all
             predecessor items' lookahead sets.  */
          else
            {
              state **predecessor;
              for (predecessor = predecessors[s->number];
                   *predecessor;
                   ++predecessor)
                {
                  /* Locate, in the predecessor, the item that is one
                     position earlier in the same RHS.  */
                  size_t predecessor_item;
                  for (predecessor_item = 0;
                       predecessor_item < (*predecessor)->nitems;
                       ++predecessor_item)
                    if ((*predecessor)->items[predecessor_item]
                        == s->items[item] - 1)
                      break;
                  aver (predecessor_item != (*predecessor)->nitems);
                  /* Recurse to ensure the predecessor item's set is
                     computed, then absorb it.  */
                  ielr_item_has_lookahead (*predecessor, lhs,
                                           predecessor_item, 0 /*irrelevant*/,
                                           predecessors, item_lookahead_sets);
                  bitset_or (item_lookahead_sets[s->number][item],
                             item_lookahead_sets[s->number][item],
                             item_lookahead_sets[(*predecessor)->number]
                               [predecessor_item]);
                }
            }
        }
    }
  return bitset_test (item_lookahead_sets[s->number][item], lookahead);
}