void dag_node_footprint_find_largest_residual(struct dag_node *n, struct dag_node *limit)
{
    struct dag_node *node1;

    list_first_item(n->footprint->residual_nodes);
    node1 = list_peek_current(n->footprint->residual_nodes);

    if(n != node1){
        n->footprint->residual_size = node1->footprint->residual_size;
        set_delete(n->footprint->residual_files);
        n->footprint->residual_files = set_duplicate(node1->footprint->residual_files);
    }

    while((node1 = list_next_item(n->footprint->residual_nodes)) && (!limit || node1 != limit)){
        if(node1->footprint->footprint_min_size > n->footprint->footprint_min_size){
            set_delete(n->footprint->footprint_min_files);
            n->footprint->footprint_min_size = node1->footprint->footprint_min_size;
            n->footprint->footprint_min_files = set_duplicate(node1->footprint->footprint_min_files);
        }
        if(node1->footprint->footprint_max_size > n->footprint->footprint_max_size){
            set_delete(n->footprint->footprint_max_files);
            n->footprint->footprint_max_size = node1->footprint->footprint_max_size;
            n->footprint->footprint_max_files = set_duplicate(node1->footprint->footprint_max_files);
        }
    }
}
/* Function that allows the purpose to be succinctly stated. We only
 * store the max as the true weight of the node, so this allows it to
 * be clearly expressed. */
void dag_node_footprint_min( struct dag_node *n)
{
    set_delete(n->footprint->footprint_min_files);

    if(n->footprint->delete_footprint <= n->footprint->prog_min_footprint){
        n->footprint->footprint_min_size = n->footprint->delete_footprint;
        n->footprint->footprint_min_files = set_duplicate(n->footprint->delete_files);
        n->footprint->footprint_min_type = DAG_NODE_FOOTPRINT_DELETE;
        n->footprint->run_order = n->footprint->delete_run_order;
    } else {
        n->footprint->footprint_min_size = n->footprint->prog_min_footprint;
        n->footprint->footprint_min_files = set_duplicate(n->footprint->prog_min_files);
        n->footprint->footprint_min_type = DAG_NODE_FOOTPRINT_DESC;
        n->footprint->run_order = n->footprint->prog_run_order;
    }

    n->footprint->self_res = n->footprint->target_size;
    if(n->footprint->self_res < n->footprint->footprint_min_size){
        n->footprint->self_res = n->footprint->footprint_min_size;
    }

    if(n->footprint->run_footprint > n->footprint->footprint_min_size){
        set_delete(n->footprint->footprint_min_files);
        n->footprint->footprint_min_size = n->footprint->run_footprint;
        n->footprint->footprint_min_files = set_duplicate(n->footprint->run_files);
        n->footprint->footprint_min_type = DAG_NODE_FOOTPRINT_RUN;
    }
}
int main() { printf("Hello world!\n"); set_t * set_A = set_new(10); set_t * set_B = set_new(7); set_t * set_C; set_add_range(set_A,0,11); set_add_range(set_B,3,5); puts("\n\n Set_A after add"); set_print_out(set_A); set_delete_range(set_A,3,11); puts("\n\n Set_A after delete"); set_print_out(set_A); puts("\n\n Set_B after add"); set_print_out(set_B); set_C = set_merge(set_A,set_B,"none"); puts("\n\n Set_C - result of merge"); set_print_out(set_C); set_delete_range(set_A,0,10); set_add_range(set_A,4,7); puts("\n\n another set_A"); set_print_out(set_A); set_delete(set_C); set_A = _set_intersection(set_A,set_B,"del 1"); puts("\n\n set_A after intersection with set_B"); set_print_out(set_A); puts("\n\n set_A size after intersection \n"); set_print_size(set_A); set_delete_range(set_A,4,5); set_add_range(set_A,1,1); set_add_range(set_A,3,4); puts("\n\n another set_A"); set_print_out(set_A); puts("\n\n set_B"); set_print_out(set_B); set_C = set_absolute_complement(set_A,set_B,"none"); puts("\n\n set_C - absolute_complement of set_A and set_B"); set_print_out(set_C); printf("\n Power of set_C = %i",set_measure_power(set_C)); set_delete(set_C); set_add_range(set_A,0,10); puts("\n\n set_A"); set_print_out(set_A); set_delete_range(set_B,3,5); set_add_range(set_B,2,4); puts("\n\n set_B"); set_print_out(set_B); set_C = set_relative_complement(set_A,set_B,"none"); puts("\n\n set_C - relative_complement of set_A and set_B"); set_print_out(set_C); set_delete(set_A); set_delete(set_B); set_delete(set_C); return 0; }
static void flow_uninit_scan_statements (flownode_t *node, set_t *defs, set_t *uninit) { set_t *stuse; set_t *stdef; statement_t *st; set_iter_t *var_i; flowvar_t *var; operand_t *op; // defs holds only reaching definitions. make it hold only reaching // uninitialized definitions set_intersection (defs, uninit); stuse = set_new (); stdef = set_new (); for (st = node->sblock->statements; st; st = st->next) { flow_analyze_statement (st, stuse, stdef, 0, 0); for (var_i = set_first (stuse); var_i; var_i = set_next (var_i)) { var = node->graph->func->vars[var_i->element]; if (set_is_intersecting (defs, var->define)) { def_t *def = flowvar_get_def (var); if (def) { if (options.warnings.uninited_variable) { warning (st->expr, "%s may be used uninitialized", def->name); } } else { bug (st->expr, "st %d, uninitialized temp %s", st->number, operand_string (var->op)); } } // avoid repeat warnings in this node set_difference (defs, var->define); } for (var_i = set_first (stdef); var_i; var_i = set_next (var_i)) { var = node->graph->func->vars[var_i->element]; // kill any reaching uninitialized definitions for this variable set_difference (defs, var->define); if (var->op->op_type == op_temp) { op = var->op; if (op->o.tempop.alias) { var = op->o.tempop.alias->o.tempop.flowvar; if (var) set_difference (defs, var->define); } for (op = op->o.tempop.alias_ops; op; op = op->next) { var = op->o.tempop.flowvar; if (var) set_difference (defs, var->define); } } } } set_delete (stuse); set_delete (stdef); }
static void
delete_node (flownode_t *node)
{
    if (node->predecessors)
        set_delete (node->predecessors);
    if (node->successors)
        set_delete (node->successors);
    if (node->edges)
        set_delete (node->edges);
    if (node->dom)
        set_delete (node->dom);
    FREE (nodes, node);
}
void dag_node_footprint_set_desc_res_wgt_diff(struct dag_node *n) { struct dag_node *node1, *node2; set_first_element(n->footprint->direct_children); while((node1 = set_next_element(n->footprint->direct_children))){ node2 = list_peek_current(node1->footprint->residual_nodes); /* Add the last residual's residual and terminal files in the branch to the current residual files */ set_insert_set(n->footprint->residual_files, node2->footprint->residual_files); set_insert_set(n->footprint->residual_files, node2->footprint->terminal_files); /* Add the last residual's residual and terminal files in the branch to the branch's first node residual files */ set_insert_set(node1->footprint->res_files, node2->footprint->residual_files); set_insert_set(node1->footprint->res_files, node2->footprint->terminal_files); /* Set branch head's res size */ node1->footprint->res = dag_file_set_size(node1->footprint->res_files); set_insert_set(node1->footprint->wgt_files, node2->footprint->footprint_min_files); node1->footprint->wgt = node2->footprint->footprint_min_size; set_insert_set(node1->footprint->max_wgt_files, node2->footprint->footprint_max_files); node1->footprint->max_wgt = node2->footprint->footprint_max_size; list_next_item(node1->footprint->residual_nodes); while((node2 = list_peek_current(node1->footprint->residual_nodes))){ if(node2->footprint->footprint_min_size >= node1->footprint->wgt){ set_delete(node1->footprint->wgt_files); node1->footprint->wgt_files = set_duplicate(node2->footprint->footprint_min_files); node1->footprint->wgt = node2->footprint->footprint_min_size; } if(node2->footprint->footprint_max_size >= node1->footprint->max_wgt){ set_delete(node1->footprint->max_wgt_files); node1->footprint->max_wgt_files = set_duplicate(node2->footprint->footprint_max_files); node1->footprint->max_wgt = node2->footprint->footprint_max_size; } list_next_item(node1->footprint->residual_nodes); } } n->footprint->residual_size = dag_file_set_size(n->footprint->residual_files); set_first_element(n->footprint->direct_children); while((node1 = set_next_element(n->footprint->direct_children))){ node1->footprint->diff = node1->footprint->wgt - node1->footprint->res; } }
void options_free()
{
    if (options.parameters) {
        set_delete(options.parameters, free);
        options.parameters = NULL;
    }
}
static inline
void aggregate_free(tuple_t tuple, unsigned char field_aggregate,
        unsigned char type_aggregate)
{
    switch(type_aggregate) {
        case AGG_FIRST:
        case AGG_MIN_INT:
        case AGG_MAX_INT:
        case AGG_SUM_INT:
        case AGG_MIN_FLOAT:
        case AGG_MAX_FLOAT:
        case AGG_SUM_FLOAT:
            /* nothing to do */
            break;

        case AGG_SET_UNION_INT:
        case AGG_SET_UNION_FLOAT:
            set_delete(MELD_SET(GET_TUPLE_FIELD(tuple, field_aggregate)));
            break;

        case AGG_SUM_LIST_INT:
        case AGG_SUM_LIST_FLOAT:
            list_delete(MELD_LIST(GET_TUPLE_FIELD(tuple, field_aggregate)));
            break;

        default:
            assert(0);
            break;
    }
}
static void load_set(Set *set, int num1, int num2, int inc1, int inc2)
{
    int i;

    for (i = num1; i <= MAX_ELEMENTS; i += inc1)
        set_insert(set, i);

    for (i = num2; i <= MAX_ELEMENTS; i += inc2)
        set_delete(set, i);
}
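/* Usage sketch (illustration only): the demo below mirrors load_set()'s two
 * loops on a plain boolean array standing in for the Set type, since the real
 * set_insert/set_delete implementation is not shown here. MAX_ELEMENTS and the
 * parameter values are hypothetical. With num1=1/inc1=1 and num2=2/inc2=2,
 * every value is inserted and the even ones are then deleted, leaving only the
 * odd values. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ELEMENTS 20

int main(void)
{
    bool member[MAX_ELEMENTS + 1] = { false };
    int num1 = 1, inc1 = 1;     /* insert 1, 2, 3, ... */
    int num2 = 2, inc2 = 2;     /* delete 2, 4, 6, ... */
    int i;

    for (i = num1; i <= MAX_ELEMENTS; i += inc1)
        member[i] = true;       /* stands in for set_insert(set, i) */
    for (i = num2; i <= MAX_ELEMENTS; i += inc2)
        member[i] = false;      /* stands in for set_delete(set, i) */

    for (i = 0; i <= MAX_ELEMENTS; i++)
        if (member[i])
            printf("%d ", i);   /* prints the odd values 1 3 5 ... 19 */
    printf("\n");
    return 0;
}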
/* Function that allows the purpose to be succinctly stated. We only
 * store the max as the true weight of the node, so this allows it to
 * be clearly expressed. */
void dag_node_footprint_max( struct dag_node *n)
{
    if(n->footprint->prog_max_footprint > n->footprint->footprint_max_size){
        set_delete(n->footprint->footprint_max_files);
        n->footprint->footprint_max_size = n->footprint->prog_max_footprint;
        n->footprint->footprint_max_files = set_duplicate(n->footprint->prog_max_files);
    }

    if(n->footprint->delete_footprint > n->footprint->footprint_max_size){
        set_delete(n->footprint->footprint_max_files);
        n->footprint->footprint_max_size = n->footprint->delete_footprint;
        n->footprint->footprint_max_files = set_duplicate(n->footprint->delete_files);
    }

    if(n->footprint->run_footprint > n->footprint->footprint_max_size){
        set_delete(n->footprint->footprint_max_files);
        n->footprint->footprint_max_size = n->footprint->run_footprint;
        n->footprint->footprint_max_files = set_duplicate(n->footprint->run_files);
    }
}
static void
flow_build_dfst (flowgraph_t *graph)
{
    set_t      *visited = set_new ();
    int         i;

    // mark the dummy nodes as visited to keep them out of the dfst
    set_add (visited, graph->num_nodes);
    set_add (visited, graph->num_nodes + 1);

    if (graph->dfo)
        free (graph->dfo);
    if (graph->dfst)
        set_delete (graph->dfst);

    graph->dfo = calloc (graph->num_nodes, sizeof (int));
    graph->dfst = set_new ();
    i = graph->num_nodes;
    df_search (graph, visited, &i, 0);
    set_delete (visited);
}
/* The run footprint of a node is defined as its target size and
 * the size of its inputs. It is the cost needed to run this node. */
void dag_node_footprint_determine_run_footprint(struct dag_node *n)
{
    set_delete(n->footprint->run_files);
    n->footprint->run_files = set_create(0);
    set_insert_list(n->footprint->run_files, n->source_files);
    set_insert_list(n->footprint->run_files, n->target_files);
    set_insert_set(n->footprint->run_files, n->footprint->terminal_files);
    set_insert_set(n->footprint->run_files, n->footprint->coexist_files);
    n->footprint->run_footprint = dag_file_set_size(n->footprint->run_files);
}
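/* Worked example (illustration only): the run footprint above is just the
 * byte size of the union of the node's source, target, terminal and coexist
 * files, with shared files counted once. The sketch below reproduces that
 * arithmetic with a toy file table and hypothetical names and sizes, since
 * the real dag_file and set APIs are not included here. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_file { const char *name; uint64_t size; };

/* Size of the de-duplicated union of several file lists. */
static uint64_t toy_union_size(const struct toy_file *lists[], const int counts[], int nlists)
{
    const char *seen[32];
    int nseen = 0, i, j, k;
    uint64_t total = 0;

    for (i = 0; i < nlists; i++) {
        for (j = 0; j < counts[i]; j++) {
            for (k = 0; k < nseen; k++)
                if (strcmp(seen[k], lists[i][j].name) == 0)
                    break;
            if (k == nseen) {           /* first occurrence of this file */
                seen[nseen++] = lists[i][j].name;
                total += lists[i][j].size;
            }
        }
    }
    return total;
}

int main(void)
{
    struct toy_file sources[]  = { { "input.dat", 100 }, { "config", 5 } };
    struct toy_file targets[]  = { { "output.dat", 40 } };
    struct toy_file terminal[] = { { "config", 5 } };   /* shared with sources */
    const struct toy_file *lists[] = { sources, targets, terminal };
    const int counts[] = { 2, 1, 1 };

    /* 100 + 5 + 40 = 145 bytes; the duplicate "config" is counted once. */
    printf("run footprint = %llu bytes\n",
           (unsigned long long)toy_union_size(lists, counts, 3));
    return 0;
}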
static inline bool aggregate_changed(int agg_type, void *v1, void *v2) { switch(agg_type) { case AGG_FIRST: return false; case AGG_MIN_INT: case AGG_MAX_INT: case AGG_SUM_INT: return MELD_INT(v1) != MELD_INT(v2); case AGG_MIN_FLOAT: case AGG_MAX_FLOAT: case AGG_SUM_FLOAT: return MELD_FLOAT(v1) != MELD_FLOAT(v2); case AGG_SET_UNION_INT: case AGG_SET_UNION_FLOAT: { Set *setOld = MELD_SET(v1); Set *setNew = MELD_SET(v2); if(!set_equal(setOld, setNew)) return true; /* delete new set union */ set_delete(setNew); return false; } break; case AGG_SUM_LIST_INT: case AGG_SUM_LIST_FLOAT: { List *listOld = MELD_LIST(v1); List *listNew = MELD_LIST(v2); if(!list_equal(listOld, listNew)) return true; /* delete new list */ list_delete(listNew); return false; } break; default: assert(0); return true; } assert(0); while(1); }
void ht_delete(ht_intset_t *set)
{
    int i;

    for (i = 0; i < maxhtlength; i++) {
        intset_t *iset = set->buckets[i];
        set_delete(iset);
        free(set->buckets[i]);
    }
    free(set->buckets);
    free(set);
}
static void
delete_graph (flowgraph_t *graph)
{
    int         i;

    if (graph->nodes) {
        for (i = 0; i < graph->num_nodes; i++)
            delete_node (graph->nodes[i]);
        free (graph->nodes);
    }
    if (graph->edges)
        free (graph->edges);
    if (graph->dfst)
        set_delete (graph->dfst);
    if (graph->dfo)
        free (graph->dfo);
    FREE (graphs, graph);
}
static void
flow_find_dominators (flowgraph_t *graph)
{
    set_t      *work;
    flownode_t *node;
    int         i;
    set_iter_t *pred;
    int         changed;

    if (!graph->num_nodes)
        return;

    // First, create a base set for the initial state of the non-initial nodes
    work = set_new ();
    for (i = 0; i < graph->num_nodes; i++)
        set_add (work, i);

    set_add (graph->nodes[0]->dom, 0);

    // initialize dom for the non-initial nodes
    for (i = 1; i < graph->num_nodes; i++) {
        set_assign (graph->nodes[i]->dom, work);
    }

    do {
        changed = 0;
        for (i = 1; i < graph->num_nodes; i++) {
            node = graph->nodes[i];
            // seed work with the first predecessor's dominators, then
            // intersect in the rest; starting from an empty set would
            // wipe out the intersection entirely
            pred = set_first (node->predecessors);
            set_assign (work, graph->nodes[pred->element]->dom);
            for (pred = set_next (pred); pred; pred = set_next (pred))
                set_intersection (work, graph->nodes[pred->element]->dom);
            set_add (work, i);
            if (!set_is_equivalent (work, node->dom))
                changed = 1;
            set_assign (node->dom, work);
        }
    } while (changed);
    set_delete (work);
}
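/* Illustration only: flow_find_dominators() iterates dom(n) = {n} union the
 * intersection of dom(p) over all predecessors p until nothing changes. The
 * standalone sketch below runs the same fixpoint on a hypothetical 4-node
 * diamond CFG (0->1, 0->2, 1->3, 2->3) using plain bitmasks instead of the
 * qfcc set_t API. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    enum { N = 4 };
    const uint32_t preds[N] = { 0, 1u << 0, 1u << 0, (1u << 1) | (1u << 2) };
    const uint32_t all = (1u << N) - 1;
    uint32_t dom[N];
    int changed, i, p;

    dom[0] = 1u << 0;               /* the entry node dominates only itself */
    for (i = 1; i < N; i++)
        dom[i] = all;               /* non-entry nodes start at "every node" */

    do {
        changed = 0;
        for (i = 1; i < N; i++) {
            uint32_t work = all;    /* identity element for intersection */
            for (p = 0; p < N; p++)
                if (preds[i] & (1u << p))
                    work &= dom[p]; /* intersect over predecessors */
            work |= 1u << i;        /* a node always dominates itself */
            if (work != dom[i]) {
                dom[i] = work;
                changed = 1;
            }
        }
    } while (changed);

    for (i = 0; i < N; i++)
        printf("dom(%d) = %#x\n", i, dom[i]);   /* 0x1, 0x3, 0x5, 0x9 */
    return 0;
}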
static int set_double_buckets(struct set *s)
{
    struct set *sn = set_create(2 * s->bucket_count);
    if(!sn)
        return 0;

    /* Move elements to new set */
    void *element;
    set_first_element(s);
    while( (element = set_next_element(s)) )
        if(!set_insert(sn, element)) {
            set_delete(sn);
            return 0;
        }

    /* Delete all elements */
    struct entry *e, *f;
    int i;
    for(i = 0; i < s->bucket_count; i++) {
        e = s->buckets[i];
        while(e) {
            f = e->next;
            free(e);
            e = f;
        }
    }

    /* Make the old point to the new */
    free(s->buckets);
    s->buckets = sn->buckets;
    s->bucket_count = sn->bucket_count;
    s->size = sn->size;

    /* Delete reference to new, so old is safe */
    free(sn);

    return 1;
}
static flowloop_t *
make_loop (flowgraph_t *graph, unsigned n, unsigned d)
{
    flowloop_t *loop = new_loop ();
    flownode_t *node;
    set_t      *stack = set_new ();
    set_iter_t *pred;

    loop->head = d;
    set_add (loop->nodes, d);
    insert_loop_node (loop, n, stack);
    while (!set_is_empty (stack)) {
        set_iter_t *ss = set_first (stack);
        unsigned    m = ss->element;
        set_del_iter (ss);
        set_remove (stack, m);
        node = graph->nodes[m];
        for (pred = set_first (node->predecessors); pred;
             pred = set_next (pred))
            insert_loop_node (loop, pred->element, stack);
    }
    set_delete (stack);
    return loop;
}
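/* Illustration only: make_loop() builds the natural loop of a back edge
 * n -> d by seeding the loop with d and n, then repeatedly pulling a node
 * off the worklist and adding its predecessors. The sketch below runs the
 * same worklist on a hypothetical CFG (0->1->2->3->1, plus 2->4) with the
 * back edge 3->1, using bitmasks instead of flowloop_t/set_t. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    enum { N = 5 };
    const uint32_t preds[N] = { 0, (1u << 0) | (1u << 3), 1u << 1, 1u << 2, 1u << 2 };
    const int n = 3, d = 1;     /* back edge n -> d */
    uint32_t loop = 1u << d;    /* the header d is always in the loop */
    uint32_t stack = 0;
    int m, p;

    if (!(loop & (1u << n))) {  /* insert_loop_node(n) */
        loop |= 1u << n;
        stack |= 1u << n;
    }
    while (stack) {
        for (m = 0; !(stack & (1u << m)); m++)
            ;                   /* pick any worklist member */
        stack &= ~(1u << m);
        for (p = 0; p < N; p++) /* predecessors of m join the loop */
            if ((preds[m] & (1u << p)) && !(loop & (1u << p))) {
                loop |= 1u << p;
                stack |= 1u << p;
            }
    }
    printf("loop nodes = %#x\n", loop);     /* 0xe: nodes {1, 2, 3} */
    return 0;
}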
static void
flow_uninitialized (flowgraph_t *graph)
{
    int         i;
    flownode_t *node;
    flowvar_t  *var;
    set_iter_t *var_i;
    set_t      *defs;
    set_t      *uninitialized;

    uninitialized = set_new ();
    node = graph->nodes[graph->num_nodes];
    set_assign (uninitialized, node->reaching_defs.out);
    defs = set_new ();

    for (i = 0; i < graph->num_nodes; i++) {
        node = graph->nodes[graph->dfo[i]];
        set_empty (defs);
        // collect definitions of all variables "used" in this node. use from
        // the live vars analysis is perfect for the job
        for (var_i = set_first (node->live_vars.use); var_i;
             var_i = set_next (var_i)) {
            var = graph->func->vars[var_i->element];
            set_union (defs, var->define);
        }
        // interested in only those definitions that actually reach this node
        set_intersection (defs, node->reaching_defs.in);
        // if any of the definitions come from the entry dummy block, then
        // the statements need to be scanned in case an aliasing definition
        // kills the dummy definition before the usage, and also so the line
        // number information can be obtained from the statement.
        if (set_is_intersecting (defs, uninitialized))
            flow_uninit_scan_statements (node, defs, uninitialized);
    }
    set_delete (defs);
}
/* INVARIANTS: both error[ex1] & error[ex2] must be valid, though * it doesn't matter what set they belong to * all of the weights are feasible & obey the lin. equality * constraint when they come in & only this fn plays with the weights */ int opt_pair(int ex1, int ex2, struct svm_smo_model *ms) { double a1, a2; double ao1, ao2; float C1, C2, C_min; bow_wv **docs; double diff1, diff2; double e1, e2; double eta; /* the value of the second deriv. of the obj */ double k11, k12, k22; int ndocs; double L, H; double *weights; int *yvect; int y1, y2; int i; //printf("opt_pair(%d, %d)\n",ex1,ex2); //printV("",ms->error,ms->ndocs,"\n"); if (ex1 == ex2) { m1 ++; return 0; } ms->n_pair_tot ++; weights = ms->weights; yvect = ms->yvect; C1 = ms->cvect[ex1]; C2 = ms->cvect[ex2]; C_min = MIN(C1, C2); y1 = yvect[ex1]; y2 = yvect[ex2]; a1 = weights[ex1]; a2 = weights[ex2]; if (y1 == y2) { H = a1 + a2; L = H - C1; L = (0 > L) ? 0 : L; H = (C2 < H) ? C2 : H; } else { L = a2 - a1; H = L + C1; L = (0 > L) ? 0 : L; H = (C2 < H) ? C2 : H; } if (L >= H) { m2++; return 0; } docs = ms->docs; ndocs = ms->ndocs; k12 = svm_kernel_cache(docs[ex1],docs[ex2]); k11 = svm_kernel_cache(docs[ex1],docs[ex1]); k22 = svm_kernel_cache(docs[ex2],docs[ex2]); eta = 2*k12 - k11 - k22; //printf("k11,k12,k22,eta:(%f,%f,%f,%f)\n",k11,k12,k22,eta); e1 = ms->error[ex1]; e2 = ms->error[ex2]; ao2 = a2; if (eta < 0) { /* a2 still holds weights[j] */ a2 = a2 - y2*(e1-e2)/eta; if (a2 < L) a2 = L; else if (a2 > H) a2 = H; if (a2 < svm_epsilon_a) { a2 = 0; } else if (a2 > C2 - svm_epsilon_a) { a2 = C2; } } else { a2 = calc_eta_hi(ex1, ex2, L, H, k11, k12, k22, ms); if (a2 == MAXDOUBLE) return 0; } if (fabs(a2 - ao2) < svm_epsilon_a) { //*(a2 + ao2 + svm_epsilon_crit)) { m4 ++; return 0; } ao1 = weights[ex1]; a1 = ao1 + y1*y2*(ao2 - a2); /* we know that a2 can't be out of the feasible range since we expilicitly * tested for this (by clipping) - however, due to prec. problems - a1 * could be out of range - if it is, we need to make it feasible (to the * alpha constraints), since the number is bogus anyway & was caused by * precision problems - there's no reason to alter a2 */ if (a1 < svm_epsilon_a) { a1 = 0.0; } else if (a1 > C1 - svm_epsilon_a) { a1 = C1; } weights[ex1] = a1; weights[ex2] = a2; diff1 = y1*(a1 - ao1); diff2 = y2*(a2 - ao2); /* update the hyperplane */ if (svm_kernel_type == 0) { double *W = ms->W; for (i=0; i<docs[ex1]->num_entries; i++) { W[docs[ex1]->entry[i].wi] += diff1 * docs[ex1]->entry[i].weight; } for (i=0; i<docs[ex2]->num_entries; i++) { W[docs[ex2]->entry[i].wi] += diff2 * docs[ex2]->entry[i].weight; } } /* update the sets (& start to re-evaluate bup & blow) */ { int j, i, y; double a, aold, C, e; struct set *s; ms->bup = MAXDOUBLE; ms->blow = -1*MAXDOUBLE; for (j=0, i=ex1, a=a1, aold=ao1, y=y1, C=C1, e=e1; j<2; j++, i=ex2, a=a2, aold=ao2, y=y2, C=C2, e=e2) { /* the following block also sets bup & blow to preliminary values. 
* this is so that we don't need to repeat these checks when we're * trying to figure out whether or not some */ if (a < svm_epsilon_a) { if (y == 1) { s = &(ms->I1); if (ms->bup > e) { ms->bup = e; ms->iup = i; } } else { s = &(ms->I4); if (ms->blow < e) { ms->blow = e; ms->ilow = i; } } } else if (a > C - svm_epsilon_a) { if (y == 1) { s = &(ms->I3); if (ms->blow < e) { ms->blow = e; ms->ilow = i; } } else { s = &(ms->I2); if (ms->bup > e) { ms->bup = e; ms->iup = i; } } } else { s = &(ms->I0); if (ms->blow < e) { ms->blow = e; ms->ilow = i; } if (ms->bup > e) { ms->bup = e; ms->iup = i; } } if (set_insert(i, s)) { /* if this was actually inserted, the state of sets has changed, something needs deleted */ int deleted=0; if (aold < svm_epsilon_a) { ms->nsv ++; } else if (a < svm_epsilon_a) { /* if this a changed & its zero now, it used to be an SV */ ms->nsv --; } /* there's 12 different possible ways for the sets to change, * I believe this to be a pretty simple & efficient way to do it... */ if (y == 1) { if (s != &(ms->I1)) deleted = set_delete(i,&(ms->I1)); if (!deleted && s != &(ms->I3)) deleted = set_delete(i,&(ms->I3)); } else if (y == -1) { if (s != &(ms->I2)) deleted = set_delete(i,&(ms->I2)); if (!deleted && s != &(ms->I4)) deleted = set_delete(i,&(ms->I4)); } if (!deleted) { set_delete(i,&(ms->I0)); } } } } ms->n_pair_suc ++; /* much like the build_svm algorithm's s(t) vector, error needs * to be updated every time we set some new alphas */ /* also finish update bup & blow */ { double *error = ms->error; int *items; int nitems; items = ms->I0.items; nitems = ms->I0.ilength; for (i=0; i<nitems; i++) { double a, b; a = svm_kernel_cache(docs[ex1],docs[items[i]]); b = svm_kernel_cache(docs[ex2],docs[items[i]]); error[items[i]] += diff1*a + diff2*b; } { int efrom; double e; /* compute the new bup & blow */ for (i=0, e=ms->bup; i<nitems; i++) { if (e > error[items[i]]) { e = error[items[i]]; efrom = items[i]; } } if (e != ms->bup) { ms->bup = e; ms->iup = efrom; } for (i=0, e=ms->blow; i<nitems; i++) { if (e < error[items[i]]) { e = error[items[i]]; efrom = items[i]; } } if (ms->blow != e) { ms->blow = e; ms->ilow = efrom; } } } kcache_age(); //printf("blow = %f(%d), bup = %f(%d)\n",ms->blow, ms->ilow, ms->bup, ms->iup); return 1; }
static void flow_reaching_defs (flowgraph_t *graph) { int i; int changed; flownode_t *node; statement_t *st; set_t *stdef = set_new (); set_t *stgen = set_new (); set_t *stkill = set_new (); set_t *oldout = set_new (); set_t *gen, *kill, *in, *out, *uninit; set_iter_t *var_i; set_iter_t *pred_i; flowvar_t *var; // First, create out for the entry dummy node using fake statement numbers. kill = set_new (); for (i = 0; i < graph->func->num_statements; i++) set_add (kill, i); uninit = set_new (); for (i = 0; i < graph->func->num_vars; i++) { var = graph->func->vars[i]; set_union (uninit, var->define);// do not want alias handling here set_difference (uninit, kill); // remove any gens from the function } graph->nodes[graph->num_nodes]->reaching_defs.out = uninit; graph->nodes[graph->num_nodes]->reaching_defs.in = set_new (); graph->nodes[graph->num_nodes]->reaching_defs.gen = set_new (); graph->nodes[graph->num_nodes]->reaching_defs.kill = set_new (); // Calculate gen and kill for each block, and initialize in and out for (i = 0; i < graph->num_nodes; i++) { node = graph->nodes[i]; gen = set_new (); kill = set_new (); for (st = node->sblock->statements; st; st = st->next) { flow_analyze_statement (st, 0, stdef, 0, 0); set_empty (stgen); set_empty (stkill); for (var_i = set_first (stdef); var_i; var_i = set_next (var_i)) { var = graph->func->vars[var_i->element]; flow_kill_aliases (stkill, var, uninit); set_remove (stkill, st->number); set_add (stgen, st->number); } set_difference (gen, stkill); set_union (gen, stgen); set_difference (kill, stgen); set_union (kill, stkill); } node->reaching_defs.gen = gen; node->reaching_defs.kill = kill; node->reaching_defs.in = set_new (); node->reaching_defs.out = set_new (); } changed = 1; while (changed) { changed = 0; // flow down the graph for (i = 0; i < graph->num_nodes; i++) { node = graph->nodes[graph->dfo[i]]; in = node->reaching_defs.in; out = node->reaching_defs.out; gen = node->reaching_defs.gen; kill = node->reaching_defs.kill; for (pred_i = set_first (node->predecessors); pred_i; pred_i = set_next (pred_i)) { flownode_t *pred = graph->nodes[pred_i->element]; set_union (in, pred->reaching_defs.out); } set_assign (oldout, out); set_assign (out, in); set_difference (out, kill); set_union (out, gen); if (!set_is_equivalent (out, oldout)) changed = 1; } } set_delete (oldout); set_delete (stdef); set_delete (stgen); set_delete (stkill); }
void dag_node_footprint_delete(struct dag_node_footprint *f)
{
    set_delete(f->direct_children);
    set_delete(f->accounted);

    set_delete(f->terminal_files);
    set_delete(f->coexist_files);

    list_delete(f->residual_nodes);
    set_delete(f->residual_files);

    set_delete(f->run_files);

    set_delete(f->delete_files);
    list_delete(f->delete_run_order);

    set_delete(f->prog_min_files);
    set_delete(f->prog_max_files);
    list_delete(f->prog_run_order);

    set_delete(f->footprint_min_files);
    set_delete(f->footprint_max_files);

    set_delete(f->res_files);
    set_delete(f->wgt_files);
    set_delete(f->max_wgt_files);

    free(f);
}
static void flow_live_vars (flowgraph_t *graph) { int i, j; flownode_t *node; set_t *use; set_t *def; set_t *stuse = set_new (); set_t *stdef = set_new (); set_t *tmp = set_new (); set_iter_t *succ; statement_t *st; int changed = 1; // first, calculate use and def for each block, and initialize the in and // out sets. for (i = 0; i < graph->num_nodes; i++) { node = graph->nodes[i]; use = set_new (); def = set_new (); for (st = node->sblock->statements; st; st = st->next) { flow_analyze_statement (st, stuse, stdef, 0, 0); live_set_use (stuse, use, def); live_set_def (stdef, use, def); } node->live_vars.use = use; node->live_vars.def = def; node->live_vars.in = set_new (); node->live_vars.out = set_new (); } // create in for the exit dummy block using the global vars used by the // function use = set_new (); set_assign (use, graph->func->global_vars); node = graph->nodes[graph->num_nodes + 1]; node->live_vars.in = use; node->live_vars.out = set_new (); node->live_vars.use = set_new (); node->live_vars.def = set_new (); while (changed) { changed = 0; // flow UP the graph because live variable analysis uses information // from a node's successors rather than its predecessors. for (j = graph->num_nodes - 1; j >= 0; j--) { node = graph->nodes[graph->dfo[j]]; set_empty (tmp); for (succ = set_first (node->successors); succ; succ = set_next (succ)) set_union (tmp, graph->nodes[succ->element]->live_vars.in); if (!set_is_equivalent (node->live_vars.out, tmp)) { changed = 1; set_assign (node->live_vars.out, tmp); } set_assign (node->live_vars.in, node->live_vars.out); set_difference (node->live_vars.in, node->live_vars.def); set_union (node->live_vars.in, node->live_vars.use); } } set_delete (stuse); set_delete (stdef); set_delete (tmp); }
static void
delete_loop (flowloop_t *loop)
{
    set_delete (loop->nodes);
    FREE (loops, loop);
}
/* The descendant footprint of a node is defined as a balance between
 * the widest point of the children branches, while still maintaining
 * the existence of the sibling branches. The assumption is that by
 * knowing the largest size needed, all other branches can be executed
 * within that designated size, so we only need to add the residual
 * size of a branch to hold onto it while the heavier weights are
 * computed. */
void dag_node_footprint_determine_descendant(struct dag_node *n)
{
    struct dag_node *node1, *node2; //, *res_node;
    struct list *tmp_direct_children = list_create();
    struct set *footprint = set_create(0);
    uint64_t footprint_size = 0;

    /* Create a second list of direct children that allows us to sort on
       footprint properties. This is used when we compare footprint and
       the residual nodes. */
    set_first_element(n->footprint->direct_children);
    while((node1 = set_next_element(n->footprint->direct_children))){
        list_push_tail(tmp_direct_children, node1);
        list_first_item(node1->footprint->residual_nodes);
    }

    /* There are two cases for descendant nodes:
       1. Multiple direct_children indicating that multiple branches will
          need to be maintained concurrently and we need to account for this.
       2. One descendant indicating we want to continue the chain of
          residual and footprints that our child holds. Create empty lists
          for this case. */
    set_first_element(n->footprint->direct_children);
    if(set_size(n->footprint->direct_children) > 1){
        dag_node_footprint_determine_desc_residual_intersect(n);

        dag_node_footprint_set_desc_res_wgt_diff(n);

        set_insert_list(footprint, n->target_files);

        list_sort(tmp_direct_children, dag_node_footprint_comp_diff);
        list_first_item(tmp_direct_children);
        /* Loop over each child giving it the chance to be the largest footprint. */
        while((node1 = list_next_item(tmp_direct_children))){
            footprint_size = dag_file_set_size(footprint);
            if((footprint_size + node1->footprint->wgt) > n->footprint->delete_footprint){
                set_delete(n->footprint->delete_files);
                n->footprint->delete_files = set_duplicate(footprint);
                set_insert_set(n->footprint->delete_files, node1->footprint->wgt_files);
                n->footprint->delete_footprint = dag_file_set_size(n->footprint->delete_files);
            }
            // This is where we would remove an input file if it wasn't needed for other branches
            set_insert_set(footprint, node1->footprint->res_files);
            list_push_tail(n->footprint->delete_run_order, node1);
        }

        list_sort(tmp_direct_children, dag_node_footprint_comp_wgt_rev);
        list_first_item(tmp_direct_children);
        node1 = list_next_item(tmp_direct_children);
        set_insert_set(n->footprint->prog_max_files, node1->footprint->max_wgt_files);
        set_insert_set(n->footprint->prog_min_files, node1->footprint->wgt_files);
        list_push_tail(n->footprint->prog_run_order, node1);

        /* Find the total space needed to hold all residuals and the
           largest footprint branch concurrently. */
        while((node2 = list_next_item(tmp_direct_children))){
            set_insert_set(n->footprint->prog_max_files, node2->footprint->max_wgt_files);
            set_insert_set(n->footprint->prog_min_files, node2->footprint->res_files);
            list_push_tail(n->footprint->prog_run_order, node2);
        }
        n->footprint->prog_max_footprint = dag_file_set_size(n->footprint->prog_max_files);
        n->footprint->prog_min_footprint = dag_file_set_size(n->footprint->prog_min_files);
    } else {
        if(set_size(n->footprint->direct_children) == 1){
            node1 = set_next_element(n->footprint->direct_children);
            list_delete(n->footprint->residual_nodes);
            n->footprint->residual_nodes = list_duplicate(node1->footprint->residual_nodes);
        }

        set_insert_list(n->footprint->residual_files, n->target_files);
        set_insert_set(n->footprint->residual_files, n->footprint->terminal_files);
        n->footprint->residual_size = dag_file_set_size(n->footprint->residual_files);
    }

    /* Add the current node to the residual list so parents can quickly
       access these decisions. */
    list_push_tail(n->footprint->residual_nodes, n);

    list_delete(tmp_direct_children);
    set_delete(footprint);
}
int main(int argc, char **argv) { struct option long_options[] = { // These options don't set a flag {"help", no_argument, NULL, 'h'}, {"duration", required_argument, NULL, 'd'}, {"initial-size", required_argument, NULL, 'i'}, {"thread-num", required_argument, NULL, 't'}, {"range", required_argument, NULL, 'r'}, {"seed", required_argument, NULL, 'S'}, {"update-rate", required_argument, NULL, 'u'}, {"bias-range", required_argument, NULL, 'b'}, {"bias-offset", required_argument, NULL, 'u'}, {"elasticity", required_argument, NULL, 'x'}, {NULL, 0, NULL, 0} }; intset_t *set; int i, c, size; val_t last = 0; val_t val = 0; unsigned long reads, effreads, updates, effupds, aborts, aborts_locked_read, aborts_locked_write, aborts_validate_read, aborts_validate_write, aborts_validate_commit, aborts_invalid_memory, aborts_double_write, max_retries, failures_because_contention; thread_data_t *data; pthread_t *threads; pthread_attr_t attr; barrier_t barrier; struct timeval start, end; struct timespec timeout; int duration = DEFAULT_DURATION; int initial = DEFAULT_INITIAL; int nb_threads = DEFAULT_NB_THREADS; long range = DEFAULT_RANGE; long bias_range = DEFAULT_BIAS_RANGE; long bias_offset = DEFAULT_BIAS_OFFSET; int bias_enabled = 0; int seed = DEFAULT_SEED; int update = DEFAULT_UPDATE; int unit_tx = DEFAULT_ELASTICITY; int alternate = DEFAULT_ALTERNATE; int effective = DEFAULT_EFFECTIVE; sigset_t block_set; while(1) { i = 0; c = getopt_long(argc, argv, "hAf:d:i:t:r:S:u:b:B:x:", long_options, &i); if(c == -1) break; if(c == 0 && long_options[i].flag == 0) c = long_options[i].val; switch(c) { case 0: /* Flag is automatically set */ break; case 'h': printf("intset -- STM stress test " "(linked list)\n" "\n" "Usage:\n" " intset [options...]\n" "\n" "Options:\n" " -h, --help\n" " Print this message\n" " -A, --alternate (default="XSTR(DEFAULT_ALTERNATE)")\n" " Consecutive insert/remove target the same value\n" " -f, --effective <int>\n" " update txs must effectively write (0=trial, 1=effective, default=" XSTR(DEFAULT_EFFECTIVE) ")\n" " -d, --duration <int>\n" " Test duration in milliseconds (0=infinite, default=" XSTR(DEFAULT_DURATION) ")\n" " -i, --initial-size <int>\n" " Number of elements to insert before test (default=" XSTR(DEFAULT_INITIAL) ")\n" " -t, --thread-num <int>\n" " Number of threads (default=" XSTR(DEFAULT_NB_THREADS) ")\n" " -r, --range <int>\n" " Range of integer values inserted in set (default=" XSTR(DEFAULT_RANGE) ")\n" " -S, --seed <int>\n" " RNG seed (0=time-based, default=" XSTR(DEFAULT_SEED) ")\n" " -u, --update-rate <int>\n" " Percentage of update transactions (default=" XSTR(DEFAULT_UPDATE) ")\n" " -b, --bias-range <int>\n" " If used, updates will take place in range [B, B+b)\n" " -B, --bias-offset <int>\n" " If used, updates will take place in range [B, B+b)\n" " -x, --elasticity (default=4)\n" " Use elastic transactions\n" " 0 = non-protected,\n" " 1 = normal transaction,\n" " 2 = read elastic-tx,\n" " 3 = read/add elastic-tx,\n" " 4 = read/add/rem elastic-tx,\n" " 5 = all recursive elastic-tx,\n" " 6 = harris lock-free\n" ); exit(0); case 'A': alternate = 1; break; case 'f': effective = atoi(optarg); break; case 'd': duration = atoi(optarg); break; case 'i': initial = atoi(optarg); break; case 't': nb_threads = atoi(optarg); break; case 'r': range = atol(optarg); break; case 'S': seed = atoi(optarg); break; case 'u': update = atoi(optarg); break; case 'b': bias_range = atol(optarg); break; case 'B': bias_offset = atol(optarg); break; case 'x': unit_tx = atoi(optarg); break; case 
'?': printf("Use -h or --help for help\n"); exit(0); default: exit(1); } } assert(duration >= 0); assert(initial >= 0); assert(nb_threads > 0); assert(range > 0 && range >= initial); assert(update >= 0 && update <= 100); if (bias_range != DEFAULT_BIAS_RANGE || bias_offset != DEFAULT_BIAS_OFFSET) { bias_enabled = 1; assert(bias_range >= 0); assert(bias_offset > 0); } printf("Bench type : " ALGONAME "\n"); printf("Duration : %d\n", duration); printf("Initial size : %d\n", initial); printf("Nb threads : %d\n", nb_threads); printf("Value range : %ld\n", range); if (bias_enabled) { printf("Biased range: [%ld, %ld)\n", bias_offset, bias_offset+bias_range); } printf("Seed : %d\n", seed); printf("Update rate : %d\n", update); printf("Elasticity : %d\n", unit_tx); printf("Alternate : %d\n", alternate); printf("Effective : %d\n", effective); printf("Type sizes : int=%d/long=%d/ptr=%d/word=%d\n", (int)sizeof(int), (int)sizeof(long), (int)sizeof(void *), (int)sizeof(uintptr_t)); printf("Node size : %d\n", (int)sizeof(node_t)); timeout.tv_sec = duration / 1000; timeout.tv_nsec = (duration % 1000) * 1000000; if ((data = (thread_data_t *)malloc(nb_threads * sizeof(thread_data_t))) == NULL) { perror("malloc"); exit(1); } if ((threads = (pthread_t *)malloc(nb_threads * sizeof(pthread_t))) == NULL) { perror("malloc"); exit(1); } if (seed == 0) srand((int)time(0)); else srand(seed); set = set_new(); stop = 0; /* Populate set */ printf("Adding %d entries to set\n", initial); i = 0; while (i < initial) { val = rand_range(range); if (set_insert(set, val)) { last = val; i++; } } size = set_size(set); printf("Set size : %d\n", size); /* Access set from all threads */ barrier_init(&barrier, nb_threads + 1); pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i = 0; i < nb_threads; i++) { printf("Creating thread %d\n", i); data[i].first = last; data[i].bias_enabled = bias_enabled; data[i].bias_range = bias_range; data[i].bias_offset = bias_offset; data[i].range = range; data[i].update = update; data[i].unit_tx = unit_tx; data[i].alternate = alternate; data[i].effective = effective; data[i].nb_add = 0; data[i].nb_added = 0; data[i].nb_remove = 0; data[i].nb_removed = 0; data[i].nb_contains = 0; data[i].nb_found = 0; data[i].nb_aborts = 0; data[i].nb_aborts_locked_read = 0; data[i].nb_aborts_locked_write = 0; data[i].nb_aborts_validate_read = 0; data[i].nb_aborts_validate_write = 0; data[i].nb_aborts_validate_commit = 0; data[i].nb_aborts_invalid_memory = 0; data[i].nb_aborts_double_write = 0; data[i].max_retries = 0; data[i].seed = rand(); data[i].set = set; data[i].barrier = &barrier; data[i].failures_because_contention = 0; if (pthread_create(&threads[i], &attr, test, (void *)(&data[i])) != 0) { fprintf(stderr, "Error creating thread\n"); exit(1); } } pthread_attr_destroy(&attr); /* Start threads */ barrier_cross(&barrier); printf("STARTING...\n"); gettimeofday(&start, NULL); if (duration > 0) { nanosleep(&timeout, NULL); } else { sigemptyset(&block_set); sigsuspend(&block_set); } /* #ifdef ICC stop = 1; #else AO_store_full(&stop, 1); #endif // ICC */ atomic_store(&stop, 1); gettimeofday(&end, NULL); printf("STOPPING...\n"); /* Wait for thread completion */ for (i = 0; i < nb_threads; i++) { if (pthread_join(threads[i], NULL) != 0) { fprintf(stderr, "Error waiting for thread completion\n"); exit(1); } } duration = (end.tv_sec * 1000 + end.tv_usec / 1000) - (start.tv_sec * 1000 + start.tv_usec / 1000); aborts = 0; aborts_locked_read = 0; aborts_locked_write = 0; 
aborts_validate_read = 0; aborts_validate_write = 0; aborts_validate_commit = 0; aborts_invalid_memory = 0; aborts_double_write = 0; failures_because_contention = 0; reads = 0; effreads = 0; updates = 0; effupds = 0; max_retries = 0; for (i = 0; i < nb_threads; i++) { printf("Thread %d\n", i); printf(" #add : %lu\n", data[i].nb_add); printf(" #added : %lu\n", data[i].nb_added); printf(" #remove : %lu\n", data[i].nb_remove); printf(" #removed : %lu\n", data[i].nb_removed); printf(" #contains : %lu\n", data[i].nb_contains); printf(" #found : %lu\n", data[i].nb_found); printf(" #aborts : %lu\n", data[i].nb_aborts); printf(" #lock-r : %lu\n", data[i].nb_aborts_locked_read); printf(" #lock-w : %lu\n", data[i].nb_aborts_locked_write); printf(" #val-r : %lu\n", data[i].nb_aborts_validate_read); printf(" #val-w : %lu\n", data[i].nb_aborts_validate_write); printf(" #val-c : %lu\n", data[i].nb_aborts_validate_commit); printf(" #inv-mem : %lu\n", data[i].nb_aborts_invalid_memory); printf(" #inv-mem : %lu\n", data[i].nb_aborts_double_write); printf(" #failures : %lu\n", data[i].failures_because_contention); printf(" Max retries : %lu\n", data[i].max_retries); aborts += data[i].nb_aborts; aborts_locked_read += data[i].nb_aborts_locked_read; aborts_locked_write += data[i].nb_aborts_locked_write; aborts_validate_read += data[i].nb_aborts_validate_read; aborts_validate_write += data[i].nb_aborts_validate_write; aborts_validate_commit += data[i].nb_aborts_validate_commit; aborts_invalid_memory += data[i].nb_aborts_invalid_memory; aborts_double_write += data[i].nb_aborts_double_write; failures_because_contention += data[i].failures_because_contention; reads += data[i].nb_contains; effreads += data[i].nb_contains + (data[i].nb_add - data[i].nb_added) + (data[i].nb_remove - data[i].nb_removed); updates += (data[i].nb_add + data[i].nb_remove); effupds += data[i].nb_removed + data[i].nb_added; size += data[i].nb_added - data[i].nb_removed; if (max_retries < data[i].max_retries) max_retries = data[i].max_retries; } printf("Set size : %d (expected: %d)\n", set_size(set), size); if (set_size(set) != size) { printf("ERROR: Set size did not match expected.\n"); } printf("Duration : %d (ms)\n", duration); printf("#txs : %lu (%f / s)\n", reads + updates, (reads + updates) * 1000.0 / duration); printf("#read txs : "); if (effective) { printf("%lu (%f / s)\n", effreads, effreads * 1000.0 / duration); printf(" #contains : %lu (%f / s)\n", reads, reads * 1000.0 / duration); } else printf("%lu (%f / s)\n", reads, reads * 1000.0 / duration); printf("#eff. 
upd rate: %f \n", 100.0 * effupds / (effupds + effreads)); printf("#update txs : "); if (effective) { printf("%lu (%f / s)\n", effupds, effupds * 1000.0 / duration); printf(" #upd trials : %lu (%f / s)\n", updates, updates * 1000.0 / duration); } else printf("%lu (%f / s)\n", updates, updates * 1000.0 / duration); printf("#aborts : %lu (%f / s)\n", aborts, aborts * 1000.0 / duration); printf(" #lock-r : %lu (%f / s)\n", aborts_locked_read, aborts_locked_read * 1000.0 / duration); printf(" #lock-w : %lu (%f / s)\n", aborts_locked_write, aborts_locked_write * 1000.0 / duration); printf(" #val-r : %lu (%f / s)\n", aborts_validate_read, aborts_validate_read * 1000.0 / duration); printf(" #val-w : %lu (%f / s)\n", aborts_validate_write, aborts_validate_write * 1000.0 / duration); printf(" #val-c : %lu (%f / s)\n", aborts_validate_commit, aborts_validate_commit * 1000.0 / duration); printf(" #inv-mem : %lu (%f / s)\n", aborts_invalid_memory, aborts_invalid_memory * 1000.0 / duration); printf(" #dup-w : %lu (%f / s)\n", aborts_double_write, aborts_double_write * 1000.0 / duration); printf(" #failures : %lu\n", failures_because_contention); printf("Max retries : %lu\n", max_retries); /* Delete set */ set_delete(set); free(threads); free(data); return 0; }
void process_datafile(const char *fname, set_t *set, struct cmdline_opt *optvals) { FILE *file; char cmd; int ret, val; int output_flag; /* sanity check for pointers */ assert(set); assert(optvals); output_flag = optvals->verbose; if ((file = fopen(fname, "r")) == NULL) { fprintf(stderr,"Error: cannot open file %s.\n", fname); return; } /* we are assuming a well-formed datafile for this project */ while (fscanf(file, "%c %d\n", &cmd, &val) > 0) { switch (cmd) { case 'I': /* insert */ ret = set_insert(set, val); if (output_flag) { printf("insert:\t%d\t%s\n", val, (ret ? "new" : "repeat")); } break; case 'D': /* delete */ ret = set_delete(set, val); if (output_flag) { printf("delete:\t%d\t%s\n", val, (ret ? "removed" : "missing")); } break; case 'S': /* search */ ret = set_search(set, val); if (output_flag) { printf("search:\t%d\t%s\n", val, (ret ? "present" : "absent")); } break; /* define your own commands if it helps with testing */ case 'P': /* print, useful for debugging */ if (output_flag) { set_print(set); } break; default: /* ignore any unrecognised commands */ break; } } fclose(file); }
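/* Usage sketch (illustration only): process_datafile() parses lines with
 * fscanf(file, "%c %d\n", &cmd, &val), so a well-formed datafile is just a
 * command letter followed by an integer per line. The snippet below writes a
 * small file in that format; "sample.dat" is an arbitrary name, and the
 * comments show the verbose output each line would trigger. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("sample.dat", "w");
    if (!f) {
        perror("fopen");
        return 1;
    }
    fprintf(f, "I 5\n");    /* insert 5          -> "insert: 5 new"     */
    fprintf(f, "I 5\n");    /* insert 5 again    -> "insert: 5 repeat"  */
    fprintf(f, "S 5\n");    /* search for 5      -> "search: 5 present" */
    fprintf(f, "D 5\n");    /* delete 5          -> "delete: 5 removed" */
    fprintf(f, "S 5\n");    /* search again      -> "search: 5 absent"  */
    fprintf(f, "P 0\n");    /* print the set; the value is ignored      */
    fclose(f);
    return 0;
}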
/************************************************************************* * *N get_selected_tile_primitives * *:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: * * Purpose: *P * This function determines all of the selected primitive rows from * the selected features of a given tile. * * This function expects a feature class relationship structure that * has been successfully created with select_feature_class_relate() * from the feature table to the primitive. The primitive table in * the feature class relate structure must have been successfully * opened for the given tile. *E *:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: * * Parameters: *A * library <input>==(library_type *) VPF library structure. * fcnum <input>==(int) feature class number of the feature table. * fcrel <input>==(fcrel_type) feature class relate structure. * feature_rows <input>==(set_type) set of selected features. * mapenv <input>==(map_environment_type *) map environment. * tile <input>==(rspf_int32) tile number. * tiledir <input>==(char *) path to the tile directory. * status <output>==(int *) status of the function: * 1 if completed, 0 if user escape. * return <output>==(set_type) set of primitives for the features * in the corresponding tile. *E *:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: * * History: *H * Barry Michaels May 1991 DOS Turbo C *E *************************************************************************/ set_type get_selected_tile_primitives( library_type *library, int fcnum, fcrel_type fcrel, set_type feature_rows, map_environment_type *mapenv, rspf_int32 tile, char *tiledir, int *status ) { int cov, degree; int feature, prim; row_type row; char *spxname[] = {"","esi","fsi","tsi","nsi","csi"}; char *brname[] = {"","ebr","fbr","tbr","nbr","cbr"}; set_type primitive_rows, tile_features; set_type primitives; rspf_int32 prim_rownum; register rspf_int32 i,pclass, start,end; char path[255], covpath[255]; linked_list_type primlist; position_type p; vpf_relate_struct rcell; ThematicIndex idx; feature = 0; prim = fcrel.nchain-1; /* Assume that fcrel.table[prim] has been opened */ primitives.size = 0; primitives.buf = NULL; cov = library->fc[fcnum].coverage; strcpy( covpath, library->cover[cov].path ); p = ll_previous(ll_last(fcrel.relate_list),fcrel.relate_list); ll_element(p,&rcell); degree = rcell.degree; for (pclass=EDGE;pclass<=CONNECTED_NODE;pclass++) { if ((pclass != library->fc[fcnum].primclass) && (library->fc[fcnum].primclass != COMPLEX_FEATURE)) continue; primitives = set_init(fcrel.table[prim].nrows+1); /* Get the set of primitive rows within the map extent */ /* Look for the spatial index file to weed out primitives in the */ /* given tile but outside of the viewing area. If a projection */ /* other than plate-carree (rectangular) is on, or if the extent */ /* crosses the meridian, the quick check is no longer valid. */ primitive_rows.size = 0; if (mapenv->projection == PLATE_CARREE && mapenv->mapextent.x1 < mapenv->mapextent.x2) { sprintf(path,"%s%s%s",covpath,tiledir,spxname[pclass]); /* 20 (below) is a fairly arbitrary cutoff of the number of */ /* primitives that make a spatial index search worth while. 
*/ if ((access(path,0)==0)&&(fcrel.table[prim].nrows > 20)) { primitive_rows = spatial_index_search(path, mapenv->mapextent.x1,mapenv->mapextent.y1, mapenv->mapextent.x2,mapenv->mapextent.y2); } else { /* Next best thing - bounding rectangle table */ sprintf(path,"%s%s%s",covpath,tiledir,brname[pclass]); if ((access(path,0)==0)&&(fcrel.table[prim].nrows > 20)) { primitive_rows = bounding_select(path,mapenv->mapextent, library->dec_degrees); } } } if (primitive_rows.size == 0) { /* Search through all the primitives */ primitive_rows=set_init(fcrel.table[prim].nrows+1); set_on(primitive_rows); } if (strcmp(tiledir,"") != 0) { tile_thematic_index_name(fcrel.table[feature], path); if ((strcmp(path,"")!=0) && (access(path,4)==0)) { /* Tile thematic index for feature table present, */ tile_features = read_thematic_index( path, (char *)&tile ); } else { tile_features = set_init(fcrel.table[feature].nrows+1); set_on(tile_features); } } else { tile_features = set_init(fcrel.table[feature].nrows+1); set_on(tile_features); } set_delete(0,tile_features); idx.fp = NULL; i = table_pos(rcell.key2,fcrel.table[prim]); if (i >= 0) { if (fcrel.table[prim].header[i].tdx) { sprintf(path,"%s%s",fcrel.table[prim].path, fcrel.table[prim].header[i].tdx); if (access(path,0)==0) idx = open_thematic_index(path); } if (fcrel.table[prim].header[i].keytype == 'U') degree = R_ONE; if (fcrel.table[prim].header[i].keytype == 'N') degree = R_MANY; if (fcrel.table[prim].header[i].keytype == 'P') degree = R_ONE; } start = set_min(tile_features); end = set_max(tile_features); /* It turns out to be MUCH faster off of a CD-ROM to */ /* read each row and discard unwanted ones than to */ /* forward seek past them. It's about the same off */ /* of a hard disk. */ fseek(fcrel.table[feature].fp,index_pos(start,fcrel.table[feature]), SEEK_SET); for (i=start;i<=end;i++) { row = read_next_row(fcrel.table[feature]); if (!set_member( i, feature_rows )) { free_row(row,fcrel.table[feature]); continue; } if (!set_member( i, tile_features )) { free_row(row,fcrel.table[feature]); continue; } if (degree == R_ONE) { prim_rownum = fc_row_number( row, fcrel, tile ); primlist = NULL; p = NULL; } else { primlist = fc_row_numbers( row, fcrel, tile, &idx ); } free_row( row, fcrel.table[feature] ); if (!primlist) { if ((prim_rownum<1)||(prim_rownum>fcrel.table[prim].nrows)) continue; if (set_member( prim_rownum, primitive_rows )) { set_insert(prim_rownum,primitives); } } else { p = ll_first(primlist); while (!ll_end(p)) { ll_element(p,&prim_rownum); if ((prim_rownum<1)|| (prim_rownum>fcrel.table[prim].nrows)) continue; if (set_member( prim_rownum, primitive_rows )) { set_insert(prim_rownum,primitives); } p = ll_next(p); } } if (primlist) ll_reset(primlist); if (kbhit()) { if (getch()==27) { *status = 0; break; } } } set_nuke(&primitive_rows); set_nuke(&tile_features); if (idx.fp) close_thematic_index(&idx); *status = 1; if (kbhit()) { if (getch()==27) { *status = 0; break; } } } return primitives; }
int main(int argc, char *argv[]) { program_name = strrchr(argv[0], '/'); program_name = program_name ? program_name + 1 : argv[0]; const char *manufacturer = 0; device = getenv("TINI_DEVICE"); if (!device) device = DEVICE; setenv("TZ", "UTC", 1); tzset(); opterr = 0; while (1) { static struct option options[] = { { "device", required_argument, 0, 'd' }, { "directory", required_argument, 0, 'D' }, { "help", no_argument, 0, 'h' }, { "overwrite", no_argument, 0, 'o' }, { "quiet", no_argument, 0, 'q' }, { "manufacturer", required_argument, 0, 'm' }, { "short-filenames", no_argument, 0, 's' }, { "log", required_argument, 0, 'l' }, { 0, 0, 0, 0 }, }; int c = getopt_long(argc, argv, ":D:d:hl:m:oqs", options, 0); if (c == -1) break; switch (c) { case 'D': if (chdir(optarg) == -1) error("chdir: %s: %s", optarg, strerror(errno)); break; case 'd': device = optarg; break; case 'h': usage(); exit(EXIT_SUCCESS); case 'l': if (strcmp(optarg, "-") == 0) logfile = stdout; else { logfile = fopen(optarg, "a"); if (!logfile) error("fopen: %s: %s", optarg, strerror(errno)); } break; case 'm': manufacturer = optarg; break; case 'o': overwrite = 1; break; case 'q': quiet = 1; break; case 's': igc_filename_format = igc_filename_format_short; break; case ':': error("option '%c' requires an argument", optopt); case '?': error("invalid option '%c'", optopt); } } flytec_t *flytec = flytec_new(device, logfile); if (!manufacturer) { flytec_pbrsnp(flytec); manufacturer = flytec->manufacturer; } if (optind == argc || strcmp(argv[optind], "do") == 0 || strcmp(argv[optind], "download") == 0) { ++optind; set_t *indexes = 0; for (; optind < argc; ++optind) indexes = set_merge(indexes, argv[optind]); tini_download(flytec, indexes, manufacturer, igc_filename_format); set_delete(indexes); } else { if (optind + 1 != argc) error("excess argument%s on command line", argc - optind == 1 ? "" : "s"); if (strcmp(argv[optind], "id") == 0) { tini_id(flytec); } else if (strcmp(argv[optind], "ig") == 0 || strcmp(argv[optind], "igc") == 0) { tini_igc(flytec); } else if (strcmp(argv[optind], "li") == 0 || strcmp(argv[optind], "list") == 0) { tini_list(flytec, manufacturer, igc_filename_format); } else { error("invalid command '%s'", argv[optind]); } } flytec_delete(flytec); if (logfile && logfile != stdout) fclose(logfile); return EXIT_SUCCESS; }
int main(int argc, char **argv) { struct option long_options[] = { // These options don't set a flag {"help", no_argument, NULL, 'h'}, {"do-not-alternate", no_argument, NULL, 'a'}, #ifndef TM_COMPILER {"contention-manager", required_argument, NULL, 'c'}, #endif /* ! TM_COMPILER */ {"duration", required_argument, NULL, 'd'}, {"initial-size", required_argument, NULL, 'i'}, {"num-threads", required_argument, NULL, 'n'}, {"range", required_argument, NULL, 'r'}, {"seed", required_argument, NULL, 's'}, {"update-rate", required_argument, NULL, 'u'}, #ifdef USE_LINKEDLIST {"unit-tx", no_argument, NULL, 'x'}, #endif /* LINKEDLIST */ {NULL, 0, NULL, 0} }; intset_t *set; int i, c, val, size, ret; unsigned long reads, updates; #ifndef TM_COMPILER char *s; unsigned long aborts, aborts_1, aborts_2, aborts_locked_read, aborts_locked_write, aborts_validate_read, aborts_validate_write, aborts_validate_commit, aborts_invalid_memory, aborts_killed, locked_reads_ok, locked_reads_failed, max_retries; stm_ab_stats_t ab_stats; #endif /* ! TM_COMPILER */ thread_data_t *data; pthread_t *threads; pthread_attr_t attr; barrier_t barrier; struct timeval start, end; struct timespec timeout; int duration = DEFAULT_DURATION; int initial = DEFAULT_INITIAL; int nb_threads = DEFAULT_NB_THREADS; int range = DEFAULT_RANGE; int seed = DEFAULT_SEED; int update = DEFAULT_UPDATE; int alternate = 1; #ifndef TM_COMPILER char *cm = NULL; #endif /* ! TM_COMPILER */ #ifdef USE_LINKEDLIST int unit_tx = 0; #endif /* LINKEDLIST */ sigset_t block_set; while(1) { i = 0; c = getopt_long(argc, argv, "ha" #ifndef TM_COMPILER "c:" #endif /* ! TM_COMPILER */ "d:i:n:r:s:u:" #ifdef USE_LINKEDLIST "x" #endif /* LINKEDLIST */ , long_options, &i); if(c == -1) break; if(c == 0 && long_options[i].flag == 0) c = long_options[i].val; switch(c) { case 0: /* Flag is automatically set */ break; case 'h': printf("intset -- STM stress test " #if defined(USE_LINKEDLIST) "(linked list)\n" #elif defined(USE_RBTREE) "(red-black tree)\n" #elif defined(USE_SKIPLIST) "(skip list)\n" #elif defined(USE_HASHSET) "(hash set)\n" #endif /* defined(USE_HASHSET) */ "\n" "Usage:\n" " intset [options...]\n" "\n" "Options:\n" " -h, --help\n" " Print this message\n" " -a, --do-not-alternate\n" " Do not alternate insertions and removals\n" #ifndef TM_COMPILER " -c, --contention-manager <string>\n" " Contention manager for resolving conflicts (default=suicide)\n" #endif /* ! TM_COMPILER */ " -d, --duration <int>\n" " Test duration in milliseconds (0=infinite, default=" XSTR(DEFAULT_DURATION) ")\n" " -i, --initial-size <int>\n" " Number of elements to insert before test (default=" XSTR(DEFAULT_INITIAL) ")\n" " -n, --num-threads <int>\n" " Number of threads (default=" XSTR(DEFAULT_NB_THREADS) ")\n" " -r, --range <int>\n" " Range of integer values inserted in set (default=" XSTR(DEFAULT_RANGE) ")\n" " -s, --seed <int>\n" " RNG seed (0=time-based, default=" XSTR(DEFAULT_SEED) ")\n" " -u, --update-rate <int>\n" " Percentage of update transactions (default=" XSTR(DEFAULT_UPDATE) ")\n" #ifdef USE_LINKEDLIST " -x, --unit-tx\n" " Use unit transactions\n" #endif /* LINKEDLIST */ ); exit(0); case 'a': alternate = 0; break; #ifndef TM_COMPILER case 'c': cm = optarg; break; #endif /* ! 
TM_COMPILER */ case 'd': duration = atoi(optarg); break; case 'i': initial = atoi(optarg); break; case 'n': nb_threads = atoi(optarg); break; case 'r': range = atoi(optarg); break; case 's': seed = atoi(optarg); break; case 'u': update = atoi(optarg); break; #ifdef USE_LINKEDLIST case 'x': unit_tx++; break; #endif /* LINKEDLIST */ case '?': printf("Use -h or --help for help\n"); exit(0); default: exit(1); } } assert(duration >= 0); assert(initial >= 0); assert(nb_threads > 0); assert(range > 0 && range >= initial); assert(update >= 0 && update <= 100); #if defined(USE_LINKEDLIST) printf("Set type : linked list\n"); #elif defined(USE_RBTREE) printf("Set type : red-black tree\n"); #elif defined(USE_SKIPLIST) printf("Set type : skip list\n"); #elif defined(USE_HASHSET) printf("Set type : hash set\n"); #endif /* defined(USE_HASHSET) */ #ifndef TM_COMPILER printf("CM : %s\n", (cm == NULL ? "DEFAULT" : cm)); #endif /* ! TM_COMPILER */ printf("Duration : %d\n", duration); printf("Initial size : %d\n", initial); printf("Nb threads : %d\n", nb_threads); printf("Value range : %d\n", range); printf("Seed : %d\n", seed); printf("Update rate : %d\n", update); printf("Alternate : %d\n", alternate); #ifdef USE_LINKEDLIST printf("Unit tx : %d\n", unit_tx); #endif /* LINKEDLIST */ printf("Type sizes : int=%d/long=%d/ptr=%d/word=%d\n", (int)sizeof(int), (int)sizeof(long), (int)sizeof(void *), (int)sizeof(size_t)); timeout.tv_sec = duration / 1000; timeout.tv_nsec = (duration % 1000) * 1000000; if ((data = (thread_data_t *)malloc(nb_threads * sizeof(thread_data_t))) == NULL) { perror("malloc"); exit(1); } if ((threads = (pthread_t *)malloc(nb_threads * sizeof(pthread_t))) == NULL) { perror("malloc"); exit(1); } if (seed == 0) srand((int)time(NULL)); else srand(seed); set = set_new(INIT_SET_PARAMETERS); stop = 0; /* Thread-local seed for main thread */ rand_init(main_seed); /* Init STM */ printf("Initializing STM\n"); TM_INIT; #ifndef TM_COMPILER if (stm_get_parameter("compile_flags", &s)) printf("STM flags : %s\n", s); if (cm != NULL) { if (stm_set_parameter("cm_policy", cm) == 0) printf("WARNING: cannot set contention manager \"%s\"\n", cm); } #endif /* ! TM_COMPILER */ if (alternate == 0 && range != initial * 2) printf("WARNING: range is not twice the initial set size\n"); /* Populate set */ printf("Adding %d entries to set\n", initial); i = 0; while (i < initial) { val = rand_range(range, main_seed) + 1; if (set_add(set, val, 0)) i++; } size = set_size(set); printf("Set size : %d\n", size); /* Access set from all threads */ barrier_init(&barrier, nb_threads + 1); pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); for (i = 0; i < nb_threads; i++) { printf("Creating thread %d\n", i); data[i].range = range; data[i].update = update; data[i].alternate = alternate; #ifdef USE_LINKEDLIST data[i].unit_tx = unit_tx; #endif /* LINKEDLIST */ data[i].nb_add = 0; data[i].nb_remove = 0; data[i].nb_contains = 0; data[i].nb_found = 0; #ifndef TM_COMPILER data[i].nb_aborts = 0; data[i].nb_aborts_1 = 0; data[i].nb_aborts_2 = 0; data[i].nb_aborts_locked_read = 0; data[i].nb_aborts_locked_write = 0; data[i].nb_aborts_validate_read = 0; data[i].nb_aborts_validate_write = 0; data[i].nb_aborts_validate_commit = 0; data[i].nb_aborts_invalid_memory = 0; data[i].nb_aborts_killed = 0; data[i].locked_reads_ok = 0; data[i].locked_reads_failed = 0; data[i].max_retries = 0; #endif /* ! 
TM_COMPILER */ data[i].diff = 0; rand_init(data[i].seed); data[i].set = set; data[i].barrier = &barrier; if (pthread_create(&threads[i], &attr, test, (void *)(&data[i])) != 0) { fprintf(stderr, "Error creating thread\n"); exit(1); } } pthread_attr_destroy(&attr); /* Start threads */ barrier_cross(&barrier); printf("STARTING...\n"); gettimeofday(&start, NULL); if (duration > 0) { nanosleep(&timeout, NULL); } else { sigemptyset(&block_set); sigsuspend(&block_set); } stop = 1; gettimeofday(&end, NULL); printf("STOPPING...\n"); /* Wait for thread completion */ for (i = 0; i < nb_threads; i++) { if (pthread_join(threads[i], NULL) != 0) { fprintf(stderr, "Error waiting for thread completion\n"); exit(1); } } duration = (end.tv_sec * 1000 + end.tv_usec / 1000) - (start.tv_sec * 1000 + start.tv_usec / 1000); #ifndef TM_COMPILER aborts = 0; aborts_1 = 0; aborts_2 = 0; aborts_locked_read = 0; aborts_locked_write = 0; aborts_validate_read = 0; aborts_validate_write = 0; aborts_validate_commit = 0; aborts_invalid_memory = 0; aborts_killed = 0; locked_reads_ok = 0; locked_reads_failed = 0; max_retries = 0; #endif /* ! TM_COMPILER */ reads = 0; updates = 0; for (i = 0; i < nb_threads; i++) { printf("Thread %d\n", i); printf(" #add : %lu\n", data[i].nb_add); printf(" #remove : %lu\n", data[i].nb_remove); printf(" #contains : %lu\n", data[i].nb_contains); printf(" #found : %lu\n", data[i].nb_found); #ifndef TM_COMPILER printf(" #aborts : %lu\n", data[i].nb_aborts); printf(" #lock-r : %lu\n", data[i].nb_aborts_locked_read); printf(" #lock-w : %lu\n", data[i].nb_aborts_locked_write); printf(" #val-r : %lu\n", data[i].nb_aborts_validate_read); printf(" #val-w : %lu\n", data[i].nb_aborts_validate_write); printf(" #val-c : %lu\n", data[i].nb_aborts_validate_commit); printf(" #inv-mem : %lu\n", data[i].nb_aborts_invalid_memory); printf(" #killed : %lu\n", data[i].nb_aborts_killed); printf(" #aborts>=1 : %lu\n", data[i].nb_aborts_1); printf(" #aborts>=2 : %lu\n", data[i].nb_aborts_2); printf(" #lr-ok : %lu\n", data[i].locked_reads_ok); printf(" #lr-failed : %lu\n", data[i].locked_reads_failed); printf(" Max retries : %lu\n", data[i].max_retries); aborts += data[i].nb_aborts; aborts_1 += data[i].nb_aborts_1; aborts_2 += data[i].nb_aborts_2; aborts_locked_read += data[i].nb_aborts_locked_read; aborts_locked_write += data[i].nb_aborts_locked_write; aborts_validate_read += data[i].nb_aborts_validate_read; aborts_validate_write += data[i].nb_aborts_validate_write; aborts_validate_commit += data[i].nb_aborts_validate_commit; aborts_invalid_memory += data[i].nb_aborts_invalid_memory; aborts_killed += data[i].nb_aborts_killed; locked_reads_ok += data[i].locked_reads_ok; locked_reads_failed += data[i].locked_reads_failed; if (max_retries < data[i].max_retries) max_retries = data[i].max_retries; #endif /* ! 
TM_COMPILER */ reads += data[i].nb_contains; updates += (data[i].nb_add + data[i].nb_remove); size += data[i].diff; } printf("Set size : %d (expected: %d)\n", set_size(set), size); ret = (set_size(set) != size); printf("Duration : %d (ms)\n", duration); printf("#txs : %lu (%f / s)\n", reads + updates, (reads + updates) * 1000.0 / duration); printf("#read txs : %lu (%f / s)\n", reads, reads * 1000.0 / duration); printf("#update txs : %lu (%f / s)\n", updates, updates * 1000.0 / duration); #ifndef TM_COMPILER printf("#aborts : %lu (%f / s)\n", aborts, aborts * 1000.0 / duration); printf(" #lock-r : %lu (%f / s)\n", aborts_locked_read, aborts_locked_read * 1000.0 / duration); printf(" #lock-w : %lu (%f / s)\n", aborts_locked_write, aborts_locked_write * 1000.0 / duration); printf(" #val-r : %lu (%f / s)\n", aborts_validate_read, aborts_validate_read * 1000.0 / duration); printf(" #val-w : %lu (%f / s)\n", aborts_validate_write, aborts_validate_write * 1000.0 / duration); printf(" #val-c : %lu (%f / s)\n", aborts_validate_commit, aborts_validate_commit * 1000.0 / duration); printf(" #inv-mem : %lu (%f / s)\n", aborts_invalid_memory, aborts_invalid_memory * 1000.0 / duration); printf(" #killed : %lu (%f / s)\n", aborts_killed, aborts_killed * 1000.0 / duration); printf("#aborts>=1 : %lu (%f / s)\n", aborts_1, aborts_1 * 1000.0 / duration); printf("#aborts>=2 : %lu (%f / s)\n", aborts_2, aborts_2 * 1000.0 / duration); printf("#lr-ok : %lu (%f / s)\n", locked_reads_ok, locked_reads_ok * 1000.0 / duration); printf("#lr-failed : %lu (%f / s)\n", locked_reads_failed, locked_reads_failed * 1000.0 / duration); printf("Max retries : %lu\n", max_retries); for (i = 0; stm_get_ab_stats(i, &ab_stats) != 0; i++) { printf("Atomic block : %d\n", i); printf(" #samples : %lu\n", ab_stats.samples); printf(" Mean : %f\n", ab_stats.mean); printf(" Variance : %f\n", ab_stats.variance); printf(" Min : %f\n", ab_stats.min); printf(" Max : %f\n", ab_stats.max); printf(" 50th perc. : %f\n", ab_stats.percentile_50); printf(" 90th perc. : %f\n", ab_stats.percentile_90); printf(" 95th perc. : %f\n", ab_stats.percentile_95); } #endif /* ! TM_COMPILER */ /* Delete set */ set_delete(set); /* Cleanup STM */ TM_EXIT; free(threads); free(data); return ret; }