static void makeflow_gc_all( struct dag *d, struct batch_queue *queue, int maxfiles ) { int collected = 0; struct dag_file *f; char *name; timestamp_t start_time, stop_time; /* This will walk the table of files to collect and will remove any * that are below or equal to the threshold. */ start_time = timestamp_get(); hash_table_firstkey(d->files); while(hash_table_nextkey(d->files, &name, (void **) &f) && collected < maxfiles) { if(f->state == DAG_FILE_STATE_COMPLETE && !dag_file_is_source(f) && !set_lookup(d->outputs, f) && !set_lookup(d->inputs, f) && makeflow_file_clean(d, queue, f, 0)){ collected++; } } stop_time = timestamp_get(); /* Record total amount of files collected to Makeflowlog. */ if(collected > 0) { makeflow_gc_collected += collected; makeflow_log_gc_event(d,collected,stop_time-start_time,makeflow_gc_collected); } }
/*
 * Compute the epsilon-closure of the state set currently held in
 * temp_move[0..states-1], extending temp_move in place with every state
 * reachable through epsilon transitions, then intern the resulting set
 * via set_lookup().  Uses the globals mainloop (generation counter),
 * marktable / e_table (per-state visit stamps), e_closure_memo (per-state
 * epsilon-transition chains) and the ptr_stack for the DFS worklist.
 */
inline static int e_closure(int states) {

  int i, set_size;
  struct e_closure_memo *ptr;

  /* e_closure extends the list of states which are reachable */
  /* and appends these to e_table */

  /* No epsilon symbol in the alphabet: the closure is the input set itself. */
  if (epsilon_symbol == -1)
    return(set_lookup(temp_move, states));

  if (states == 0)
    return -1;

  /* NOTE(review): mainloop is decremented here and restored at the end,
   * so the *previous* generation value serves as this closure's visit
   * stamp — presumably to keep it distinct from the caller's stamp. */
  mainloop--;

  set_size = states;

  for (i = 0; i < states; i++) {

    /* State number we want to do e-closure on */
    ptr = e_closure_memo + *(temp_move+i);
    if (ptr->target == NULL)
      continue;                    /* no epsilon transitions from this state */
    ptr_stack_push(ptr);

    /* Depth-first traversal of epsilon transitions. */
    while (!(ptr_stack_isempty())) {

      ptr = ptr_stack_pop();

      /* Don't follow if already seen */
      if (*(marktable+ptr->state) == mainloop)
        continue;

      ptr->mark = mainloop;
      *(marktable+ptr->state) = mainloop;

      /* Add to tail of list (e_table prevents duplicate appends). */
      if (*(e_table+(ptr->state)) != mainloop) {
        *(temp_move+set_size) = ptr->state;
        *(e_table+(ptr->state)) = mainloop;
        set_size++;
      }

      if (ptr->target == NULL)
        continue;

      /* Traverse chain: push every unvisited epsilon successor. */
      for (; ptr != NULL ; ptr = ptr->next) {
        if (ptr->target->mark != mainloop) {
          /* Push */
          ptr->target->mark = mainloop;
          ptr_stack_push(ptr->target);
        }
      }
    }
  }
  mainloop++;
  return(set_lookup(temp_move, set_size));
}
/* Determine the set of direct children of node n.
 * A descendant is a "direct child" only if it is not already accounted
 * for through some other descendant's subtree.  Recurses into any
 * descendant whose children have not yet been updated, then merges the
 * accounted sets upward before classifying each descendant. */
void dag_node_footprint_determine_children(struct dag_node *n)
{
	struct dag_node *child;

	if(!n->footprint)
		n->footprint = dag_node_footprint_create();

	/* Pass 1: make sure every descendant has computed its own direct
	 * children, and fold its accounted set into ours. */
	set_first_element(n->descendants);
	while((child = set_next_element(n->descendants))) {
		if(!(child->footprint && child->footprint->children_updated)) {
			dag_node_footprint_determine_children(child);
		}
		set_insert_set(n->footprint->accounted, child->footprint->accounted);
	}

	/* Pass 2: any descendant not reachable through another descendant
	 * is a direct child; record and account for it. */
	set_first_element(n->descendants);
	while((child = set_next_element(n->descendants))) {
		if(!set_lookup(n->footprint->accounted, child)) {
			set_insert(n->footprint->direct_children, child);
			set_insert(n->footprint->accounted, child);
		}
	}

	n->footprint->children_updated = 1;
}
/* Return TRUE if the symbol has an entry for the given tuple.
 * An entry exists either explicitly in the hash, or implicitly through
 * the symbol's default value when the tuple is a member of its set. */
Bool symbol_has_entry(const Symbol* sym, const Tuple* tuple)
{
   assert(symbol_is_valid(sym));
   assert(tuple_is_valid(tuple));

   /* Explicit entry present? */
   if (hash_has_entry(sym->hash, tuple))
      return 1;

   /* Otherwise a default value covers any tuple in the index set. */
   return sym->deflt != NULL && set_lookup(sym->set, tuple);
}
/* Look up the option stored under key and parse it as a decimal integer.
 * Returns 0 when the key is missing, the stored value is NULL, or the
 * value does not start with a number (matching atoi's 0-on-garbage
 * behavior).  Uses strtol instead of atoi because atoi has undefined
 * behavior on overflow and cannot report parse failure. */
int options_get_int(const char* key)
{
	void* value = NULL;
	const char* text;
	char* end;
	long num;

	if (!set_lookup(options.parameters, key, &value))
		return 0;

	text = (const char*)value;
	if (text == NULL)
		return 0;

	num = strtol(text, &end, 10);
	if (end == text)
		return 0;  /* no digits at all */

	return (int)num;
}
/* Look up the option stored under key and return it as a string.
 * Returns NULL when the key is not present. */
const char* options_get_string(const char* key)
{
	void* value = NULL;

	if (set_lookup(options.parameters, key, &value))
		return (const char*)value;

	return NULL;
}
/* Add an entry to a symbol, taking ownership of the entry.
 * Entry is eaten.
 * No check is done if entry->tuple is a member of sym->set !
 * This has to be done before.
 */
void symbol_add_entry(Symbol* sym, Entry* entry)
{
   const Tuple* tuple;

   assert(symbol_is_valid(sym));
   assert(entry_is_valid(entry));
   assert(sym->used <= sym->size);

   /* Grow the entry array geometrically (extend doubles each time). */
   if (sym->used == sym->size)
   {
      sym->size   += sym->extend;
      sym->extend += sym->extend;

      /* NOTE(review): allocation failure is only caught by assert, so a
       * failed realloc goes unnoticed with NDEBUG — project convention. */
      sym->entry = realloc(
         sym->entry, (size_t)sym->size * sizeof(*sym->entry));

      assert(sym->entry != NULL);
   }
   assert(sym->used < sym->size);

   tuple = entry_get_tuple(entry);

   /* There is no index set for the internal symbol. */
   assert(!strcmp(sym->name, SYMBOL_NAME_INTERNAL) || set_lookup(sym->set, tuple));

   if (hash_has_entry(sym->hash, tuple))
   {
      /* Duplicate tuple: warn and free the rejected entry (it is "eaten"). */
      if (stmt_trigger_warning(166))
      {
         fprintf(stderr, "--- Warning 166: Duplicate element ");
         tuple_print(stderr, tuple);
         fprintf(stderr, " for symbol %s rejected\n", sym->name);
      }
      entry_free(entry);
   }
   else
   {
      /* If not already determined, fix the symbol's type here from
       * the first entry added. */
      if ((sym->type == SYM_ERR) && (sym->used == 0))
         sym->type = entry_get_type(entry);

      assert(sym->type != SYM_ERR);

      hash_add_entry(sym->hash, entry);

      sym->entry[sym->used] = entry;
      sym->used++;
   }
}
/*
 * Delete files produced by the workflow according to clean_depth:
 * ALL removes everything, OUTPUTS removes only declared outputs, and
 * INTERMEDIATES removes everything that is not a declared output.
 * Source files are never touched.  Nested Makeflow jobs are cleaned
 * recursively by re-running their command with "-c".
 */
void makeflow_clean(struct dag *d, struct batch_queue *queue, makeflow_clean_depth clean_depth)
{
	struct dag_file *f;
	char *filename;
	struct dag_node *node;

	hash_table_firstkey(d->files);
	while(hash_table_nextkey(d->files, &filename, (void **) &f)) {
		int quiet;
		int is_output;

		/* Workflow sources are inputs; never delete them. */
		if(dag_file_is_source(f))
			continue;

		/* Only announce removal of files that are expected on disk. */
		quiet = dag_file_should_exist(f) ? 0 : 1;

		is_output = (set_lookup(d->outputs, f) != 0);

		if(clean_depth == MAKEFLOW_CLEAN_ALL
		   || (clean_depth == MAKEFLOW_CLEAN_OUTPUTS && is_output)
		   || (clean_depth == MAKEFLOW_CLEAN_INTERMEDIATES && !is_output)) {
			makeflow_file_clean(d, queue, f, quiet);
		}
	}

	/* If the node is a Makeflow job, then we should recursively call the
	 * clean operation on it. */
	for(node = d->nodes; node; node = node->next) {
		if(!node->nested_job)
			continue;

		char *command = xxmalloc(sizeof(char) * (strlen(node->command) + 4));
		sprintf(command, "%s -c", node->command);
		/* XXX this should use the batch job interface for consistency */
		makeflow_node_export_variables(d, node);
		system(command);
		free(command);
	}
}
/* Liefert NULL wenn nicht gefunden. * Falls ein default zurueckgegeben wird, stimmt "tuple" nicht mit * entry->tuple ueberein. */ const Entry* symbol_lookup_entry(const Symbol* sym, const Tuple* tuple) { const Entry* entry; assert(symbol_is_valid(sym)); assert(tuple_is_valid(tuple)); entry = hash_lookup_entry(sym->hash, tuple); if (NULL == entry && sym->deflt != NULL && set_lookup(sym->set, tuple)) entry = sym->deflt; return entry; }
/*
 * Sequential Minimal Optimization driver (Keerthi et al. variant).
 * Trains the SVM over ndocs documents, updating weights (alphas) and
 * the error cache in place; writes the bias into *a_b and the support
 * vector count into *nsv.  For the linear kernel (svm_kernel_type == 0)
 * a primal weight vector is maintained in *W.  Returns nonzero if any
 * multiplier changed.
 *
 * Index sets (Keerthi's notation):
 *   I0 = unbound SVs (0 < a < C), I1 = positive with a == 0,
 *   I2 = negative at bound, I3 = positive at bound, I4 = negative with a == 0.
 */
int smo(bow_wv **docs, int *yvect, double *weights, double *a_b, double **W,
        int ndocs, double *error, float *cvect, int *nsv) {
  int changed;               /* return value: did any alpha change */
  int inspect_all;           /* 1 => examine-all phase, 0 => I0-only phase */
  struct svm_smo_model model;
  int nchanged;              /* changes made in the current pass */
  int num_words;
  double *original_weights;  /* NOTE(review): set to NULL and never used */
  int i,j,k,n;

  num_words = bow_num_words();

  m1 = m2 = m3 = m4 = 0;

  model.n_pair_suc = model.n_pair_tot = model.n_single_suc =
    model.n_single_tot = model.n_outer = 0;

  model.nsv = *nsv;
  model.docs = docs;
  model.error = error;
  model.ndocs = ndocs;
  model.cvect = cvect;

  original_weights = NULL;

  /* Linear kernel with no caller-supplied W: allocate and own it. */
  if (svm_kernel_type == 0 && !(*W)) {
    *W = model.W = (double *) malloc(sizeof(double)*num_words);
  } else {
    model.W = NULL;
  }

  model.weights = weights;
  model.yvect = yvect;

  /* figure out the # of positives */
  for (i=j=k=n=0; i<ndocs; i++) {
    if (yvect[i] == 1) {
      k = i;
      j++;
    } else {
      n = i;
    }
  }
  /* k is set to the last positive example found, n is the last negative */

  make_set(ndocs,ndocs,&(model.I0));
  make_set(ndocs,j,&(model.I1));
  make_set(ndocs,ndocs-j,&(model.I2));
  make_set(ndocs,j,&(model.I3));
  make_set(ndocs,ndocs-j,&(model.I4));

  /* this is the code which initializes the sets according to the weights values */
  for (i=0; i<ndocs; i++) {
    struct set *s;
    if (weights[i] > svm_epsilon_a && weights[i] < cvect[i] - svm_epsilon_a) {
      s = &(model.I0);          /* unbound support vector */
    } else if (yvect[i] == 1) {
      if (weights[i] < svm_epsilon_a)
        s = &(model.I1);        /* positive, alpha == 0 */
      else
        s = &(model.I3);        /* positive, at bound C */
    } else {
      if (weights[i] < svm_epsilon_a)
        s = &(model.I4);        /* negative, alpha == 0 */
      else
        s = &(model.I2);        /* negative, at bound C */
    }
    set_insert(i, s);
  }

  if (model.W) {
    for (i=0; i<num_words; i++) {
      model.W[i] = 0.0;
    }
  }

  if (model.I0.ilength == 0) {
    /* Cold start: no unbound SVs yet, seed bup/blow from the last
     * positive (k) and last negative (n) examples. */
    model.blow = 1;
    model.bup = -1;
    model.iup = k;
    model.ilow = n;
    error[k] = -1;
    error[n] = 1;
  } else {
    /* compute bup & blow */
    int efrom, nitems;
    int *items;
    double e;

    nitems = model.I0.ilength;
    items = model.I0.items;

    /* blow = max error over I0 (efrom is always set since nitems > 0). */
    for (i=0, e=-1*MAXDOUBLE; i<nitems; i++) {
      if (e < error[items[i]]) {
        e = error[items[i]];
        efrom = items[i];
      }
    }
    model.blow = e;
    model.ilow = efrom;

    /* bup = min error over I0. */
    for (i=0, e=MAXDOUBLE; i<nitems; i++) {
      if (e > error[items[i]]) {
        e = error[items[i]];
        efrom = items[i];
      }
    }
    model.bup = e;
    model.iup = efrom;

    if (model.W) {
      /* Rebuild the primal weight vector from the unbound SVs... */
      for (i=0; i<nitems; i++) {
        for (j=0; j<docs[items[i]]->num_entries; j++) {
          model.W[docs[items[i]]->entry[j].wi] +=
            yvect[items[i]] * weights[items[i]] * docs[items[i]]->entry[j].weight;
        }
      }
      /* also need to include bound sv's (I2 & I3) */
      for (k=0, nitems=model.I2.ilength, items=model.I2.items;
           k<2;
           k++, nitems=model.I3.ilength, items=model.I3.items) {
        for (i=0; i<nitems; i++) {
          for (j=0; j<docs[items[i]]->num_entries; j++) {
            model.W[docs[items[i]]->entry[j].wi] +=
              yvect[items[i]] * weights[items[i]] * docs[items[i]]->entry[j].weight;
          }
        }
      }
    }
  }

  if (!model.W) {
    model.W = *W;
  }

  if (svm_weight_style == WEIGHTS_PER_MODEL) {
    kcache_init(ndocs);
  }

  inspect_all = 1;
  nchanged = 0;
  changed = 0;

  /* Main SMO loop: alternate examine-all passes with I0-only passes
   * until a full examine-all pass makes no change. */
  while (nchanged || inspect_all) {
    nchanged = 0;
#ifdef DEBUG
    check_inv(&model,ndocs);
#endif
    model.n_outer ++;
    PRINT_SMO_PROGRESS(stderr, &model);
    fflush(stderr);

    if (1 && inspect_all) {
      /* Examine every example once, starting at a random offset and
       * wrapping around (two sub-ranges: [start,ndocs) then [0,start)). */
      int ub = ndocs;
      i=j=random() % ndocs;
      for (k=0; k<2; k++,ub=j,i=0) {
        for (; i<ub; i++) {
          nchanged += opt_single(i, &model);
#ifdef DEBUG
          check_inv(&model,ndocs);
#endif
        }
      }
      inspect_all = 0;
    } else {
      /* greg's modification to keerthi, et al's modification 2 */
      /* loop of optimizing all pairwise in a row with all elements
       * in I0 (just like above, but only those in I0) */
      do {
        nchanged = 0;
        /* here's the continuous iup/ilow loop */
        while (1) {
          /* Refresh cached errors for iup/ilow if they left I0. */
          if (!set_lookup(model.iup, &(model.I0))) {
            error[model.iup] = smo_evaluate_error(&model,model.iup);
          }
          if (!set_lookup(model.ilow, &(model.I0))) {
            error[model.ilow] = smo_evaluate_error(&model,model.ilow);
          }
          if (opt_pair(model.iup, model.ilow, &model)) {
#ifdef DEBUG
            check_inv(&model,ndocs);
#endif
            nchanged ++;
          } else {
            break;
          }
          /* Stop once the KKT gap is within tolerance. */
          if (model.bup > model.blow - 2*svm_epsilon_crit)
            break;
        }
        if (nchanged) {
          changed = 1;
        }
        nchanged = 0;
        /* now inspect all of the elements in I0 */
        {
          int ub = ndocs;
          i=j=random() % ndocs;
          for (k=0; k<2; k++,ub=j,i=0) {
            for (; i<ub; i++) {
              if (set_lookup(i, &(model.I0))) {
                nchanged += opt_single(i, &model);
#ifdef DEBUG
                check_inv(&model,ndocs);
#endif
              }
            }
          }
        }
      } while (nchanged);
      /* end of the I0-only loop */
      if (nchanged) {
        changed = 1;
      }
      inspect_all = 1;
    }
    /* note: both of the above blocks know when they are done, so they
     * flip inspect_all themselves */
    if (nchanged) {
      changed = 1;
    }
  }

  free_set(&model.I0);
  free_set(&model.I1);
  free_set(&model.I2);
  free_set(&model.I3);
  free_set(&model.I4);

  if (svm_weight_style == WEIGHTS_PER_MODEL) {
    kcache_clear();
  }

  if (svm_verbosity > 3)
    fprintf(stderr,"\n");

  //printf("bup=%f, blow=%f\n",model.bup,model.blow);
  /* Bias is the midpoint of the final bup/blow bracket. */
  *a_b = (model.bup + model.blow) / 2;

  /* Count nonzero primal weights (j) for the linear kernel.
   * NOTE(review): the count is computed but not reported anywhere. */
  if (svm_kernel_type == 0) {
    for (i=j=0; i<num_words; i++) {
      if (model.W[i] != 0.0)
        j++;
    }
  }

  //printf("m1: %d, m2: %d, m3: %d, m4: %d", m1,m2,m3,m4);
  *nsv = model.nsv;

  return (changed);
}
/* Examine a single example ex2 and, if it violates the KKT conditions
 * beyond tolerance, pick a partner example and attempt a pairwise
 * optimization step.  Returns 1 if a step succeeded, 0 otherwise.
 * this function is only called when all examples are being queried (ie.
 * the examine_all phase). */
int opt_single(int ex2, struct svm_smo_model *ms) {
  double *error;
  int ndocs;        /* NOTE(review): unused local, kept as-is */
  double *weights;
  int *yvect;
  double a2;        /* NOTE(review): unused local, kept as-is */
  double e2;
  int y2;           /* NOTE(review): unused local, kept as-is */

  ms->n_single_tot ++;

  error = ms->error;
  ndocs = ms->ndocs;
  weights = ms->weights;
  yvect = ms->yvect;

  y2 = ms->yvect[ex2];
  a2 = weights[ex2];

  /* Get ex2's error: cached for members of I0, recomputed otherwise.
   * Recomputation may also improve the bup/blow bracket. */
  if (set_lookup(ex2, &(ms->I0))) {
    e2 = error[ex2];
  } else {
    e2 = error[ex2] = smo_evaluate_error(ms, ex2);
    if (set_lookup(ex2, &(ms->I1)) || set_lookup(ex2, &(ms->I2))) {
      if (e2 < ms->bup) {
        ms->iup = ex2;
        ms->bup = e2;
      }
    } else if (!set_lookup(ex2, &(ms->I0))) {
      /* must be in I3 or I4 (the I0 re-test is redundant here: this
       * branch is only reached when ex2 was not in I0) */
      if (e2 > ms->blow) {
        ms->ilow = ex2;
        ms->blow = e2;
      }
    }
  }

  {
    int opt=1;      /* 1 => ex2 satisfies KKT within tolerance */
    int ex1;

    /* Violation check against blow (applies to I0, I1, I2 members). */
    if (set_lookup(ex2, &(ms->I0)) || set_lookup(ex2, &(ms->I1)) || set_lookup(ex2, &(ms->I2))) {
      if (ms->blow-e2 > 2*svm_epsilon_crit) {
        opt = 0;
        ex1 = ms->ilow;
      }
    }
    /* Violation check against bup (applies to I0, I3, I4 members). */
    if (set_lookup(ex2, &(ms->I0)) || set_lookup(ex2, &(ms->I3)) || set_lookup(ex2, &(ms->I4))) {
      if (e2-ms->bup > 2*svm_epsilon_crit) {
        opt = 0;
        ex1 = ms->iup;
      }
    }
    if (opt == 1) {
      kcache_age();
      return 0;
    }

    /* if we get here, then opt == 0 and ex1 was set to a violating
     * partner above */

    /* For I0 members both checks may fire; pick the side with the
     * larger violation. */
    if (set_lookup(ex2, &(ms->I0))) {
      if (ms->blow > 2*e2 - ms->bup) {
        ex1 = ms->ilow;
      } else {
        ex1 = ms->iup;
      }
    }

    if (!set_lookup(ex1, &(ms->I0))) {
      /* not in the cache & it needs to be */
      error[ex1] = smo_evaluate_error(ms, ex1);
    }

    kcache_age();
    if (opt_pair(ex1, ex2, ms)) {
      ms->n_single_suc ++;
      return 1;
    } else {
      return 0;
    }
  }
}
/* Test harness entry point: build one address-translation method from
 * command-line options, translate the single address argument with it,
 * and report leaked addrxlat references on exit. */
int main(int argc, char **argv)
{
	unsigned long long vaddr;
	char *endp;
	addrxlat_ctx_t *ctx;
	addrxlat_cb_t cb = {
		.read32 = read32,
		.read64 = read64,
		.read_caps = (ADDRXLAT_CAPS(ADDRXLAT_MACHPHYSADDR) |
			      ADDRXLAT_CAPS(ADDRXLAT_KVADDR))
	};
	addrxlat_meth_t pgt, linear, lookup, memarr, *meth;
	int opt;
	unsigned long refcnt;
	int rc;

	ctx = NULL;
	meth = NULL;

	/* Pre-initialize one method descriptor per translation kind;
	 * the options below select and fill in exactly one of them. */
	pgt.kind = ADDRXLAT_PGT;
	pgt.target_as = ADDRXLAT_MACHPHYSADDR;
	pgt.param.pgt.root.as = ADDRXLAT_NOADDR;
	pgt.param.pgt.root.addr = 0;

	linear.kind = ADDRXLAT_LINEAR;
	linear.target_as = ADDRXLAT_MACHPHYSADDR;
	linear.param.linear.off = 0;

	lookup.kind = ADDRXLAT_LOOKUP;
	lookup.target_as = ADDRXLAT_MACHPHYSADDR;
	lookup.param.lookup.endoff = 0;

	memarr.kind = ADDRXLAT_MEMARR;
	memarr.target_as = ADDRXLAT_MACHPHYSADDR;
	memarr.param.memarr.base.as = ADDRXLAT_NOADDR;

	while ((opt = getopt_long(argc, argv, "he:f:l:m:pr:t:", opts, NULL)) != -1) {
		switch (opt) {
		case 'f':	/* paging form (selects page-table method) */
			meth = &pgt;
			rc = set_paging_form(&meth->param.pgt.pf, optarg);
			if (rc != TEST_OK)
				return rc;
			break;

		case 'r':	/* page-table root address */
			meth = &pgt;
			rc = set_root(&meth->param.pgt.root, optarg);
			if (rc != TEST_OK)
				return rc;
			break;

		case 'e':	/* add a lookup-table entry */
			rc = add_entry(optarg);
			if (rc != TEST_OK)
				return rc;
			break;

		case 'l':	/* linear offset method */
			meth = &linear;
			rc = set_linear(&meth->param.linear.off, optarg);
			if (rc != TEST_OK)
				return rc;
			break;

		case 'm':	/* memory-array method */
			meth = &memarr;
			rc = set_memarr(&meth->param.memarr, optarg);
			if (rc != TEST_OK)
				return rc;
			break;

		case 'p':	/* page-table method with defaults */
			meth = &pgt;
			break;

		case 't':	/* table-lookup method; set_lookup here is a
				 * local option parser for the endoff value,
				 * not the set-membership API. */
			meth = &lookup;
			rc = set_lookup(&meth->param.lookup.endoff, optarg);
			if (rc != TEST_OK)
				return rc;
			break;

		case 'h':
		default:
			usage(argv[0]);
			rc = (opt == 'h') ? TEST_OK : TEST_ERR;
			goto out;
		}
	}

	if (meth == NULL) {
		fputs("No translation method specified\n", stderr);
		return TEST_ERR;
	}

	/* Exactly one non-empty positional argument: the address. */
	if (argc - optind != 1 || !*argv[optind]) {
		fprintf(stderr, "Usage: %s <addr>\n", argv[0]);
		return TEST_ERR;
	}

	vaddr = strtoull(argv[optind], &endp, 0);
	if (*endp) {
		fprintf(stderr, "Invalid address: %s\n", argv[optind]);
		return TEST_ERR;
	}

	/* Attach any -e entries collected above to the lookup method. */
	lookup.param.lookup.nelem = nentries;
	lookup.param.lookup.tbl = entries;

	ctx = addrxlat_ctx_new();
	if (!ctx) {
		perror("Cannot initialize address translation context");
		rc = TEST_ERR;
		goto out;
	}
	cb.data = ctx;
	addrxlat_ctx_set_cb(ctx, &cb);

	rc = do_xlat(ctx, meth, vaddr);

 out:
	/* Dropping our reference should free the context; any remaining
	 * refcount indicates a leak. */
	if (ctx && (refcnt = addrxlat_ctx_decref(ctx)) != 0)
		fprintf(stderr, "WARNING: Leaked %lu addrxlat references\n",
			refcnt);

	return rc;
}
/* Delete workflow files according to clean_depth (ALL, OUTPUTS,
 * INTERMEDIATES, or CACHE for mountfile-managed files), remove the
 * mountfile cache directory when requested, and recursively clean
 * nested Makeflow jobs.  Returns 0 on success, -1 on failure. */
int makeflow_clean(struct dag *d, struct batch_queue *queue, makeflow_clean_depth clean_depth)//, struct makeflow_wrapper *w, struct makeflow_monitor *m)
{
	struct dag_file *f;
	char *name;

	hash_table_firstkey(d->files);
	while(hash_table_nextkey(d->files, &name, (void **) &f)) {
		/* Only announce removal of files expected to exist on disk. */
		int silent = 1;
		if(dag_file_should_exist(f))
			silent = 0;

		/* We have a record of the file, but it is no longer created or used so delete */
		if(dag_file_is_source(f) && dag_file_is_sink(f) && !set_lookup(d->inputs, f))
			makeflow_clean_file(d, queue, f, silent);

		if(dag_file_is_source(f)) {
			if(f->source && (clean_depth == MAKEFLOW_CLEAN_CACHE || clean_depth == MAKEFLOW_CLEAN_ALL)) {
				/* this file is specified in the mountfile */
				if(makeflow_clean_mount_target(f->filename)) {
					fprintf(stderr, "Failed to remove %s!\n", f->filename);
					return -1;
				}
			}
			/* Other source files are inputs; never delete them. */
			continue;
		}

		if(clean_depth == MAKEFLOW_CLEAN_ALL) {
			makeflow_clean_file(d, queue, f, silent);
		} else if(set_lookup(d->outputs, f) && (clean_depth == MAKEFLOW_CLEAN_OUTPUTS)) {
			makeflow_clean_file(d, queue, f, silent);
		} else if(!set_lookup(d->outputs, f) && (clean_depth == MAKEFLOW_CLEAN_INTERMEDIATES)){
			makeflow_clean_file(d, queue, f, silent);
		}
	}

	/* clean up the cache dir created due to the usage of mountfile */
	if(clean_depth == MAKEFLOW_CLEAN_CACHE || clean_depth == MAKEFLOW_CLEAN_ALL) {
		if(d->cache_dir && unlink_recursive(d->cache_dir)) {
			fprintf(stderr, "Failed to clean up the cache dir (%s) created due to the usage of the mountfile!\n", d->cache_dir);
			dag_mount_clean(d);
			return -1;
		}
		dag_mount_clean(d);
	}

	struct dag_node *n;
	for(n = d->nodes; n; n = n->next) {
		/* If the node is a Makeflow job, then we should recursively call the *
		 * clean operation on it. */
		if(n->nested_job) {
			char *command = xxmalloc(sizeof(char) * (strlen(n->command) + 4));
			sprintf(command, "%s -c", n->command);
			/* XXX this should use the batch job interface for consistency */
			makeflow_node_export_variables(d, n);
			system(command);
			free(command);
		}
	}
	return 0;
}