/* Walk down the allocation tree along n's residual node list, descending into
 * the child allocation whose nodeid matches each residual node, and return the
 * deepest allocation reached. */
struct makeflow_alloc * makeflow_alloc_traverse_to_node(struct makeflow_alloc *a, struct dag_node *n)
{
	struct dag_node *node;
	struct makeflow_alloc *alloc1, *alloc2, *tmp;
	alloc1 = a;

	makeflow_alloc_print_stats(alloc1, "TRAVERSE");

	list_first_item(n->footprint->residual_nodes);
	while((node = list_peek_current(n->footprint->residual_nodes))){
		tmp = NULL;
		list_first_item(alloc1->residuals);
		while((alloc2 = list_next_item(alloc1->residuals))){
			if(alloc2->nodeid == node->nodeid){
				tmp = alloc2;
				break;
			}
		}

		if(tmp){
			alloc1 = tmp;
			makeflow_alloc_print_stats(alloc1, "TRAVERSE");
		} else {
			break;
		}

		list_next_item(n->footprint->residual_nodes);
	}

	return alloc1;
}
/** Deletes task struct and frees contained data. */
void batch_task_delete(struct batch_task *t)
{
	if (!t)
		return;

	free(t->command);

	struct batch_file *f;
	list_first_item(t->input_files);
	while((f = list_next_item(t->input_files))){
		batch_file_delete(f);
	}
	list_delete(t->input_files);

	list_first_item(t->output_files);
	while((f = list_next_item(t->output_files))){
		batch_file_delete(f);
	}
	list_delete(t->output_files);

	rmsummary_delete(t->resources);
	jx_delete(t->envlist);
	batch_job_info_delete(t->info);

	free(t);
}
struct catalog_query *catalog_query_create(const char *hosts, struct jx *filter_expr, time_t stoptime)
{
	struct catalog_query *q = NULL;
	char *n;
	struct catalog_host *h;
	struct list *sorted_hosts = catalog_query_sort_hostlist(hosts);
	int backoff_interval = 1;

	list_first_item(sorted_hosts);
	while(time(NULL) < stoptime) {
		if(!(h = list_next_item(sorted_hosts))) {
			/* Every host failed this round: wait, then retry the list with an
			 * exponential backoff capped by the time remaining before stoptime. */
			list_first_item(sorted_hosts);
			sleep(backoff_interval);

			int max_backoff_interval = MAX(0, stoptime - time(NULL));
			backoff_interval = MIN(backoff_interval * 2, max_backoff_interval);

			continue;
		}

		struct jx *j = catalog_query_send_query(h->url, time(NULL) + 5);

		if(j) {
			q = xxmalloc(sizeof(*q));
			q->data = j;
			q->current = j->u.items;
			q->filter_expr = filter_expr;

			if(h->down) {
				debug(D_DEBUG, "catalog server at %s is back up", h->host);

				/* Remove the entry from the down set before freeing it. */
				set_first_element(down_hosts);
				while((n = set_next_element(down_hosts))) {
					if(!strcmp(n, h->host)) {
						set_remove(down_hosts, n);
						free(n);
						break;
					}
				}
			}
			break;
		} else {
			if(!h->down) {
				debug(D_DEBUG, "catalog server at %s seems to be down", h->host);
				set_insert(down_hosts, xxstrdup(h->host));
			}
		}
	}

	list_first_item(sorted_hosts);
	while((h = list_next_item(sorted_hosts))) {
		free(h->host);
		free(h->url);
		free(h);
	}
	list_delete(sorted_hosts);

	return q;
}
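/* Hedged sketch (not from the source) of how the backoff above behaves when
 * every catalog host keeps failing and stoptime is 60 seconds away: the sleeps
 * are 1, 2, 4, 8, 16, then 29 seconds, each doubling but clamped so the final
 * wait never runs past stoptime.  MIN and MAX are the same macros used in the
 * function; the 60-second window and the helper name are invented. */
static void example_backoff_timeline(void)
{
	int backoff_interval = 1;
	int remaining = 60;                 /* stands in for stoptime - time(NULL) */
	while(remaining > 0) {
		sleep(backoff_interval);        /* 1, 2, 4, 8, 16, 29 */
		remaining -= backoff_interval;
		backoff_interval = MIN(backoff_interval * 2, MAX(0, remaining));
	}
}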
void print_stats(struct list *masters, struct list *foremen, int submitted, int needed, int requested)
{
	struct timeval tv;
	struct tm *tm;
	gettimeofday(&tv, 0);
	tm = localtime(&tv.tv_sec);

	needed = needed > 0 ? needed : 0;
	requested = requested > 0 ? requested : 0;

	fprintf(stdout, "%04d/%02d/%02d %02d:%02d:%02d: "
			"|submitted: %d |needed: %d |requested: %d \n",
			tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
			tm->tm_hour, tm->tm_min, tm->tm_sec,
			submitted, needed, requested);

	int master_count = 0;
	master_count += masters ? list_size(masters) : 0;
	master_count += foremen ? list_size(foremen) : 0;

	if(master_count < 1) {
		fprintf(stdout, "No change this cycle.\n\n");
		return;
	}

	nvpair_print_table_header(stdout, queue_headers);

	struct nvpair *nv;
	if(masters && list_size(masters) > 0) {
		fprintf(stdout, "masters:\n");

		list_first_item(masters);
		while((nv = list_next_item(masters))) {
			nvpair_print_table(nv, stdout, queue_headers);
		}
	}

	if(foremen && list_size(foremen) > 0) {
		fprintf(stdout, "foremen:\n");

		list_first_item(foremen);
		while((nv = list_next_item(foremen))) {
			nvpair_print_table(nv, stdout, queue_headers);
		}
	}

	fprintf(stdout, "\n");
}
void dag_node_footprint_set_desc_res_wgt_diff(struct dag_node *n)
{
	struct dag_node *node1, *node2;

	set_first_element(n->footprint->direct_children);
	while((node1 = set_next_element(n->footprint->direct_children))){
		node2 = list_peek_current(node1->footprint->residual_nodes);

		/* Add the last residual's residual and terminal files in the branch
		 * to the current residual files */
		set_insert_set(n->footprint->residual_files, node2->footprint->residual_files);
		set_insert_set(n->footprint->residual_files, node2->footprint->terminal_files);

		/* Add the last residual's residual and terminal files in the branch
		 * to the branch's first node residual files */
		set_insert_set(node1->footprint->res_files, node2->footprint->residual_files);
		set_insert_set(node1->footprint->res_files, node2->footprint->terminal_files);

		/* Set branch head's res size */
		node1->footprint->res = dag_file_set_size(node1->footprint->res_files);

		set_insert_set(node1->footprint->wgt_files, node2->footprint->footprint_min_files);
		node1->footprint->wgt = node2->footprint->footprint_min_size;

		set_insert_set(node1->footprint->max_wgt_files, node2->footprint->footprint_max_files);
		node1->footprint->max_wgt = node2->footprint->footprint_max_size;

		list_next_item(node1->footprint->residual_nodes);
		while((node2 = list_peek_current(node1->footprint->residual_nodes))){
			if(node2->footprint->footprint_min_size >= node1->footprint->wgt){
				set_delete(node1->footprint->wgt_files);
				node1->footprint->wgt_files = set_duplicate(node2->footprint->footprint_min_files);
				node1->footprint->wgt = node2->footprint->footprint_min_size;
			}
			if(node2->footprint->footprint_max_size >= node1->footprint->max_wgt){
				set_delete(node1->footprint->max_wgt_files);
				node1->footprint->max_wgt_files = set_duplicate(node2->footprint->footprint_max_files);
				node1->footprint->max_wgt = node2->footprint->footprint_max_size;
			}
			list_next_item(node1->footprint->residual_nodes);
		}
	}

	n->footprint->residual_size = dag_file_set_size(n->footprint->residual_files);

	set_first_element(n->footprint->direct_children);
	while((node1 = set_next_element(n->footprint->direct_children))){
		node1->footprint->diff = node1->footprint->wgt - node1->footprint->res;
	}
}
/* Build a comma-separated "label(count)" summary of the events that fired
 * during this cycle, folding the cycle counts into the running totals as a
 * side effect.  Returns NULL if no events fired.  The returned string lives
 * in a static buffer that is overwritten on the next call. */
const char *construct_label(struct rmonitor_file_watch_info *f)
{
	struct rmonitor_file_watch_event *e;

	static buffer_t *b = NULL;
	if(!b) {
		b = malloc(sizeof(*b));
		buffer_init(b);
	}

	buffer_rewind(b, 0);

	int event_count = 0;
	char *sep = "";

	list_first_item(f->events);
	while((e = list_next_item(f->events))) {
		if(e->cycle_count > 0) {
			e->total_count += e->cycle_count;
			event_count += e->cycle_count;

			buffer_printf(b, "%s%s(%" PRId64 ")", sep, e->label, e->cycle_count);
			sep = ",";
		}
	}

	if(event_count) {
		return buffer_tostring(b);
	} else {
		return NULL;
	}
}
/* Tar and upload each file named in `files` to the S3 bucket, skipping files
 * already recorded in submitted_files.  Returns 1 if every upload succeeded,
 * 0 otherwise. */
static int upload_input_files_to_s3(char* files, char* jobname)
{
	int success = 1;
	char* env_var = initialized_data.master_env_prefix;

	struct list* file_list = extract_file_names_from_list(files);
	debug(D_BATCH, "extra input files list: %s, len: %i", files, list_size(file_list));

	list_first_item(file_list);
	char* cur_file = NULL;
	while((cur_file = list_next_item(file_list)) != NULL){
		if(hash_table_lookup(submitted_files, cur_file) == &HAS_SUBMITTED_VALUE){
			continue;
		}
		debug(D_BATCH, "Submitting file: %s", cur_file);

		char* put_file_command = string_format("tar -cvf %s.txz %s && %s aws s3 cp %s.txz s3://%s/%s.txz ",
				cur_file, cur_file, env_var, cur_file, bucket_name, cur_file);
		int ret = sh_system(put_file_command);
		if(ret != 0){
			debug(D_BATCH, "File Submission: %s FAILURE return code: %i", cur_file, ret);
			success = 0;
		}else{
			debug(D_BATCH, "File Submission: %s SUCCESS return code: %i", cur_file, ret);
		}
		free(put_file_command);

		put_file_command = string_format("rm %s.txz", cur_file);
		sh_system(put_file_command);
		free(put_file_command);

		//assume everything went well?
		hash_table_insert(submitted_files, cur_file, &HAS_SUBMITTED_VALUE);
	}

	list_free(file_list);
	list_delete(file_list);

	return success;
}
void denormalize_summaries(struct list *summaries)
{
	struct summary *s;
	list_first_item(summaries);
	while((s = list_next_item(summaries)))
		denormalize_summary(s);
}
/* Build a newline-separated string of `aws s3 cp` commands that copy each file
 * named in `files` from `src` to `dst`, tarring before an upload to S3 and
 * untarring after a download.  The caller owns the returned string. */
static char* generate_s3_cp_cmds(char* files, char* src, char* dst)
{
	char* env_var = initialized_data.master_env_prefix;
	struct list* file_list = extract_file_names_from_list(files);
	list_first_item(file_list);

	char* new_cmd = malloc(sizeof(char) * 1);
	new_cmd[0] = '\0';

	if(list_size(file_list) > 0){
		char* copy_cmd_prefix = string_format("%s aws s3 cp ", env_var);
		char* cur_file = NULL;
		while((cur_file = list_next_item(file_list)) != NULL){
			char* tmp;
			if(strstr(dst, "s3")){
				/* Destination is S3: tar the file locally, then upload the archive. */
				tmp = string_format("tar -cvf %s.txz %s && %s %s/%s.txz %s/%s.txz",
						cur_file, cur_file, copy_cmd_prefix, src, cur_file, dst, cur_file);
			}else{
				/* Destination is local: download the archive, then untar it. */
				tmp = string_format("%s %s/%s.txz %s/%s.txz && tar -xvf %s.txz",
						copy_cmd_prefix, src, cur_file, dst, cur_file, cur_file);
			}
			char* tmp2 = string_format("%s\n%s\n", new_cmd, tmp);
			free(new_cmd);
			free(tmp);
			new_cmd = tmp2;
		}
		free(copy_cmd_prefix);
	}

	list_free(file_list);
	list_delete(file_list);

	return new_cmd;
}
/* Starting from the head of n's residual node list, copy its residual size and
 * files to n (when the head is a different node), then scan the remaining
 * residual nodes, keeping the largest minimum and maximum footprints seen, and
 * stopping before `limit` if one is given. */
void dag_node_footprint_find_largest_residual(struct dag_node *n, struct dag_node *limit)
{
	struct dag_node *node1;

	list_first_item(n->footprint->residual_nodes);
	node1 = list_peek_current(n->footprint->residual_nodes);

	if(n != node1){
		n->footprint->residual_size = node1->footprint->residual_size;
		set_delete(n->footprint->residual_files);
		n->footprint->residual_files = set_duplicate(node1->footprint->residual_files);
	}

	while((node1 = list_next_item(n->footprint->residual_nodes)) && (!limit || node1 != limit)){
		if(node1->footprint->footprint_min_size > n->footprint->footprint_min_size){
			set_delete(n->footprint->footprint_min_files);
			n->footprint->footprint_min_size = node1->footprint->footprint_min_size;
			n->footprint->footprint_min_files = set_duplicate(node1->footprint->footprint_min_files);
		}
		if(node1->footprint->footprint_max_size > n->footprint->footprint_max_size){
			set_delete(n->footprint->footprint_max_files);
			n->footprint->footprint_max_size = node1->footprint->footprint_max_size;
			n->footprint->footprint_max_files = set_duplicate(node1->footprint->footprint_max_files);
		}
	}
}
/* Compute the space to request for cur_node when scheduling n, based on the
 * allocation mode in use.  Unless n is a run-footprint node, source files of n
 * holding their last reference count as space about to be freed and are
 * subtracted from the MIN/MAX footprint sizes. */
uint64_t makeflow_alloc_node_size( struct makeflow_alloc *a, struct dag_node *cur_node, struct dag_node *n)
{
	uint64_t alloc_size;
	uint64_t freed_space = 0;
	struct dag_file *f;

	if(n->footprint->footprint_min_type != DAG_NODE_FOOTPRINT_RUN){
		list_first_item(n->source_files);
		while((f = list_next_item(n->source_files))){
			if(f->reference_count == 1){
				freed_space += dag_file_size(f);
			}
		}
	}

	switch(a->enabled){
		case MAKEFLOW_ALLOC_TYPE_OUT:
			alloc_size = n->footprint->target_size;
			break;
		case MAKEFLOW_ALLOC_TYPE_MIN:
			alloc_size = cur_node->footprint->footprint_min_size - freed_space;
			break;
		case MAKEFLOW_ALLOC_TYPE_MAX:
			alloc_size = cur_node->footprint->footprint_max_size - freed_space;
			break;
		default:
			alloc_size = 0;
	}

	return alloc_size;
}
/* This finds the intersection of all of the children's residual node lists.
 * The intersection forms the basis for the parent's residual nodes, as all
 * sub-branches will culminate in the listed nodes. */
void dag_node_footprint_determine_desc_residual_intersect(struct dag_node *n)
{
	struct dag_node *node1, *node2;

	int comp = 1;
	int index = 0;

	while(comp){
		index++;

		node1 = set_next_element(n->footprint->direct_children); // Get first child
		node1 = list_peek_current(node1->footprint->residual_nodes); // Grab next node in its list
		while((node2 = set_next_element(n->footprint->direct_children))){ // Loop over remaining children
			node2 = list_peek_current(node2->footprint->residual_nodes);
			/* We mark when the nodes are no longer comparable, but do not
			 * break as we need all of the lists to be in the first
			 * non-shared location for future use. */
			if(!node1 || !node2 || (node1 != node2))
				comp = 0;
		}
		set_first_element(n->footprint->direct_children);

		/* Only add the node if it occurred in all of the branch lists. */
		if(comp){
			list_push_tail(n->footprint->residual_nodes, node1);
			//res_node = node1;

			/* Advance all direct_children forward one residual. */
			while((node1 = set_next_element(n->footprint->direct_children))){
				list_next_item(node1->footprint->residual_nodes);
			}
			set_first_element(n->footprint->direct_children);
		}
	}
}
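/* Hypothetical illustration of the intersection above (node names invented):
 * if one child's residual_nodes list is  n5 -> n7 -> n9  and another child's
 * is  n5 -> n7 -> n8,  then the shared prefix n5, n7 is appended to the
 * parent's residual_nodes, and the loop stops with every child's list cursor
 * left at the first position where the lists disagree (n9 vs n8). */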
/* Collect this node's terminal and coexist files, merge in those of its
 * ancestors, then recurse into each descendant once all of its ancestors have
 * been processed. */
void dag_node_footprint_prepare_node_terminal_files(struct dag_node *n)
{
	struct dag_file *f;
	list_first_item(n->target_files);
	while((f = list_next_item(n->target_files))){
		if(f->type == DAG_FILE_TYPE_OUTPUT){
			set_push(n->footprint->terminal_files, f);
		}
		set_push(n->footprint->coexist_files, f);
	}

	struct dag_node *node1;
	set_first_element(n->ancestors);
	while((node1 = set_next_element(n->ancestors))){
		set_insert_set(n->footprint->terminal_files, node1->footprint->terminal_files);

		set_first_element(node1->footprint->coexist_files);
		while((f = set_next_element(node1->footprint->coexist_files))){
			if(dag_file_coexist_files(n->footprint->accounted, f))
				set_push(n->footprint->coexist_files, f);
		}
	}

	set_first_element(n->descendants);
	while((node1 = set_next_element(n->descendants))){
		node1->footprint->terminal_updated--;
		if(node1->footprint->terminal_updated <= 0)
			dag_node_footprint_prepare_node_terminal_files(node1);
	}
}
/* The disk needed by a task is shared between the cache and the process
 * sandbox. To account for this overlap, the sandbox size is computed as the
 * stated task size minus the files already in the cache directory (i.e.,
 * input files). This way, only the sandbox needs to be measured when
 * enforcing limits on the process, since a task should never write directly
 * to the cache. */
void work_queue_process_compute_disk_needed( struct work_queue_process *p )
{
	struct work_queue_task *t = p->task;
	struct work_queue_file *f;
	struct stat s;

	p->disk = t->resources_requested->disk;

	/* task did not specify its disk usage. */
	if(p->disk < 0)
		return;

	if(t->input_files) {
		list_first_item(t->input_files);
		while((f = list_next_item(t->input_files))) {
			if(f->type != WORK_QUEUE_FILE && f->type != WORK_QUEUE_FILE_PIECE)
				continue;

			if(stat(f->cached_name, &s) < 0)
				continue;

			/* p->disk is in MB, st_size is in bytes. */
			p->disk -= s.st_size/MEGA;
		}
	}

	if(p->disk < 0) {
		p->disk = -1;
	}
}
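/* Worked example of the accounting above (the numbers are invented): a task
 * that requests 1000 MB of disk and stages a single 200 MB cached input file
 * is charged only 800 MB against its sandbox, since the input already lives
 * in the cache directory:
 *
 *   p->disk = 1000;                    // requested, in MB
 *   p->disk -= (200 * MEGA) / MEGA;    // cached input, bytes -> MB
 *   // p->disk == 800
 */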
static void export_environment( struct work_queue_process *p )
{
	struct list *env_list = p->task->env_list;
	char *name;

	list_first_item(env_list);
	while((name=list_next_item(env_list))) {
		char *value = strchr(name,'=');
		if(value) {
			*value = 0;
			setenv(name,value+1,1);
			*value='=';
		}
		else {
			/* Without =, we remove the variable */
			unsetenv(name);
		}
	}

	/* we set TMPDIR after env_list on purpose. We do not want a task writing
	 * to some other tmp dir. */
	if(p->tmpdir) {
		setenv("TMPDIR", p->tmpdir, 1);
		setenv("TEMP",  p->tmpdir, 1);
		setenv("TMP",   p->tmpdir, 1);
	}
}
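/* Illustration of how env_list entries are interpreted above (the variable
 * names are invented):
 *   "DEBUG=1"    -> setenv("DEBUG", "1", 1)    value is everything after '='
 *   "HTTP_PROXY" -> unsetenv("HTTP_PROXY")     no '=' removes the variable
 * The '=' is written back afterwards so the list entry itself is unchanged. */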
void reset_events_counts(struct rmonitor_file_watch_info *f)
{
	struct rmonitor_file_watch_event *e;

	// reset counts for cycle
	list_first_item(f->events);
	while((e = list_next_item(f->events))) {
		e->cycle_count = 0;
	}
}
int makeflow_alloc_check_space( struct makeflow_alloc *a, struct dag_node *n)
{
	uint64_t start = timestamp_get();

	if(!a){
		dynamic_alloc += timestamp_get() - start;
		makeflow_alloc_print_stats(a, "CHECK FAIL NON-EXIST");
		return 0;
	}

	makeflow_alloc_print_stats(a, "CHECK");

	if(a->enabled == MAKEFLOW_ALLOC_TYPE_OFF){
		dynamic_alloc += timestamp_get() - start;
		makeflow_alloc_print_stats(a, "CHECK SUCCESS");
		return 1;
	}

	struct dag_node *node1;
	struct makeflow_alloc *alloc1, *alloc2;
	alloc1 = makeflow_alloc_traverse_to_node(a, n);

	if(alloc1->nodeid == n->nodeid){
		if(a->enabled != MAKEFLOW_ALLOC_TYPE_OUT && (alloc1->storage->free < n->footprint->target_size)){
			dynamic_alloc += timestamp_get() - start;
			//printf("%d\t", n->nodeid);
			makeflow_alloc_print_stats(alloc1, "CHECK FAIL PRE-ALLOC");
			return 0;
		}
		dynamic_alloc += timestamp_get() - start;
		makeflow_alloc_print_stats(alloc1, "CHECK SUCCESS");
		return 1;
	}

	while((node1 = list_peek_current(n->footprint->residual_nodes))){
		alloc2 = makeflow_alloc_create(node1->nodeid, alloc1, 0, 0, a->enabled);
		if(!(makeflow_alloc_try_grow_alloc(alloc2, makeflow_alloc_node_size(a, node1, n))
			|| (n == node1 && set_size(n->descendants) < 2
				&& makeflow_alloc_try_grow_alloc(alloc2, node1->footprint->self_res)))){
			dynamic_alloc += timestamp_get() - start;
			//printf("%d\t%"PRIu64"\t", n->nodeid, makeflow_alloc_node_size(a, node1, n));
			makeflow_alloc_print_stats(alloc1, "CHECK FAIL NON-FIT");
			makeflow_alloc_delete(alloc2);
			return 0;
		}
		makeflow_alloc_delete(alloc2);
		list_next_item(n->footprint->residual_nodes);
	}

	dynamic_alloc += timestamp_get() - start;
	makeflow_alloc_print_stats(alloc1, "CHECK SUCCESS");
	return 1;
}
int makeflow_alloc_commit_space( struct makeflow_alloc *a, struct dag_node *n)
{
	uint64_t start = timestamp_get();

	if(!a)
		return 0;

	makeflow_alloc_print_stats(a, "COMMIT");

	if(a->enabled == MAKEFLOW_ALLOC_TYPE_OFF)
		return 1;

	struct dag_node *node1;
	struct makeflow_alloc *alloc1, *alloc2;
	alloc1 = makeflow_alloc_traverse_to_node(a, n);

	if(alloc1->nodeid == n->nodeid && (a->enabled == MAKEFLOW_ALLOC_TYPE_OUT)){
		if(!(makeflow_alloc_grow_alloc(alloc1, makeflow_alloc_node_size(a, n, n)))){
			dynamic_alloc += timestamp_get() - start;
			return 0;
		}
	} else if(alloc1->nodeid == n->nodeid){
		if(alloc1->storage->free < n->footprint->target_size){
			dynamic_alloc += timestamp_get() - start;
			return 0;
		}
	} else {
		while((node1 = list_peek_current(n->footprint->residual_nodes))){
			alloc2 = makeflow_alloc_create(node1->nodeid, alloc1, 0, 0, a->enabled);
			if(!(makeflow_alloc_grow_alloc(alloc2, makeflow_alloc_node_size(a, node1, n))
				|| (n == node1 && set_size(n->descendants) < 2
					&& makeflow_alloc_grow_alloc(alloc2, node1->footprint->self_res)))){
				dynamic_alloc += timestamp_get() - start;
				return 0;
			}
			list_push_tail(alloc1->residuals, alloc2);
			alloc1 = alloc2;
			list_next_item(n->footprint->residual_nodes);
		}
	}

	alloc1->storage->greedy += n->footprint->target_size;
	alloc1->storage->free -= n->footprint->target_size;
	makeflow_alloc_print_stats(alloc1, "GREEDY");

	alloc1 = alloc1->parent;
	while(alloc1){
		alloc1->storage->greedy += n->footprint->target_size;
		alloc1->storage->commit -= n->footprint->target_size;
		makeflow_alloc_print_stats(alloc1, "GREEDY");
		alloc1 = alloc1->parent;
	}

	dynamic_alloc += timestamp_get() - start;
	return 1;
}
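/* Minimal usage sketch (not from the source) of how the two functions above
 * are intended to pair up: verify that the allocation tree can hold node n's
 * footprint, and only then commit the space before dispatching the node.  The
 * helper name and the `storage_alloc` argument are assumptions for
 * illustration; the check by itself reserves nothing, only the commit does. */
static int example_reserve_node_space(struct makeflow_alloc *storage_alloc, struct dag_node *n)
{
	if(!makeflow_alloc_check_space(storage_alloc, n))
		return 0;	/* footprint does not fit anywhere in the tree */

	return makeflow_alloc_commit_space(storage_alloc, n);	/* reserve it */
}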
void dag_node_print_node_list(struct list *s, FILE *out, char *t)
{
	if(!s){
		fprintf(out, "\\{\\}%s", t);
		return;
	}

	list_first_item(s);
	struct dag_node *n;

	if(list_size(s) == 0){
		fprintf(out, "\\{\\}%s", t);
	} else {
		n = list_next_item(s);
		fprintf(out, "\\{%d", n->nodeid);
		while((n = list_next_item(s))){
			fprintf(out, ",%d", n->nodeid);
		}
		fprintf(out, "\\}%s", t);
	}
}
/* Insert each element of list l into set s.
 * Returns the number of elements that were newly added. */
int set_insert_list(struct set *s, struct list *l)
{
	list_first_item(l);
	int additions = 0;
	const void *element;
	while((element = list_next_item(l))){
		additions += set_insert(s, element);
	}

	return additions;
}
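/* Hedged usage sketch (not from the source): merge a node's source files into
 * an existing set of "live" files and report how many were newly added.  The
 * helper name and the `live_files` set are assumptions for illustration. */
static int example_track_live_files(struct set *live_files, struct dag_node *n)
{
	int newly_added = set_insert_list(live_files, n->source_files);
	debug(D_DEBUG, "%d files newly marked live", newly_added);
	return newly_added;
}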
struct list *get_masters_from_catalog(const char *catalog_host, int catalog_port, struct list *regex_list)
{
	struct catalog_query *q;
	struct nvpair *nv;
	struct list *ml;
	struct work_queue_master *m;
	char *regex;
	time_t timeout = 60, stoptime;

	stoptime = time(0) + timeout;

	q = catalog_query_create(catalog_host, catalog_port, stoptime);
	if(!q) {
		fprintf(stderr, "Failed to query catalog server at %s:%d\n", catalog_host, catalog_port);
		return NULL;
	}

	ml = list_create();
	if(!ml) return NULL;

	while((nv = catalog_query_read(q, stoptime))) {
		if(strcmp(nvpair_lookup_string(nv, "type"), CATALOG_TYPE_WORK_QUEUE_MASTER) == 0) {
			m = parse_work_queue_master_nvpair(nv);
			if(m) {
				if(regex_list) {
					// Matched preferred masters
					int match_found = 0;
					list_first_item(regex_list);
					while((regex = (char *)list_next_item(regex_list))) {
						if(whole_string_match_regex(m->proj, regex)) {
							debug(D_WQ, "Master matched: %s -> %s\n", regex, m->proj);
							list_push_head(ml, m);
							match_found = 1;
							break;
						}
					}
					if(match_found == 0) {
						free_work_queue_master(m);
					}
				} else {
					list_push_head(ml, m);
				}
			} else {
				fprintf(stderr, "Failed to parse a work queue master record!\n");
			}
		}
		nvpair_delete(nv);
	}

	// Must delete the query otherwise it would occupy 1 tcp connection forever!
	catalog_query_delete(q);

	return ml;
}
struct work_queue_task * ap_task_create( struct text_list *seta, struct text_list *setb )
{
	int x,y;
	char *buf, *name;

	if(xcurrent>=xstop) {
		xcurrent=0;
		ycurrent+=yblock;
	}

	if(ycurrent>=ystop) return 0;

	char cmd[ALLPAIRS_LINE_MAX];
	sprintf(cmd,"./%s -e \"%s\" A B %s%s",string_basename(allpairs_multicore_program),extra_arguments,use_external_program ? "./" : "",string_basename(allpairs_compare_program));
	struct work_queue_task *task = work_queue_task_create(cmd);
	if(use_external_program) {
		work_queue_task_specify_file(task,allpairs_compare_program,string_basename(allpairs_compare_program),WORK_QUEUE_INPUT,WORK_QUEUE_CACHE);
	}
	work_queue_task_specify_file(task,allpairs_multicore_program,string_basename(allpairs_multicore_program),WORK_QUEUE_INPUT,WORK_QUEUE_CACHE);

	const char *f;
	list_first_item(extra_files_list);
	while((f = list_next_item(extra_files_list))) {
		work_queue_task_specify_file(task,f,string_basename(f),WORK_QUEUE_INPUT,WORK_QUEUE_CACHE);
	}

	buf = text_list_string(seta,xcurrent,xcurrent+xblock);
	work_queue_task_specify_buffer(task,buf,strlen(buf),"A",WORK_QUEUE_NOCACHE);
	free(buf);

	buf = text_list_string(setb,ycurrent,ycurrent+yblock);
	work_queue_task_specify_buffer(task,buf,strlen(buf),"B",WORK_QUEUE_NOCACHE);
	free(buf);

	for(x=xcurrent;x<(xcurrent+xblock);x++) {
		name = text_list_get(seta,x);
		if(!name) break;
		work_queue_task_specify_file(task,name,string_basename(name),WORK_QUEUE_INPUT,WORK_QUEUE_CACHE);
	}

	for(y=ycurrent;y<(ycurrent+yblock);y++) {
		name = text_list_get(setb,y);
		if(!name) break;
		work_queue_task_specify_file(task,name,string_basename(name),WORK_QUEUE_INPUT,WORK_QUEUE_CACHE);
	}

	/* advance to the next row/column */
	xcurrent += xblock;

	return task;
}
void free_work_queue_master_list(struct list *ml)
{
	if(!ml) return;

	struct work_queue_master *m;

	list_first_item(ml);
	while((m = (struct work_queue_master *)list_next_item(ml))) {
		free_work_queue_master(m);
	}

	list_delete(ml);
}
static int count_workers_needed( struct list *masters_list, int only_waiting )
{
	int needed_workers=0;
	int masters=0;
	struct jx *j;

	if(!masters_list) {
		return needed_workers;
	}

	list_first_item(masters_list);
	while((j=list_next_item(masters_list))) {

		const char *project = jx_lookup_string(j,"project");
		const char *host    = jx_lookup_string(j,"name");
		const int   port    = jx_lookup_integer(j,"port");
		const char *owner   = jx_lookup_string(j,"owner");
		const int tr = jx_lookup_integer(j,"tasks_on_workers");
		const int tw = jx_lookup_integer(j,"tasks_waiting");
		const int tl = jx_lookup_integer(j,"tasks_left");

		int tasks = tr+tw+tl;

		// first assume one task per worker
		int need;
		if(only_waiting) {
			need = tw;
		} else {
			need = tasks;
		}

		// enforce many tasks per worker
		if(tasks_per_worker > 0) {
			need = DIV_INT_ROUND_UP(need, tasks_per_worker);
		}

		// consider if tasks declared resources...
		need = MAX(need, master_workers_needed_by_resource(j));

		int capacity = master_workers_capacity(j);
		if(consider_capacity && capacity > 0) {
			need = MIN(need, capacity);
		}

		debug(D_WQ,"%s %s:%d %s %d %d %d",project,host,port,owner,tasks,capacity,need);

		needed_workers += need;
		masters++;
	}

	return needed_workers;
}
//Useful for debugging:
void lexer_print_queue(struct lexer *lx)
{
	struct token *t;

	debug(D_MAKEFLOW_LEXER, "Queue: ");

	list_first_item(lx->token_queue);
	while((t = list_next_item(lx->token_queue)))
		debug(D_MAKEFLOW_LEXER, "%s", lexer_print_token(t));
	list_first_item(lx->token_queue);

	debug(D_MAKEFLOW_LEXER, "End queue.");
}
/**
 * Returns the depth of the given DAG.
 */
int dag_depth(struct dag *d)
{
	struct dag_node *n, *parent;
	struct dag_file *f;

	struct list *level_unsolved_nodes = list_create();
	for(n = d->nodes; n != NULL; n = n->next) {
		n->level = 0;
		list_first_item(n->source_files);
		while((f = list_next_item(n->source_files))) {
			if((parent = f->target_of) != NULL) {
				n->level = -1;
				list_push_tail(level_unsolved_nodes, n);
				break;
			}
		}
	}

	int max_level = 0;
	while((n = (struct dag_node *) list_pop_head(level_unsolved_nodes)) != NULL) {
		list_first_item(n->source_files);
		while((f = list_next_item(n->source_files))) {
			if((parent = f->target_of) != NULL) {
				if(parent->level == -1) {
					n->level = -1;
					list_push_tail(level_unsolved_nodes, n);
					break;
				} else {
					int tmp_level = parent->level + 1;
					n->level = n->level > tmp_level ? n->level : tmp_level;
					max_level = n->level > max_level ? n->level : max_level;
				}
			}
		}
	}
	list_delete(level_unsolved_nodes);

	return max_level + 1;
}
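/* Worked example of the level arithmetic above (node names invented):
 * for a DAG with edges n1 -> n2, n2 -> n3, and n1 -> n3,
 *   n1 has no parents, so its level stays 0;
 *   n2's only parent n1 has level 0, so n2->level becomes 1;
 *   n3 takes the maximum over its parents (n1 at 0, n2 at 1), so n3->level is 2;
 * dag_depth() then returns max_level + 1 = 3. */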
static void export_environment( struct list *env_list )
{
	char *name;

	list_first_item(env_list);
	while((name=list_next_item(env_list))) {
		char *value = strchr(name,'=');
		if(value) {
			*value = 0;
			setenv(name,value+1,1);
			*value='=';
		}
	}
}
int at_least_one_event_still_active(struct rmonitor_file_watch_info *f)
{
	struct rmonitor_file_watch_event *e;

	int at_least_one_active = 0;

	list_first_item(f->events);
	while((e = list_next_item(f->events))) {
		if(e->max_count < 0 || e->total_count < e->max_count) {
			at_least_one_active = 1;
			break;
		}
	}

	return at_least_one_active;
}
void debug_print_masters(struct list *ml)
{
	struct work_queue_master *m;
	int count = 0;
	char timestr[1024];

	list_first_item(ml);
	while((m = (struct work_queue_master *) list_next_item(ml))) {
		if(timestamp_fmt(timestr, sizeof(timestr), "%R %b %d, %Y", (timestamp_t)(m->start_time)*1000000) == 0) {
			strcpy(timestr, "unknown time");
		}
		debug(D_WQ, "%d:\t%s@%s:%d started on %s\n", ++count, m->proj, m->addr, m->port, timestr);
	}
}
void remove_created_files()
{
	char *filename;
	int i = 0;

	list_first_item(created_files);
	while((filename = (char *)list_next_item(created_files))) {
		if(unlink(filename) == 0) {
			printf("File removed: %s\n", filename);
			i++;
		}
	}
	printf("%d created files are removed\n", i);

	list_free(created_files);
	list_delete(created_files);
}