struct list *lexer_read_command_aux(struct lexer *lx)
{
	int spaces_deleted = lexer_discard_white_space(lx);

	struct list *tokens = list_create();

	// Preserve space in substitutions.
	if(spaces_deleted && lx->depth > 0) {
		list_push_tail(tokens, lexer_pack_token(lx, TOKEN_SPACE));
	}

	/* Read all command tokens. */
	while(1) {
		struct token *t = lexer_read_command_argument(lx);

		if(!t)
			break;

		if(t->type == TOKEN_SUBSTITUTION) {
			tokens = list_splice(tokens, lexer_expand_substitution(lx, t, lexer_read_command_aux));
			lexer_free_token(t);
			continue;
		} else {
			list_push_tail(tokens, t);
			if(t->type == TOKEN_NEWLINE)
				break;
		}
	}

	return tokens;
}
int submit_task_series(struct work_queue *q, struct task_series *ts, int series_id)
{
	char input_file[128], output_file[128], command[256];
	char gen_input_cmd[256];

	sprintf(input_file, "input-%d", series_id);
	list_push_tail(created_files, xxstrdup(input_file));
	sprintf(gen_input_cmd, "dd if=/dev/zero of=%s bs=1M count=%d", input_file, ts->input_size);
	system(gen_input_cmd);

	// submit tasks to the queue
	int i;
	for(i = 0; i < ts->num_of_tasks; i++) {
		sprintf(output_file, "output-%d-%d", series_id, i);
		list_push_tail(created_files, xxstrdup(output_file));
		sprintf(command, "dd if=/dev/zero of=%s bs=1M count=%d; sleep %d", output_file, ts->output_size, ts->execution_time);

		struct work_queue_task *t = work_queue_task_create(command);
		if(!work_queue_task_specify_file(t, input_file, input_file, WORK_QUEUE_INPUT, WORK_QUEUE_CACHE)) {
			printf("task_specify_file() failed for %s: check if arguments are null or remote name is an absolute path.\n", input_file);
			return 0;
		}
		if(!work_queue_task_specify_file(t, output_file, output_file, WORK_QUEUE_OUTPUT, WORK_QUEUE_NOCACHE)) {
			printf("task_specify_file() failed for %s: check if arguments are null or remote name is an absolute path.\n", output_file);
			return 0;
		}

		int taskid = work_queue_submit(q, t);
		printf("submitted task (id# %d): %s\n", taskid, t->command_line);
	}

	return 1;	// success
}
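/*
 * A minimal driver sketch for submit_task_series() above, assuming the
 * standard Work Queue API (work_queue_create, work_queue_empty,
 * work_queue_wait, work_queue_task_delete). The port choice and the wait
 * timeout of 5 seconds are illustrative, not taken from the original program.
 */
#include <stdio.h>
#include "work_queue.h"

int run_series_example(struct task_series *ts)
{
	struct work_queue *q = work_queue_create(WORK_QUEUE_DEFAULT_PORT);
	if(!q)
		return 1;

	if(!submit_task_series(q, ts, /* series_id */ 0))
		return 1;

	// Harvest tasks until everything submitted has come back.
	while(!work_queue_empty(q)) {
		struct work_queue_task *t = work_queue_wait(q, 5);
		if(t) {
			printf("task %d returned\n", t->taskid);
			work_queue_task_delete(t);
		}
	}

	work_queue_delete(q);
	return 0;
}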
void mpi_queue_task_specify_file(struct mpi_queue_task *t, const char *name, int type)
{
	struct mpi_queue_file *tf = malloc(sizeof(struct mpi_queue_file));

	tf->type = MPI_QUEUE_FILE;
	tf->length = strlen(name);
	tf->name = strdup(name);

	if(type == MPI_QUEUE_INPUT) {
		list_push_tail(t->input_files, tf);
	} else {
		list_push_tail(t->output_files, tf);
	}
}
void makeflow_wrapper_add_output_file(struct makeflow_wrapper *w, const char *file)
{
	char *f = xxstrdup(file);
	char *p = strchr(f, '=');
	if(p)
		w->uses_remote_rename = 1;

	list_push_tail(w->output_files, f);
}
/** Creates a new batch_file and adds it to the task's outputs. */
struct batch_file *batch_task_add_output_file(struct batch_task *task, const char *outer_name, const char *inner_name)
{
	struct batch_file *f = batch_file_create(task->queue, outer_name, inner_name);
	list_push_tail(task->output_files, f);

	return f;
}
void parse_summary_recursive(struct rmDsummary_set *dest, char *dirname, struct hash_table *categories)
{
	FTS *hierarchy;
	FTSENT *entry;
	char *argv[] = {dirname, NULL};

	hierarchy = fts_open(argv, FTS_PHYSICAL, NULL);
	if(!hierarchy)
		fatal("fts_open error: %s\n", strerror(errno));

	struct rmDsummary *s;
	while((entry = fts_read(hierarchy)))
		if(S_ISREG(entry->fts_statp->st_mode) && strstr(entry->fts_name, RULE_SUFFIX))	// bug: no links
		{
			FILE *stream;
			stream = fopen(entry->fts_accpath, "r");
			if(!stream)
				fatal("Cannot open resources summary file: %s : %s\n", entry->fts_accpath, strerror(errno));

			while((s = parse_summary(stream, entry->fts_path, categories)))
				list_push_tail(dest->summaries, s);

			fclose(stream);
		}

	fts_close(hierarchy);
}
void process_putback(struct process_info *p)
{
	if(!complete_list)
		complete_list = list_create();

	list_push_tail(complete_list, p);
}
// Return 1 if name was processed as a special variable, 0 otherwise.
int dag_parse_process_special_variable(struct lexer_book *bk, struct dag_node *n, int nodeid, char *name, const char *value)
{
	struct dag *d = bk->d;
	int special = 0;

	if(strcmp(RESOURCES_CATEGORY, name) == 0) {
		special = 1;

		/* If we have never seen this label, then create
		 * a new category, otherwise retrieve the category. */
		struct dag_task_category *category = dag_task_category_lookup_or_create(d, value);

		/* If we are parsing inside a node, make category
		 * the category of the node, but do not update
		 * the global task_category. Else, update the
		 * global task category. */
		if(n) {
			/* Remove the node from its previous category... */
			list_pop_tail(n->category->nodes);
			n->category = category;
			/* ...and add it to the new one. */
			list_push_tail(n->category->nodes, n);
			debug(D_DEBUG, "Updating category '%s' for rule %d.\n", value, n->nodeid);
		} else
			bk->category = category;
	}
	/* else if some other special variable .... */
	/* ... */

	return special;
}
void initialize_watch_events(struct rmonitor_file_watch_info *f, struct jx *watch_spec)
{
	struct jx *events_array = jx_lookup(watch_spec, "events");

	if(!events_array) {
		fatal("File watch for '%s' did not define any events", f->filename);
	}

	if(!jx_istype(events_array, JX_ARRAY)) {
		fatal("Value for key 'events' in file watch for '%s' is not an array.", f->filename);
	}

	f->events = list_create(0);

	struct jx *event_spec;
	int error = 0;

	for(void *i = NULL; (event_spec = jx_iterate_array(events_array, &i));) {
		struct rmonitor_file_watch_event *e = parse_event(f->filename, event_spec);

		if(e) {
			if(e->on_pattern) {
				// At least one event defines a pattern, thus we need
				// line-by-line processing.
				f->event_with_pattern = 1;
			}

			list_push_tail(f->events, e);
			debug(D_RMON, "Added event for file '%s', label '%s', max_count %" PRId64, f->filename, e->label, e->max_count);
		} else {
			error = 1;
		}
	}

	if(error) {
		fatal("Error parsing file watch for '%s'.", f->filename);
	}
}
/* This finds the intersection of all of the children's residual lists. This
intersection forms the basis for the parent's residual nodes, as all
sub-branches will culminate in the listed nodes. */
void dag_node_footprint_determine_desc_residual_intersect(struct dag_node *n)
{
	struct dag_node *node1, *node2;

	int comp = 1;
	int index = 0;

	while(comp) {
		index++;

		node1 = set_next_element(n->footprint->direct_children);	// Get first child.
		node1 = list_peek_current(node1->footprint->residual_nodes);	// Grab next node in its list.

		while((node2 = set_next_element(n->footprint->direct_children))) {	// Loop over remaining children.
			node2 = list_peek_current(node2->footprint->residual_nodes);
			/* We mark when the nodes are no longer comparable, but do not
			break, as we need all of the lists to be in the first
			non-shared location for future use. */
			if(!node1 || !node2 || (node1 != node2))
				comp = 0;
		}
		set_first_element(n->footprint->direct_children);

		/* Only add the node if it occurred in all of the branch lists. */
		if(comp) {
			list_push_tail(n->footprint->residual_nodes, node1);
			//res_node = node1;
			/* Advance all direct_children forward one residual. */
			while((node1 = set_next_element(n->footprint->direct_children))) {
				list_next_item(node1->footprint->residual_nodes);
			}
			set_first_element(n->footprint->direct_children);
		}
	}
}
int list_push_priority(struct list *l, void *item, double priority)
{
	struct list_node *n;
	struct list_node *node;
	int result;

	if(!l->head) {
		result = list_push_head(l, item);
		if(result)
			l->head->priority = priority;
		return result;
	}

	if(l->head->priority < priority) {
		result = list_push_head(l, item);
		if(result)
			l->head->priority = priority;
		return result;
	}

	for(n = l->head; n; n = n->next) {
		if(n->priority < priority) {
			node = new_node(item, n->prev, n);
			l->size++;
			node->priority = priority;
			return 1;
		}
	}

	result = list_push_tail(l, item);
	if(result)
		l->tail->priority = priority;
	return result;
}
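/*
 * Usage sketch for list_push_priority(): items are kept in descending
 * priority order, so popping from the head always yields the highest-priority
 * item. Assumes the surrounding list API (list_create, list_pop_head,
 * list_delete); the string items are illustrative.
 */
#include <stdio.h>

void priority_example(void)
{
	struct list *l = list_create();

	list_push_priority(l, "low", 1.0);
	list_push_priority(l, "high", 9.0);
	list_push_priority(l, "mid", 5.0);

	char *s;
	while((s = list_pop_head(l)))
		printf("%s\n", s);	/* high, mid, low */

	list_delete(l);
}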
bool list_insert(List *list, int32_t index, void *data)
{
	assert(NULL != list);
	assert(0 <= index);
	assert(index <= list->count);

	if (0 == index) {
		return list_push_head(list, data);
	} else if (index == list->count) {
		return list_push_tail(list, data);
	} else if (index > list->count) {
		return false;
	}

	ListItem *item = NULL;
	ListItem *new_item = (ListItem *) malloc(sizeof(ListItem));
	if (NULL == new_item) {
		return false;
	}

	// we need the item previous to the insertion index
	__LIST_GET(item, list, index - 1);

	new_item->data = data;
	new_item->next = item->next;
	item->next = new_item;
	list->count++;

	return true;
}
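/*
 * Usage sketch for list_insert() above: index 0 prepends, index == count
 * appends, and anything in between splices after item (index - 1). The
 * caller supplies an already-constructed empty list, since this List
 * variant's constructor is not shown here; the string items are illustrative.
 */
void insert_example(List *l)
{
	list_push_tail(l, "a");
	list_push_tail(l, "c");

	list_insert(l, 1, "b");	/* list is now a, b, c */
	list_insert(l, 3, "d");	/* append: a, b, c, d */
}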
/* Parse the file assuming there are multiple summaries in it.
Summary boundaries are lines starting with # */
struct list *rmsummary_parse_file_multiple(char *filename)
{
	FILE *stream;
	stream = fopen(filename, "r");
	if(!stream) {
		debug(D_NOTICE, "Cannot open resources summary file: %s : %s\n", filename, strerror(errno));
		return NULL;
	}

	struct list *lst = list_create(0);
	struct rmsummary *s;

	do {
		s = rmsummary_parse_next(stream);
		if(s)
			list_push_tail(lst, s);
	} while(s);

	fclose(stream);

	return lst;
}
static int dag_parse_node(struct lexer *bk)
{
	struct token *t = lexer_next_token(bk);
	if(t->type != TOKEN_FILES) {
		lexer_report_error(bk, "Error reading rule.");
	}
	lexer_free_token(t);

	struct dag_node *n;
	n = dag_node_create(bk->d, bk->line_number);

	if(verbose_parsing && bk->d->nodeid_counter % parsing_rule_mod_counter == 0) {
		fprintf(stdout, "\rRules parsed: %d", bk->d->nodeid_counter + 1);
		fflush(stdout);
	}

	n->category = bk->category;
	list_push_tail(n->category->nodes, n);

	dag_parse_node_filelist(bk, n);

	bk->environment->node = n;

	/* Read variables, if any */
	while((t = lexer_peek_next_token(bk)) && t->type != TOKEN_COMMAND) {
		switch (t->type) {
		case TOKEN_VARIABLE:
			dag_parse_variable(bk, n);
			break;
		default:
			lexer_report_error(bk, "Expected COMMAND or VARIABLE, got: %s", lexer_print_token(t));
			break;
		}
	}

	if(!t) {
		lexer_report_error(bk, "Rule does not have a command.\n");
	}

	dag_parse_node_command(bk, n);

	bk->environment->node = NULL;

	n->next = bk->d->nodes;
	bk->d->nodes = n;
	itable_insert(bk->d->node_table, n->nodeid, n);

	debug(D_MAKEFLOW_PARSER, "Setting resource category '%s' for rule %d.\n", n->category->label, n->nodeid);
	dag_node_fill_resources(n);
	dag_node_print_debug_resources(n);

	return 1;
}
int makeflow_alloc_commit_space(struct makeflow_alloc *a, struct dag_node *n)
{
	uint64_t start = timestamp_get();
	if(!a)
		return 0;

	makeflow_alloc_print_stats(a, "COMMIT");
	if(a->enabled == MAKEFLOW_ALLOC_TYPE_OFF)
		return 1;

	struct dag_node *node1;
	struct makeflow_alloc *alloc1, *alloc2;
	alloc1 = makeflow_alloc_traverse_to_node(a, n);

	if(alloc1->nodeid == n->nodeid && (a->enabled == MAKEFLOW_ALLOC_TYPE_OUT)) {
		if(!(makeflow_alloc_grow_alloc(alloc1, makeflow_alloc_node_size(a, n, n)))) {
			dynamic_alloc += timestamp_get() - start;
			return 0;
		}
	} else if(alloc1->nodeid == n->nodeid) {
		if(alloc1->storage->free < n->footprint->target_size) {
			dynamic_alloc += timestamp_get() - start;
			return 0;
		}
	} else {
		while((node1 = list_peek_current(n->footprint->residual_nodes))) {
			alloc2 = makeflow_alloc_create(node1->nodeid, alloc1, 0, 0, a->enabled);
			if(!(makeflow_alloc_grow_alloc(alloc2, makeflow_alloc_node_size(a, node1, n))
				|| (n == node1 && set_size(n->descendants) < 2
					&& makeflow_alloc_grow_alloc(alloc2, node1->footprint->self_res)))) {
				dynamic_alloc += timestamp_get() - start;
				return 0;
			}
			list_push_tail(alloc1->residuals, alloc2);
			alloc1 = alloc2;
			list_next_item(n->footprint->residual_nodes);
		}
	}

	alloc1->storage->greedy += n->footprint->target_size;
	alloc1->storage->free -= n->footprint->target_size;
	makeflow_alloc_print_stats(alloc1, "GREEDY");

	alloc1 = alloc1->parent;
	while(alloc1) {
		alloc1->storage->greedy += n->footprint->target_size;
		alloc1->storage->commit -= n->footprint->target_size;
		makeflow_alloc_print_stats(alloc1, "GREEDY");
		alloc1 = alloc1->parent;
	}

	dynamic_alloc += timestamp_get() - start;
	return 1;
}
int s3_mk_bucket(char *bucketname, enum amz_base_perm perms, const char *access_key_id, const char *access_key)
{
	struct link *server;
	char path[] = "/";
	struct s3_header_object *head;
	time_t stoptime = time(0) + s3_timeout;
	struct s3_message mesg;
	char response[HEADER_LINE_MAX];

	if(!access_key_id || !access_key || !s3_endpoint)
		return -1;

	mesg.type = S3_MESG_PUT;
	mesg.path = path;
	mesg.bucket = bucketname;
	mesg.content_length = 0;
	mesg.content_type = NULL;
	mesg.content_md5 = NULL;
	mesg.date = time(0);
	mesg.expect = 0;

	switch(perms) {
	case AMZ_PERM_PRIVATE:
		head = s3_new_header_object(S3_HEADER_AMZ_ACL, NULL, "private");
		break;
	case AMZ_PERM_PUBLIC_READ:
		head = s3_new_header_object(S3_HEADER_AMZ_ACL, NULL, "public-read");
		break;
	case AMZ_PERM_PUBLIC_WRITE:
		head = s3_new_header_object(S3_HEADER_AMZ_ACL, NULL, "public-read-write");
		break;
	case AMZ_PERM_AUTH_READ:
		head = s3_new_header_object(S3_HEADER_AMZ_ACL, NULL, "authenticated-read");
		break;
	case AMZ_PERM_BUCKET_READ:
		head = s3_new_header_object(S3_HEADER_AMZ_ACL, NULL, "bucket-owner-read");
		break;
	case AMZ_PERM_BUCKET_FULL:
		head = s3_new_header_object(S3_HEADER_AMZ_ACL, NULL, "bucket-owner-full-control");
		break;
	default:
		return -1;
	}
	mesg.amz_headers = list_create();
	list_push_tail(mesg.amz_headers, head);

	sign_message(&mesg, access_key_id, access_key);
	server = s3_send_message(&mesg, NULL, stoptime);

	list_free(mesg.amz_headers);
	list_delete(mesg.amz_headers);

	if(!server)
		return -1;

	link_readline(server, response, HEADER_LINE_MAX, stoptime);
	if(strcmp(response, "HTTP/1.1 200 OK")) {
		// Error: transfer failed; close connection and return failure
		//fprintf(stderr, "Error: create bucket failed\nResponse: %s\n", response);
		link_close(server);
		return -1;
	}

	do {
		if(!strcmp(response, "Server: AmazonS3"))
			break;
	} while(link_readline(server, response, HEADER_LINE_MAX, stoptime));

	link_close(server);
	return 0;
}
struct list *catalog_query_sort_hostlist(const char *hosts)
{
	const char *next_host;
	char *n;
	struct catalog_host *h;
	struct list *previously_up = list_create();
	struct list *previously_down = list_create();

	if(string_null_or_empty(hosts)) {
		next_host = CATALOG_HOST;
	} else {
		next_host = hosts;
	}

	if(!down_hosts) {
		down_hosts = set_create(0);
	}

	do {
		int port;
		char host[DOMAIN_NAME_MAX];

		h = xxmalloc(sizeof(*h));
		next_host = parse_hostlist(next_host, host, &port);

		h->host = xxstrdup(host);
		h->url = string_format("http://%s:%d/query.json", host, port);
		h->down = 0;

		set_first_element(down_hosts);
		while((n = set_next_element(down_hosts))) {
			if(!strcmp(n, host)) {
				h->down = 1;
			}
		}

		if(h->down) {
			list_push_tail(previously_down, h);
		} else {
			list_push_tail(previously_up, h);
		}
	} while(next_host);

	return list_splice(previously_up, previously_down);
}
void channel_handle_client_read(connector_t pconn, int event)
{
	// The client only interacts once, so there is no need to keep reading.
	if (connector_read(pconn, event) > 0) {
		char *val = buffer_get_read(pconn->preadbuf);

		message_t pmsg = (message_t)malloc(sizeof(message));
		memset(pmsg, 0, sizeof(message));	// was sizeof(pmsg), which only cleared pointer-size bytes

		size_t len1 = get_client_msg(val, pmsg);
		if (len1 == 0) {
			print_log(LOG_TYPE_ERROR, "Read Client Msg Error %s", val);
			free(pmsg);
			return;
		}

		char data[20] = {0};
		memcpy(data, pmsg->uid, pmsg->len);
		buffer_read(pconn->preadbuf, len1, TRUE);
		memcpy(pconn->uid, data, pmsg->len);

		int len2 = sizeof(connector_t);
		ht_insert(pconn->pworker->pht, data, (pmsg->len) + 1, pconn, len2 + 1);

		context_t pcontext = (context_t)malloc(sizeof(context));
		memset(pcontext, 0, sizeof(context));
		memcpy(pcontext->data, data, pmsg->len);
		list_push_tail(pconn->pworker->plist, pcontext);
		//print_log(LOG_TYPE_DEBUG, "Hash key %s, Len %d", pcontext->data, pmsg->len);

		char cmd[REDIS_CMD_LEN] = {'\0'};
		get_request_str(data, cmd);
		int len = strlen(cmd);

		if (pconn->pworker->redis->state == CONN_STATE_RUN) {
			buffer_write(pconn->pworker->redis->pwritebuf, cmd, len);
			connector_write(pconn->pworker->redis);
		} else {
			print_log(LOG_TYPE_ERROR, "Redis not run");
			list_pop_head(pconn->pworker->plist);
			ht_remove(pconn->pworker->pht, data, (pmsg->len) + 1);
			pconn->pworker->neterr_count++;
		}

		free(pmsg);
	}
}
static INT64_T do_put_one_dir(const char *hostport, const char *source_file, const char *target_file, int mode, time_t stoptime)
{
	char new_source_file[CHIRP_PATH_MAX];
	char new_target_file[CHIRP_PATH_MAX];
	struct list *work_list;
	const char *name;
	INT64_T result;
	INT64_T total = 0;

	struct dirent *d;
	DIR *dir;

	work_list = list_create();

	result = chirp_reli_mkdir(hostport, target_file, mode, stoptime);
	if(result == 0 || errno == EEXIST) {
		result = 0;
		dir = opendir(source_file);
		if(dir) {
			while((d = readdir(dir))) {
				if(!strcmp(d->d_name, "."))
					continue;
				if(!strcmp(d->d_name, ".."))
					continue;
				list_push_tail(work_list, strdup(d->d_name));
			}
			closedir(dir);
			while((name = list_pop_head(work_list))) {
				sprintf(new_source_file, "%s/%s", source_file, name);
				sprintf(new_target_file, "%s/%s", target_file, name);
				result = chirp_recursive_put(hostport, new_source_file, new_target_file, stoptime);
				free((char *) name);
				if(result < 0)
					break;
				total += result;
			}
		} else {
			result = -1;
		}
	} else {
		result = -1;
	}

	while((name = list_pop_head(work_list)))
		free((char *) name);
	list_delete(work_list);

	if(result < 0) {
		return -1;
	} else {
		return total;
	}
}
bool list_push_tail_ts(List *list, void *data)
{
	assert(NULL != list);
	assert(NULL != list->mutex);

	pthread_mutex_lock(list->mutex);
	bool ret = list_push_tail(list, data);
	pthread_mutex_unlock(list->mutex);

	return ret;
}
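/*
 * Usage sketch for list_push_tail_ts(): several producer threads may append
 * to one shared list, provided list->mutex was initialized before the first
 * _ts call. Construction of the List itself is elided; this only illustrates
 * the locking contract, and the item string is illustrative.
 */
#include <pthread.h>

static void *producer(void *arg)
{
	List *shared = (List *) arg;
	list_push_tail_ts(shared, "work item");	/* safe: serialized by list->mutex */
	return NULL;
}

void spawn_producers(List *shared)
{
	pthread_t tid[4];
	for(int i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, producer, shared);
	for(int i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
}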
/**
 * Returns the depth of the given DAG.
 */
int dag_depth(struct dag *d)
{
	struct dag_node *n, *parent;
	struct dag_file *f;

	struct list *level_unsolved_nodes = list_create();
	for(n = d->nodes; n != NULL; n = n->next) {
		n->level = 0;
		list_first_item(n->source_files);
		while((f = list_next_item(n->source_files))) {
			if((parent = f->target_of) != NULL) {
				n->level = -1;
				list_push_tail(level_unsolved_nodes, n);
				break;
			}
		}
	}

	int max_level = 0;
	while((n = (struct dag_node *) list_pop_head(level_unsolved_nodes)) != NULL) {
		list_first_item(n->source_files);
		while((f = list_next_item(n->source_files))) {
			if((parent = f->target_of) != NULL) {
				if(parent->level == -1) {
					n->level = -1;
					list_push_tail(level_unsolved_nodes, n);
					break;
				} else {
					int tmp_level = parent->level + 1;
					n->level = n->level > tmp_level ? n->level : tmp_level;
					max_level = n->level > max_level ? n->level : max_level;
				}
			}
		}
	}
	list_delete(level_unsolved_nodes);

	return max_level + 1;
}
// opened tracks whether it is the opening (opened = 0) or closing
// (opened = 1) double quote we encounter.
struct list *lexer_read_expandable_recursive(struct lexer *lx, char end_marker, int opened)
{
	lexer_discard_white_space(lx);

	struct list *tokens = list_create();

	while(!lx->eof) {
		int c = lexer_next_peek(lx);

		if(c == '$') {
			list_push_tail(tokens, lexer_read_substitution(lx));
		}

		if(c == '\'') {
			lexer_read_literal(lx);
			list_push_tail(tokens, lexer_pack_token(lx, TOKEN_LITERAL));
		} else if(c == '"' && opened == 0) {
			lexer_add_to_lexeme(lx, lexer_next_char(lx));
			list_push_tail(tokens, lexer_pack_token(lx, TOKEN_LITERAL));	// Add first "
			tokens = list_splice(tokens, lexer_read_expandable_recursive(lx, '"', 1));
			lexer_add_to_lexeme(lx, '"');
			list_push_tail(tokens, lexer_pack_token(lx, TOKEN_LITERAL));	// Add closing "
			if(end_marker == '"')
				return tokens;
		} else if(c == '#' && end_marker != '"') {
			lexer_discard_comments(lx);
		} else if(c == end_marker) {
			lexer_next_char(lx);	/* Jump end_marker */
			return tokens;
		} else {
			list_push_tail(tokens, lexer_read_literal_in_expandable_until(lx, end_marker));
		}
	}

	lexer_report_error(lx, "Found EOF before end marker: %c.\n", end_marker);
	return NULL;
}
static struct list *extract_file_names_from_list(char *in)
{
	struct list *output = list_create();
	char *tmp = strdup(in);
	char *ta = strtok(tmp, ",");

	while(ta != NULL) {
		int push_success = list_push_tail(output, strdup(ta));
		if(!push_success) {
			fatal("Error appending file name to list due to being out of memory");
		}
		ta = strtok(0, ",");
	}

	free(tmp);	// strtok is done with the working copy; this was leaked before
	return output;
}
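/*
 * Usage sketch for extract_file_names_from_list(): splitting a comma-
 * separated string yields one strdup'ed entry per name, so the caller owns
 * both the list and its items. Assumes the cctools-style list calls used
 * elsewhere in this file (list_first_item, list_next_item, list_free,
 * list_delete); the filenames are illustrative.
 */
#include <stdio.h>

void extract_example(void)
{
	struct list *names = extract_file_names_from_list("a.txt,b.txt,c.txt");

	char *name;
	list_first_item(names);
	while((name = list_next_item(names)))
		printf("file: %s\n", name);

	list_free(names);	/* frees each strdup'ed name */
	list_delete(names);
}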
struct list *list_duplicate(struct list *list)
{
	struct list *list2;
	struct list_node *node;

	list2 = list_create();
	node = list->head;
	while(node) {
		list_push_tail(list2, node->data);
		if(list->iter == node) {
			list2->iter = list2->tail;
		}
		node = node->next;
	}

	return list2;
}
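/*
 * Note on list_duplicate(): it is a shallow copy, so both lists point at the
 * same underlying items and the data must be freed exactly once. A hedged
 * sketch of a safe teardown order, assuming list_free() releases item
 * storage as in the s3_mk_bucket() snippet above:
 */
void duplicate_example(struct list *original)
{
	struct list *copy = list_duplicate(original);

	/* ... read-only traversal of copy ... */

	list_delete(copy);	/* drop the copy first; shared items untouched */
	list_free(original);	/* then free the shared items exactly once */
	list_delete(original);
}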
/* Returns the list of dag_file's which are not the target of any node. */
struct list *dag_input_files(struct dag *d)
{
	struct dag_file *f;
	char *filename;
	struct list *il;

	il = list_create(0);

	hash_table_firstkey(d->file_table);
	while((hash_table_nextkey(d->file_table, &filename, (void **) &f)))
		if(!f->target_of) {
			debug(D_DEBUG, "Found independent input file: %s", f->filename);
			list_push_tail(il, f);
		}

	return il;
}
/*--------------------------------------------
Author: Max Ashton
Description: create an element, push it onto the
tail, then increase the list size.
----------------------------------------------*/
int add_element_tail(maStructList &list, void *pvData, unsigned int uiDataType)
{
	struct_list_element *pElement = create_list_element(pvData, uiDataType);
	if(pElement == NULL) {
		return 0;
	}

	int success = list_push_tail(list, pElement);
	if(success == 1) {
		list._iListSize++;
	}

	return success;
}
struct list *list_sort(struct list *list, int (*comparator) (const void *, const void *))
{
	void **array;
	int size, i = 0;

	size = list_size(list);
	array = malloc(size * sizeof(*array));

	// Drain every item into a flat array, sort it, then rebuild the list.
	while(list_size(list)) {
		array[i] = list_pop_head(list);
		i++;
	}

	qsort(array, size, sizeof(*array), comparator);

	for(i = 0; i < size; i++) {
		list_push_tail(list, array[i]);
	}

	free(array);
	return list;
}
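/*
 * Usage sketch for list_sort(): because the items are collected into an
 * array of void* before qsort(), the comparator receives pointers to the
 * array slots and must dereference one level more than a plain strcmp
 * wrapper would. The string items are illustrative.
 */
#include <stdio.h>
#include <string.h>

static int cmp_str(const void *a, const void *b)
{
	/* Each argument points at a void* slot holding a char*. */
	return strcmp(*(char *const *) a, *(char *const *) b);
}

void sort_example(void)
{
	struct list *l = list_create();
	list_push_tail(l, "banana");
	list_push_tail(l, "apple");
	list_push_tail(l, "cherry");

	list_sort(l, cmp_str);

	char *s;
	while((s = list_pop_head(l)))
		printf("%s\n", s);	/* apple, banana, cherry */

	list_delete(l);
}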
int get_pool_decisions_from_catalog(const char *catalog_host, int catalog_port, const char *proj, struct list *decisions)
{
	struct catalog_query *q;
	struct nvpair *nv;
	time_t timeout = 60, stoptime;

	stoptime = time(0) + timeout;

	if(!decisions) {
		fprintf(stderr, "No list to store pool decisions.\n");
		return 0;
	}

	q = catalog_query_create(catalog_host, catalog_port, stoptime);
	if(!q) {
		fprintf(stderr, "Failed to query catalog server at %s:%d\n", catalog_host, catalog_port);
		return 0;
	}

	// multiple pools
	while((nv = catalog_query_read(q, stoptime))) {
		if(strcmp(nvpair_lookup_string(nv, "type"), CATALOG_TYPE_WORK_QUEUE_POOL) == 0) {
			struct work_queue_pool *p;
			p = parse_work_queue_pool_nvpair(nv);
			debug(D_WQ, "pool %s's decision: %s\n", p->name, p->decision);
			int x = workers_by_item(p->decision, proj);
			if(x >= 0) {
				struct pool_info *pi;
				pi = (struct pool_info *) xxmalloc(sizeof(*pi));
				strncpy(pi->name, p->name, WORK_QUEUE_POOL_NAME_MAX);
				pi->count = x;
				list_push_tail(decisions, pi);
			}
			free(p->decision);
			free(p);
		}
		nvpair_delete(nv);
	}

	// Must delete the query otherwise it would occupy 1 tcp connection forever!
	catalog_query_delete(q);
	return 1;
}
void parse_summary_from_filelist(struct rmDsummary_set *dest, char *filename, struct hash_table *categories)
{
	FILE *flist;

	if(strcmp(filename, "-") == 0) {
		flist = stdin;
	} else {
		flist = fopen(filename, "r");
		if(!flist)
			fatal("Cannot open resources summary list: %s : %s\n", filename, strerror(errno));
	}

	struct rmDsummary *s;
	char file_summ[MAX_LINE];
	while((fgets(file_summ, MAX_LINE, flist))) {
		FILE *stream;

		int n = strlen(file_summ);
		if(n < 1)
			continue;

		if(file_summ[n - 1] == '\n') {
			file_summ[n - 1] = '\0';
		}

		stream = fopen(file_summ, "r");
		if(!stream)
			fatal("Cannot open resources summary file: %s : %s\n", file_summ, strerror(errno));

		while((s = parse_summary(stream, file_summ, categories)))
			list_push_tail(dest->summaries, s);

		fclose(stream);
	}

	if(flist != stdin)
		fclose(flist);	// the list file was previously leaked
}
int get_results(struct link *mpi_link, struct itable *active_list, struct list *complete_list, int timeout)
{
	char line[MPI_QUEUE_LINE_MAX];
	int num_results, n = 0;
	int stoptime = time(0) + timeout;

	debug(D_MPI, "Getting any results\n");
	link_putliteral(mpi_link, "get results\n", stoptime);
	if(link_readline(mpi_link, line, sizeof(line), stoptime)) {
		debug(D_MPI, "received: %s\n", line);
		sscanf(line, "num results %d", &num_results);
	} else {
		return 0;
	}
	debug(D_MPI, "%d results available\n", num_results);

	while(n++ < num_results && link_readline(mpi_link, line, sizeof(line), stoptime)) {
		struct mpi_queue_task *t;
		int taskid, status, result, result_length;

		sscanf(line, "result %d %d %d %d", &taskid, &status, &result, &result_length);
		t = itable_remove(active_list, taskid);
		if(!t) {
			debug(D_NOTICE, "Invalid taskid (%d) returned\n", taskid);
			return -1;
		}

		if(result_length) {
			t->output = malloc(result_length + 1);
			link_read(mpi_link, t->output, result_length, time(0) + timeout);
			t->output[result_length] = 0;
		}

		t->status = MPI_QUEUE_TASK_STATUS_COMPLETE;
		t->return_status = result;
		t->result = status;
		list_push_tail(complete_list, t);
	}

	return num_results;
}