/*
 * Free a task and everything it owns: the command line, tag, captured
 * output, and both file lists (each entry's name string and the entry
 * struct itself).
 *
 * Safe to call with a NULL task (no-op).  Callers must not use `t`
 * after this returns.
 */
void mpi_queue_task_delete(struct mpi_queue_task *t)
{
	struct mpi_queue_file *tf;

	if(!t)
		return;

	/* free(NULL) is a no-op, so the individual NULL guards are unnecessary. */
	free(t->command_line);
	free(t->tag);
	free(t->output);

	if(t->input_files) {
		while((tf = list_pop_tail(t->input_files))) {
			free(tf->name);
			free(tf);
		}
		list_delete(t->input_files);
	}

	if(t->output_files) {
		while((tf = list_pop_tail(t->output_files))) {
			free(tf->name);
			free(tf);
		}
		list_delete(t->output_files);
	}

	free(t);
}
//return 1 if name was processed as special variable, 0 otherwise int dag_parse_process_special_variable(struct lexer_book *bk, struct dag_node *n, int nodeid, char *name, const char *value) { struct dag *d = bk->d; int special = 0; if(strcmp(RESOURCES_CATEGORY, name) == 0) { special = 1; /* If we have never seen this label, then create * a new category, otherwise retrieve the category. */ struct dag_task_category *category = dag_task_category_lookup_or_create(d, value); /* If we are parsing inside a node, make category * the category of the node, but do not update * the global task_category. Else, update the * global task category. */ if(n) { /* Remove node from previous category...*/ list_pop_tail(n->category->nodes); n->category = category; /* and add it to the new one */ list_push_tail(n->category->nodes, n); debug(D_DEBUG, "Updating category '%s' for rule %d.\n", value, n->nodeid); } else bk->category = category; } /* else if some other special variable .... */ /* ... */ return special; }
/*
 * Remove the element at `index` from the list and return its data
 * pointer, or NULL when the index is out of range (only reachable when
 * asserts are compiled out with NDEBUG).
 *
 * Head and tail removals are delegated to the pop helpers; interior
 * removals walk to the predecessor via __LIST_GET and unlink in place.
 */
void* list_remove (List* list, int32_t index)
{
	assert(NULL != list);
	assert(0 <= index);
	assert(index < list->count);

	if (0 == index) {
		return list_pop_head(list);
	} else if (index == list->count - 1) {
		return list_pop_tail(list);
	} else if (index >= list->count) {
		/* Unreachable while asserts are active; under NDEBUG this rejects
		 * out-of-range indices.  Fixed: previously returned `false`, which
		 * is an int constant, from a pointer-returning function. */
		return NULL;
	}

	ListItem* item = NULL;

	// we need the item previous to the removal index
	__LIST_GET(item, list, index - 1);

	ListItem* rem_item = item->next;
	void* data = rem_item->data;

	/* Unlink, shrink, and release the node; the caller owns `data`. */
	item->next = rem_item->next;
	list->count--;
	free(rem_item);

	return data;
}
/*--------------------------------------------
Author: Max Ashton
Description: remove element from tail then decrease list size
----------------------------------------------*/
struct_list_element* remove_element_tail( maStructList &list )
{
	struct_list_element* element = list_pop_tail( list );

	/* Nothing was removed, so the size must stay untouched. */
	if( element == NULL )
	{
		return NULL;
	}

	list._iListSize--;
	return element;
}
/*
 * Thread-safe variant of list_pop_tail: removes and returns the tail
 * element while holding the list's mutex.  The list must have been
 * created with a mutex attached.
 */
void* list_pop_tail_ts (List* list)
{
	assert(NULL != list);
	assert(NULL != list->mutex);

	pthread_mutex_lock(list->mutex);
	void* tail_data = list_pop_tail(list);
	pthread_mutex_unlock(list->mutex);

	return tail_data;
}
void dag_node_footprint_print(struct dag *d, struct dag_node *base, char *output) { struct dag_node *n; int tex = 0; char *retrn = "\n"; char *node_retrn = "\n"; char *delim = "\t"; if(tex){ retrn = "\\\\ \\hline \n\t"; node_retrn = "\\\\ \n\t"; delim = " & "; } FILE * out; out = fopen(output, "w"); if(tex) fprintf(out, "\\begin{tabular}{|cccccccc|}\n\t\\hline\n"); fprintf(out, "Node%s",delim); fprintf(out, "Foot-Min%s",delim); fprintf(out, "Foot-Max%s",delim); fprintf(out, "Residual%s",delim); fprintf(out, "Parent%s",delim); fprintf(out, "Child%s",delim); fprintf(out, "Desc-Min%s",delim); fprintf(out, "Desc-Max%s",node_retrn); fprintf(out, "Res Nodes%s%s%s%s%s%s%s%s", delim,delim,delim,delim,delim,delim,delim,retrn); for(n = d->nodes; n; n = n->next) { dag_node_print_footprint_node(n, out, retrn, node_retrn, delim); } fprintf(out, "Base %s %"PRIu64" %s %"PRIu64" %s %"PRIu64"%s%s%s%s%s", delim, base->footprint->footprint_min_size, delim, base->footprint->footprint_max_size, delim, base->footprint->residual_size, delim,delim,delim,delim,node_retrn); list_pop_tail(base->footprint->residual_nodes); dag_node_print_node_list(base->footprint->residual_nodes, out, delim); dag_node_print_file_set(base->footprint->footprint_min_files, out, delim); dag_node_print_file_set(base->footprint->footprint_max_files, out, delim); dag_node_print_file_set(base->footprint->residual_files, out, delim); fprintf(out,"%s%s%s%s", delim,delim,delim,retrn); if(tex) fprintf(out, "\\end{tabular}\n"); fclose(out); }
void lexer_concatenate_consecutive_literals(struct list *tokens) { struct list *tmp = list_create(); struct token *t, *prev = NULL; list_first_item(tokens); while((t = list_pop_head(tokens))) { if(t->type != TOKEN_LITERAL) { list_push_tail(tmp, t); continue; } prev = list_pop_tail(tmp); if(!prev) { list_push_tail(tmp, t); continue; } if(prev->type != TOKEN_LITERAL) { list_push_tail(tmp, prev); list_push_tail(tmp, t); continue; } char *merge = string_format("%s%s", prev->lexeme, t->lexeme); lexer_free_token(t); free(prev->lexeme); prev->lexeme = merge; list_push_tail(tmp, prev); } /* Copy to tokens, drop spaces. */ list_first_item(tmp); while((t = list_pop_head(tmp))) if(t->type != TOKEN_SPACE) { list_push_tail(tokens, t); } else { lexer_free_token(t); } list_delete(tmp); }
/*
 * Lex the entire contents of the secondary lexer book `bk_s` (inheriting
 * the parent's substitution mode), then prepend all of its tokens onto
 * the parent book `bk`'s token queue.
 */
void lexer_append_all_tokens(struct lexer_book *bk, struct lexer_book *bk_s)
{
	struct token *head_s;
	bk_s->substitution_mode = bk->substitution_mode;

	/* Tokenize bk_s to exhaustion, dispatching on the substitution mode. */
	while( !bk_s->eof ) {
		if(lexer_next_peek(bk_s) == CHAR_EOF) {
			/* Found end of string while completing command */
			bk_s->lexeme_end++;
			bk_s->eof = 1;
		} else {
			switch(bk_s->substitution_mode) {
			case CHAR_EOF:
			case COMMAND:
				head_s = lexer_read_command_argument(bk_s);
				break;
			case FILES:
				head_s = lexer_read_file(bk_s);
				break;
			case SYNTAX:
				/* Read an expandable expression and pack it as one literal. */
				lexer_read_expandable(bk_s, CHAR_EOF);
				head_s = lexer_pack_token(bk_s, LITERAL);
				break;
			default:
				/* Unknown mode: consume a line and produce no token. */
				lexer_read_line(bk_s);
				continue;
				break;
			}
			if(head_s)
				lexer_push_token(bk_s, head_s);
		}
	}

	/* Transfer bk_s's queue onto the front of bk's queue.  Popping from
	 * the tail while pushing at the head preserves bk_s's token order,
	 * leaving the appended tokens ahead of bk's existing ones. */
	while( (head_s = list_pop_tail(bk_s->token_queue)) != NULL )
		list_push_head(bk->token_queue, head_s);
}
/*
 * Release a partially- or fully-completed disk measurement state:
 * close any directories still queued for traversal, free their name
 * strings and queue entries, then free the list and the state itself.
 *
 * Safe to call with NULL (no-op).
 */
void path_disk_size_info_delete_state(struct path_disk_size_info *state)
{
	if(!state)
		return;

	if(state->current_dirs) {
		struct DIR_with_name *tail;
		while((tail = list_pop_tail(state->current_dirs))) {
			if(tail->dir)
				closedir(tail->dir);
			/* free(NULL) is a no-op, so no guard is needed on the name. */
			free(tail->name);
			free(tail);
		}
		list_delete(state->current_dirs);
	}

	free(state);
}
int path_disk_size_info_get_r(const char *path, int64_t max_secs, struct path_disk_size_info **state) { int64_t start_time = time(0); int result = 0; if(!*state) { /* if state is null, there is no state, and path is the root of the measurement. */ *state = calloc(1, sizeof(struct path_disk_size_info)); } struct path_disk_size_info *s = *state; /* shortcut for *state, so we do not need to type (*state)->... */ /* if no current_dirs, we begin a new measurement. */ if(!s->current_dirs) { s->complete_measurement = 0; struct DIR_with_name *here = malloc(sizeof(struct DIR_with_name)); if((here->dir = opendir(path))) { here->name = xxstrdup(path); s->current_dirs = list_create(0); s->size_so_far = 0; s->count_so_far = 1; /* count the root directory */ list_push_tail(s->current_dirs, here); } else { debug(D_DEBUG, "error reading disk usage on directory: %s.\n", path); s->size_so_far = -1; s->count_so_far = -1; s->complete_measurement = 1; result = -1; goto timeout; } } struct DIR_with_name *tail; while((tail = list_peek_tail(s->current_dirs))) { struct dirent *entry; struct stat file_info; while((entry = readdir(tail->dir))) { if( strcmp(".", entry->d_name) == 0 || strcmp("..", entry->d_name) == 0) continue; char composed_path[PATH_MAX]; if(entry->d_name[0] == '/') { strncpy(composed_path, entry->d_name, PATH_MAX); } else { snprintf(composed_path, PATH_MAX, "%s/%s", tail->name, entry->d_name); } if(lstat(composed_path, &file_info) < 0) { if(errno == ENOENT) { /* our DIR structure is stale, and a file went away. We simply do nothing. 
*/ } else { debug(D_DEBUG, "error reading disk usage on '%s'.\n", path); result = -1; } continue; } s->count_so_far++; if(S_ISREG(file_info.st_mode)) { s->size_so_far += file_info.st_size; } else if(S_ISDIR(file_info.st_mode)) { struct DIR_with_name *branch = malloc(sizeof(struct DIR_with_name)); if((branch->dir = opendir(composed_path))) { /* future while we'll read from the branch */ branch->name = xxstrdup(composed_path); list_push_head(s->current_dirs, branch); } else { free(branch); result = -1; continue; } } else if(S_ISLNK(file_info.st_mode)) { /* do nothing, avoiding infinite loops. */ } if(max_secs > -1) { if( time(0) - start_time >= max_secs ) { goto timeout; } } } /* we are done reading a complete directory, and we go to the next in the queue */ tail = list_pop_tail(s->current_dirs); closedir(tail->dir); free(tail->name); free(tail); } list_delete(s->current_dirs); s->current_dirs = NULL; /* signal that a new measurement is needed, if state structure is reused. */ s->complete_measurement = 1; timeout: if(s->complete_measurement) { /* if a complete measurement has been done, then update * for the found value */ s->last_byte_size_complete = s->size_so_far; s->last_file_count_complete = s->count_so_far; } else { /* else, we hit a timeout. measurement reported is conservative, from * what we knew, and know so far. */ s->last_byte_size_complete = MAX(s->last_byte_size_complete, s->size_so_far); s->last_file_count_complete = MAX(s->last_file_count_complete, s->count_so_far); } return result; }
/*
 * Wavefront driver: parse options, estimate run times to pick an
 * execution mode (multicore vs distributed), then run the workload as a
 * wavefront of batch jobs until all cells complete or the run aborts.
 *
 * Usage (after options): wavefront <function> <xsize> <ysize>
 * Relies on file-scope globals (wavefront_mode, block_size, batch_q,
 * progress_log_file, abort_mode, timing accumulators, ...).
 */
int main( int argc, char *argv[] )
{
	signed char c;
	const char *progname = "wavefront";

	debug_config(progname);
	progress_log_file = stdout;

	struct option long_options[] = {
		{"help", no_argument, 0, 'h'},
		{"version", no_argument, 0, 'v'},
		{"debug", required_argument, 0, 'd'},
		{"jobs", required_argument, 0, 'n'},
		{"block-size", required_argument, 0, 'b'},
		{"debug-file", required_argument, 0, 'o'},
		{"log-file", required_argument, 0, 'l'},
		{"bitmap", required_argument, 0, 'B'},
		{"bitmap-interval", required_argument, 0, 'i'},
		{"auto", no_argument, 0, 'A'},
		{"local", no_argument, 0, 'L'},
		{"batch-type", required_argument, 0, 'T'},
		{"verify", no_argument, 0, 'V'},
		{0,0,0,0}
	};

	while((c=getopt_long(argc,argv,"n:b:d:o:l:B:i:qALDT:VX:Y:vh", long_options, NULL)) > -1) {
		switch(c) {
		case 'n':
			manual_max_jobs_running = atoi(optarg);
			break;
		case 'b':
			manual_block_size = atoi(optarg);
			break;
		case 'd':
			debug_flags_set(optarg);
			break;
		case 'o':
			debug_config_file(optarg);
			break;
		case 'B':
			progress_bitmap_file = optarg;
			break;
		case 'i':
			progress_bitmap_interval = atoi(optarg);
			break;
		case 'l':
			progress_log_file = fopen(optarg,"w");
			if(!progress_log_file) {
				fprintf(stderr,"couldn't open %s: %s\n",optarg,strerror(errno));
				return 1;
			}
			break;
		case 'A':
			wavefront_mode = WAVEFRONT_MODE_AUTO;
			break;
		case 'L':
			wavefront_mode = WAVEFRONT_MODE_MULTICORE;
			break;
		case 'T':
			wavefront_mode = WAVEFRONT_MODE_DISTRIBUTED;
			batch_system_type = batch_queue_type_from_string(optarg);
			if(batch_system_type==BATCH_QUEUE_TYPE_UNKNOWN) {
				fprintf(stderr,"unknown batch system type: %s\n",optarg);
				exit(1);
			}
			break;
		case 'V':
			verify_mode = 1;
			break;
		case 'X':
			xstart = atoi(optarg);
			break;
		case 'Y':
			ystart = atoi(optarg);
			break;
		case 'v':
			cctools_version_print(stdout, progname);
			exit(0);
			break;
		case 'h':
			show_help(progname);
			exit(0);
			break;
		}
	}

	cctools_version_debug(D_DEBUG, argv[0]);

	/* Three positional arguments required: function, xsize, ysize. */
	if( (argc-optind<3) ) {
		show_help(progname);
		exit(1);
	}

	function = argv[optind];
	xsize=atoi(argv[optind+1]);
	ysize=atoi(argv[optind+2]);
	total_cells = xsize*ysize;

	if(!verify_mode && !check_configuration(function,xsize,ysize)) exit(1);

	int ncpus = load_average_get_cpus();

	/* Model the workload's cost to choose a mode and block size, unless the
	 * user already forced multicore mode. */
	if(wavefront_mode!=WAVEFRONT_MODE_MULTICORE) {
		double task_time = measure_task_time();
		printf("Each function takes %.02lfs to run.\n",task_time);

		block_size = find_best_block_size(xsize,1000,2,task_time,average_dispatch_time);
		double distributed_time = wavefront_distributed_model(xsize,1000,2,task_time,block_size,average_dispatch_time);
		double multicore_time = wavefront_multicore_model(xsize,ncpus,task_time);
		double ideal_multicore_time = wavefront_multicore_model(xsize,xsize,task_time);
		double sequential_time = wavefront_multicore_model(xsize,1,task_time);

		printf("---------------------------------\n");
		printf("This workload would take:\n");
		printf("%.02lfs sequentially\n",sequential_time);
		printf("%.02lfs on this %d-core machine\n",multicore_time,ncpus);
		printf("%.02lfs on a %d-core machine\n",ideal_multicore_time,xsize);
		printf("%.02lfs on a 1000-node distributed system with block size %d\n",distributed_time,block_size);
		printf("---------------------------------\n");

		if(wavefront_mode==WAVEFRONT_MODE_AUTO) {
			if(multicore_time < distributed_time*2) {
				wavefront_mode = WAVEFRONT_MODE_MULTICORE;
			} else {
				wavefront_mode = WAVEFRONT_MODE_DISTRIBUTED;
			}
		}
	}

	if(wavefront_mode==WAVEFRONT_MODE_MULTICORE) {
		batch_system_type = BATCH_QUEUE_TYPE_LOCAL;
		max_jobs_running = ncpus;
	} else {
		max_jobs_running = 1000;
	}

	/* Explicit -b / -n options override the modeled values. */
	if(manual_block_size!=0) {
		block_size = manual_block_size;
	}

	if(manual_max_jobs_running!=0) {
		max_jobs_running = manual_max_jobs_running;
	}

	if(wavefront_mode==WAVEFRONT_MODE_MULTICORE) {
		printf("Running in multicore mode with %d CPUs.\n",max_jobs_running);
	} else {
		printf("Running in distributed mode with block size %d on up to %d CPUs\n",block_size,max_jobs_running);
	}

	batch_q = batch_queue_create(batch_system_type);

	if(verify_mode) exit(0);

	struct bitmap * b = bitmap_create(xsize+1,ysize+1);
	struct list *ready_list = list_create();
	struct itable *running_table = itable_create(0);

	struct batch_job_info info;
	UINT64_T jobid;
	struct wavefront_task *task;

	wavefront_task_initialize(b,ready_list);

	printf("Starting workload...\n");

	fprintf(progress_log_file,"# elapsed time : waiting jobs / running jobs / cells complete (percent complete)\n");

	/* Main scheduling loop: keep up to max_jobs_running tasks in flight,
	 * harvesting completions and releasing newly-ready cells. */
	while(1) {
		if(abort_mode) {
			/* Drain the ready queue and cancel everything in flight. */
			while((task=list_pop_tail(ready_list))) {
				wavefront_task_delete(task);
			}

			itable_firstkey(running_table);
			while(itable_nextkey(running_table,&jobid,(void**)&task)) {
				batch_job_remove(batch_q,jobid);
			}
		}

		if(list_size(ready_list)==0 && itable_size(running_table)==0) break;

		while(1) {
			if(itable_size(running_table)>=max_jobs_running) break;

			task = list_pop_tail(ready_list);
			if(!task) break;

			jobid = wavefront_task_submit(task);
			if(jobid>0) {
				itable_insert(running_table,jobid,task);
				wavefront_task_mark_range(task,b,WAVEFRONT_TASK_STATE_RUNNING);
			} else {
				/* NOTE(review): abort() terminates the process here, making the
				 * sleep-and-requeue retry below unreachable.  Looks like debug
				 * code left in — confirm whether the retry path was intended. */
				abort();
				sleep(1);
				list_push_head(ready_list,task);
			}
		}

		save_status(b,ready_list,running_table);

		jobid = batch_job_wait(batch_q,&info);
		if(jobid>0) {
			task = itable_remove(running_table,jobid);
			if(task) {
				if(info.exited_normally && info.exit_code==0) {
					/* Update running averages used by the cost models. */
					total_dispatch_time += info.started-info.submitted;
					total_execute_time += MAX(info.finished-info.started,1);
					total_cells_complete+=task->width*task->height;
					total_jobs_complete++;
					average_dispatch_time = 1.0*total_dispatch_time / total_jobs_complete;
					average_task_time = 1.0*total_execute_time / total_cells_complete;
					wavefront_task_complete(b,ready_list,task);
				} else {
					printf("job %" PRIu64 " failed, aborting this workload\n",jobid);
					abort_mode = 1;
				}
			}
		}
	}

	save_status(b,ready_list,running_table);

	if(abort_mode) {
		printf("Workload was aborted.\n");
	} else {
		printf("Workload complete.\n");
	}

	return 0;
}
int test_lists() { int len = 0; // create a list of integers (li1) from 1 to SIZE DLL *li1 = list_sequence(1, SIZE); // copy the list to li2 DLL *li2 = list_copy(li1); // remove each individual item from left side of li2 and // append to right side of li3 (preserving order) DLL *li3 = list_new(); // compare li2 and li1 for equality if (!list_equal(li2, li1)) { print("li2!=li1\n"); while(1); } while (!list_empty(li2)) { list_push_tail(li3, list_pop_head(li2)); } // li2 must now be empty if (!list_empty(li2)) { print("li2 ne\n"); while(1); } // remove each individual item from right side of li3 and // append to right side of li2 (reversing list) while (!list_empty(li3)) { list_push_tail(li2, list_pop_tail(li3)); } // li3 must now be empty if (!list_empty(li3)) { print( "li3 ne\n"); while(1); } // reverse li1 in place list_reverse(li1); // check that li1's first item is now SIZE if (list_first(li1)->val != SIZE) { print( "li1 stw\n"); while(1); } // check that li1's last item is now 1 if (list_last(li1)->val != 1) { print( "lstw\n"); while(1); } // check that li2's first item is now SIZE if (list_first(li2)->val != SIZE) { print( "li2 stw\n"); while(1); } // check that li2's last item is now 1 if (list_last(li2)->val != 1) { print( "lstw\n"); while(1); } // check that li1's length is still SIZE if (list_length(li1) != SIZE) { print( "li1 szw\n"); while(1); } // compare li1 and li2 for equality if (!list_equal(li1, li2)) { print( "li1!=li2\n"); while(1); } len = list_length(li1); // free(li1); // free(li2); // free(li3); // return the length of the list printnum(len); print("\n"); return 0; }
/*
 * Work Queue workload simulator: replays a workload specification file
 * against a catalog-advertised master, submitting each task series at
 * its scheduled time and waiting for completions between submissions.
 *
 * Usage: work_queue_workload_simulator <workload_spec> <logfile> <proj_name>
 * Relies on file-scope globals `created_files` and `logfile`.
 */
int main(int argc, char *argv[])
{
	struct work_queue *q;
	int port = WORK_QUEUE_DEFAULT_PORT;

	if(argc != 4) {
		printf("Usage: work_queue_workload_simulator <workload_spec> <logfile> <proj_name> \n");
		exit(1);
	}

	struct list *specs = get_workload_specs(argv[1]);
	if(!specs) {
		printf("Failed to load a non-empty workload specification.\n");
		exit(1);
	}

	created_files = list_create();
	if(!created_files) {
		printf("Failed to allocate memory for a list to store created files.\n");
		exit(1);
	}

	// open log file
	logfile = fopen(argv[2], "a");
	if(!logfile) {
		printf("Couldn't open logfile %s: %s\n", argv[2], strerror(errno));
		exit(1);
	}

	q = work_queue_create(port);
	if(!q) {
		printf("couldn't listen on port %d: %s\n", port, strerror(errno));
		goto fail;
		/* NOTE(review): dead code — the goto above always transfers control. */
		exit(1);
	}

	printf("listening on port %d...\n", work_queue_port(q));

	// specifying the right modes
	work_queue_specify_master_mode(q, WORK_QUEUE_MASTER_MODE_CATALOG);
	work_queue_specify_name(q, argv[3]);
	work_queue_specify_estimate_capacity_on(q, 1); // report capacity on

	int time_elapsed = 0; // in seconds
	int series_id = 0;
	time_t start_time = time(0);
	log_work_queue_status(q);

	/* Main loop: peek the next task series (specs are ordered so that the
	 * tail is the next to run), submit it once its scheduled time arrives,
	 * otherwise service completions until that time. */
	while(1) {
		struct task_series *ts = (struct task_series *)list_peek_tail(specs);
		if(!ts) {
			while(!work_queue_empty(q)) { // wait until all tasks to finish
				wait_for_task(q, 5);
			}
			break;
		} else {
			time_elapsed = time(0) - start_time;
			int time_until_next_submit = ts->submit_time - time_elapsed;
			if(time_until_next_submit <=0) {
				/* Due now: take it off the queue and submit the whole series. */
				list_pop_tail(specs);
				printf("time elapsed: %d seconds\n", time_elapsed);
				if(!submit_task_series(q, ts, series_id)) {
					// failed to submit tasks
					fprintf(stderr, "Failed to submit tasks.\n");
					goto fail;
				}
				free(ts);
				series_id++;
			} else {
				/* Not due yet: wait on running tasks until the next submit
				 * time, then sleep away any remaining gap. */
				time_t stoptime = start_time + ts->submit_time;
				while(!work_queue_empty(q)) {
					int timeout = stoptime - time(0);
					if(timeout > 0) {
						wait_for_task(q, timeout);
					} else {
						break;
					}
				}

				time_t current_time = time(0);
				if(current_time < stoptime) {
					sleep(stoptime - current_time);
				}
			}
		}
	}

	printf("all tasks complete!\n");
	work_queue_delete(q);
	remove_created_files();
	fclose(logfile);
	return 0;

fail:
	remove_created_files();
	fclose(logfile);
	exit(1);
}