int main(int argc, char *argv[]) { int myrank; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); printf("%2d - started\n", myrank); if (myrank == 0) { //Master, i.e. distributor of tasks MPI_Comm_size(MPI_COMM_WORLD, &ITEMS_TO_BE_SENT); int number_of_processes = ITEMS_TO_BE_SENT; ITEMS_TO_BE_SENT--; //number of processes that are supposed to receive a task if(argc == 1) ftw(".", parse, 1); else ftw(argv[1], parse, 1); kill_slaves(); sleep(1); //just to make sure that files are closed properly... merge_files(number_of_processes); fprintf(stderr, "Finished merging\n"); } else { run_slave(myrank); } MPI_Finalize(); return 0; }
int main(int argc, char *argv[]) { int myrank; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &myrank); printf("%2d - started\n", myrank); if (myrank == 0) { //Master, i.e. distributor of tasks logfile = fopen("logfile.txt", "w"); if (!logfile) {fprintf(stderr, "Can't open logfile, exiting\n"); exit(1);} MPI_Comm_size(MPI_COMM_WORLD, &ITEMS_TO_BE_SEND); ITEMS_TO_BE_SEND--; //number of processes that are supposed to receive a task if(argc == 1) ftw(".", parse, 1); else ftw(argv[1], parse, 1); kill_slaves(); fclose(logfile); } else { run_slave(myrank); } return 0; }
/* Runs one level of the numerical cascade homotopy in parallel.
 * The manager (myid == 0) reads the embedded start system from infile,
 * broadcasts the problem data, drives the worker pool via send_collect(),
 * and writes results to outfile; every other rank runs the slave loop.
 *
 * Parameters mirror the surrounding job-scheduling machinery: ws/num_group/
 * stages/cnt_stage/cnt_step/ie/listpointer/TaskCount/NUM_GROUP/cnt_index/
 * update_flag carry the scheduler state; dimension receives the solution
 * dimension read from the input file.
 *
 * Fix vs. original: three occurrences of the statement `fflush;` — which
 * merely evaluates the function pointer and flushes nothing — replaced with
 * `fflush(stdout);`, matching the intent after the preceding printf calls.
 * Unused locals `deg` and `i` removed. */
void cascade_one_level_down(int myid, int numbprocs, char *infile,
                            char *outfile, WSET *ws, int num_group,
                            int stages, int cnt_stage, int cnt_step,
                            IDLE_ELEMENT **ie, LISTELEMENT **listpointer,
                            int *TaskCount, int NUM_GROUP, int *dimension,
                            int *cnt_index, int update_flag)
{
    int n, nbsols, dim, fail, n1, n2, cd;
    double vcp[34]; /* continuation parameters, broadcast to all ranks */

    if (myid == 0) {
        printf("manager is in inside of cascade_one_level_down\n");
        read_dimension_of_system((int)strlen(infile), infile, &n);
    }
    dimension_broadcast(myid, &n);

    if (myid == 0) {
        fail = syscon_clear_symbol_table();
        fail = read_named_start_without_solutions((int)strlen(infile), infile);
        fail = copy_start_system_to_container();
        fail = syscon_sort_embed_symbols(&dim);
        printf("the top dimension is %d\n", dim);
        fail = copy_container_to_start_system();
        fail = solcon_scan_solution_banner();
        fail = solcon_read_solution_dimensions(&nbsols, dimension);
    } else {
        /* initialize system container */
        fail = syscon_initialize_number_of_standard_polynomials(n);
    }

    monomials_broadcast(myid, n); /* broadcast container */
    if (myid != 0) {
        /* copy result of broadcast */
        fail = copy_container_to_start_system();
        fail = syscon_clear_standard_system(); /* clear system container */
    }
    fail = create_cascade_homotopy();
    MPI_Barrier(MPI_COMM_WORLD);

    if (myid == 0) {
        if (v > 3) printf("# paths to track : %d\n", nbsols);
        fail = define_output_file_with_string((int)strlen(outfile), outfile);
        fail = write_standard_target_system();
        fail = write_standard_start_system();
        fail = retrieve_continuation_parameters(vcp);
        write_solution_banner_to_defined_output_file(nbsols, n);
        printf("\nSee the output file for results...\n\n");
    }
    MPI_Bcast(&nbsols, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(vcp, 34, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    if (myid != 0) {
        fail = set_continuation_parameters(vcp);
    }

    if (myid == 0) {
        n1 = 0;
        n2 = 0;
        cd = n;
        send_collect(ws, num_group, stages, cnt_stage, numbprocs, cnt_step,
                     ie, listpointer, TaskCount, NUM_GROUP, cnt_index,
                     update_flag, n1, n2, cd);
        if (update_flag) *cnt_index = *cnt_index + 1;
        if (v > 3) {
            printf("after send_collect \n");
            printf("%d JOBs in listpointer. \n", length(*listpointer));
            printf("%d idles\n", num_idle(*ie));
            printf("indexing........ with cnt_index=%d\n", *cnt_index);
        }
    } else {
        if (v > 3) printf("node %d will run run_slave \n", myid);
        fflush(stdout); /* was `fflush;` — a no-op statement */
        run_slave(myid);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (myid == 0) {
        if (v > 3) printf("manager clear data \n");
        fail = clear_data();
        if (fail > 0) printf("manager fail to clear data.\n");
        fflush(stdout); /* was `fflush;` */
        fail = solcon_close_solution_input_file(0);
        if (fail > 0) printf("fail to close solution input file.\n");
        fail = solcon_clear_standard_solutions();
        if (fail > 0) printf("fail to clear solution container.\n");
        fail = close_output_file();
        if (fail > 0) printf("fail to close output file. \n");
        fflush(stdout); /* was `fflush;` */
    } else {
        if (v > 3) printf("node %d clear data \n", myid);
        fail = clear_data();
        if (fail > 0) printf("node %d fail to clear data.\n", myid);
        fflush(stdout); /* was `fflush;` */
    }
}
/* Starts a diagonal homotopy to intersect two witness sets in parallel.
 * The manager (myid == 0) reads the two witness sets from name_1/name_2,
 * builds the diagonal homotopy, broadcasts the problem data, drives the
 * worker pool via send_collect(), and afterwards re-reads the resulting
 * witness set from outfile into *dim and deg; every other rank runs the
 * slave loop. *expected receives the expected dimension dim1+dim2-NUM_VARS.
 *
 * Fixes vs. original:
 *  - Mis-encoded characters: `°1,°2` and a trailing `°` restored to
 *    `&deg1,&deg2` and `&deg` (the locals deg1/deg2/deg are declared here
 *    and deg1*deg2 is used immediately after; the `&deg` entity had been
 *    rendered as the degree sign during a text conversion).
 *  - A string literal split across a raw newline in the "indexing" printf
 *    rejoined (matches the identical printf in cascade_one_level_down).
 *  - Two no-op `fflush;` statements replaced with `fflush(stdout);`.
 *  - Unused locals `i`, `slv`, `count` removed. */
void start_a_diagonal_homotopy(int myid, int numbprocs, char *name_1,
                               char *name_2, char *outfile, WSET *ws,
                               int num_group, int stages, int cnt_stage,
                               int cnt_step, IDLE_ELEMENT **ie,
                               LISTELEMENT **listpointer, int *TaskCount,
                               int NUM_GROUP, int NUM_VARS, int *cnt_index,
                               int *expected, int *dim, int update_flag)
{
    int n, deg, n1, n2, dim1, dim2, deg1, deg2, cd, nbsols, fail;
    double vcp[34]; /* continuation parameters, broadcast to all ranks */

    if (myid == 0) {
        printf("manager is in inside of start_a_diagonal_homotopy\n");
        fail = read_two_witness_sets_from_file(&n1, &n2, &dim1, &dim2,
                                               &deg1, &deg2,
                                               name_1, name_2, &cd);
        n = cd; /* dimension */
        nbsols = deg1 * deg2;
        printf("#paths to track: %d\n", nbsols);
        fail = define_output_file_with_string((int)strlen(outfile), outfile);
        fail = standard_diagonal_homotopy(dim1, dim2);
        if (fail == 0) {
            fail = write_standard_target_system();
            fail = write_standard_start_system();
        }
        fail = retrieve_continuation_parameters(vcp);
        write_solution_banner_to_defined_output_file(nbsols, n);
        printf("\nSee the output file %s for results...\n\n", outfile);
        *expected = dim1 + dim2 - NUM_VARS;
        printf("expected dimension=%d\n", *expected);
    }
    dimension_broadcast(myid, &n);
    MPI_Bcast(&nbsols, 1, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Bcast(vcp, 34, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    if (myid != 0) {
        fail = set_continuation_parameters(vcp);
    }
    fail = homotopy_broadcast(myid, n);
    /* fixed gamma constant for the homotopy, identical on every rank */
    fail = create_homotopy_with_given_gamma(-0.824263733224601,
                                            0.566206056211557);

    if (myid == 0) {
        send_collect(ws, num_group, stages, cnt_stage, numbprocs, cnt_step,
                     ie, listpointer, TaskCount, NUM_GROUP, cnt_index,
                     update_flag, n1, n2, cd);
        if (v > 3) {
            printf("after send_collect \n");
            printf("%d JOBs in listpointer. \n", length(*listpointer));
            printf("%d idles\n", num_idle(*ie));
            printf("indexing........ with cnt_index=%d\n", *cnt_index);
        }
    } else {
        if (v > 3) printf("node %d will run run_slave \n", myid);
        fflush(stdout); /* was `fflush;` — a no-op statement */
        run_slave(myid);
    }
    MPI_Barrier(MPI_COMM_WORLD);

    if (myid == 0) {
        if (v > 3) printf("manager clear data \n");
        fail = clear_data();
        if (fail > 0) printf("manager fail to clear data.\n");
        fail = clear_homotopy();
        if (fail > 0) printf("manager fail to clear homotopy.\n");
        fail = solcon_close_solution_input_file(1);
        if (fail > 0) printf("fail to close witness set 1.\n");
        fail = solcon_close_solution_input_file(2);
        if (fail > 0) printf("fail to close witness set 2.\n");
        fail = solcon_clear_standard_solutions();
        if (fail > 0) printf("fail to clear solution container.\n");
        fail = close_output_file();
        if (fail > 0) printf("fail to close output file. \n");
        /* reload the freshly written witness set for the next stage */
        fail = read_witness_set_from_file((int)strlen(outfile), outfile,
                                          &n, dim, &deg);
        fail = solcon_clear_standard_solutions();
        fail = syscon_clear_standard_system();
        printf("end of start_a_diagonal_homotopy\n");
        fflush(stdout); /* was `fflush;` */
    } else {
        if (v > 3) printf("node %d clear data.\n", myid);
        fail = clear_data();
        if (fail > 0) printf("node %d fail to clear data.\n", myid);
        fail = clear_homotopy();
        if (fail > 0) printf("node %d fail to clear homotopy.\n", myid);
    }
}
// Dispatch this process to its role: the master rank drives the job,
// every other rank executes the slave loop.
void mpijob_manager::run()
{
    if (!is_master()) {
        run_slave();
        return;
    }
    run_master();
}