/*
 * Main migration function.
 *
 * It manages every step of the migration, and the deletion of the old
 * objects if the migration was a success.
 */
int migrate(struct cloudmig_ctx* ctx)
{
    int nb_failures = 0;
    int ret;

    cloudmig_log(DEBUG_LVL, "Starting migration...\n");

    for (int i = 0; i < ctx->options.nb_threads; ++i)
    {
        ctx->tinfos[i].stop = false;
        // pthread_create returns 0 on success or an error number (never -1).
        if (pthread_create(&ctx->tinfos[i].thr, NULL,
                           (void*(*)(void*))migrate_worker_loop,
                           &ctx->tinfos[i]) != 0)
        {
            PRINTERR("Could not start worker thread %i/%i",
                     i, ctx->options.nb_threads);
            nb_failures = 1;
            // Stop the already-running threads before attempting to join them.
            migration_stop(ctx);
            break;
        }
    }

    /*
     * Join all the threads and accumulate their error counts.
     */
    for (int i = 0; i < ctx->options.nb_threads; i++)
    {
        void *errcount = NULL;
        ret = pthread_join(ctx->tinfos[i].thr, &errcount);
        if (ret != 0)
            // pthread_join does not set errno; it returns the error code.
            cloudmig_log(WARN_LVL, "Could not join thread %i: %s.\n",
                         i, strerror(ret));
        else
            nb_failures += (int)(intptr_t)errcount;
    }

    // In any case, attempt to upload the status digest before doing
    // anything else.
    (void)status_digest_upload(ctx->status->digest);

    // The transfer completed successfully only if no failure was recorded.
    if (nb_failures == 0)
    {
        cloudmig_log(INFO_LVL, "Migration finished with success!\n");
        if (ctx->tinfos[0].config_flags & DELETE_SOURCE_DATA)
            delete_source(ctx);
    }
    else
    {
        PRINTERR("An error occurred during the migration:"
                 " at least one file could not be transferred\n", 0);
    }

    return nb_failures;
}
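/*
 * The worker entry point is not shown in this file. The join loop above
 * only relies on one contract: each thread returns its own failure count
 * as its thread exit value, packed into the returned pointer. A minimal
 * sketch under that assumption follows; the `struct cloudmig_tinfo` name,
 * its fields other than `stop`, and the helpers `next_entry_to_migrate()`
 * and `transfer_file()` are hypothetical.
 */
static void *migrate_worker_loop(struct cloudmig_tinfo *tinfo)
{
    intptr_t errcount = 0;

    // Keep migrating entries until asked to stop or nothing is left.
    while (!tinfo->stop)
    {
        void *entry = next_entry_to_migrate(tinfo);   /* hypothetical */
        if (entry == NULL)
            break;
        if (transfer_file(tinfo, entry) != 0)         /* hypothetical */
            ++errcount;
    }
    // The count travels back to migrate() through pthread_join's
    // second argument.
    return (void*)errcount;
}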
int main(int argc, char *argv[])
{
    MPI_Status status;

    srand(time(NULL));
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &_numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &_rank);

    if (create_graph() != 0)
    {
        printf("This program needs 6 nodes.\n");
        MPI_Finalize();  // finalize before bailing out
        return 1;
    }

    int running = 1;
    // i: iterator; neighbors: array of this node's neighbors in the graph.
    int i, rank, neighbor_count, *neighbors;
    int participated = 0;
    int father = -1;
    int next_recv;

    MPI_Comm_rank(graph_comm, &rank);
    MPI_Graph_neighbors_count(graph_comm, rank, &neighbor_count);
    neighbors = malloc(neighbor_count * sizeof(int));
    MPI_Graph_neighbors(graph_comm, rank, neighbor_count, neighbors);

    printf("node %d : start! my neighbors : ", _rank);
    for (i = 0; i < neighbor_count; i++)
        printf("%d ", neighbors[i]);
    printf("\n");

    if (_rank == 0)
    {
        printf("******************start algorithm***********************\n");
        participated = 1;
        next_recv = pick_recv(neighbors, neighbor_count);
        printf("Master %d : send tag : forward, to node %d\n", _rank, next_recv);
        MPI_Send(NULL, 0, MPI_INT, next_recv, FORWARD, graph_comm);
    }

    while (running)
    {
        MPI_Recv(NULL, 0, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG,
                 graph_comm, &status);
        // Debug trace of every received message:
        //printf("node %d receive message tag : %s from node %d\n", _rank,
        //       (status.MPI_TAG == FORWARD ? "forward" : "return"),
        //       status.MPI_SOURCE);
        switch (status.MPI_TAG)
        {
        case FORWARD:
            // The sender holds the token now; drop it from our neighbor list.
            neighbor_count = delete_source(neighbors, neighbor_count,
                                           status.MPI_SOURCE);
            if (participated == 0 && neighbor_count != 0)
            {
                // First visit: remember who sent the token, then forward it.
                father = status.MPI_SOURCE;
                participated = 1;
                printf("node %d : my neighbors : ", _rank);
                for (i = 0; i < neighbor_count; i++)
                    printf("%d ", neighbors[i]);
                printf("\n");
                next_recv = pick_recv(neighbors, neighbor_count);
                printf("node %d : send tag : forward, to node %d\n",
                       _rank, next_recv);
                MPI_Send(NULL, 0, MPI_INT, next_recv, FORWARD, graph_comm);
            }
            else
            {
                // Already visited (or no neighbor left): bounce the token back.
                printf("node %d : send tag : return, to node %d\n",
                       _rank, status.MPI_SOURCE);
                MPI_Send(NULL, 0, MPI_INT, status.MPI_SOURCE, END_RETURN,
                         graph_comm);
                if (participated == 0)
                    running = 0;
            }
            break;
        case END_RETURN:
            neighbor_count = delete_source(neighbors, neighbor_count,
                                           status.MPI_SOURCE);
            if (neighbor_count == 0)
            {
                // All neighbors handled: report back to the father, or stop
                // if we are the master that started the traversal.
                if (father == -1)
                    printf("Master : End of program!\n");
                else
                {
                    printf("node %d : send tag : return, to node %d\n",
                           _rank, father);
                    MPI_Send(NULL, 0, MPI_INT, father, END_RETURN, graph_comm);
                }
                participated = 0;
                running = 0;
            }
            else
            {
                next_recv = pick_recv(neighbors, neighbor_count);
                printf("node %d : send tag : forward, to node %d\n",
                       _rank, next_recv);
                MPI_Send(NULL, 0, MPI_INT, next_recv, FORWARD, graph_comm);
            }
            break;
        }
    }

    printf("node %d : end of work\n", _rank);
    free(neighbors);
    MPI_Finalize();
    return 0;
}
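/*
 * create_graph(), pick_recv(), delete_source() and the FORWARD/END_RETURN
 * tags are defined elsewhere in the project. The sketches below are
 * assumptions reconstructed from how main() uses them: only the 6-node
 * check and the helper behaviours the traversal relies on are implied by
 * the code above; the tag values and the exact edge set of the graph are
 * invented for illustration.
 */
#define FORWARD    1   /* assumed tag values */
#define END_RETURN 2

static MPI_Comm graph_comm;

/* Build a fixed 6-node graph topology. The edge set here is arbitrary,
 * but symmetric and connected, as MPI_Graph_create requires. */
static int create_graph(void)
{
    if (_numprocs != 6)
        return 1;
    int index[6] = {2, 5, 7, 10, 12, 14};  /* cumulative degrees */
    int edges[14] = {1, 2,  0, 2, 3,  0, 1,  1, 4, 5,  3, 5,  3, 4};
    return MPI_Graph_create(MPI_COMM_WORLD, 6, index, edges, 0,
                            &graph_comm) != MPI_SUCCESS;
}

/* Pick a random neighbor among those still in the array. */
static int pick_recv(int *neighbors, int neighbor_count)
{
    return neighbors[rand() % neighbor_count];
}

/* Remove `source` from the neighbor array if present and return the new
 * count; the order of the remaining entries is not preserved. */
static int delete_source(int *neighbors, int neighbor_count, int source)
{
    for (int i = 0; i < neighbor_count; i++)
    {
        if (neighbors[i] == source)
        {
            neighbors[i] = neighbors[neighbor_count - 1];
            return neighbor_count - 1;
        }
    }
    return neighbor_count;
}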