/*
 * Program entry point for the parallel driver.
 *
 * Usage: prog [work_dir]
 *   work_dir - directory the driver operates in; defaults to "." when
 *              no argument is supplied.
 *
 * Initializes the process group, runs driver() on every process, and
 * reports per-process elapsed wall-clock time.  Returns 0.
 */
int main(int argc, char **argv)
{
    char *work_dir;
    double start, finish;

    init_procs(&argc, &argv);   /* Initialization */
    num_proc = proc_num();      /* Number of procs */
    n_proc = proc_id();         /* Proc id */

    /* Validate argc BEFORE reading argv[1]; the original read argv[1]
     * first and relied on argv[argc] == NULL to patch it up afterwards. */
    work_dir = (argc > 1) ? argv[1] : ".";

    if (n_proc == 0) {
        fprintf(stdout, "Running on %d processors\n", num_proc);
        fflush(stdout);
    }

    start = wclock();
    driver(work_dir);
    finish = wclock();

    fini_procs();               /* Wrap up */

    /* Timing is printed after fini_procs(), matching the original
     * ordering (the stop timestamp itself is taken before wrap-up). */
    fprintf(stdout, "Elapsed time on proc %3d: %le (%le %le)\n",
            n_proc, finish - start, start, finish);
    fflush(stdout);

    return 0;
}
/* main process, should never exit.
 *
 * Interactive kernel shell: prints a prompt, reads a command via the
 * global input machinery, and spawns kernel processes for recognized
 * commands.  Loops forever.
 *
 * NOTE(review): this relies on several globals not visible here --
 * `index` (number of characters read, presumably), `enable` (input
 * enable flag), and `character` (the input buffer) -- TODO confirm
 * against their definitions.
 *
 * NOTE(review): `strcmp` here takes THREE arguments and is treated as
 * truthy on a match -- the opposite of the C standard library's
 * strcmp.  It must be a project-local comparison routine; verify its
 * contract before changing any of these branches.
 */
void shell() {
    int had_print_s = 0;  /* NOTE(review): never used in this body */
    /* Command patterns stored as int arrays (not NUL-terminated C
     * strings), matching whatever element type `character` uses. */
    int hello_str[5] = {'h', 'e', 'l', 'l', 'o'};
    int hi_str[2] = {'h', 'i'};
    int two_str[3] = {'t', 'w', 'o'};
    while(1) {
        // output the 'root@'.
        printf("root@:");
        // get the input.
        index = -1;   /* reset input cursor before reading */
        enable = 1;   /* allow the input handler to run */
        input();
        // create new process accoding to the command.
        if (strcmp(hello_str, character, 5)) {
            create_kernel_proc(hello, 100);
        }
        else if (strcmp(hi_str, character, 2)) {
            create_kernel_proc(hi, 100);
        }
        else if (strcmp(two_str, character, 3)) {
            /* "two" deliberately spawns both processes. */
            create_kernel_proc(hi, 100);
            create_kernel_proc(hello, 100);
        }
        else if (index >0) {
            /* Something was typed but matched no command. */
            printf("unknow command\n");
        }
        else {
            /* Empty input: just re-prompt. */
        }
        // wait for the other processess to finished.
        /* NOTE(review): busy-waits until only this shell and (presumably)
         * the idle process remain -- confirm the meaning of proc_num()==2. */
        while(proc_num() != 2) {
        }
    }
}
int initParallelEnv(){ omp_set_num_threads(THREADS); /* Setup MPI programming environment */ MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &threadSupport); comm = MPI_COMM_WORLD; MPI_Comm_size(comm, &numMPIprocs); MPI_Comm_rank(comm, &myMPIRank); /*Find the number of bytes for an int */ sizeInteger = sizeof(int); /* Find the processor name of each MPI process */ MPI_Get_processor_name(myProcName, &procNameLen); /* Use processor name to create a communicator * across node boundaries. */ setupCommunicators(); /* setup OpenMP programming environment */ #pragma omp parallel shared(numThreads,globalIDarray,myMPIRank) { numThreads = omp_get_num_threads(); myThreadID = omp_get_thread_num(); /* Allocate space for globalIDarray */ #pragma omp single { globalIDarray = (int *)malloc(numThreads * sizeof(int)); } /*calculate the globalID for each thread */ globalIDarray[myThreadID] = (myMPIRank * numThreads) + myThreadID; } MPI_Barrier(comm); gaspi_config_t config; GASPI(config_get(&config)); config.qp_count = THREADS; GASPI(config_set(config)); /* GASPI setup */ GASPI(proc_init(GASPI_BLOCK)); gaspi_rank_t totalRanks; GASPI(proc_num(&totalRanks)); gaspi_rank_t rank; GASPI(proc_rank(&rank)); gaspi_number_t q_num; GASPI(queue_num(&q_num)); assert (q_num == THREADS); GASPI(barrier (GASPI_GROUP_ALL, GASPI_BLOCK)); // ok, we will continue to use the MPI ranks, just make sure GASPI and MPI ranks are identical // this is not guaranteed, so depending on the setup this may fail. assert (totalRanks == numMPIprocs); assert (rank == myMPIRank); /* set parallel info in benchmark report type */ setParallelInfo(numMPIprocs,threadSupport,numThreads); return 0; }