Example #1
/* Pick the worker to hand the next task to: scanning round-robin from
 * the worker chosen last time, return the first one whose queue is
 * empty, or failing that the one with the shortest queue. */
static int choice_worker(void)
{
    static int last_worker = 0;

    int least = 0;
    uint64_t least_num = UINT64_MAX;

    int i;
    for (i = 1; i <= settings.worker_proc_num; ++i)
    {
        /* Worker IDs are 1-based; start the scan at last_worker + 1. */
        int worker_id = (last_worker - 1 + i) % settings.worker_proc_num + 1;
        queue_t *queue = &settings.workers[worker_id].queue;
        uint64_t num = queue_num(queue);

        /* An idle worker wins immediately. */
        if (num == 0)
        {
            last_worker = worker_id;

            return worker_id;
        }

        /* Otherwise remember the least-loaded worker seen so far. */
        if (num < least_num)
        {
            least = worker_id;
            least_num = num;
        }
    }

    last_worker = least;

    return least;
}
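
choice_worker leans on project types (settings, queue_t, queue_num) that are defined elsewhere. A minimal self-contained harness that exercises the selection policy might look like the sketch below; queue_t, worker_t, the settings struct and MAX_WORKERS are hypothetical stand-ins, not the project's real definitions.

#include <stdint.h>
#include <stdio.h>

#define MAX_WORKERS 4

typedef struct { uint64_t pending; } queue_t;   /* fake queue: just a counter */
typedef struct { queue_t queue; } worker_t;

static struct {
    int worker_proc_num;
    worker_t workers[MAX_WORKERS + 1];          /* slot 0 unused: IDs are 1-based */
} settings = { .worker_proc_num = MAX_WORKERS };

static uint64_t queue_num(queue_t *queue) { return queue->pending; }

/* ... paste choice_worker() from above here ... */

int main(void)
{
    settings.workers[1].queue.pending = 3;
    settings.workers[2].queue.pending = 1;
    settings.workers[3].queue.pending = 0;
    settings.workers[4].queue.pending = 5;

    /* Worker 3 is idle, so it is chosen immediately. */
    printf("chose worker %d\n", choice_worker());

    /* Every queue is now busy; worker 2 holds the shortest one. */
    settings.workers[3].queue.pending = 2;
    printf("chose worker %d\n", choice_worker());
    return 0;
}

The first call prints worker 3 (an empty queue wins outright); the second prints worker 2, the least-loaded fallback.
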
Example #2
int initParallelEnv(void)
{
    omp_set_num_threads(THREADS);

    /* Set up the MPI programming environment. */
    MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &threadSupport);

    comm = MPI_COMM_WORLD;
    MPI_Comm_size(comm, &numMPIprocs);
    MPI_Comm_rank(comm, &myMPIRank);

    /* Find the number of bytes in an int. */
    sizeInteger = sizeof(int);

    /* Find the processor name of each MPI process. */
    MPI_Get_processor_name(myProcName, &procNameLen);

    /* Use the processor name to create a communicator
     * across node boundaries.
     */
    setupCommunicators();

    /* Set up the OpenMP programming environment.
     * myThreadID is assumed to be threadprivate; a plain shared global
     * here would make the writes below race. */
    #pragma omp parallel shared(numThreads, globalIDarray, myMPIRank)
    {
        numThreads = omp_get_num_threads();
        myThreadID = omp_get_thread_num();

        /* Allocate space for globalIDarray; one thread allocates, and the
         * implicit barrier at the end of single publishes it to the rest. */
        #pragma omp single
        {
            globalIDarray = (int *)malloc(numThreads * sizeof(int));
        }

        /* Calculate the global ID of each thread. */
        globalIDarray[myThreadID] = (myMPIRank * numThreads) + myThreadID;
    }
    MPI_Barrier(comm);

    /* GASPI setup: request one communication queue per OpenMP thread. */
    gaspi_config_t config;
    GASPI(config_get(&config));
    config.qp_count = THREADS;
    GASPI(config_set(config));
    GASPI(proc_init(GASPI_BLOCK));

    gaspi_rank_t totalRanks;
    GASPI(proc_num(&totalRanks));

    gaspi_rank_t rank;
    GASPI(proc_rank(&rank));

    gaspi_number_t q_num;
    GASPI(queue_num(&q_num));
    assert(q_num == THREADS);

    GASPI(barrier(GASPI_GROUP_ALL, GASPI_BLOCK));

    /* We keep using the MPI ranks, so make sure the GASPI and MPI ranks
     * are identical.  This is not guaranteed, so depending on the setup
     * these assertions may fail. */
    assert(totalRanks == numMPIprocs);
    assert(rank == myMPIRank);

    /* Record the parallel setup in the benchmark report. */
    setParallelInfo(numMPIprocs, threadSupport, numThreads);

    return 0;
}
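
The GASPI() wrapper used throughout initParallelEnv is not part of the GASPI API; it is presumably a success-or-die macro defined in one of the project's headers. A minimal sketch of such a macro, plus a hypothetical main that drives the function and tears the environment down again, could look like this (the globals and setParallelInfo are assumed to come from the benchmark's own headers):

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include <GASPI.h>

/* GASPI(foo(args)) expands to a checked call to gaspi_foo(args) and
 * aborts on failure; the real project likely defines an equivalent. */
#define GASPI(f)                                               \
    do {                                                       \
        const gaspi_return_t r_ = gaspi_##f;                   \
        if (r_ != GASPI_SUCCESS) {                             \
            fprintf(stderr, "gaspi_" #f " failed (%d)\n", r_); \
            exit(EXIT_FAILURE);                                \
        }                                                      \
    } while (0)

int main(void)
{
    /* initParallelEnv() itself calls MPI_Init_thread and gaspi_proc_init. */
    if (initParallelEnv() != 0)
        return EXIT_FAILURE;

    /* ... run the benchmark kernels here ... */

    /* Tear down in reverse order of initialisation. */
    GASPI(proc_term(GASPI_BLOCK));
    free(globalIDarray);
    MPI_Finalize();
    return 0;
}

Giving each OpenMP thread its own GASPI queue (config.qp_count = THREADS, checked by the queue_num assertion above) lets threads post one-sided operations concurrently without contending for a single queue.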