Example 1
int main(int argc, char *argv[])
{
    int rank, nprocs, A[SIZE2], B[SIZE2], i;
    MPI_Win win;
    int errs = 0;

    MTest_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    if (nprocs != 2) {
        printf("Run this program with 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD,1);
    }

    if (rank == 0) {
        for (i=0; i<SIZE2; i++) A[i] = B[i] = i;
        MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);

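        /* One passive-target epoch per element: lock rank 1's window, put A[i], unlock */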
        for (i=0; i<SIZE1; i++) {
            MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
            MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
            MPI_Win_unlock(1, win);
        }

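        /* Read rank 1's B[SIZE1+i] back into the local B[i], again one epoch per element */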
        for (i=0; i<SIZE1; i++) {
            MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
            MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win);
            MPI_Win_unlock(1, win);
        }

        MPI_Win_free(&win);

        for (i=0; i<SIZE1; i++)
            if (B[i] != (-4)*(i+SIZE1)) {
                printf("Get Error: B[%d] is %d, should be %d\n", i, B[i], (-4)*(i+SIZE1));
                errs++;
            }
    }

    else {  /* rank=1 */
        for (i=0; i<SIZE2; i++) B[i] = (-4)*i;
        MPI_Win_create(B, SIZE2*sizeof(int), sizeof(int), MPI_INFO_NULL,
                       MPI_COMM_WORLD, &win);

        MPI_Win_free(&win);

        for (i=0; i<SIZE1; i++) {
            if (B[i] != i) {
                printf("Put Error: B[%d] is %d, should be %d\n", i, B[i], i);
                errs++;
            }
        }
    }

    /*    if (rank==0) printf("Done\n");*/
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example 2
int main(int argc, char *argv[]) 
{ 
    int n, myid, numprocs, i, ierr; 
    double PI25DT = 3.141592653589793238462643; 
    double mypi, pi, h, sum, x; 
    MPI_Win nwin, piwin; 
 
    MPI_Init(&argc,&argv); 
    MPI_Comm_size(MPI_COMM_WORLD,&numprocs); 
    MPI_Comm_rank(MPI_COMM_WORLD,&myid); 
 
    if (myid == 0) { 
	MPI_Win_create(&n, sizeof(int), 1, MPI_INFO_NULL, 
		       MPI_COMM_WORLD, &nwin); 
	MPI_Win_create(&pi, sizeof(double), 1, MPI_INFO_NULL, 
		       MPI_COMM_WORLD, &piwin);  
    } 
    else { 
	MPI_Win_create(MPI_BOTTOM, 0, 1, MPI_INFO_NULL, 
		       MPI_COMM_WORLD, &nwin); 
	MPI_Win_create(MPI_BOTTOM, 0, 1, MPI_INFO_NULL, 
		       MPI_COMM_WORLD, &piwin); 
    } 
    while (1) { 
        if (myid == 0) { 
            fprintf(stdout, "Enter the number of intervals: (0 quits) ");
	    fflush(stdout); 
            ierr=scanf("%d",&n); 
	    pi = 0.0;			 
        } 
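	/* the two fences bracket the epoch in which non-root ranks read n from rank 0's window */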
	MPI_Win_fence(0, nwin); 
	if (myid != 0)  
	    MPI_Get(&n, 1, MPI_INT, 0, 0, 1, MPI_INT, nwin); 
	MPI_Win_fence(0, nwin); 
        if (n == 0) 
            break; 
        else { 
            h   = 1.0 / (double) n; 
            sum = 0.0; 
            for (i = myid + 1; i <= n; i += numprocs) { 
                x = h * ((double)i - 0.5); 
                sum += (4.0 / (1.0 + x*x)); 
            } 
            mypi = h * sum; 
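            /* add each rank's partial sum into pi on rank 0; the second fence completes the accumulates */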
	    MPI_Win_fence( 0, piwin); 
	    MPI_Accumulate(&mypi, 1, MPI_DOUBLE, 0, 0, 1, MPI_DOUBLE, 
			   MPI_SUM, piwin); 
	    MPI_Win_fence(0, piwin); 
            if (myid == 0) { 
                fprintf(stdout, "pi is approximately %.16f, Error is %.16f\n", 
                       pi, fabs(pi - PI25DT)); 
		fflush(stdout);
	    }
        } 
    } 
    MPI_Win_free(&nwin); 
    MPI_Win_free(&piwin); 
    MPI_Finalize(); 
    return 0; 
} 
Example 3
void inithash(numb N)
{

  MPI_Init(NULL,NULL);
  status=MPI_Info_create(&info);
  status=MPI_Info_set(info,"same_size","true");

  comm=MPI_COMM_WORLD;
  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);

  hashlen = 2*N+1;
  nobj = 2*N/size + 1;
  /*    hashtab = calloc(hashlen,sizeof(obj));*/
  hashtab = (numb *)calloc(nobj,sizeof(numb));
  hashcount = (numb *)calloc(nobj,sizeof(numb));
  if (hashtab == NULL || hashcount == NULL) exit(1);
  collisions = 0;
  count=0;


  /*    testArray = (int *) malloc(sizeof(int)*10);*/
  /* note: the "same_size" info object created above is not passed to the window calls */
  status = MPI_Win_create(hashtab,nobj*sizeof(numb),sizeof(numb),MPI_INFO_NULL,comm,&win);
  status = MPI_Win_create(hashcount,nobj*sizeof(numb),sizeof(numb),MPI_INFO_NULL,comm,&win2);

}
Example 4
SweptDiscretization2D::SweptDiscretization2D(int n,int substeps,int dataPointSize,int totalConstants,int totalConservedQuantities,int remoteConstantsCount)
{
	if(totalConstants<remoteConstantsCount)
	{
		printf("Remote Constants Cannot be more than the total constants!!\n");
		exit(1);
	}
	this->n = n;
	this->substeps = substeps;
	this->dataPointSize = dataPointSize;
	//this->constants = totalConstants;
	this->constants = totalConstants + totalConservedQuantities;
	this->remoteConstantsCount = remoteConstantsCount;
	this->totalConservedQuantities = totalConservedQuantities;
	this->checkForRemoteCycles = -1;
	this->outputToFile = -1;
	this->conserveCheckFreq = -1;
	this->firstConservedCheck = true;
	this->outputLength = 1;
	panelSize = 2;
	for(int i=n;i>2;i=i-2)
		panelSize += i;
	panelSize *= 2;
	int constantsToAdd = panelSize * this->constants;
	panelSize = panelSize * substeps * dataPointSize;
	communicationSize = panelSize + constantsToAdd;
	foundationSize = ((n+2) * (n+2) * substeps * dataPointSize + ((n+2) * (n+2) * this->constants));
	resultArray = NULL;		
	
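	//Expose the foundation buffer through an RMA window; MPI_Alloc_mem provides memory suitable for RMA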
	MPI_Alloc_mem(foundationSize * sizeof(double), MPI_INFO_NULL, &foundation);
	MPI_Win_create(foundation,foundationSize * sizeof(double),1, MPI_INFO_NULL,MPI_COMM_WORLD, &foundationWindow);
	if(remoteConstantsCount > 0)
	{
		MPI_Alloc_mem(n * n * remoteConstantsCount * sizeof(double), MPI_INFO_NULL, &remoteConstants);
		MPI_Win_create(remoteConstants,n * n * remoteConstantsCount * sizeof(double),1, MPI_INFO_NULL,MPI_COMM_WORLD, &constantsWindow);
		constantsArrayBytes = (unsigned char*) malloc(n * n * remoteConstantsCount * sizeof(double) * pg.mpiSize);		
	}

	if(totalConservedQuantities > 0)
	{
		convervedVariables = new double[totalConservedQuantities];
		memset(convervedVariables, 0, sizeof(double) * totalConservedQuantities);
	}
	staging    = new double[foundationSize];
	northPanel = new double[panelSize + constantsToAdd];
	southPanel = new double[panelSize + constantsToAdd];
	eastPanel  = new double[panelSize + constantsToAdd];
	westPanel  = new double[panelSize + constantsToAdd];

	//Define Swept2D components
	up = new UpPyramid(n,foundation,staging,substeps,dataPointSize,this->constants,totalConservedQuantities);
	dp = new DownPyramid(n,foundation,staging,substeps,dataPointSize,this->constants,totalConservedQuantities);
	hb = new HorizontalBridge(n,foundation,staging,substeps,dataPointSize,this->constants,totalConservedQuantities);
	vb = new VerticalBridge(n,foundation,staging,substeps,dataPointSize,this->constants,totalConservedQuantities);

	firstRun = true;
}
Example 5
void allocate_memory(int rank, char *rbuf, int size, WINDOW type, MPI_Win *win)
{
    MPI_Status  reqstat;

    switch (type){
        case WIN_DYNAMIC:
            MPI_CHECK(MPI_Win_create_dynamic(MPI_INFO_NULL, MPI_COMM_WORLD, win));
            MPI_CHECK(MPI_Win_attach(*win, (void *)rbuf, size));
            MPI_CHECK(MPI_Get_address(rbuf, &sdisp_local));
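            /* exchange attached-buffer addresses; with a dynamic window the remote
               address serves as the target displacement */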
            if(rank == 0){
                MPI_CHECK(MPI_Send(&sdisp_local, 1, MPI_AINT, 1, 1, MPI_COMM_WORLD));
                MPI_CHECK(MPI_Recv(&sdisp_remote, 1, MPI_AINT, 1, 1, MPI_COMM_WORLD, &reqstat));
            }
            else{
                MPI_CHECK(MPI_Recv(&sdisp_remote, 1, MPI_AINT, 0, 1, MPI_COMM_WORLD, &reqstat));
                MPI_CHECK(MPI_Send(&sdisp_local, 1, MPI_AINT, 0, 1, MPI_COMM_WORLD));
            }
            break;
        case WIN_CREATE:
            MPI_CHECK(MPI_Win_create(rbuf, size, 1, MPI_INFO_NULL, MPI_COMM_WORLD, win));
            break;
        default:
            /* MPI_Win_allocate returns the base address of the window memory through
               its baseptr argument, so the address of the pointer is passed here */
            MPI_CHECK(MPI_Win_allocate(size, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &rbuf, win));
            break;
    }
}
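Note: the WIN_DYNAMIC branch above only attaches the buffer and exchanges its address. As a minimal sketch of how that address would then be used (not part of the benchmark; it assumes the global MPI_Aint sdisp_remote received above and a hypothetical helper name), a put to the peer's attached region passes the remote address as the target displacement:

/* Hypothetical helper: with a dynamic window, the target displacement is the
   absolute address obtained on the target via MPI_Get_address (sdisp_remote). */
static void put_to_peer(char *sbuf, int size, int peer, MPI_Win win)
{
    MPI_Win_lock(MPI_LOCK_SHARED, peer, 0, win);
    MPI_Put(sbuf, size, MPI_CHAR, peer, sdisp_remote, size, MPI_CHAR, win);
    MPI_Win_unlock(peer, win);
}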
Example 6
int main( int argc, char *argv[] )
{
    int buf[2];
    MPI_Win        win;
    MPI_Errhandler newerr;
    int            i;

    MTest_Init( &argc, &argv );

    /* Run this test multiple times to expose storage leaks (we found a leak
       of error handlers with this test) */
    for (i=0;i<1000; i++)  {
	calls = 0;
	
	MPI_Win_create( buf, 2*sizeof(int), sizeof(int), 
			MPI_INFO_NULL, MPI_COMM_WORLD, &win );
	mywin = win;
	
	MPI_Win_create_errhandler( eh, &newerr );
	
	MPI_Win_set_errhandler( win, newerr );
	MPI_Win_call_errhandler( win, MPI_ERR_OTHER );
	MPI_Errhandler_free( &newerr );
	if (calls != 1) {
	    errs++;
	    printf( "Error handler not called\n" );
	}
	MPI_Win_free( &win );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example 7
asagi::Grid::Error grid::NumaDistStaticGrid::init() {
    unsigned long blockSize = getTotalBlockSize();
    unsigned long masterBlockCount = getLocalBlockCount();
    asagi::Grid::Error error;

    // Create the local cache
    error = NumaLocalCacheGrid::init();
    if (error != asagi::Grid::SUCCESS)
        return error;

    // Distribute the blocks
    error = NumaLocalStaticGrid::init();
    if (error != asagi::Grid::SUCCESS)
        return error;
    
    // Create the mpi window for distributed blocks
    if(pthread_equal(m_threadHandle.getMasterthreadId(), pthread_self())){
        if (MPI_Win_create(m_threadHandle.getStaticPtr(m_threadHandle.getMasterthreadId(),m_id),
                getType().getSize() * blockSize * masterBlockCount,
                getType().getSize(),
                MPI_INFO_NULL,
                getMPICommunicator(),
                &m_threadHandle.mpiWindow) != MPI_SUCCESS)
            return asagi::Grid::MPI_ERROR;
    }
    return asagi::Grid::SUCCESS;
}
Example 8
/** Create a mutex group.  Collective.
  *
  * @param[in] count Number of mutexes to create on the calling process
  * @return          Handle to the mutex group
  */
armcix_mutex_hdl_t ARMCIX_Create_mutexes_hdl(int count, ARMCI_Group *pgroup) {
  int         ierr, i;
  armcix_mutex_hdl_t hdl;

  hdl = malloc(sizeof(struct armcix_mutex_hdl_s));
  ARMCII_Assert(hdl != NULL);

  MPI_Comm_dup(pgroup->comm, &hdl->comm);

  if (count > 0) {
    MPI_Alloc_mem(count*sizeof(long), MPI_INFO_NULL, &hdl->base);
    ARMCII_Assert(hdl->base != NULL);
  } else {
    hdl->base = NULL;
  }

  hdl->count = count;

  // Initialize mutexes to 0
  for (i = 0; i < count; i++)
    hdl->base[i] = 0;

  ierr = MPI_Win_create(hdl->base, count*sizeof(long), sizeof(long) /* displacement size */,
                        MPI_INFO_NULL, hdl->comm, &hdl->window);
  ARMCII_Assert(ierr == MPI_SUCCESS);

  return hdl;
}
Example 9
static void
serverWinCreate(void)
{
  int ranks[1], modelID;
  MPI_Comm commCalc = commInqCommCalc ();
  MPI_Group groupCalc;
  int nProcsModel = commInqNProcsModel ();
  MPI_Info no_locks_info;
  xmpi(MPI_Info_create(&no_locks_info));
  xmpi(MPI_Info_set(no_locks_info, "no_locks", "true"));

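  /* collective window creation; this process contributes no local memory (MPI_BOTTOM, size 0) */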
  xmpi(MPI_Win_create(MPI_BOTTOM, 0, 1, no_locks_info, commCalc, &getWin));

  /* target group */
  ranks[0] = nProcsModel;
  xmpi ( MPI_Comm_group ( commCalc, &groupCalc ));
  xmpi ( MPI_Group_excl ( groupCalc, 1, ranks, &groupModel ));

  rxWin = xcalloc((size_t)nProcsModel, sizeof (rxWin[0]));
  size_t totalBufferSize = collDefBufferSizes();
  rxWin[0].buffer = (unsigned char*) xmalloc(totalBufferSize);
  size_t ofs = 0;
  for ( modelID = 1; modelID < nProcsModel; modelID++ )
    {
      ofs += rxWin[modelID - 1].size;
      rxWin[modelID].buffer = rxWin[0].buffer + ofs;
    }

  xmpi(MPI_Info_free(&no_locks_info));

  xdebug("%s", "created mpi_win, allocated getBuffer");
}
Example 10
int main (int argc,char *argv[]) {
  int       i;
  double    w[NEL];
  MPI_Aint  win_size,warr_size;
  MPI_Win  *win;

  win_size=sizeof(MPI_Win);
  warr_size=sizeof(double)*NEL;   /* size in bytes of the double array exposed by the window */

  MPI_Init (&argc, &argv);
  
  for(i=0;i<NTIMES;i++) {
      MPI_Alloc_mem(win_size,MPI_INFO_NULL,&win);

      MPI_Win_create(w,warr_size,sizeof(double),MPI_INFO_NULL,MPI_COMM_WORLD,win);
      MPI_Win_free(win);

      MPI_Free_mem(win);
  }

  MPI_Finalize();

  return 0;

}
Example 11
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size;
    int *buf, bufsize;
    int *result;
    int *rmabuf, rsize, rcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Request req;
    MPI_Datatype derived_dtp;

    MTest_Init(&argc, &argv);

    bufsize = 256 * sizeof(int);
    buf = (int *) malloc(bufsize);
    if (!buf) {
        fprintf(stderr, "Unable to allocate %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    result = (int *) malloc(bufsize);
    if (!result) {
        fprintf(stderr, "Unable to allocate %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    rcount = 16;
    rsize = rcount * sizeof(int);
    rmabuf = (int *) malloc(rsize);
    if (!rmabuf) {
        fprintf(stderr, "Unable to allocate %d bytes\n", rsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_contiguous(2, MPI_INT, &derived_dtp);
    MPI_Type_commit(&derived_dtp);

    /* The following loop is used to run through a series of communicators
     * that are subsets of MPI_COMM_WORLD, of size 1 or greater. */
    while (MTestGetIntracommGeneral(&comm, 1, 1)) {
        int count = 0;

        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        MPI_Win_create(buf, bufsize, 2 * sizeof(int), MPI_INFO_NULL, comm, &win);
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        /** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
        MPI_Win_fence(0, win);

        TEST_FENCE_OP("Put", MPI_Put(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);

        TEST_FENCE_OP("Get", MPI_Get(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);
Example 12
int main(int argc, char *argv[])
{
    MPI_Win win;
    int flag, tmp, rank;
    int base[1024], errs = 0;
    MPI_Request req;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Win_create(base, 1024 * sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

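    /* Rank 0 holds an exclusive lock on its own window between the two barriers
       while the other ranks lock it, issue MPI_Rput, and test the request */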
    if (rank == 0) {
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 0, 0, win);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Win_unlock(0, win);
    } else {
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 0, 0, win);
        MPI_Rput(&tmp, 1, MPI_INT, 0, 0, 1, MPI_INT, win, &req);
        MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Win_unlock(0, win);
    }

    MPI_Win_free(&win);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example 13
int main(int argc, char **argv) {
    int       i, rank, nproc, mpi_type_size;
    int       errors = 0, all_errors = 0;
    TYPE_C   *val_ptr, *res_ptr;
    MPI_Win   win;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
 
    MPI_Type_size(TYPE_MPI, &mpi_type_size);
    assert(mpi_type_size == sizeof(TYPE_C));

    val_ptr = malloc(sizeof(TYPE_C)*nproc);
    res_ptr = malloc(sizeof(TYPE_C)*nproc);

    MPI_Win_create(val_ptr, sizeof(TYPE_C)*nproc, sizeof(TYPE_C), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    /* Test self communication */

    reset_vars(val_ptr, res_ptr, win);

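    /* Atomically increment the counter at this rank ITER times with MPI_Fetch_and_op(MPI_SUM) */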
    for (i = 0; i < ITER; i++) {
        TYPE_C one = 1, result = -1;
        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, win);
        MPI_Fetch_and_op(&one, &result, TYPE_MPI, rank, 0, MPI_SUM, win);
        MPI_Win_unlock(rank, win);
    }

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, win);
    if ( CMP(val_ptr[0], ITER) ) {
        SQUELCH( printf("%d->%d -- SELF: expected "TYPE_FMT", got "TYPE_FMT"\n", rank, rank, (TYPE_C) ITER, val_ptr[0]); );
        errors++;
    }
Example 14
int main(int argc, char *argv[])
{
    int          rank, nproc;
    int          errors = 0, all_errors = 0;
    int          buf, my_buf;
    MPI_Win      win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Win_create(&buf, sizeof(int), sizeof(int),
                    MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

    MPI_Win_fence(0, win);

    MPI_Win_lock(MPI_LOCK_SHARED, 0, MPI_MODE_NOCHECK, win);
    MPI_Get(&my_buf, 1, MPI_INT, 0, 0, 1, MPI_INT, win);
    MPI_Win_unlock(0, win);

    /* This should fail because the window is no longer in a fence epoch */
    CHECK_ERR(MPI_Get(&my_buf, 1, MPI_INT, 0, 0, 1, MPI_INT, win));

    MPI_Win_fence(0, win);
    MPI_Win_free(&win);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0 && all_errors == 0) printf(" No Errors\n");
    MPI_Finalize();

    return 0;
}
Example 15
int main(int argc, char *argv[])
{
    int rank;
    int errors = 0, all_errors = 0;
    int buf;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Win_create(&buf, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

    /* This should fail because the window is not locked. */
    CHECK_ERR(MPI_Win_unlock(0, win));

    MPI_Win_free(&win);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0 && all_errors == 0)
        printf(" No Errors\n");
    MPI_Finalize();

    return 0;
}
Example 16
int main(int argc, char **argv)
{
    int rank, nproc;
    int out_val, i, counter = 0;
    MPI_Win win;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Win_create(&counter, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

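    /* Each iteration atomically adds acc_val to the local counter and returns the previous value in out_val */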
    for (i = 0; i < NITER; i++) {
        MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
        MPI_Get_accumulate(&acc_val, 1, MPI_INT, &out_val, 1, MPI_INT,
                           rank, 0, 1, MPI_INT, MPI_SUM, win);
        MPI_Win_unlock(rank, win);

        if (out_val != acc_val * i) {
            errs++;
            printf("Error: got %d, expected %d at iter %d\n", out_val, acc_val * i, i);
            break;
        }
    }

    MPI_Win_free(&win);

    MTest_Finalize(errs);

    return MTestReturnValue(errs);
}
Example 17
void initialize(field * temperature1, field * temperature2,
                parallel_data * parallel)
{
    int i, j;

    // Allocate also ghost layers
    temperature1->data =
        malloc_2d(temperature1->nx + 2, temperature1->ny + 2);
    temperature2->data =
        malloc_2d(temperature2->nx + 2, temperature2->ny + 2);

    // Create RMA window. In principle, only borders would be needed
    // but it is simpler to expose the whole array
    MPI_Win_create(&temperature1->data[0][0],
                   (temperature1->nx + 2) * (temperature1->ny +
                                             2) * sizeof(double),
                   sizeof(double), MPI_INFO_NULL, parallel->comm,
                   &temperature1->rma_window);
    MPI_Win_create(&temperature2->data[0][0],
                   (temperature2->nx + 2) * (temperature2->ny +
                                             2) * sizeof(double),
                   sizeof(double), MPI_INFO_NULL, parallel->comm,
                   &temperature2->rma_window);

    // Initialize to zero
    memset(temperature1->data[0], 0,
           (temperature1->nx + 2) * (temperature1->ny + 2)
           * sizeof(double));


    for (i = 0; i < temperature1->nx + 2; i++) {
        temperature1->data[i][0] = 30.0;
        temperature1->data[i][temperature1->ny + 1] = -10.0;
    }

    if (parallel->rank == 0) {
        for (j = 0; j < temperature1->ny + 2; j++)
            temperature1->data[0][j] = 15.0;
    } else if (parallel->rank == parallel->size - 1) {
        for (j = 0; j < temperature1->ny + 2; j++)
            temperature1->data[temperature1->nx + 1][j] = -25.0;
    }

    copy_field(temperature1, temperature2);

}
Example 18
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size;
    int *buf, bufsize;
    int *result;
    int *rmabuf, rsize, rcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Request req;

    MTest_Init(&argc, &argv);

    bufsize = 256 * sizeof(int);
    buf = (int *) malloc(bufsize);
    if (!buf) {
        fprintf(stderr, "Unable to allocate %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    result = (int *) malloc(bufsize);
    if (!result) {
        fprintf(stderr, "Unable to allocate %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    rcount = 16;
    rsize = rcount * sizeof(int);
    rmabuf = (int *) malloc(rsize);
    if (!rmabuf) {
        fprintf(stderr, "Unable to allocate %d bytes\n", rsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, 1, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        MPI_Win_create(buf, bufsize, sizeof(int), MPI_INFO_NULL, comm, &win);
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        /** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
        MPI_Win_fence(0, win);

        TEST_FENCE_OP("Put",
                      MPI_Put(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);

        TEST_FENCE_OP("Get",
                      MPI_Get(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);
Example 19
static int _ZMPI_Alltoall_int_proclists_put(int alloc_mem, int nphases, int *sendbuf, int nsprocs, int *sprocs, int *recvbuf, int nrprocs, int *rprocs, MPI_Comm comm)
{
  int i, p, size, rank, *rcounts_put;

  MPI_Win win;


  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  if (alloc_mem) MPI_Alloc_mem(size * sizeof(int), MPI_INFO_NULL, &rcounts_put);
  else rcounts_put = recvbuf;

  if (nrprocs >= 0)
    for (i = 0; i < nrprocs; ++i) rcounts_put[rprocs[i]] = DEFAULT_INT;
  else
    for (i = 0; i < size; ++i) rcounts_put[i] = DEFAULT_INT;

  MPI_Win_create(rcounts_put, size * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
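  /* first fence opens the RMA epoch; MPI_MODE_NOPRECEDE asserts that no RMA calls precede it */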
  MPI_Win_fence(MPI_MODE_NOSTORE|MPI_MODE_NOPRECEDE, win);

  for (p = 0; p < nphases; ++p)
  {
/*    printf("%d: phase = %d of %d\n", rank, p, nphases);*/
  
    if (rank % nphases == p)
    {
      if (nsprocs >= 0)
      {
        for (i = 0; i < nsprocs; ++i)
          if (sendbuf[sprocs[i]] != DEFAULT_INT) MPI_Put(&sendbuf[sprocs[i]], 1, MPI_INT, sprocs[i], rank, 1, MPI_INT, win);

      } else
      {
        for (i = 0; i < size; ++i)
          if (sendbuf[i] != DEFAULT_INT) MPI_Put(&sendbuf[i], 1, MPI_INT, i, rank, 1, MPI_INT, win);
      }
    }

    if (p < nphases - 1) MPI_Win_fence(0, win);
  }

  MPI_Win_fence(MPI_MODE_NOPUT|MPI_MODE_NOSUCCEED, win);
  MPI_Win_free(&win);

  if (alloc_mem)
  {
    if (nrprocs >= 0)
      for (i = 0; i < nrprocs; ++i) recvbuf[rprocs[i]] = rcounts_put[rprocs[i]];
    else
      for (i = 0; i < size; ++i) recvbuf[i] = rcounts_put[i];

    MPI_Free_mem(rcounts_put);    
  }

  return MPI_SUCCESS;
}
Example 20
scatter_constant* init_scatter_constant(void* array, size_t array_count, size_t elt_size, void* constant, size_t nrequests_max, MPI_Datatype dt) {
  scatter_constant* sc = (scatter_constant*)xmalloc(sizeof(scatter_constant));
  sc->array = array;
  sc->elt_size = elt_size;
  sc->constant = constant;
  sc->datatype = dt;
  sc->valid = 0;
  MPI_Win_create(array, array_count * elt_size, elt_size, MPI_INFO_NULL, MPI_COMM_WORLD, &sc->win);
  return sc;
}
Example 21
MPIMutex::MPIMutex(MPI_Comm _comm) {
	int nproc, rank;
	id = details::mutex_count++;
	comm = _comm;
	MPI_Comm_rank(comm, &rank);
	MPI_Comm_size(comm, &nproc);
	MPI_Alloc_mem(nproc, MPI_INFO_NULL, &lock_vector);
	bzero(lock_vector, nproc);
	MPI_Win_create(lock_vector, nproc, sizeof(byte), MPI_INFO_NULL, comm, &win);
};
Example 22
gather* init_gather(void* input, size_t input_count, size_t elt_size, void* output, size_t output_count, size_t nrequests_max, MPI_Datatype dt) {
  gather* g = (gather*)xmalloc(sizeof(gather));
  g->input = input;
  g->elt_size = elt_size;
  g->output = output;
  g->datatype = dt;
  g->valid = 0;
  MPI_Win_create(input, input_count * elt_size, elt_size, MPI_INFO_NULL, MPI_COMM_WORLD, &g->win);
  return g;
}
Example 23
int main(int argc, char *argv[])
{
    int rank, nprocs, A[SIZE], B[SIZE], i;
    MPI_Comm CommDeuce;
    MPI_Win win;
    int errs = 0;

    MTest_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (nprocs < 2) {
        printf("Run this program with 2 or more processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_split(MPI_COMM_WORLD, (rank < 2), rank, &CommDeuce);

    if (rank < 2) {
        if (rank == 0) {
            for (i = 0; i < SIZE; i++)
                A[i] = B[i] = i;
        }
        else {
            for (i = 0; i < SIZE; i++) {
                A[i] = (-3) * i;
                B[i] = (-4) * i;
            }
        }

        MPI_Win_create(B, SIZE * sizeof(int), sizeof(int), MPI_INFO_NULL, CommDeuce, &win);

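        /* Fence-delimited epoch: rank 0 puts A into rank 1's window; rank 1 gets
           from rank 0's window and accumulates the last element */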
        MPI_Win_fence(0, win);

        if (rank == 0) {
            for (i = 0; i < SIZE - 1; i++)
                MPI_Put(A + i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
        }
        else {
            for (i = 0; i < SIZE - 1; i++)
                MPI_Get(A + i, 1, MPI_INT, 0, i, 1, MPI_INT, win);

            MPI_Accumulate(A + i, 1, MPI_INT, 0, i, 1, MPI_INT, MPI_SUM, win);
        }
        MPI_Win_fence(0, win);

        if (rank == 1) {
            for (i = 0; i < SIZE - 1; i++) {
                if (A[i] != B[i]) {
                    SQUELCH(printf("Put/Get Error: A[i]=%d, B[i]=%d\n", A[i], B[i]););
                    errs++;
                }
            }
        }
Example 24
int main(int argc, char *argv[])
{
  MPI_Win win;
  MPI_Init(&argc, &argv);
  MPI_Win_create(MPI_BOTTOM, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);
  MPI_Win_post(MPI_GROUP_EMPTY, 0, win);
  MPI_Win_wait(win);
  MPI_Win_free(&win);
  MPI_Finalize();
  return 0;
}
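The example above exercises only the exposure side of general active-target synchronization (MPI_Win_post/MPI_Win_wait with an empty group, so no origin ever accesses the window). For context, a minimal origin-side counterpart, not part of this test and assuming a two-rank window that actually exposes an int on rank 1, could look like:

/* Hypothetical origin-side epoch: start/complete paired with the target's post/wait. */
void origin_epoch(MPI_Win win, MPI_Comm comm, int *value)
{
    MPI_Group world_group, target_group;
    int target = 1;

    MPI_Comm_group(comm, &world_group);
    MPI_Group_incl(world_group, 1, &target, &target_group);

    MPI_Win_start(target_group, 0, win);   /* open the access epoch */
    MPI_Put(value, 1, MPI_INT, target, 0, 1, MPI_INT, win);
    MPI_Win_complete(win);                 /* complete all RMA issued in the epoch */

    MPI_Group_free(&target_group);
    MPI_Group_free(&world_group);
}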
Example 25
scatter* init_scatter(void* array, size_t array_count, size_t elt_size, size_t nrequests_max, MPI_Datatype dt) {
  scatter* sc = (scatter*)xmalloc(sizeof(scatter));
  sc->array = array;
  sc->elt_size = elt_size;
  sc->request_count = 0;
  sc->nrequests_max = nrequests_max;
  sc->send_data = xmalloc(nrequests_max * elt_size);
  sc->datatype = dt;
  sc->valid = 0;
  MPI_Win_create(array, array_count * elt_size, elt_size, MPI_INFO_NULL, MPI_COMM_WORLD, &sc->win);
  return sc;
}
Example 26
int main(int argc, char *argv[]) 
{ 
    int rank, destrank, nprocs, *A, *B, i;
    MPI_Comm CommDeuce;
    MPI_Group comm_group, group;
    MPI_Win win;
    int errs = 0;

    MTest_Init(&argc,&argv); 
    MPI_Comm_size(MPI_COMM_WORLD,&nprocs); 
    MPI_Comm_rank(MPI_COMM_WORLD,&rank); 

    if (nprocs < 2) {
        printf("Run this program with 2 or more processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_split(MPI_COMM_WORLD, (rank < 2), rank, &CommDeuce);

    if (rank < 2)
    {

        i = MPI_Alloc_mem(SIZE2 * sizeof(int), MPI_INFO_NULL, &A);
        if (i) {
            printf("Can't allocate memory in test program\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        i = MPI_Alloc_mem(SIZE2 * sizeof(int), MPI_INFO_NULL, &B);
        if (i) {
            printf("Can't allocate memory in test program\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        MPI_Comm_group(CommDeuce, &comm_group);

        if (rank == 0) {
            for (i=0; i<SIZE2; i++) A[i] = B[i] = i;
            MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, CommDeuce, &win);
            destrank = 1;
            MPI_Group_incl(comm_group, 1, &destrank, &group);
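            /* access epoch targeting only rank 1, bracketed by MPI_Win_start/MPI_Win_complete */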
            MPI_Win_start(group, 0, win);
            for (i=0; i<SIZE1; i++)
                MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
            for (i=0; i<SIZE1; i++)
                MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win);

            MPI_Win_complete(win);

            for (i=0; i<SIZE1; i++)
                if (B[i] != (-4)*(i+SIZE1)) {
                    SQUELCH( printf("Get Error: B[i] is %d, should be %d\n", B[i], (-4)*(i+SIZE1)); );
                    errs++;
                }
Example 27
void test_put(void)
{
    int me, nproc;
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_rank(MPI_COMM_WORLD, &me);
    MPI_Win dst_win;
    double *dst_buf;
    double src_buf[MAXELEMS];
    int i, j;

    MPI_Alloc_mem(sizeof(double) * nproc * MAXELEMS, MPI_INFO_NULL, &dst_buf);
    MPI_Win_create(dst_buf, sizeof(double) * nproc * MAXELEMS, 1, MPI_INFO_NULL, MPI_COMM_WORLD,
                   &dst_win);

    for (i = 0; i < MAXELEMS; i++)
        src_buf[i] = me + 1.0;

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, me, 0, dst_win);

    for (i = 0; i < nproc * MAXELEMS; i++)
        dst_buf[i] = 0.0;

    MPI_Win_unlock(me, dst_win);

    MPI_Barrier(MPI_COMM_WORLD);

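    /* For each target rank: write src_buf into that target's slice of the window element
       by element, then read it back, one lock/unlock epoch per element */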
    for (i = 0; i < nproc; i++) {
        int target = i;

        for (j = 0; j < COUNT; j++) {
            if (verbose)
                printf("%2d -> %2d [%2d]\n", me, target, j);
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, dst_win);
            MPI_Put(&src_buf[j], sizeof(double), MPI_BYTE, target,
                    (me * MAXELEMS + j) * sizeof(double), sizeof(double), MPI_BYTE, dst_win);
            MPI_Win_unlock(target, dst_win);
        }

        for (j = 0; j < COUNT; j++) {
            if (verbose)
                printf("%2d <- %2d [%2d]\n", me, target, j);
            MPI_Win_lock(MPI_LOCK_EXCLUSIVE, target, 0, dst_win);
            MPI_Get(&src_buf[j], sizeof(double), MPI_BYTE, target,
                    (me * MAXELEMS + j) * sizeof(double), sizeof(double), MPI_BYTE, dst_win);
            MPI_Win_unlock(target, dst_win);
        }
    }

    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Win_free(&dst_win);
    MPI_Free_mem(dst_buf);
}
Example 28
/**
 * @brief Expose the data in an RMA window so the client can read it with MPI_Get.
 *
 * @param len      Number of data structures sent.
 * @param seed     Seed used to initialize the outgoing data array.
 * @param validate Whether to validate the transferred data.
 * @param source   MPI rank of the sending process.
 */
int process_mpi_get(
        const int len,
        const int seed,
        const int validate,
        const int source)
{
    int nbytes = len*sizeof(data_t);

    log_level debug_level = xfer_debug_level;
    int rc = 0;
    MPI_Status status;
    MPI_Request req;
    MPI_Win win;
    MPI_Group comm_group, group;
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    log_debug(debug_level, "%d: starting process_mpi_get(len=%d, seed=%d, validate=%d)",
            rank, len, seed, validate);

    // Allocate space for the incoming buffer
    data_array_t array;
    array.data_array_t_len = len;
    array.data_array_t_val = new data_t[len];

    log_debug(debug_level, "%d: creating window", rank);

    // initialize the array
    xfer_init_data_array(seed, &array);

    // Collective call to create a window for the data being transferred (like registering the memory)
    MPI_Win_create(array.data_array_t_val, sizeof(data_t)*len,
            1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);


    MPI_Win_fence(0, win);
    log_debug(debug_level, "Waiting for client to get data");
    MPI_Win_fence(0, win);


    log_debug(debug_level, "Data should be transferred");


    // clean up
    MPI_Win_free(&win);


    delete [] array.data_array_t_val;


    return rc;
}
Example 29
/*
 * Class:     mpi_Win
 * Method:    createWin
 * Signature: (Ljava/nio/Buffer;IIJJ)J
 */
JNIEXPORT jlong JNICALL Java_mpi_Win_createWin(
        JNIEnv *env, jobject jthis, jobject jBase,
        jint size, jint dispUnit, jlong info, jlong comm)
{
    void *base = (*env)->GetDirectBufferAddress(env, jBase);
    MPI_Win win;

    int rc = MPI_Win_create(base, (MPI_Aint)size, dispUnit,
                            (MPI_Info)info, (MPI_Comm)comm, &win);

    ompi_java_exceptionCheck(env, rc);
    return (jlong)win;
}
Example 30
int main(int argc, char **argv)
{
    int i, j, rank, nranks, peer, bufsize, errors;
    double *buffer, *src_buf;
    MPI_Win buf_win;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    bufsize = XDIM * YDIM * sizeof(double);
    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &buffer);
    MPI_Alloc_mem(bufsize, MPI_INFO_NULL, &src_buf);

    for (i = 0; i < XDIM * YDIM; i++) {
        *(buffer + i) = 1.0 + rank;
        *(src_buf + i) = 1.0 + rank;
    }

    MPI_Win_create(buffer, bufsize, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &buf_win);

    peer = (rank + 1) % nranks;

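    /* Each iteration accumulates (MPI_SUM) YDIM rows of src_buf into the neighbor's window under an exclusive lock */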
    for (i = 0; i < ITERATIONS; i++) {

        MPI_Win_lock(MPI_LOCK_EXCLUSIVE, peer, 0, buf_win);

        for (j = 0; j < YDIM; j++) {
            MPI_Accumulate(src_buf + j * XDIM, XDIM, MPI_DOUBLE, peer,
                           j * XDIM * sizeof(double), XDIM, MPI_DOUBLE, MPI_SUM, buf_win);
        }

        MPI_Win_unlock(peer, buf_win);
    }

    MPI_Barrier(MPI_COMM_WORLD);

    MPI_Win_lock(MPI_LOCK_EXCLUSIVE, rank, 0, buf_win);
    for (i = errors = 0; i < XDIM; i++) {
        for (j = 0; j < YDIM; j++) {
            const double actual = *(buffer + i + j * XDIM);
            const double expected =
                (1.0 + rank) + (1.0 + ((rank + nranks - 1) % nranks)) * (ITERATIONS);
            if (fabs(actual - expected) > 1.0e-10) {
                SQUELCH(printf("%d: Data validation failed at [%d, %d] expected=%f actual=%f\n",
                               rank, j, i, expected, actual););
                errors++;
                fflush(stdout);
            }
        }