Example #1
void
KDA_Comm_set_name(KDA_Neighbor* neighbor)
{
  NOTE_FX(neighbor->id);
  /* label the communicator with the neighbor id in hex */
  char name[32];
  snprintf(name, sizeof(name), "%X", neighbor->id);
  MPI_Comm_set_name(neighbor->node.comm, name);
}
Example #2
/**
 * Initialize internal state needed by libcircle. This should be called before
 * any other libcircle API call.
 *
 * @param argc the number of arguments passed into the program.
 * @param argv the vector of arguments passed into the program.
 * @param user_options a bitwise OR of runtime option flags.
 *
 * @return the rank of the current process, or -1 on error.
 */
__inline__ int32_t CIRCLE_init(int argc, char* argv[], int user_options)
{
    CIRCLE_debug_stream = stdout;
    CIRCLE_debug_level = CIRCLE_LOG_INFO;

    /* initialize callback pointers */
    CIRCLE_INPUT_ST.create_cb      = NULL;
    CIRCLE_INPUT_ST.process_cb     = NULL;
    CIRCLE_INPUT_ST.reduce_init_cb = NULL;
    CIRCLE_INPUT_ST.reduce_op_cb   = NULL;
    CIRCLE_INPUT_ST.reduce_fini_cb = NULL;

    /* initialize user reduction buffer */
    CIRCLE_INPUT_ST.reduce_buf      = NULL;
    CIRCLE_INPUT_ST.reduce_buf_size = 0;

    CIRCLE_set_options(user_options);

    /* determine whether we need to initialize MPI,
     * and remember if we did so we finalize later */
    CIRCLE_must_finalize_mpi = 0;
    int mpi_initialized;

    if(MPI_Initialized(&mpi_initialized) != MPI_SUCCESS) {
        LOG(CIRCLE_LOG_FATAL, "Unable to determine whether MPI is initialized.");
        return -1;
    }

    if(! mpi_initialized) {
        /* not already initialized, so initialize MPI now */
        if(MPI_Init(&argc, &argv) != MPI_SUCCESS) {
            LOG(CIRCLE_LOG_FATAL, "Unable to initialize MPI.");
            return -1;
        }

        /* remember that we must finalize later */
        CIRCLE_must_finalize_mpi = 1;
    }

    MPI_Comm_dup(MPI_COMM_WORLD, &CIRCLE_INPUT_ST.comm);
    MPI_Comm_set_name(CIRCLE_INPUT_ST.comm, CIRCLE_WORK_COMM_NAME);
    MPI_Comm_rank(CIRCLE_INPUT_ST.comm, &CIRCLE_global_rank);

    CIRCLE_INPUT_ST.queue = CIRCLE_internal_queue_init();

    if(CIRCLE_INPUT_ST.queue == NULL) {
        return -1;
    }
    else {
        return CIRCLE_global_rank;
    }
}
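
A minimal caller sketch for the CIRCLE_init() API documented above. The callback-registration and traversal calls (CIRCLE_cb_create, CIRCLE_cb_process, CIRCLE_begin, CIRCLE_finalize), CIRCLE_DEFAULT_FLAGS, and CIRCLE_MAX_STRING_LEN are assumed to come from libcircle's public header; treat this as an illustration, not the library's canonical example.

#include <libcircle.h>

/* hypothetical callbacks: seed the distributed queue, then drain it */
static void my_create(CIRCLE_handle* handle)
{
    handle->enqueue("start-item");
}

static void my_process(CIRCLE_handle* handle)
{
    char item[CIRCLE_MAX_STRING_LEN];
    handle->dequeue(item);
    /* ... per-item work, possibly enqueueing more items ... */
}

int main(int argc, char* argv[])
{
    int rank = CIRCLE_init(argc, argv, CIRCLE_DEFAULT_FLAGS);

    if(rank < 0) {
        return 1;   /* CIRCLE_init reports failure by returning -1 */
    }

    CIRCLE_cb_create(&my_create);
    CIRCLE_cb_process(&my_process);

    CIRCLE_begin();      /* run the work loop to completion */
    CIRCLE_finalize();
    return 0;
}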
Example #3
int main( int argc, char *argv[] )
{
    int errs = 0;
    MPI_Comm comm;
    int cnt, rlen;
    char name[MPI_MAX_OBJECT_NAME], nameout[MPI_MAX_OBJECT_NAME];
    MTest_Init( &argc, &argv );

    /* Check world and self first */
    nameout[0] = 0;
    MPI_Comm_get_name( MPI_COMM_WORLD, nameout, &rlen );
    if (strcmp(nameout,"MPI_COMM_WORLD")) {
	errs++;
	printf( "Name of comm world is %s, should be MPI_COMM_WORLD\n", 
		nameout );
    }

    nameout[0] = 0;
    MPI_Comm_get_name( MPI_COMM_SELF, nameout, &rlen );
    if (strcmp(nameout,"MPI_COMM_SELF")) {
	errs++;
	printf( "Name of comm self is %s, should be MPI_COMM_SELF\n", 
		nameout );
    }

    /* Now, handle other communicators, including world/self */
    cnt = 0;
    while (MTestGetComm( &comm, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
    
	sprintf( name, "comm-%d", cnt );
	cnt++;
	MPI_Comm_set_name( comm, name );
	nameout[0] = 0;
	MPI_Comm_get_name( comm, nameout, &rlen );
	if (strcmp( name, nameout )) {
	    errs++;
	    printf( "Unexpected name, was %s but should be %s\n",
		    nameout, name );
	}
	
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #4
/* This is a program to enable testing and demonstration of the debugger
   interface, particularly in terms of showing message queues.  To use
   this, run with a few processes and attach with the debugger when the
   program stops.  You can change the variable "hold" to 0 to allow the
   program to complete. */
int main( int argc, char *argv[] )
{
    int wsize, wrank;
    int source, dest, i;
    int buf1[10], buf2[10], buf3[10];
    MPI_Request r[3];
    volatile int hold = 1; 
    MPI_Comm dupcomm;
    MPI_Status status;

    MPI_Init( &argc, &argv );

    MPI_Comm_size( MPI_COMM_WORLD, &wsize );
    MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
    
    /* Set the source and dest in a ring */
    source = (wrank + 1) % wsize;
    dest   = (wrank + wsize - 1) % wsize;

    MPI_Comm_dup( MPI_COMM_WORLD, &dupcomm );
    MPI_Comm_set_name( dupcomm, "Dup of comm world" );

    for (i=0; i<3; i++) {
	MPI_Irecv( MPI_BOTTOM, 0, MPI_INT, source, i + 100, MPI_COMM_WORLD, 
		   &r[i] );
    }

    MPI_Send( buf2, 8, MPI_INT, dest, 1, MPI_COMM_WORLD );
    MPI_Send( buf3, 4, MPI_INT, dest, 2, dupcomm );
    
    while (hold) ;

    MPI_Recv( buf1, 10, MPI_INT, source, 1, MPI_COMM_WORLD, &status );
    MPI_Recv( buf1, 10, MPI_INT, source, 2, dupcomm, &status );

    for (i=0; i<3; i++) {
	MPI_Cancel( &r[i] );
	/* a cancelled request must still be completed */
	MPI_Wait( &r[i], &status );
    }

    MPI_Comm_free( &dupcomm );
    
    
    MPI_Finalize();
    
    return 0;
}
Example #5
void DWorldCreate(double gxs, double gxe, double gys, double gye, DWorld *world) {
	int i;
	struct _DWorld *w;
	w = malloc(sizeof(struct _DWorld));

	w->localSize = 0;
	for (i = 0; i < BSIZE; ++i) {
		w->localcells[i] = NULL;
	}

	int ndims = 2;
	int dims[3] = {0,0,0};
	int wrap[3] = {0,0,0};
	int reorder = 0;
	int size, rank;
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);
	MPI_Comm_size(MPI_COMM_WORLD,&size);
	MPI_Dims_create(size,ndims,dims);
	MPI_Cart_create(MPI_COMM_WORLD,ndims,dims,wrap,reorder,&w->comm);
	MPI_Comm_set_name(w->comm,"My Cart Comm");

	int coor[2];
	MPI_Cart_coords(w->comm,rank,ndims,coor);
	GetNeiRanks(w->comm, dims, coor, w->neiRanks, &w->numNei ); // in petsc: DAGetNeighbors()

	w->gxs = gxs;
	w->gys = gys;
	w->dx = (gxe - gxs) / dims[0];
	w->dy = (gye - gys) / dims[1];
	w->xs = gxs + coor[0] * w->dx;
//	w->xe = gxs + (coor[0]+1) * w->dx;
	w->ys = gys + coor[1] * w->dy;
//	w->ye = gys + (coor[1]+1) * w->dy;

	if( rank == 0 ) {
		printf("Dims: [%d, %d] at (%f, %f)\n", dims[0], dims[1], w->dx, w->dy);
	}

	//TODO: Make buffer size parameter adjustable
	w->bufSize = 50000;
	w->rPack = malloc(w->bufSize);
	w->sPack = malloc(w->bufSize);

	*world = w;
}
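
GetNeiRanks() itself is not shown above; a hypothetical sketch of how such a helper could collect the axis neighbours of a rank in the 2-D Cartesian communicator with MPI_Cart_shift (the original, per the DAGetNeighbors() note, may also include diagonal neighbours):

/* Hypothetical neighbour helper (not the original GetNeiRanks): with the
 * non-periodic grid above, MPI_Cart_shift returns MPI_PROC_NULL at the
 * domain edges, so only real neighbours are recorded. */
static void GetAxisNeiRanks(MPI_Comm comm, int neiRanks[], int *numNei)
{
	int dir, src, dst;
	*numNei = 0;
	for (dir = 0; dir < 2; ++dir) {
		MPI_Cart_shift(comm, dir, 1, &src, &dst);
		if (src != MPI_PROC_NULL) neiRanks[(*numNei)++] = src;
		if (dst != MPI_PROC_NULL) neiRanks[(*numNei)++] = dst;
	}
}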
Example #6
//
// Set a name on a communicator:
//
SCM
guile_comm_set_name (SCM world, SCM name)
{
    // extract MPI_Comm, verifies the type:
    MPI_Comm comm = scm_to_comm (world);

    // some communicators have names associated with them:
    char cname[MPI_MAX_OBJECT_NAME];

    // scm_to_locale_stringbuf() does not null-terminate:
    int len = scm_to_locale_stringbuf (name, cname, MPI_MAX_OBJECT_NAME);

    // clamp so the terminator stays inside the buffer:
    if ( len > MPI_MAX_OBJECT_NAME - 1 )
        len = MPI_MAX_OBJECT_NAME - 1;

    cname[len] = '\0';

    int ierr = MPI_Comm_set_name (comm, cname);
    assert (MPI_SUCCESS==ierr);

    return scm_from_int (len);
}
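
A hypothetical companion getter, not part of the original bindings, shown only to round out the wrapper above. It reuses the same scm_to_comm() helper and relies on MPI_Comm_get_name() and Guile's scm_from_locale_string(), both standard calls:

SCM
guile_comm_get_name (SCM world)
{
    // extract MPI_Comm, verifies the type:
    MPI_Comm comm = scm_to_comm (world);

    char cname[MPI_MAX_OBJECT_NAME];
    int len;

    int ierr = MPI_Comm_get_name (comm, cname, &len);
    assert (MPI_SUCCESS == ierr);

    // MPI null-terminates the name it returns:
    return scm_from_locale_string (cname);
}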
Example #7
int main(int argc, char *argv[])
{
    int provided, wrank, wsize, nmsg, i, tag;
    int *(buf[MAX_TARGETS]), bufsize[MAX_TARGETS];
    MPI_Request r[MAX_TARGETS];
    MPI_Comm commDup, commEven;

    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
    MPI_Comm_size(MPI_COMM_WORLD, &wsize);

    if (wsize < 4) {
        fprintf(stderr, "This test requires at least 4 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Create several communicators */
    MPI_Comm_dup(MPI_COMM_WORLD, &commDup);
    MPI_Comm_set_name(commDup, "User dup of comm world");

    MPI_Comm_split(MPI_COMM_WORLD, wrank & 0x1, wrank, &commEven);
    if (wrank & 0x1)
        MPI_Comm_free(&commEven);
    else
        MPI_Comm_set_name(commEven, "User split to even ranks");

    /* Create a collection of pending sends and receives
     * We use tags on the sends and receives (when ANY_TAG isn't used)
     * to provide an easy way to check that the proper requests are present.
     * TAG values use fields, in decimal (for easy reading):
     * 0-99: send/recv type:
     * 0 - other
     * 1 - irecv
     * 2 - isend
     * 3 - issend
     * 4 - ibsend
     * 5 - irsend
     * 6 - persistent recv
     * 7 - persistent send
     * 8 - persistent ssend
     * 9 - persistent rsend
     * 10 - persistent bsend
     * 100-999: destination (for send) or source, if receive.  999 = any-source
     * (rank is value/100)
     * 1000-2G: other values
     */
    /* Create the send/receive buffers */
    nmsg = 10;
    for (i = 0; i < nmsg; i++) {
        bufsize[i] = i;
        if (i) {
            buf[i] = (int *) calloc(bufsize[i], sizeof(int));
            if (!buf[i]) {
                fprintf(stderr, "Unable to allocate %d words\n", bufsize[i]);
                MPI_Abort(MPI_COMM_WORLD, 2);
            }
        } else
            buf[i] = 0;
    }

    /* Partial implementation */
    if (wrank == 0) {
        nmsg = 0;
        tag = 2 + 1 * 100;
        MPI_Isend(buf[0], bufsize[0], MPI_INT, 1, tag, MPI_COMM_WORLD, &r[nmsg++]);
        tag = 3 + 2 * 100;
        MPI_Issend(buf[1], bufsize[1], MPI_INT, 2, tag, MPI_COMM_WORLD, &r[nmsg++]);
        tag = 1 + 3 * 100;
        MPI_Irecv(buf[2], bufsize[2], MPI_INT, 3, tag, MPI_COMM_WORLD, &r[nmsg++]);
    } else if (wrank == 1) {
    } else if (wrank == 2) {
    } else if (wrank == 3) {
    }

    /* provide a convenient place to wait */
    MPI_Barrier(MPI_COMM_WORLD);
    printf("Barrier 1 finished\n");

    /* Match up (or cancel) the requests */
    if (wrank == 0) {
        MPI_Waitall(nmsg, r, MPI_STATUSES_IGNORE);
    } else if (wrank == 1) {
        tag = 2 + 1 * 100;
        MPI_Recv(buf[0], bufsize[0], MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    } else if (wrank == 2) {
        tag = 3 + 2 * 100;
        MPI_Recv(buf[1], bufsize[1], MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    } else if (wrank == 3) {
        tag = 1 + 3 * 100;
        MPI_Send(buf[2], bufsize[2], MPI_INT, 0, tag, MPI_COMM_WORLD);
    }

    MPI_Barrier(MPI_COMM_WORLD);
    printf("Barrier 2 finished\n");

    MPI_Comm_free(&commDup);
    if (commEven != MPI_COMM_NULL)
        MPI_Comm_free(&commEven);

    MPI_Finalize();
    return 0;
}
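
The tag layout described in the long comment of Example #7 (send/recv kind in the two low decimal digits, peer rank in the hundreds) is easy to centralize in a pair of helpers. A hypothetical sketch, not part of the original test:

/* Hypothetical helpers for the Example #7 tag scheme: tag = kind + 100*rank,
 * with kind in 0..99 (the original uses 999 in the rank field for any-source,
 * and reserves tags of 1000 and up for other values). */
static int make_tag(int kind, int peer)
{
    return kind + 100 * peer;
}

static int tag_kind(int tag)
{
    return tag % 100;    /* e.g. 2 = isend, 1 = irecv */
}

static int tag_peer(int tag)
{
    return tag / 100;    /* destination (send) or source (receive) rank */
}

With these, the isend posted at rank 0 above could be written as MPI_Isend(buf[0], bufsize[0], MPI_INT, 1, make_tag(2, 1), MPI_COMM_WORLD, &r[nmsg++]).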