Example #1
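/* Nonblocking contiguous write for PVFS2: the request is serviced immediately
 * by the blocking WriteContig call below and is therefore never queued. */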
void ADIOI_PVFS2_IwriteContig(ADIO_File fd, void *buf, int count, 
                MPI_Datatype datatype, int file_ptr_type,
                ADIO_Offset offset, ADIO_Request *request, int *error_code)  
{
    ADIO_Status status;
    int len, typesize;

    *request = ADIOI_Malloc_request();
    (*request)->optype = ADIOI_WRITE;
    (*request)->fd = fd;
    (*request)->queued = 0;
    (*request)->datatype = datatype;

    MPI_Type_size(datatype, &typesize);
    len = count * typesize;
    ADIOI_PVFS2_WriteContig(fd, buf, len, MPI_BYTE, file_ptr_type, offset, &status,
		    error_code);  

#ifdef HAVE_STATUS_SET_BYTES
    if (*error_code == MPI_SUCCESS) {
	MPI_Get_elements(&status, MPI_BYTE, &len);
	(*request)->nbytes = len;
    }
#endif
    fd->async_count++;
}
Example #2
/* Generic implementation of IreadContig calls the blocking ReadContig
 * immediately.
 */
void ADIOI_FAKE_IreadContig(ADIO_File fd, void *buf, int count,
                            MPI_Datatype datatype, int file_ptr_type,
                            ADIO_Offset offset, ADIO_Request *request,
                            int *error_code)
{
    ADIO_Status status;
    int len, typesize;

    *request = ADIOI_Malloc_request();
    (*request)->optype = ADIOI_READ;
    (*request)->fd = fd;
    (*request)->queued = 0;
    (*request)->datatype = datatype;

    MPI_Type_size(datatype, &typesize);
    len = count * typesize;

    /* Call the blocking function.  It will create an error code
     * if necessary.
     */
    ADIO_ReadContig(fd, buf, len, MPI_BYTE, file_ptr_type, offset,
                    &status, error_code);

    fd->async_count++;

#ifdef HAVE_STATUS_SET_BYTES
    if (*error_code == MPI_SUCCESS) {
        MPI_Get_elements(&status, MPI_BYTE, &len);
        (*request)->nbytes = len;
    }
#endif
}
Example #3
void ADIOI_PVFS_IreadContig(ADIO_File fd, void *buf, int count, 
                MPI_Datatype datatype, int file_ptr_type,
                ADIO_Offset offset, ADIO_Request *request, int *error_code)  
{
    ADIO_Status status;
    int len, typesize;

/* PVFS does not support nonblocking I/O. Therefore, use blocking I/O */

    *request = ADIOI_Malloc_request();
    (*request)->optype = ADIOI_READ;
    (*request)->fd = fd;
    (*request)->queued = 0;
    (*request)->datatype = datatype;

    MPI_Type_size(datatype, &typesize);
    len = count * typesize;
    ADIOI_PVFS_ReadContig(fd, buf, len, MPI_BYTE, file_ptr_type, offset, &status,
		    error_code);  

#ifdef HAVE_STATUS_SET_BYTES
    if (*error_code == MPI_SUCCESS) {
	MPI_Get_elements(&status, MPI_BYTE, &len);
	(*request)->nbytes = len;
    }
#endif
    fd->async_count++;
}
Example #4
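/* Nonblocking contiguous read for SCI: uses the aio path when asynchronous I/O
 * is available, otherwise (NO_AIO) falls back to the blocking ReadContig. */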
void ADIOI_SCI_IreadContig(ADIO_File fd, void *buf, int count, 
                MPI_Datatype datatype, int file_ptr_type,
                ADIO_Offset offset, ADIO_Request *request, int *error_code)  
{
    int len, typesize;
#ifdef NO_AIO
    ADIO_Status status;
#else
    int err=-1;
#ifndef PRINT_ERR_MSG
    static char myname[] = "ADIOI_SCI_IREADCONTIG";
#endif
#endif

    (*request) = ADIOI_Malloc_request();
    (*request)->optype = ADIOI_READ;
    (*request)->fd = fd;
    (*request)->datatype = datatype;

    MPI_Type_size(datatype, &typesize);
    len = count * typesize;

#ifdef NO_AIO
    /* HP, FreeBSD, Linux */
    /* no support for nonblocking I/O. Use blocking I/O. */

    ADIOI_SCI_ReadContig(fd, buf, len, MPI_BYTE, file_ptr_type, offset, 
			 &status, error_code);  
    (*request)->queued = 0;
#ifdef HAVE_STATUS_SET_BYTES
    if (*error_code == MPI_SUCCESS) {
	MPI_Get_elements(&status, MPI_BYTE, &len);
	(*request)->nbytes = len;
    }
#endif

#else
    if (file_ptr_type == ADIO_INDIVIDUAL) offset = fd->fp_ind;
    err = ADIOI_SCI_aio(fd, buf, len, offset, 0, &((*request)->handle));
    if (file_ptr_type == ADIO_INDIVIDUAL) fd->fp_ind += len;

    (*request)->queued = 1;
    ADIOI_Add_req_to_list(request);

#ifdef PRINT_ERR_MSG
    *error_code = (err == -1) ? MPI_ERR_UNKNOWN : MPI_SUCCESS;
#else
    if (err == -1) {
	*error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	ADIOI_Error(fd, *error_code, myname);	    
    }
    else *error_code = MPI_SUCCESS;
#endif
#endif

    fd->fp_sys_posn = -1;   /* set it to null. */
    fd->async_count++;
}
Example #5
/* send a { double, int, double} tuple and receive as a pair of
 * MPI_DOUBLE_INTs. this should (a) be valid, and (b) result in an
 * element count of 3.
 */
int double_int_test(void)
{
    int err, errs = 0, count;

    struct {
        double a;
        int b;
        double c;
    } foo;
    struct {
        double a;
        int b;
        double c;
        int d;
    } bar;

    int blks[3] = { 1, 1, 1 };
    MPI_Aint disps[3] = { 0, 0, 0 };
    MPI_Datatype types[3] = { MPI_DOUBLE, MPI_INT, MPI_DOUBLE };
    MPI_Datatype stype;

    MPI_Status recvstatus;

    /* fill in disps[1..2] with appropriate offset */
    disps[1] = (MPI_Aint) ((char *) &foo.b - (char *) &foo.a);
    disps[2] = (MPI_Aint) ((char *) &foo.c - (char *) &foo.a);

    MPI_Type_create_struct(3, blks, disps, types, &stype);
    MPI_Type_commit(&stype);

    err = MPI_Sendrecv((const void *) &foo, 1, stype, 0, 0,
                       (void *) &bar, 2, MPI_DOUBLE_INT, 0, 0, MPI_COMM_SELF, &recvstatus);
    if (err != MPI_SUCCESS) {
        errs++;
        if (verbose)
            fprintf(stderr, "MPI_Sendrecv returned error (%d)\n", err);
        return errs;
    }

    err = MPI_Get_elements(&recvstatus, MPI_DOUBLE_INT, &count);
    if (err != MPI_SUCCESS) {
        errs++;
        if (verbose)
            fprintf(stderr, "MPI_Get_elements returned error (%d)\n", err);
    }

    if (count != 3) {
        errs++;
        if (verbose)
            fprintf(stderr, "MPI_Get_elements returned count of %d, should be 3\n", count);
    }

    MPI_Type_free(&stype);

    return errs;
}
Example #6
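/* JNI binding for Java's Status.getElements(): rebuilds an MPI_Status from the
 * Java-side fields, calls MPI_Get_elements, and reports any failure through a
 * Java exception before returning the element count. */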
JNIEXPORT jint JNICALL Java_mpi_Status_getElements(
        JNIEnv *env, jobject jthis, jint source, jint tag,
        jint error, jint cancelled, jlong ucount, jlong jType)
{
    int count;
    MPI_Status stat;
    getStatus(&stat, source, tag, error, cancelled, ucount);
    MPI_Datatype datatype = (MPI_Datatype)jType;
    int rc = MPI_Get_elements(&stat, datatype, &count);
    ompi_java_exceptionCheck(env, rc);
    return count;
}
Example #7
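/* Wait for any outstanding request to complete, then verify via
 * MPI_Get_elements that the message carried exactly two MPI_INTs before
 * returning the associated task and recycling the request slot. */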
task_t *wait_completion(task_list_t *tlist, proc_list_t *plist,
			req_list_t *rlist) {
  task_t *ptr;
  MPI_Status status;
  int index;

  MPI_Waitany(rlist->nreqs, rlist->reqs, &index, &status);
  if(index!=MPI_UNDEFINED) {
    int nelem;
    MPI_Get_elements(&status, MPI_INT, &nelem);
    assert(nelem == 2);
    ptr = rlist->tasks[index];
    assert(ptr->v[1] == ptr->tskid);
      
    req_list_return_as_idle(rlist, index);
    return ptr;
  }
  return NULL;
}
Example #8
int main( int argc, char *argv[] )
{
    int i, count, err, errs = 0;
    MPI_Datatype type_ia;
    char omessage[256], imessage[256];
    MPI_Status recvstatus, sendstatus;
    MPI_Request request;
    
    MTest_Init( &argc, &argv );
    
    MPI_Type_contiguous(4,MPI_DOUBLE_INT,&type_ia);
    MPI_Type_commit(&type_ia);
    for (i=0;i<256;i++) omessage[i] = i;
    
    MPI_Isend(omessage, 33, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &request);
    MPI_Recv(imessage, 33, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &recvstatus);
    MPI_Wait(&request, &sendstatus);
    
    /* In versions of MPICH2 through 1.0.7, this call would segfault on
       some platforms (at least on BlueGene).  On others, it would fail
       to report an error.  
       It should return an error (it is invalid to use a different type 
       here than was used in the recv) or at least return MPI_UNDEFINED.
       
       If it returns MPI_UNDEFINED as the count, that will often be
       acceptable.
    */
    MPI_Errhandler_set( MPI_COMM_WORLD, MPI_ERRORS_RETURN );
    err = MPI_Get_elements(&recvstatus, type_ia, &count);
    if (err == MPI_SUCCESS && count != MPI_UNDEFINED) {
	errs++;
	printf( "MPI_Get_elements did not report an error\n" );
    }

    MPI_Type_free( &type_ia );

    MTest_Finalize( errs );
    MPI_Finalize();

  return 0;
}
Example #9
/* array_size and filename are assumed to be defined elsewhere in the original source */
int main( int argc, char **argv )
{
    int myrank, commsize;
    MPI_File outfile;
    MPI_Status status;
    int nbytes, myarray[array_size], mode, i;

    /* initialize MPI */
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
    MPI_Comm_size( MPI_COMM_WORLD, &commsize );

    /* initialize array */
    for (i=0; i < array_size; i++) {
      myarray[i] = 10*myrank*array_size+i;
    }

    /* open file */
    mode = MPI_MODE_CREATE | MPI_MODE_WRONLY;

    MPI_File_open( MPI_COMM_WORLD, filename, mode, MPI_INFO_NULL, &outfile );

    /* set file view */
    /* displacement in bytes: use sizeof(int), not sizeof(MPI_INT), which is a datatype handle */
    MPI_File_set_view( outfile, myrank*array_size*sizeof(int), MPI_INT, MPI_INT, "native", MPI_INFO_NULL );

    /*  write buffer to file */
    MPI_File_write( outfile, &myarray[0], array_size, MPI_INT, &status );

    /* print out number of bytes written */
    MPI_Get_elements( &status, MPI_CHAR, &nbytes );
    printf( "TASK %d ====== number of bytes written = %d ======\n", myrank, nbytes );

    /* close file */
    MPI_File_close( &outfile );

    /* finalize MPI */
    MPI_Finalize();
    return 0;
}
Example #10
/* ADIOI_TESTFS_IreadContig()
 *
 * Implemented by immediately calling ReadContig()
 */
void ADIOI_TESTFS_IreadContig(ADIO_File fd, void *buf, int count, 
			      MPI_Datatype datatype, int file_ptr_type,
			      ADIO_Offset offset, ADIO_Request *request, int
			      *error_code)
{
    ADIO_Status status;
    int myrank, nprocs, typesize, len;

    *error_code = MPI_SUCCESS;

    *request = ADIOI_Malloc_request();
    (*request)->optype = ADIOI_READ;
    (*request)->fd = fd;
    (*request)->queued = 0;
    (*request)->datatype = datatype;

    MPI_Type_size(datatype, &typesize);
    MPI_Comm_size(fd->comm, &nprocs);
    MPI_Comm_rank(fd->comm, &myrank);
    FPRINTF(stdout, "[%d/%d] ADIOI_TESTFS_IreadContig called on %s\n", 
	    myrank, nprocs, fd->filename);
    FPRINTF(stdout, "[%d/%d]    calling ADIOI_TESTFS_ReadContig\n", 
	    myrank, nprocs);

    len = count * typesize;
    ADIOI_TESTFS_ReadContig(fd, buf, len, MPI_BYTE, file_ptr_type, 
			    offset, &status, error_code);

#ifdef HAVE_STATUS_SET_BYTES
    if (*error_code == MPI_SUCCESS) {
	MPI_Get_elements(&status, MPI_BYTE, &len);
	(*request)->nbytes = len;
    }
#endif
    fd->async_count++;
}
Example #11
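/* Polling counterpart of wait_completion(): the list-walking implementation is
 * disabled (#if 0), so this routine currently aborts via assert(0). */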
task_t *poll_completion(task_list_t *tlist, proc_list_t *plist,
			req_list_t *rlist) {
  task_t *ptr;
  MPI_Status status;
  int flag, id;

#if 0
  for(id=rlist->running_id; id!=ID_NULL; id=rlist->next_ids[id]) {
    MPI_Test(&rlist->reqs[id], &flag, &status);

    if(flag) {
      int nelem;
      MPI_Get_elements(&status, MPI_INT, &nelem);
      assert(nelem == 2);
      ptr = rlist->tasks[id];
      assert(ptr->v[1] == ptr->tskid);
      return ptr;
    }    
  }
#else
  assert(0); /*No implementation as yet of polling with this data structure*/
#endif
  return NULL;
}
Example #12
int main(int argc, char *argv[])  
{
	int numtasks, rank, rc, tag=1;  
	MPI_Status status;

	MPI_Init(&argc,&argv);
	MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	int vec[SIZE];
	MPI_Datatype MPI_ONE_FIFTH;
	int stride = 5;
	int blen = 1;
	int dcount = SIZE / stride;
	//int MPI_Type_vector(int count, int blocklength, int stride, MPI_Datatype oldtype, MPI_Datatype *newtype)
	rc = MPI_Type_vector(dcount, blen, stride, MPI_INT, &MPI_ONE_FIFTH);
	//int MPI_Type_commit(MPI_Datatype *datatype)
	rc = MPI_Type_commit(&MPI_ONE_FIFTH);
	
	if (rank == 0) {
		int dest = 1;

		int i = 0;
		for (; i < SIZE; ++i) {
			vec[i] = i;
		}

		rc = MPI_Send(vec, 1, MPI_ONE_FIFTH, dest, tag, MPI_COMM_WORLD);
	} else if (rank == 1) {
		int source = 0;

		int i = 0;
		for (; i < SIZE; ++i) {
			vec[i] = -i;
		}

		rc = MPI_Recv(vec, 1, MPI_ONE_FIFTH, source, tag, MPI_COMM_WORLD, &status);
	}

	if (rank == 1) {
		int num_element;
		int num_raw_element; 
		MPI_Get_count(&status, MPI_ONE_FIFTH, &num_element);
		//int MPI_Get_elements(const MPI_Status *status, MPI_Datatype datatype, int *count)
		MPI_Get_elements(&status, MPI_INT, &num_raw_element);
		printf("get %d MPI_ONE_FIFTH\n", num_element);
		printf("get %d MPI_INT\n", num_raw_element);

		int i = 0;
		for (; i < SIZE; ++i) {
			printf("loc %d : %d\n", i, vec[i]);
		}

		MPI_Aint extent;
		//int MPI_Type_extent(MPI_Datatype datatype, MPI_Aint *extent)
		MPI_Type_extent(MPI_ONE_FIFTH, &extent);
		printf("extent %zu\n", extent);
	}
	
	//int MPI_Type_free(MPI_Datatype *datatype)
	MPI_Type_free(&MPI_ONE_FIFTH);
	MPI_Finalize();
	return 0;
}
Example #13
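/* Fortran binding shim: forwards MPI_GET_ELEMENTS from Fortran to the C
 * MPI_Get_elements and returns the error code through ierr. */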
FORT_DLL_SPEC void FORT_CALL mpi_get_elements_ ( MPI_Fint *v1, MPI_Fint *v2, MPI_Fint *v3, MPI_Fint *ierr ){
    *ierr = MPI_Get_elements( (MPI_Status *)(v1), (MPI_Datatype)(*v2), v3 );
}
Example #14
int main(int argc, char *argv[])
{
    int errs = 0;
    MPI_Datatype outtype, oldtypes[2];
    MPI_Aint offsets[2];
    int blklens[2];
    MPI_Comm comm;
    int size, rank, src, dest, tag;

    MTest_Init(&argc, &argv);

    comm = MPI_COMM_WORLD;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    if (size < 2) {
        errs++;
        printf("This test requires at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    src = 0;
    dest = 1;

    if (rank == src) {
        int buf[128], position, cnt;
        MTEST_VG_MEM_INIT(buf, 128 * sizeof(buf[0]));
        /* sender */

        /* Create a datatype and send it (multiple of sizeof(int)) */
        /* Create a send struct type */
        oldtypes[0] = MPI_INT;
        oldtypes[1] = MPI_CHAR;
        blklens[0] = 1;
        blklens[1] = 4 * sizeof(int);
        offsets[0] = 0;
        offsets[1] = sizeof(int);
        MPI_Type_struct(2, blklens, offsets, oldtypes, &outtype);
        MPI_Type_commit(&outtype);

        buf[0] = 4 * sizeof(int);
        /* printf("About to send to %d\n", dest); */
        MPI_Send(buf, 1, outtype, dest, 0, comm);
        MPI_Type_free(&outtype);

        /* Create a datatype and send it (not a multiple of sizeof(int)) */
        /* Create a send struct type */
        oldtypes[0] = MPI_INT;
        oldtypes[1] = MPI_CHAR;
        blklens[0] = 1;
        blklens[1] = 4 * sizeof(int) + 1;
        offsets[0] = 0;
        offsets[1] = sizeof(int);
        MPI_Type_struct(2, blklens, offsets, oldtypes, &outtype);
        MPI_Type_commit(&outtype);

        buf[0] = 4 * sizeof(int) + 1;
        MPI_Send(buf, 1, outtype, dest, 1, comm);
        MPI_Type_free(&outtype);

        /* Pack data and send as packed */
        position = 0;
        cnt = 7;
        MPI_Pack(&cnt, 1, MPI_INT, buf, 128 * sizeof(int), &position, comm);
        MPI_Pack((void *) "message", 7, MPI_CHAR, buf, 128 * sizeof(int), &position, comm);
        MPI_Send(buf, position, MPI_PACKED, dest, 2, comm);
    }
    else if (rank == dest) {
        MPI_Status status;
        int buf[128], i, elms, count;

        /* Receiver */
        /* Create a receive struct type */
        oldtypes[0] = MPI_INT;
        oldtypes[1] = MPI_CHAR;
        blklens[0] = 1;
        blklens[1] = 256;
        offsets[0] = 0;
        offsets[1] = sizeof(int);
        MPI_Type_struct(2, blklens, offsets, oldtypes, &outtype);
        MPI_Type_commit(&outtype);

        for (i = 0; i < 3; i++) {
            tag = i;
            /* printf("about to receive tag %d from %d\n", i, src); */
            MPI_Recv(buf, 1, outtype, src, tag, comm, &status);
            MPI_Get_elements(&status, outtype, &elms);
            if (elms != buf[0] + 1) {
                errs++;
                printf("For test %d, Get elements gave %d but should be %d\n", i, elms, buf[0] + 1);
            }
            MPI_Get_count(&status, outtype, &count);
            if (count != MPI_UNDEFINED) {
                errs++;
                printf("For partial send, Get_count did not return MPI_UNDEFINED\n");
            }
        }
        MPI_Type_free(&outtype);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;

}
Example #15
/* tests with an hindexed type with all zero length blocks */
int hindexed_zerotype_test(void)
{
    int err, errs = 0;
    int count, elements;
    MPI_Datatype mytype;
    MPI_Request request;
    MPI_Status status;

    int blks[]       = { 0, 0, 0 };
    MPI_Aint disps[] = { 0, 4, 16 };

    err = MPI_Type_hindexed(3, blks, disps, MPI_INT, &mytype);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Type_hindexed returned error\n");
	}
    }

    MPI_Type_commit(&mytype);

    err = MPI_Irecv(NULL, 2, mytype, 0, 0, MPI_COMM_SELF, &request);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Irecv returned error\n");
	}
    }

    err = MPI_Send(NULL, 1, mytype, 0, 0, MPI_COMM_SELF);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Send returned error\n");
	}
    }

    err = MPI_Wait(&request, &status);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Wait returned error\n");
	}
    }

    /* verify count and elements */
    err = MPI_Get_count(&status, mytype, &count);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Get_count returned error\n");
	}
    }
    if (count != 0) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "count = %d; should be 0\n", count);
	}
    }

    err = MPI_Get_elements(&status, mytype, &elements);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Get_elements returned error\n");
	}
    }
    if (elements != 0) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "elements = %d; should be 0\n", elements);
	}
    }

    MPI_Type_free(&mytype);

    return errs;
}
Example #16
/* tests a short receive into a sparse hindexed type with a zero
 * length block in it.  sort of eccentric, but we've got the basic
 * stuff covered with other tests.
 */
int hindexed_sparsetype_test(void)
{
    int err, errs = 0;
    int i, count, elements;
    MPI_Datatype mytype;
    MPI_Request request;
    MPI_Status status;

    int sendbuf[6]   = { 1, 2, 3, 4, 5, 6 };
    int recvbuf[16];
    int correct[16] = { 1, -2, 4, -4, 2, 3, 5, -8, -9, -10, 6,
			-12, -13, -14, -15, -16 };

    int blks[]       = { 1, 0,             2,             1 };
    MPI_Aint disps[] = { 0, 1*sizeof(int), 4*sizeof(int), 2*sizeof(int) };

    err = MPI_Type_hindexed(4, blks, disps, MPI_INT, &mytype);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Type_hindexed returned error\n");
	}
    }

    MPI_Type_commit(&mytype);

    for (i=0; i < 16; i++) recvbuf[i] = -(i+1);

    err = MPI_Irecv(recvbuf, 2, mytype, 0, 0, MPI_COMM_SELF, &request);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Irecv returned error\n");
	}
    }

    err = MPI_Send(sendbuf, 6, MPI_INT, 0, 0, MPI_COMM_SELF);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Send returned error\n");
	}
    }

    err = MPI_Wait(&request, &status);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Wait returned error\n");
	}
    }
 
    /* verify data */
    for (i=0; i < 16; i++) {
	if (recvbuf[i] != correct[i]) {
	    errs++;
	    if (verbose) {
		fprintf(stderr, "recvbuf[%d] = %d; should be %d\n",
			i, recvbuf[i], correct[i]);
	    }
	}
    }

    /* verify count and elements */
    err = MPI_Get_count(&status, mytype, &count);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Get_count returned error\n");
	}
    }
    if (count != MPI_UNDEFINED) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "count = %d; should be MPI_UNDEFINED (%d)\n",
		    count, MPI_UNDEFINED);
	}
    }

    err = MPI_Get_elements(&status, mytype, &elements);
    if (err != MPI_SUCCESS) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "MPI_Get_elements returned error\n");
	}
    }
    if (elements != 6) {
	errs++;
	if (verbose) {
	    fprintf(stderr, "elements = %d; should be 6\n", elements);
	}
    }

    MPI_Type_free(&mytype);

    return errs;
}
Example #17
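/* Nonblocking contiguous write for UFS: hands the operation to aio when
 * asynchronous I/O is available, otherwise (NO_AIO) performs a blocking
 * WriteContig and marks the request as not queued. */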
void ADIOI_UFS_IwriteContig(ADIO_File fd, void *buf, int count, 
                MPI_Datatype datatype, int file_ptr_type,
                ADIO_Offset offset, ADIO_Request *request, int *error_code)  
{
    int len, typesize;
#ifdef NO_AIO
    ADIO_Status status;
#else
    int err=-1;
#if defined(MPICH2) || !defined(PRINT_ERR_MSG)
    static char myname[] = "ADIOI_UFS_IWRITECONTIG";
#endif
#endif

    *request = ADIOI_Malloc_request();
    (*request)->optype = ADIOI_WRITE;
    (*request)->fd = fd;
    (*request)->datatype = datatype;

    MPI_Type_size(datatype, &typesize);
    len = count * typesize;

#ifdef NO_AIO
    /* HP, FreeBSD, Linux */
    /* no support for nonblocking I/O. Use blocking I/O. */

    ADIOI_UFS_WriteContig(fd, buf, len, MPI_BYTE, file_ptr_type, offset, 
			  &status, error_code);  
    (*request)->queued = 0;
#ifdef HAVE_STATUS_SET_BYTES
    if (*error_code == MPI_SUCCESS) {
	MPI_Get_elements(&status, MPI_BYTE, &len);
	(*request)->nbytes = len;
    }
#endif

#else
    if (file_ptr_type == ADIO_INDIVIDUAL) offset = fd->fp_ind;
    err = ADIOI_UFS_aio(fd, buf, len, offset, 1, &((*request)->handle));
    if (file_ptr_type == ADIO_INDIVIDUAL) fd->fp_ind += len;

    (*request)->queued = 1;
    ADIOI_Add_req_to_list(request);

    if (err == -1) {
#ifdef MPICH2
	*error_code = MPIR_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE, myname, __LINE__, MPI_ERR_IO, "**io",
	    "**io %s", strerror(errno));
	return;
#elif defined(PRINT_ERR_MSG)
	*error_code = MPI_ERR_UNKNOWN;
#else /* MPICH-1 */
	*error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	ADIOI_Error(fd, *error_code, myname);	    
#endif
    }
    else *error_code = MPI_SUCCESS;
#endif /* NO_AIO */

    fd->fp_sys_posn = -1;   /* set it to null. */
    fd->async_count++;
}
Example #18
/** Client code. Receives signals from the server to process a task or
    terminate processing and return*/
void client_code() {
  int *buf = NULL, buf_size;
  int flag;
  MPI_Status status;
  Integer p_handle;
  int ntsks=0, src;
  const char *pname = "client_code";
  double e1, e2, e3, e4, e5, f1, f2, f3, f4,f5,f6,f7,f8;
  double t_prepar=0, t_wait_start=0, t_grp=0,t_sync=0,t_compl=0,t_dest=0;
/*   double get_doit_time_(); */
/*   double get_esp_time_(); */
/*   double get_gm_crt_time_(); */
/*   double get_chrg_set_time_(); */
/*   double get_gm_push_time_(); */
  const int server = GA_Pgroup_absolute_id(ga_pgroup_get_default_(),SVR);
  const int default_grp = ga_pgroup_get_default_(); /*default GA group for this dispatcher instance*/
  const int world_me = GA_Nodeid();
  const int nproc = GA_Nnodes();

  t_ptask = 0.0;
/*   fprintf(stderr, "%d: 0 server=%d %s\n", GA_Nodeid(), server,pname); */

  e1 = util_wallsec_();
/*   fprintf(stderr, "%d: 0 %s\n", GA_Nodeid(), pname); */

/*   GA_Pgroup_set_default(GA_Pgroup_get_world()); */

/*   fprintf(stderr, "%d: 1 %s\n", world_me, pname); */

  buf_size = 1+ /*action to perform*/
    1+ /*task id - if TASK_SIGNAL*/
    nproc /*process group info*/
    ;

/*   buf = (int *)malloc(buf_size*sizeof(int)); */
  buf = (int *)alloca(buf_size*sizeof(int));
  assert(buf != NULL);

/*   fprintf(stderr, "%d: 2 %s\n", world_me, pname); */

  e2 = util_wallsec_();
  while(1) {
    int nelem, grp_me;
    Integer tskid;

    f1 = util_wallsec_();
/*     fprintf(stderr, "%d:: Waiting for work\n", world_me); */
    MPI_Recv(buf, buf_size, MPI_INT, MPI_ANY_SOURCE, SIGNAL_TAG, MPI_COMM_WORLD, &status);
    f2 = util_wallsec_();
    t_wait_start += (f2-f1);
/*     fprintf(stderr, "%d:: Client got msg from %d\n", world_me, status.MPI_SOURCE); */

    MPI_Get_elements(&status, MPI_INT, &nelem);
    assert(nelem >= 1);
      
    if(buf[0] == TERM_CLIENT) {
      /*process termination and return*/
/*        fprintf(stderr, "%d:: Recv-ed term signal\n", GA_Nodeid()); */
/*       free(buf); */
/*       fprintf(stderr, "%d:: Terminating client\n", GA_Nodeid()); */
#ifdef LEADER_BCAST
      signal_termination(SVR,status.MPI_SOURCE);
#endif
      break;
    }
/*     fprintf(stderr, "%d:: got a task to process\n", world_me); */
    /*Got a task to process*/
    assert(buf[0] == TASK_START);
    ntsks += 1;

    if(status.MPI_SOURCE == server) {
      qsort(buf+2, nelem-2, sizeof(int), int_compare);
    }
    f3  = util_wallsec_();
    t_prepar += (f3-f2);

#if LEADER_BCAST
    src = (server==status.MPI_SOURCE)?buf[2]:status.MPI_SOURCE;
    broadcast(nelem-2,buf+2,buf[2],src,buf,nelem*sizeof(int));
#endif

    /*The proc ids are in world group. So create sub-group of world group*/
    GA_Pgroup_set_default(GA_Pgroup_get_world());
    p_handle = GA_Pgroup_create(&buf[2], nelem-2);
    GA_Pgroup_set_default(p_handle);
/*     GA_Pgroup_sync(p_handle); */
    f4 = MPI_Wtime();
    t_grp += (f4-f3);

    tskid = buf[1];
/*     fprintf(stderr, "%d(%d):: Invoking process task tskid=%d\n", grp_me, world_me, tskid); */
    process_task_(&tskid, &p_handle);
    f5 = MPI_Wtime();
    t_ptask += (f5-f4);
    
    GA_Pgroup_sync(p_handle);
    grp_me = GA_Nodeid();
    f6 = util_wallsec_();
    t_sync += (f6-f5);

    if(grp_me == 0) {
      int v[2] = {TASK_DONE, tskid};
/*        fprintf(stderr, "%d(%d):: Sending ack for task %d to %d\n", */
/*  	      grp_me, world_me, tskid, SERVER); */
      MPI_Send(v, 2, MPI_INT, server, SIGNAL_TAG, MPI_COMM_WORLD);
    }
    f7 = util_wallsec_();
    t_compl += (f7-f6);
/*     GA_Pgroup_sync(p_handle); */
    GA_Pgroup_destroy(p_handle);
    GA_Pgroup_set_default(default_grp);
    f8 = util_wallsec_();
    t_dest += (f8-f7);
  }
  e3 = util_wallsec_();
/*   fprintf(stderr, "%d:: CLIENT total time=%lf\n", ga_nodeid_(), e3-e1); */
/*   fprintf(stderr, "%d:: CLIENT ntsks=%d\n", ga_nodeid_(), ntsks); */
/*   fprintf(stderr, "%d:: CLIENT loop time=%lf\n", ga_nodeid_(), e3-e2); */
/*   fprintf(stderr, "%d:: CLIENT wait start time=%lf\n", ga_nodeid_(),t_wait_start); */
/*   fprintf(stderr, "%d:: CLIENT prepare time=%lf\n", ga_nodeid_(),t_prepar); */
/*   fprintf(stderr, "%d:: CLIENT grp crt time=%lf\n", ga_nodeid_(), t_grp); */
/*   fprintf(stderr, "%d:: CLIENT ptask time=%lf\n", ga_nodeid_(), t_ptask); */
/*   fprintf(stderr, "%d:: CLIENT sync time=%lf\n", ga_nodeid_(), t_sync); */
/*   fprintf(stderr, "%d:: CLIENT compl time=%lf\n", ga_nodeid_(), t_compl); */
/*   fprintf(stderr, "%d:: CLIENT grp dstry time=%lf\n", ga_nodeid_(), t_dest); */
/*   fflush(stdout); */
/*   fprintf(stderr, "%d:: CLIENT doit time=%lf\n",ga_nodeid_(),get_doit_time_()); */
/*   fprintf(stderr, "%d:: CLIENT esp time=%lf\n",ga_nodeid_(),get_esp_time_()); */
/*   fprintf(stderr, "%d:: CLIENT chrg_set time=%lf\n",ga_nodeid_(),get_chrg_set_time_()); */
/*   fprintf(stderr, "%d:: CLIENT gm_crt time=%lf\n",ga_nodeid_(),get_gm_crt_time_()); */
/*   fprintf(stderr, "%d:: CLIENT gm_push time=%lf\n",ga_nodeid_(),get_gm_push_time_()); */
}
Example #19
int main( int argc, char **argv )
{
    int err = 0, toterr;
    MPI_Datatype contig1, varstruct1, oldtypes[2], varstruct2;
    MPI_Aint     displs[2];
    int          blens[2];
    MPI_Comm     comm;
    MPI_Status   status;
    int          world_rank;
    int          rank, size, partner, count, i;
    int          send_ibuf[4], recv_ibuf[4];
    buf_t        send_buf, recv_buf;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );

/* Form the datatypes */
    MPI_Type_contiguous( 4, MPI_INT, &contig1 );
    MPI_Type_commit( &contig1 );
    blens[0] = 1;
    blens[1] = 1000;
    oldtypes[0] = MPI_INT;
    oldtypes[1] = MPI_DOUBLE;
/* Note that the displacement for the data is probably double aligned */
    MPI_Address( &send_buf.len, &displs[0] );
    MPI_Address( &send_buf.data[0], &displs[1] );
/* Make relative */
    displs[1] = displs[1] - displs[0];
    displs[0] = 0;
    MPI_Type_struct( 2, blens, displs, oldtypes, &varstruct1 );
    MPI_Type_commit( &varstruct1 );

    comm = MPI_COMM_WORLD;

    MPI_Comm_size( comm, &size );
    MPI_Comm_rank( comm, &rank );

    if (size < 2) {
	fprintf( stderr, "This test requires at least 2 processes\n" );
	MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    if (rank == size - 1) {
	partner = 0;
	/* Send contiguous data */
	for (i=0; i<4; i++) 
	    send_ibuf[i] = i;
	MPI_Send( send_ibuf, 1, contig1, partner, 0, comm );

	/* Send partial structure */
	blens[1] = 23;
	MPI_Type_struct( 2, blens, displs, oldtypes, &varstruct2 );
	MPI_Type_commit( &varstruct2 );

	MPI_Send( &send_buf, 1, varstruct2, partner, 1, comm );
	MPI_Type_free( &varstruct2 );

	/* Send NO data */
	MPI_Send( MPI_BOTTOM, 0, MPI_INT, partner, 2, comm );
    }
    else if (rank == 0) {
	partner = size - 1;
	MPI_Recv( recv_ibuf, 1, contig1, partner, 0, comm, &status );
	MPI_Get_count( &status, MPI_INT, &count );
	if (count != 4) {
	    err++;
	    fprintf( stderr, 
		     "Wrong count for contig recv MPI_INT; got %d expected %d\n",
		     count, 4 );
	}
	MPI_Get_count( &status, contig1, &count );
	if (count != 1) {
	    err++;
	    fprintf( stderr, 
		     "Wrong count for contig recv (contig); got %d expected %d\n",
		     count, 1 );
	}
	MPI_Get_elements( &status, contig1, &count );
	if (count != 4) {
	    err++;
	    fprintf( stderr, 
		     "Wrong elements for contig recv contig; got %d expected %d\n",
		     count, 4 );
	}

	/* Now, try the partial structure */
	MPI_Recv( &recv_buf, 1, varstruct1, partner, 1, comm, &status );
	MPI_Get_elements( &status, varstruct1, &count );
	if (count != 24) {
	    err++;
	    fprintf( stderr, 
		     "Wrong number of elements for struct recv; got %d expected %d\n", 
		     count, 24 );
	}

	{
	    /* Receive nothing using a 0-sized type */
	    MPI_Datatype ztype;
	    MPI_Type_contiguous( 0, MPI_INT, &ztype );
	    MPI_Type_commit( &ztype );
	    MPI_Recv( &recv_buf, 10, ztype, partner, 2, comm, &status );
	    /* Current clarification requires 0 for the result */
	    MPI_Get_elements( &status, ztype, &count );
	    if (count != 0) {
		err++;
		fprintf( stderr, 
			 "Wrong number of elements for 0-size datatype; got %d\n",
			 count );
	    }
	    MPI_Get_count( &status, ztype, &count );
	    if (count != 0) {
		err++;
		fprintf( stderr, 
			 "Wrong count for 0-size datatype; got %d\n",
			 count );
	    }
	    MPI_Type_free( &ztype );
	}
    }
    MPI_Type_free( &contig1 );
    MPI_Type_free( &varstruct1 );
    
    MPI_Allreduce( &err, &toterr, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
    if (world_rank == 0) {
	    if (toterr == 0) 
		printf( " No Errors\n" );
	    else
		printf( "Found %d errors in MPI_Get_elements\n", toterr );
    }
    MPI_Finalize( );
    return toterr;
}
Example #20
File: MPI-api.c Project: 8l/rose
void declareBindings (void)
{
  /* === Point-to-point === */
  void* buf;
  int count;
  MPI_Datatype datatype;
  int dest;
  int tag;
  MPI_Comm comm;
  MPI_Send (buf, count, datatype, dest, tag, comm); // L12
  int source;
  MPI_Status status;
  MPI_Recv (buf, count, datatype, source, tag, comm, &status); // L15
  MPI_Get_count (&status, datatype, &count);
  MPI_Bsend (buf, count, datatype, dest, tag, comm);
  MPI_Ssend (buf, count, datatype, dest, tag, comm);
  MPI_Rsend (buf, count, datatype, dest, tag, comm);
  void* buffer;
  int size;
  MPI_Buffer_attach (buffer, size); // L22
  MPI_Buffer_detach (buffer, &size);
  MPI_Request request;
  MPI_Isend (buf, count, datatype, dest, tag, comm, &request); // L25
  MPI_Ibsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Issend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irsend (buf, count, datatype, dest, tag, comm, &request);
  MPI_Irecv (buf, count, datatype, source, tag, comm, &request);
  MPI_Wait (&request, &status);
  int flag;
  MPI_Test (&request, &flag, &status); // L32
  MPI_Request_free (&request);
  MPI_Request* array_of_requests;
  int index;
  MPI_Waitany (count, array_of_requests, &index, &status); // L36
  MPI_Testany (count, array_of_requests, &index, &flag, &status);
  MPI_Status* array_of_statuses;
  MPI_Waitall (count, array_of_requests, array_of_statuses); // L39
  MPI_Testall (count, array_of_requests, &flag, array_of_statuses);
  int incount;
  int outcount;
  int* array_of_indices;
  MPI_Waitsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L44--45
  MPI_Testsome (incount, array_of_requests, &outcount, array_of_indices,
		array_of_statuses); // L46--47
  MPI_Iprobe (source, tag, comm, &flag, &status); // L48
  MPI_Probe (source, tag, comm, &status);
  MPI_Cancel (&request);
  MPI_Test_cancelled (&status, &flag);
  MPI_Send_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Bsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Ssend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Rsend_init (buf, count, datatype, dest, tag, comm, &request);
  MPI_Recv_init (buf, count, datatype, source, tag, comm, &request);
  MPI_Start (&request);
  MPI_Startall (count, array_of_requests);
  void* sendbuf;
  int sendcount;
  MPI_Datatype sendtype;
  int sendtag;
  void* recvbuf;
  int recvcount;
  MPI_Datatype recvtype;
  int recvtag;
  MPI_Sendrecv (sendbuf, sendcount, sendtype, dest, sendtag,
		recvbuf, recvcount, recvtype, source, recvtag,
		comm, &status); // L67--69
  MPI_Sendrecv_replace (buf, count, datatype, dest, sendtag, source, recvtag,
			comm, &status); // L70--71
  MPI_Datatype oldtype;
  MPI_Datatype newtype;
  MPI_Type_contiguous (count, oldtype, &newtype); // L74
  int blocklength;
  {
    int stride;
    MPI_Type_vector (count, blocklength, stride, oldtype, &newtype); // L78
  }
  {
    MPI_Aint stride;
    MPI_Type_hvector (count, blocklength, stride, oldtype, &newtype); // L82
  }
  int* array_of_blocklengths;
  {
    int* array_of_displacements;
    MPI_Type_indexed (count, array_of_blocklengths, array_of_displacements,
		      oldtype, &newtype); // L87--88
  }
  {
    MPI_Aint* array_of_displacements;
    MPI_Type_hindexed (count, array_of_blocklengths, array_of_displacements,
                       oldtype, &newtype); // L92--93
    MPI_Datatype* array_of_types;
    MPI_Type_struct (count, array_of_blocklengths, array_of_displacements,
                     array_of_types, &newtype); // L95--96
  }
  void* location;
  MPI_Aint address;
  MPI_Address (location, &address); // L100
  MPI_Aint extent;
  MPI_Type_extent (datatype, &extent); // L102
  MPI_Type_size (datatype, &size);
  MPI_Aint displacement;
  MPI_Type_lb (datatype, &displacement); // L105
  MPI_Type_ub (datatype, &displacement);
  MPI_Type_commit (&datatype);
  MPI_Type_free (&datatype);
  MPI_Get_elements (&status, datatype, &count);
  void* inbuf;
  void* outbuf;
  int outsize;
  int position;
  MPI_Pack (inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
  int insize;
  MPI_Unpack (inbuf, insize, &position, outbuf, outcount, datatype,
	      comm); // L116--117
  MPI_Pack_size (incount, datatype, comm, &size);

  /* === Collectives === */
  MPI_Barrier (comm); // L121
  int root;
  MPI_Bcast (buffer, count, datatype, root, comm); // L123
  MPI_Gather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
	      root, comm); // L124--125
  int* recvcounts;
  int* displs;
  MPI_Gatherv (sendbuf, sendcount, sendtype,
               recvbuf, recvcounts, displs, recvtype,
	       root, comm); // L128--130
  MPI_Scatter (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L131--132
  int* sendcounts;
  MPI_Scatterv (sendbuf, sendcounts, displs, sendtype,
		recvbuf, recvcount, recvtype, root, comm); // L134--135
  MPI_Allgather (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L136--137
  MPI_Allgatherv (sendbuf, sendcount, sendtype,
		  recvbuf, recvcounts, displs, recvtype,
		  comm); // L138--140
  MPI_Alltoall (sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
		comm); // L141--142
  int* sdispls;
  int* rdispls;
  MPI_Alltoallv (sendbuf, sendcounts, sdispls, sendtype,
                 recvbuf, recvcounts, rdispls, recvtype,
		 comm); // L145--147
  MPI_Op op;
  MPI_Reduce (sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
  MPI_User_function function;
  int commute;
  MPI_Op_create (function, commute, &op); // L153
#endif
  MPI_Op_free (&op); // L155
  MPI_Allreduce (sendbuf, recvbuf, count, datatype, op, comm);
  MPI_Reduce_scatter (sendbuf, recvbuf, recvcounts, datatype, op, comm);
  MPI_Scan (sendbuf, recvbuf, count, datatype, op, comm);

  /* === Groups, contexts, and communicators === */
  MPI_Group group;
  MPI_Group_size (group, &size); // L162
  int rank;
  MPI_Group_rank (group, &rank); // L164
  MPI_Group group1;
  int n;
  int* ranks1;
  MPI_Group group2;
  int* ranks2;
  MPI_Group_translate_ranks (group1, n, ranks1, group2, ranks2); // L170
  int result;
  MPI_Group_compare (group1, group2, &result); // L172
  MPI_Group newgroup;
  MPI_Group_union (group1, group2, &newgroup); // L174
  MPI_Group_intersection (group1, group2, &newgroup);
  MPI_Group_difference (group1, group2, &newgroup);
  int* ranks;
  MPI_Group_incl (group, n, ranks, &newgroup); // L178
  MPI_Group_excl (group, n, ranks, &newgroup);
  extern int ranges[][3];
  MPI_Group_range_incl (group, n, ranges, &newgroup); // L181
  MPI_Group_range_excl (group, n, ranges, &newgroup);
  MPI_Group_free (&group);
  MPI_Comm_size (comm, &size);
  MPI_Comm_rank (comm, &rank);
  MPI_Comm comm1;
  MPI_Comm comm2;
  MPI_Comm_compare (comm1, comm2, &result);
  MPI_Comm newcomm;
  MPI_Comm_dup (comm, &newcomm);
  MPI_Comm_create (comm, group, &newcomm);
  int color;
  int key;
  MPI_Comm_split (comm, color, key, &newcomm); // L194
  MPI_Comm_free (&comm);
  MPI_Comm_test_inter (comm, &flag);
  MPI_Comm_remote_size (comm, &size);
  MPI_Comm_remote_group (comm, &group);
  MPI_Comm local_comm;
  int local_leader;
  MPI_Comm peer_comm;
  int remote_leader;
  MPI_Comm newintercomm;
  MPI_Intercomm_create (local_comm, local_leader, peer_comm, remote_leader, tag,
			&newintercomm); // L204--205
  MPI_Comm intercomm;
  MPI_Comm newintracomm;
  int high;
  MPI_Intercomm_merge (intercomm, high, &newintracomm); // L209
  int keyval;
#if 0
  MPI_Copy_function copy_fn;
  MPI_Delete_function delete_fn;
  void* extra_state;
  MPI_Keyval_create (copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
  MPI_Keyval_free (&keyval); // L217
  void* attribute_val;
  MPI_Attr_put (comm, keyval, attribute_val); // L219
  MPI_Attr_get (comm, keyval, attribute_val, &flag);
  MPI_Attr_delete (comm, keyval);

  /* === Environmental inquiry === */
  char* name;
  int resultlen;
  MPI_Get_processor_name (name, &resultlen); // L226
  MPI_Errhandler errhandler;
#if 0
  MPI_Handler_function function;
  MPI_Errhandler_create (function, &errhandler); // L230
#endif
  MPI_Errhandler_set (comm, errhandler); // L232
  MPI_Errhandler_get (comm, &errhandler);
  MPI_Errhandler_free (&errhandler);
  int errorcode;
  char* string;
  MPI_Error_string (errorcode, string, &resultlen); // L237
  int errorclass;
  MPI_Error_class (errorcode, &errorclass); // L239
  MPI_Wtime ();
  MPI_Wtick ();
  int argc;
  char** argv;
  MPI_Init (&argc, &argv); // L244
  MPI_Finalize ();
  MPI_Initialized (&flag);
  MPI_Abort (comm, errorcode);
}
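/* C++ wrapper method (MPIWRAP): queries the wrapped MPI_Status for the number
 * of basic elements of the given datatype. */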
 MPI_Int elements(const MPI_Datatype& type) const
 {
     MPI_Int n;
     MPIWRAP_CALL(MPI_Get_elements(nc(&status), type, &n));
     return n;
 }