Example #1
int main(int argc, char **argv)
{
    int rank, size, rc, ec, errs = 0;
    int flag = 1;
    MPI_Comm dup, shrunk;

    MPI_Init(&argc, &argv);
    MPI_Comm_dup(MPI_COMM_WORLD, &dup);
    MPI_Comm_rank(dup, &rank);
    MPI_Comm_size(dup, &size);
    MPI_Comm_set_errhandler(dup, MPI_ERRORS_RETURN);

    if (size < 4) {
        fprintf(stderr, "Must run with at least 4 processes\n");
        MPI_Abort(dup, 1);
    }

    if (2 == rank) exit(EXIT_FAILURE);

    if (MPI_SUCCESS == (rc = MPIX_Comm_agree(dup, &flag))) {
        MPI_Error_class(rc, &ec);
        fprintf(stderr, "[%d] Expected MPIX_ERR_PROC_FAILED after agree. Received: %d\n", rank, ec);
        errs++;
        MPI_Abort(dup, 1);
    }

    if (MPI_SUCCESS != (rc = MPIX_Comm_shrink(dup, &shrunk))) {
        MPI_Error_class(rc, &ec);
        fprintf(stderr, "[%d] Expected MPI_SUCCESS after shrink. Received: %d\n", rank, ec);
        errs++;
        MPI_Abort(dup, 1);
    }

    if (MPI_SUCCESS != (rc = MPIX_Comm_agree(shrunk, &flag))) {
        MPI_Error_class(rc, &ec);
        fprintf(stderr, "[%d] Expected MPI_SUCCESS after agree. Received: %d\n", rank, ec);
        errs++;
        MPI_Abort(dup, 1);
    }

    MPI_Comm_free(&shrunk);
    MPI_Comm_free(&dup);

    if (0 == rank) {
        if (errs)
            fprintf(stdout, " Found %d errors\n", errs);
        else
            fprintf(stdout, " No errors\n");
    }

    MPI_Finalize();
}
Example #2
int main( int argc, char *argv[] )
{
    int errs = 0;
    char string[MPI_MAX_ERROR_STRING], outstring[MPI_MAX_ERROR_STRING];
    int newclass[NCLASSES], newcode[NCLASSES][NCODES];
    int i, j, slen, outclass;

    MTest_Init( &argc, &argv );

    /* Initialize the new codes */
    for (i=0; i<NCLASSES; i++) {
	MPI_Add_error_class( &newclass[i] );
	for (j=0; j<NCODES; j++) {
	    MPI_Add_error_code( newclass[i], &newcode[i][j] );
	    sprintf( string, "code for class %d code %d\n", i, j );
	    MPI_Add_error_string( newcode[i][j], string );
	}
    }

    /* check the values */
    for (i=0; i<NCLASSES; i++) {
	MPI_Error_class( newclass[i], &outclass );
	if (outclass != newclass[i]) {
	    errs++;
	    printf( "Error class %d is not a valid error code %x %x\n", i,
		    outclass, newclass[i]);
	}
	for (j=0; j<NCODES; j++) {
	    MPI_Error_class( newcode[i][j], &outclass );
	    if (outclass != newclass[i]) {
		errs++;
		printf( "Class of code for %d is not correct %x %x\n", j,
			outclass, newclass[i] );
	    }
	    MPI_Error_string( newcode[i][j], outstring, &slen );
	    sprintf( string, "code for class %d code %d\n", i, j );
	    if (strcmp( outstring, string )) {
		errs++;
		printf( "Error string is :%s: but should be :%s:\n",
			outstring, string );
	    }
	}
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
  
}
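Example #2 assumes that NCLASSES and NCODES are defined earlier in the test source; they are not shown here. A minimal sketch of the assumed macros (the values below are illustrative only, not taken from the original test):

/* Hypothetical values; the original test defines its own limits. */
#define NCLASSES 4
#define NCODES   4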
Example #3
int main(int argc, char* argv[])
{
    MPI_Comm comm, newcomm, scomm;
    MPI_Group group;
    MPI_Info newinfo = MPI_INFO_NULL;
    int rank, size, color;
    int errs = 0, errclass, mpi_errno;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    MPI_Comm_group(comm, &group);

    MPI_Comm_create(comm, group, &newcomm);
    color = rank % 2;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &scomm);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /*test comm_split_type for NULL variable */
    mpi_errno = MPI_Comm_split_type(scomm, 2, 4, newinfo, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    MPI_Comm_free(&comm);
    MPI_Comm_free(&newcomm);
    MPI_Comm_free(&scomm);
    MPI_Group_free(&group);
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #4
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int j, count;
    char *ap;

    MTest_Init(&argc, &argv);

    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    for (count = 1; count < 128000; count *= 2) {

        err = MPI_Alloc_mem(count, MPI_INFO_NULL, &ap);
        if (err) {
            int errclass;
            /* An error of  MPI_ERR_NO_MEM is allowed */
            MPI_Error_class(err, &errclass);
            if (errclass != MPI_ERR_NO_MEM) {
                errs++;
                MTestPrintError(err);
            }

        } else {
            /* Access all of this memory */
            for (j = 0; j < count; j++) {
                ap[j] = (char) (j & 0x7f);
            }
            MPI_Free_mem(ap);
        }
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example #5
int main(int argc, char **argv)
{
    MPI_Group basegroup;
    MPI_Group g1;
    MPI_Comm comm, newcomm;
    int rank, size;
    int worldrank;
    int errs = 0, errclass, mpi_errno;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &worldrank);
    comm = MPI_COMM_WORLD;
    MPI_Comm_group(comm, &basegroup);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_split(comm, 0, size - rank, &newcomm);
    MPI_Comm_group(newcomm, &g1);

    /* Checking group_intersection for NULL variable */
    mpi_errno = MPI_Group_intersection(basegroup, g1, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    MPI_Comm_free(&newcomm);
    MPI_Group_free(&basegroup);
    MPI_Group_free(&g1);
    MTest_Finalize(errs);
    return 0;
}
Example #6
int main(int argc, char *argv[])
{
    int errs = 0, errclass, mpi_errno;
    int rank, size;
    MPI_Comm comm;
    MPI_Group group;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_dup(MPI_COMM_WORLD, &comm);
    MPI_Comm_group(comm, &group);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /*test comm_create for NULL variable */
    mpi_errno = MPI_Comm_create(comm, group, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    MPI_Comm_free(&comm);
    MPI_Group_free(&group);
    MTest_Finalize(errs);
    return 0;
}
Example #7
/*
 * This test ensures that shrink works correctly
 */
int main(int argc, char **argv)
{
    int rank, size, newsize, rc, errclass, errs = 0;
    MPI_Comm newcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    if (size < 4) {
        fprintf(stderr, "Must run with at least 4 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (2 == rank)
        exit(EXIT_FAILURE);

    rc = MPIX_Comm_shrink(MPI_COMM_WORLD, &newcomm);
    if (rc) {
        MPI_Error_class(rc, &errclass);
        fprintf(stderr, "Expected MPI_SUCCESS from MPIX_Comm_shrink. Received: %d\n", errclass);
        errs++;
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_size(newcomm, &newsize);
    if (newsize != size - 1)
        errs++;

    rc = MPI_Barrier(newcomm);
    if (rc) {
        MPI_Error_class(rc, &errclass);
        fprintf(stderr, "Expected MPI_SUCCESS from MPI_BARRIER. Received: %d\n", errclass);
        errs++;
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_free(&newcomm);

    if (0 == rank)
        fprintf(stdout, " No Errors\n");

    MPI_Finalize();

    return 0;
}
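Example #7 shrinks the communicator without first identifying which ranks died. When that information is needed, the MPICH ULFM prototype also provides MPIX_Comm_failure_ack and MPIX_Comm_failure_get_acked. A minimal sketch under that assumption (this helper is not part of the examples above):

/* Sketch: count the acknowledged process failures on comm.
 * Assumes an MPI build that exposes the ULFM prototype (MPIX_*) calls. */
int count_failed(MPI_Comm comm)
{
    MPI_Group failed;
    int nfailed = 0;

    MPIX_Comm_failure_ack(comm);                 /* acknowledge failures seen so far */
    MPIX_Comm_failure_get_acked(comm, &failed);  /* group of acknowledged failed ranks */
    MPI_Group_size(failed, &nfailed);
    MPI_Group_free(&failed);
    return nfailed;
}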
Example #8
int ReportErr(int errcode, const char name[])
{
    int errclass, errlen;
    char errmsg[MPI_MAX_ERROR_STRING];
    MPI_Error_class(errcode, &errclass);
    MPI_Error_string(errcode, errmsg, &errlen);
    fprintf(stderr, "In %s, error code %d(class %d) = %s\n", name, errcode, errclass, errmsg);
    return 1;
}
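ReportErr returns 1 so that a caller can add it straight into an error counter. A minimal usage sketch (the variables around the call are assumptions, not part of the original helper):

/* Sketch: accumulate errors from an MPI call when MPI_ERRORS_RETURN is set. */
int rc, errs = 0;
MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
rc = MPI_Barrier(MPI_COMM_WORLD);
if (rc != MPI_SUCCESS)
    errs += ReportErr(rc, "MPI_Barrier");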
Example #9
void MTestPrintErrorMsg(const char msg[], int errcode)
{
    int errclass, slen;
    char string[MPI_MAX_ERROR_STRING];

    MPI_Error_class(errcode, &errclass);
    MPI_Error_string(errcode, string, &slen);
    printf("%s: Error class %d (%s)\n", msg, errclass, string);
    fflush(stdout);
}
Example #10
/* ------------------------------------------------------------------------ */
void MTestPrintError(int errcode)
{
    int errclass, slen;
    char string[MPI_MAX_ERROR_STRING];

    MPI_Error_class(errcode, &errclass);
    MPI_Error_string(errcode, string, &slen);
    printf("Error class %d (%s)\n", errclass, string);
    fflush(stdout);
}
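Many of the examples also call MTest_Init, MTest_Finalize, and MTestReturnValue from the MPICH test harness. A minimal set of stand-in definitions, useful only for building the snippets outside that harness (the real harness does considerably more and may finalize MPI itself):

/* Sketch: stand-ins for the MPICH test harness, not the real mtest implementation. */
#include <mpi.h>
#include <stdio.h>

void MTest_Init(int *argc, char ***argv)
{
    MPI_Init(argc, argv);
}

void MTest_Finalize(int errs)
{
    int rank, toterrs = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Reduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        if (toterrs)
            printf(" Found %d errors\n", toterrs);
        else
            printf(" No Errors\n");
    }
}

int MTestReturnValue(int errs)
{
    return errs ? 1 : 0;
}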
Example #11
/*
 * printMPIerr()
 * Wrapper for MPI_Error string.
 */
void printMPIerr(int err, char *actionName) {
  int len,eclass,i;
  char buffer[1024];

  MPI_Error_string(err,buffer,&len);
  MPI_Error_class(err,&eclass);
  // Remove newlines from MPI error string
  for (i=0; i<len; i++)
    if (buffer[i]=='\n') buffer[i]=':';
  fprintf(stdout,"[%i] MPI ERROR %d: %s: [%s]\n",worldrank,eclass,actionName,buffer);

  return;
}
Example #12
void mvle_mpi_error(int error)
{
    int errorclass;
    int len;
    char *buffer = new char[MPI_MAX_ERROR_STRING + 1];
    buffer[MPI_MAX_ERROR_STRING] = '\0';

    MPI_Error_class(error, &errorclass);
    MPI_Error_string(errorclass,  buffer, &len);
    buffer[len] = '\0';

    mvle_print_error("%s", buffer);
    delete[] buffer;
}
Example #13
void MpiNode::report_MPI_ERROR(int error_code)
{
	char error_string[MPI_MAX_ERROR_STRING];
	int length_of_error_string, error_class;
	MPI_Error_class(error_code, &error_class);
	MPI_Error_string(error_class, error_string, &length_of_error_string);
	fprintf(stderr, "%3d: %s\n", rank, error_string);
	MPI_Error_string(error_code, error_string, &length_of_error_string);
	fprintf(stderr, "%3d: %s\n", rank, error_string);

	std::cerr.flush();
	REPORT_ERROR("Encountered an MPI-related error, see above. Now exiting...");

}
Example #14
void weh(MPI_Win * win, int *err, ...)
{
    int errclass;
    MPI_Error_class(*err, &errclass);
    if (errclass != expected_err_class) {
        errs++;
        printf("Unexpected error code (class = %d)\n", errclass);
    }
    if (*win != mywin) {
        errs++;
        printf("Unexpected window (got %x expected %x)\n", (int) *win, (int) mywin);
    }
    calls++;
    return;
}
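The callback in Example #14 matches the MPI_Win_errhandler_function signature, but the snippet does not show how it is attached. A minimal registration sketch, assuming the global window mywin that the handler compares against:

/* Sketch: attach the handler above to the (global) window mywin. */
static void install_weh(void)
{
    MPI_Errhandler eh;
    MPI_Win_create_errhandler(weh, &eh);
    MPI_Win_set_errhandler(mywin, eh);
    MPI_Errhandler_free(&eh);   /* the window keeps its own reference */
}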
Example #15
/*
 * This test attempts collective communication after a process in
 * the communicator has failed. Since all processes contribute to
 * the result of the operation, all processes will receive an error.
 */
int main(int argc, char **argv)
{
    int rank, size, err, errclass;
    int sendbuf[1] = { 42 };
    int recvbuf[1];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    if (size < 3) {
        fprintf(stderr, "Must run with at least 3 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    err = MPI_Reduce(sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
        MPI_Error_class(err, &errclass);
        if (errclass == MPIX_ERR_PROC_FAILED) {
            printf(" No Errors\n");
            fflush(stdout);
        }
        else {
            fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                    errclass);
        }
#else
        if (err) {
            printf(" No Errors\n");
            fflush(stdout);
        }
        else {
            fprintf(stderr, "Program reported MPI_SUCCESS, but an error code was expected.\n");
        }
#endif
    }

    MPI_Finalize();

    return 0;
}
Example #16
int main(int argc, char **argv)
{
    int errcode, errclass, errs = 0;

    MTest_Init(&argc, &argv);

    MPI_Add_error_code(MPI_ERR_ARG, &errcode);
    MPI_Error_class(errcode, &errclass);

    if (errclass != MPI_ERR_ARG) {
        printf("ERROR: Got 0x%x, expected 0x%x\n", errclass, MPI_ERR_ARG);
        errs++;
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example #17
/*
 * This test attempts MPI_Recv with the source being a dead process. It should fail
 * and return an error. If we are testing sufficiently new MPICH, we look for the
 * MPIX_ERR_PROC_FAILED error code. These should be converted to look for the
 * standardized error code once it is finalized.
 */
int main(int argc, char **argv)
{
    int rank, size, err, errclass, toterrs = 0;
    char buf[10];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 2) {
        fprintf(stderr, "Must run with at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    if (rank == 0) {
        MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
        err = MPI_Recv(buf, 1, MPI_CHAR, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
        MPI_Error_class(err, &errclass);
        if (errclass == MPIX_ERR_PROC_FAILED) {
            printf(" No Errors\n");
            fflush(stdout);
        } else {
            fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                    errclass);
            toterrs++;
        }
#else
        if (err) {
            printf(" No Errors\n");
            fflush(stdout);
        } else {
            fprintf(stderr, "Program reported MPI_SUCCESS, but an error code was expected.\n");
            toterrs++;
        }
#endif
    }

    MPI_Finalize();

    return MTestReturnValue(toterrs);
}
Example #18
/*
 * This test attempts to MPI_Isend with the destination being a dead process.
 * The communication should succeed or report an error. It must not block
 * indefinitely.
 */
int main(int argc, char **argv)
{
    int rank, size, err, errclass;
    char buf[100000];
    MPI_Request request;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 2) {
        fprintf( stderr, "Must run with at least 2 processes\n" );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    if (rank == 0) {
        MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
        err = MPI_Isend(buf, 100000, MPI_CHAR, 1, 0, MPI_COMM_WORLD, &request);
        if (err)
            fprintf(stderr, "MPI_Isend returned error\n");

        err = MPI_Wait(&request, MPI_STATUS_IGNORE);
#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
        MPI_Error_class(err, &errclass);
        if ((err) && (errclass != MPIX_ERR_PROC_FAILED)) {
            fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n", errclass);
        } else {
            printf(" No Errors\n");
            fflush(stdout);
        }
#else
        printf(" No Errors\n");
        fflush(stdout);
#endif
    }

    MPI_Finalize();

    return 0;
}
Example #19
/**
\internal
\ingroup datasets
Provide open, read and close for use when searching for magic numbers
*/
static int
openmagic(struct MagicFile* file)
{
    int status = NC_NOERR;

    switch (file->model->iosp) {
    case NC_IOSP_MEMORY: {
	/* Get its length */
	NC_memio* meminfo = (NC_memio*)file->parameters;
        assert(meminfo != NULL);
	file->filelen = (long long)meminfo->size;
	} break;
    case NC_IOSP_FILE: {
#ifdef USE_PARALLEL
        if (file->use_parallel) {
	    int retval;
	    MPI_Offset size;
            assert(file->parameters != NULL);
	    if((retval = MPI_File_open(((NC_MPI_INFO*)file->parameters)->comm,
                                   (char*)file->path,MPI_MODE_RDONLY,
                                   ((NC_MPI_INFO*)file->parameters)->info,
                                   &file->fh)) != MPI_SUCCESS) {
#ifdef MPI_ERR_NO_SUCH_FILE
		int errorclass;
		MPI_Error_class(retval, &errorclass);
		if (errorclass == MPI_ERR_NO_SUCH_FILE)
#ifdef NC_ENOENT
		    status = NC_ENOENT;
#else
		    status = errno;
#endif
		else
#endif
		    status = NC_EPARINIT;
		goto done;
	    }
	    /* Get its length */
	    if((retval=MPI_File_get_size(file->fh, &size)) != MPI_SUCCESS)
	        {status = NC_EPARINIT; goto done;}
	    file->filelen = (long long)size;
	} else
Example #20
int main(int argc, char *argv[])
{
    int rank, size;
    int errs = 0, errclass, mpi_errno;

    MTest_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* Checking type_vector for NULL variable */
    mpi_errno = MPI_Type_vector(3, 2, 3, MPI_INT, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #21
std::string tarch::parallel::MPIReturnValueToString( int result ) {
  std::ostringstream out;

  int   resultlen;
  char* string = new char[MPI_MAX_ERROR_STRING];  // (char *)malloc(MPI_MAX_ERROR_STRING * sizeof(char));
  MPI_Error_string(result, string, &resultlen);

  int   errorclass;
  MPI_Error_class(result, &errorclass);

  out << "mpi error class: " << errorclass << "="
      << ", mpi error text: " << string;

  switch ( errorclass ) {
    case MPI_SUCCESS:      out << "MPI_SUCCESS [no error]"; break;
    case MPI_ERR_BUFFER:   out << "MPI_ERR_BUFFER [invalid buffer pointer]"; break;
    case MPI_ERR_COUNT:    out << "MPI_ERR_COUNT [invalid count argument]"; break;
    case MPI_ERR_TYPE:     out << "MPI_ERR_TYPE [invalid datatype]"; break;
    case MPI_ERR_TAG:      out << "MPI_ERR_TAG [invalid tag]"; break;
    case MPI_ERR_COMM:     out << "MPI_ERR_COMM [invalid communicator]"; break;
    case MPI_ERR_RANK:     out << "MPI_ERR_RANK [invalid rank]"; break;
    case MPI_ERR_REQUEST:  out << "MPI_ERR_REQUEST [invalid request handle]"; break;
    case MPI_ERR_ROOT:     out << "MPI_ERR_ROOT [invalid root argument]"; break;
    case MPI_ERR_GROUP:    out << "MPI_ERR_GROUP [invalid group]"; break;
    case MPI_ERR_OP:       out << "MPI_ERR_OP [invalid operation]"; break;
    case MPI_ERR_TOPOLOGY: out << "MPI_ERR_TOPOLOGY [invalid topology]"; break;
    case MPI_ERR_DIMS:     out << "MPI_ERR_DIMS [invalid dimensions]"; break;
    case MPI_ERR_ARG:      out << "MPI_ERR_ARG [invalid argument]"; break;
    case MPI_ERR_UNKNOWN:  out << "MPI_ERR_UNKNOWN [unknown error]"; break;
    case MPI_ERR_TRUNCATE: out << "MPI_ERR_TRUNCATE [message has been truncated by receiver]"; break;
    case MPI_ERR_OTHER:    out << "MPI_ERR_OTHER [other unknown error]"; break;
    case MPI_ERR_INTERN:   out << "MPI_ERR_INTERN [internal mpi error]"; break;
    default: out << "unknown";
  }

  delete[] string;
  return out.str();
}
Example #22
int main(int argc, char *argv[])
{
    int rank, size;
    MPI_Datatype type;
    int errs = 0, mpi_errno, errclass;

    MTest_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* Checking type_size_x for NULL variable */
    type = MPI_INT;
    mpi_errno = MPI_Type_size_x(type, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    MTest_Finalize(errs);
    return 0;
}
Example #23
int main(int argc, char **argv)
{
    MPI_Group basegroup;
    MPI_Group g1;
    MPI_Comm comm, newcomm;
    int errs = 0, mpi_errno, errclass, rank, size;
    int range[1][3];
    int worldrank;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &worldrank);
    comm = MPI_COMM_WORLD;
    MPI_Comm_group(comm, &basegroup);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_split(comm, 0, size - rank, &newcomm);
    MPI_Comm_group(newcomm, &g1);

    /* Checking group_range_incl for NULL variable */
    range[0][0] = 1;
    range[0][1] = size-1;
    range[0][2] = 1;
    mpi_errno = MPI_Group_range_incl(basegroup, 1, range, NULL);
    MPI_Error_class(mpi_errno, &errclass);
    if (errclass != MPI_ERR_ARG)
        ++errs;

    MPI_Comm_free(&newcomm);
    MPI_Group_free(&basegroup);
    MPI_Group_free(&g1);
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #24
netaware_delay_fh netaware_delay_fopen(char * filename, int mode)
{
	int status, rank;
	char filename_str[256];
	struct netaware_delay_file * handle = malloc(
			sizeof(struct netaware_delay_file));
	handle->data = malloc(buffer_size);
	memset(handle->data, 0, buffer_size);
	handle->bfptr = 0;
	handle->bfsize = buffer_size;
	handle->bfstart = 0;
	handle->bfend = -1;
	handle->offset = 0.0f;
	handle->max_offset = 0.0f;
	handle->mode = mode;

	handle->meta_data = init_meta_queue();

	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	sprintf(filename_str, "%s_%d.data", filename, rank);
	status = MPI_File_open(MPI_COMM_WORLD, filename_str, mode, MPI_INFO_NULL,
			&handle->fh);
	if (status != MPI_SUCCESS)
	{
		char error_string[MPI_MAX_ERROR_STRING];
		int length_of_error_string, error_class;
		MPI_Error_class(status, &error_class);
		MPI_Error_string(error_class, error_string, &length_of_error_string);
		fprintf(stderr, "%s\n", error_string);
		MPI_Error_string(status, error_string, &length_of_error_string);
		fprintf(stderr, "%s\n", error_string);
		MPI_Abort(MPI_COMM_WORLD, status);
	}
	handle->dist_buffer = netaware_delay_dist_fopen();

	return handle;
}
Example #25
int main( int argc, char *argv[] )
{
    int errs = 0;
    MPI_Comm comm;
    MPI_Request r[2];
    MPI_Status  s[2];
    int errval, errclass;
    int b1[20], b2[20], rank, size, src, dest, i;

    MTest_Init( &argc, &argv );

    /* Create some receive requests.  tags 0-9 will succeed, tags 10-19 
       will be used for ERR_TRUNCATE (fewer than 20 messages will be used) */
    comm = MPI_COMM_WORLD;

    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

    src  = 1;
    dest = 0;
    if (rank == dest) {
	MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
	errval = MPI_Irecv( b1, 10, MPI_INT, src, 0, comm, &r[0] );
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Irecv\n" );
	}
	errval = MPI_Irecv( b2, 10, MPI_INT, src, 10, comm, &r[1] );
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Irecv\n" );
	}

	errval = MPI_Barrier(comm);
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Barrier\n" );
	}
	for (i=0; i<2; i++) {
	    s[i].MPI_ERROR = -1;
	}
	errval = MPI_Waitall( 2, r, s );
        MPI_Error_class( errval, &errclass );
	if (errclass != MPI_ERR_IN_STATUS) {
	    errs++;
	    printf( "Did not get ERR_IN_STATUS in Waitall\n" );
	}
	else {
	    /* Check for success */
	    /* We allow ERR_PENDING (neither completed nor in error) in case
	       the MPI implementation exits the Waitall when an error 
	       is detected. Thanks to Jim Hoekstra of Iowa State University
	       and Kim McMahon for finding this bug in the test. */
	    for (i=0; i<2; i++) {
		if (s[i].MPI_TAG < 10 && (s[i].MPI_ERROR != MPI_SUCCESS &&
			                  s[i].MPI_ERROR != MPI_ERR_PENDING)) {
		    char msg[MPI_MAX_ERROR_STRING];
		    int msglen = MPI_MAX_ERROR_STRING;
		    errs++;
		    printf( "correct msg had error code %d\n", 
			    s[i].MPI_ERROR );
		    MPI_Error_string( s[i].MPI_ERROR, msg, &msglen );
		    printf( "Error message was %s\n", msg );
		}
		else if (s[i].MPI_TAG >= 10 && s[i].MPI_ERROR == MPI_SUCCESS) {
		    errs++;
		    printf( "truncated msg had MPI_SUCCESS\n" );
		}
	    }
	}

    }
    else if (rank == src) {
	/* Send messages, then barrier so that the wait does not start 
	   until we are sure that the sends have begun */
	MPI_Send( b1, 10, MPI_INT, dest, 0, comm );
	MPI_Send( b2, 11, MPI_INT, dest, 10, comm );
	MPI_Barrier(comm);
    }
    else {
	MPI_Barrier(comm);
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
  
}
Example #26
FORT_DLL_SPEC void FORT_CALL mpi_error_class_ ( MPI_Fint *v1, MPI_Fint *v2, MPI_Fint *ierr ){
    *ierr = MPI_Error_class( *v1, v2 );
}
Example #27
int main( int argc, char *argv[] )
{
    int errs = 0;
    MPI_Comm comm;
    MPI_Request r[2];
    MPI_Status  s[2];
    int         indices[2], outcount;
    int errval, errclass;
    int b1[20], b2[20], rank, size, src, dest, i, j;

    MTest_Init( &argc, &argv );

    /* Create some receive requests.  tags 0-9 will succeed, tags 10-19 
       will be used for ERR_TRUNCATE (fewer than 20 messages will be used) */
    comm = MPI_COMM_WORLD;

    MPI_Comm_rank( comm, &rank );
    MPI_Comm_size( comm, &size );

    src  = 1;
    dest = 0;
    if (rank == dest) {
	MPI_Errhandler_set( comm, MPI_ERRORS_RETURN );
	errval = MPI_Irecv( b1, 10, MPI_INT, src, 0, comm, &r[0] );
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Irecv\n" );
	}
	errval = MPI_Irecv( b2, 10, MPI_INT, src, 10, comm, &r[1] );
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Irecv\n" );
	}

	/* synchronize */
	errval = MPI_Recv(NULL, 0, MPI_INT, src, 10, comm, MPI_STATUS_IGNORE);
	if (errval) {
	    errs++;
	    MTestPrintError( errval );
	    printf( "Error returned from Recv\n" );
	}
	for (i=0; i<2; i++) {
	    s[i].MPI_ERROR = -1;
	}
	errval = MPI_Waitsome( 2, r, &outcount, indices, s );
	MPI_Error_class( errval, &errclass );
	if (errclass != MPI_ERR_IN_STATUS) {
	    errs++;
	    printf( "Did not get ERR_IN_STATUS in Waitsome.  Got %d.\n", errval );
	}
	else if (outcount != 2) {
	    errs++;
	    printf( "Wait returned outcount = %d\n", outcount );
	}
	else {
	    /* Check for success */
	    for (i=0; i<outcount; i++) {
		j = i;
		/* Indices is the request index */
		if (s[j].MPI_TAG < 10 && s[j].MPI_ERROR != MPI_SUCCESS) {
		    errs++;
		    printf( "correct msg had error class %d\n", 
			    s[j].MPI_ERROR );
		}
		else if (s[j].MPI_TAG >= 10 && s[j].MPI_ERROR == MPI_SUCCESS) {
		    errs++;
		    printf( "truncated msg had MPI_SUCCESS\n" );
		}
	    }
	}

    }
    else if (rank == src) {
	/* Send test messages, then send another message so that the test does
	   not start until we are sure that the sends have begun */
	MPI_Send( b1, 10, MPI_INT, dest, 0, comm );
	MPI_Send( b2, 11, MPI_INT, dest, 10, comm );

	/* synchronize */
	MPI_Ssend( NULL, 0, MPI_INT, dest, 10, comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
  
}
Example #28
int test_communicators(void)
{
    MPI_Comm dup_comm_world, d2;
    int world_rank, world_size, key_1;
    int err, errs = 0;
    MPI_Aint value;

    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
#ifdef DEBUG
    if (world_rank == 0) {
        printf("*** Attribute copy/delete return codes ***\n");
    }
#endif

    MPI_Comm_dup(MPI_COMM_WORLD, &dup_comm_world);
    MPI_Barrier(dup_comm_world);

    MPI_Errhandler_set(dup_comm_world, MPI_ERRORS_RETURN);

    value = -11;
    if ((err = MPI_Comm_create_keyval(copybomb_fn, deletebomb_fn, &key_1, &value)))
        abort_msg("Keyval_create", err);

    err = MPI_Comm_set_attr(dup_comm_world, key_1, (void *) (MPI_Aint) world_rank);
    if (err) {
        errs++;
        printf("Error with first put\n");
    }

    err = MPI_Comm_set_attr(dup_comm_world, key_1, (void *) (MPI_Aint) (2 * world_rank));
    if (err == MPI_SUCCESS) {
        errs++;
        printf("delete function return code was MPI_SUCCESS in put\n");
    }

    /* Because the attribute delete function should fail, the attribute
     * should *not be removed* */
    err = MPI_Comm_delete_attr(dup_comm_world, key_1);
    if (err == MPI_SUCCESS) {
        errs++;
        printf("delete function return code was MPI_SUCCESS in delete\n");
    }

    err = MPI_Comm_dup(dup_comm_world, &d2);
    if (err == MPI_SUCCESS) {
        errs++;
        printf("copy function return code was MPI_SUCCESS in dup\n");
    }
    if (err != MPI_ERR_OTHER) {
        int lerrclass;
        MPI_Error_class(err, &lerrclass);
        if (lerrclass != MPI_ERR_OTHER) {
            errs++;
            printf("dup did not return an error code of class ERR_OTHER; ");
            printf("err = %d, class = %d\n", err, lerrclass);
        }
    }
#ifndef USE_STRICT_MPI
    /* Another interpretation is to leave d2 unchanged on error */
    if (err && d2 != MPI_COMM_NULL) {
        errs++;
        printf("dup did not return MPI_COMM_NULL on error\n");
    }
#endif

    delete_flag = 1;
    MPI_Comm_free(&dup_comm_world);

    MPI_Comm_free_keyval(&key_1);

    return errs;
}
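Example #28 relies on the attribute callbacks copybomb_fn and deletebomb_fn, the global delete_flag, and the helper abort_msg, none of which are shown. A minimal sketch of what they could look like, assuming the intent is that the copy always fails and the delete fails until the test sets delete_flag:

/* Sketch of the assumed callbacks: copy always fails, delete fails until allowed. */
static int delete_flag = 0;

static int copybomb_fn(MPI_Comm oldcomm, int keyval, void *extra_state,
                       void *attribute_val_in, void *attribute_val_out, int *flag)
{
    /* Returning a non-MPI_SUCCESS value makes MPI_Comm_dup fail. */
    *flag = 0;
    return MPI_ERR_OTHER;
}

static int deletebomb_fn(MPI_Comm comm, int keyval, void *attribute_val,
                         void *extra_state)
{
    /* Refuse the delete until the test explicitly allows it. */
    return delete_flag ? MPI_SUCCESS : MPI_ERR_OTHER;
}

static void abort_msg(const char *str, int code)
{
    fprintf(stderr, "%s, err = %d\n", str, code);
    MPI_Abort(MPI_COMM_WORLD, code);
}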
Example #29
/*
 * This test attempts collective bcast communication after a process in
 * the communicator has failed.
 */
int main(int argc, char **argv)
{
    int rank, size, rc, errclass, toterrs, errs = 0;
    int deadprocs[] = { 1 };
    char buf[100000];
    MPI_Group world, newgroup;
    MPI_Comm newcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_group(MPI_COMM_WORLD, &world);
    MPI_Group_excl(world, 1, deadprocs, &newgroup);
    MPI_Comm_create_group(MPI_COMM_WORLD, newgroup, 0, &newcomm);

    if (size < 3) {
        fprintf(stderr, "Must run with at least 3 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    if (rank == 0) {
        strcpy(buf, "No Errors");
    }

    /* do a small bcast first */
    rc = MPI_Bcast(buf, 10, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                errclass);
        errs++;
    }
#endif

    /* reset the non-root buffers */
    if (rank != 0)
        memset(buf, 0, sizeof(buf));

    /* do a larger bcast */
    rc = MPI_Bcast(buf, 100000, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                errclass);
        errs++;
    }
#endif

    rc = MPI_Reduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, newcomm);
    if (rc)
        fprintf(stderr, "Failed to get errors from other processes\n");

    if (rank == 0) {
        if (toterrs) {
            printf(" Found %d errors\n", toterrs);
        }
        else {
            printf(" No Errors\n");
        }
        fflush(stdout);
    }

    MPI_Group_free(&world);
    MPI_Group_free(&newgroup);
    MPI_Comm_free(&newcomm);
    MPI_Finalize();

    return 0;

}
Example #30
void
ParallelZComposite(const MPI_Comm &comm,
                   int npixels,
                   const float *inz, const unsigned char *inrgba,
                   float *outz, unsigned char *outrgba,
                   unsigned char bgr, unsigned char bgg, unsigned char bgb)
{
    static bool MPIStuffInitialized = false;
    if (!MPIStuffInitialized)
    {
        InitializeMPIStuff();
        MPIStuffInitialized = true;
    }

    const int chunksize = 1 << 20;
    std::vector<Pixel> inpixels(chunksize);
    std::vector<Pixel> outpixels(chunksize);

    local_bg[0] = bgr;
    local_bg[1] = bgg;
    local_bg[2] = bgb;

    //cerr << "merging "<<npixels<<" pixels, bg="<<int(bgr)<<","<<int(bgg)<<","<<int(bgb)<<"\n";
    //cerr << "inpixel[0] = "<<int(inrgba[0])<<","<<int(inrgba[1])<<","<<int(inrgba[2])<<","<<int(inrgba[3])<<"\n";
    //cerr << "inzbuff[0] = "<<inz[0]<<endl;

    int i_in = 0, i_out = 0;
    while (npixels > 0)
    {
        int chunk = npixels < chunksize ? npixels : chunksize;

        for (int i=0; i<chunk; ++i, ++i_in)
        {
            inpixels[i].z = inz[i_in];
            inpixels[i].r = inrgba[i_in*4 + 0];
            inpixels[i].g = inrgba[i_in*4 + 1];
            inpixels[i].b = inrgba[i_in*4 + 2];
            inpixels[i].a = inrgba[i_in*4 + 3];
        }

        int err = MPI_Allreduce(&inpixels[0],  &outpixels[0], chunk,
                                mpiTypePixel, mpiOpMergePixelBuffers, comm);
        if (err != MPI_SUCCESS)
        {
            int errclass;
            MPI_Error_class(err,&errclass);
            char err_buffer[4096];
            int resultlen;
            MPI_Error_string(err,err_buffer,&resultlen);
            cerr << err_buffer << endl;
        }


        for (int i=0; i<chunk; ++i, ++i_out)
        {
            outz[i_out]          = outpixels[i].z;
            outrgba[i_out*4 + 0] = outpixels[i].r;
            outrgba[i_out*4 + 1] = outpixels[i].g;
            outrgba[i_out*4 + 2] = outpixels[i].b;
            outrgba[i_out*4 + 3] = outpixels[i].a;
        }

        npixels -= chunk;
    }

}
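Example #30 depends on the Pixel struct, InitializeMPIStuff, mpiTypePixel, mpiOpMergePixelBuffers, and local_bg, which are defined elsewhere. A minimal sketch of how such a datatype and reduction operator could be set up, assuming Pixel holds one float depth value and four colour bytes (an illustration, not the original InitializeMPIStuff):

/* Sketch: a pixel datatype and a z-buffer merge operator usable with MPI_Allreduce. */
#include <mpi.h>
#include <stddef.h>

typedef struct { float z; unsigned char r, g, b, a; } Pixel;

static unsigned char local_bg[3];           /* background colour, filled in by the caller */
static MPI_Datatype  mpiTypePixel;
static MPI_Op        mpiOpMergePixelBuffers;

static void MergePixelBuffers(void *in, void *inout, int *len, MPI_Datatype *dt)
{
    Pixel *a = (Pixel *) in, *b = (Pixel *) inout;
    int i;
    for (i = 0; i < *len; i++) {
        /* Treat background-coloured pixels as empty; otherwise keep the nearer pixel. */
        int a_is_bg = (a[i].r == local_bg[0] && a[i].g == local_bg[1] && a[i].b == local_bg[2]);
        int b_is_bg = (b[i].r == local_bg[0] && b[i].g == local_bg[1] && b[i].b == local_bg[2]);
        if (b_is_bg || (!a_is_bg && a[i].z < b[i].z))
            b[i] = a[i];
    }
}

static void InitializeMPIStuff(void)
{
    /* One float followed by four unsigned chars. */
    int          blocklens[2] = { 1, 4 };
    MPI_Aint     displs[2]    = { offsetof(Pixel, z), offsetof(Pixel, r) };
    MPI_Datatype types[2]     = { MPI_FLOAT, MPI_UNSIGNED_CHAR };

    MPI_Type_create_struct(2, blocklens, displs, types, &mpiTypePixel);
    MPI_Type_commit(&mpiTypePixel);
    MPI_Op_create(MergePixelBuffers, 1 /* commutative */, &mpiOpMergePixelBuffers);
}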