/*
 * Prepend a new entry to an active-schedule linked list.
 *
 * Allocates a node, duplicates "type" so the node owns an independent
 * datatype handle (caller may free its own), links the node in front of
 * "schedule", and returns the new list head.
 *
 * On allocation failure the process exits with a diagnostic; the original
 * code printed the error and then dereferenced the NULL pointer (UB).
 */
active_schedule_t *active_schedule_prepend(active_schedule_t *schedule,
                                           MPI_Comm comm,
                                           int peer,
                                           int direction,
                                           void *addr,
                                           MPI_Datatype type,
                                           int count,
                                           active_function_pointer_t pre_function,
                                           void *pre_data,
                                           active_function_pointer_t post_function,
                                           void *post_data)
{
    active_schedule_t *new_schedule;

    /* sizeof *ptr ties the allocation size to the variable's type;
       no cast needed on malloc in C. */
    new_schedule = malloc(sizeof *new_schedule);
    if (!new_schedule) {
        perror("out of memory");
        exit(EXIT_FAILURE);  /* BUG FIX: previously fell through to a NULL deref */
    }
    new_schedule->next = schedule;
    new_schedule->comm = comm;
    new_schedule->peer = peer;
    new_schedule->direction = direction;
    new_schedule->addr = addr;
    /* Dup so this node holds its own reference to the datatype. */
    MPI_Type_dup(type, &new_schedule->type);
    new_schedule->pre_function = pre_function;
    new_schedule->pre_data = pre_data;
    new_schedule->post_function = post_function;
    new_schedule->post_data = post_data;
    new_schedule->count = count;
    new_schedule->req = MPI_REQUEST_NULL;
    return new_schedule;
}
/* JNI binding: duplicate the MPI datatype whose handle is packed into
 * "oldType" and return the duplicate's handle as a jlong.  Any MPI error
 * is surfaced to Java via ompi_java_exceptionCheck. */
JNIEXPORT jlong JNICALL Java_mpi_Datatype_dup(
        JNIEnv *env, jobject jthis, jlong oldType)
{
    MPI_Datatype dup;
    int status;

    status = MPI_Type_dup((MPI_Datatype)oldType, &dup);
    ompi_java_exceptionCheck(env, status);

    return (jlong)dup;
}
/* Lazily build (exactly once) and return the test receptionist datatype.
 * It is simply a dup of the test actor datatype; the handle is cached in
 * a function-local static so repeated calls return the same type. */
MPI_Datatype gen_test_receptionist_type() {
    static MPI_Datatype cached = MPI_DATATYPE_NULL;

    if (cached != MPI_DATATYPE_NULL)
        return cached;

    MPI_Type_dup(gen_test_actor_type(), &cached);
    return cached;
}
Exemple #4
0
/*@C
   PetscSFWindowGetDataTypes - gets composite local and remote data types for each rank

   Not Collective

   Input Arguments:
+  sf - star forest
-  unit - data type for each node

   Output Arguments:
+  localtypes - types describing part of local leaf buffer referencing each remote rank
-  remotetypes - types describing part of remote root buffer referenced for each remote rank

   Level: developer

.seealso: PetscSFSetGraph(), PetscSFView()
@*/
static PetscErrorCode PetscSFWindowGetDataTypes(PetscSF sf,MPI_Datatype unit,const MPI_Datatype **localtypes,const MPI_Datatype **remotetypes)
{
  PetscSF_Window    *w = (PetscSF_Window*)sf->data;
  PetscErrorCode    ierr;
  PetscSFDataLink   link;
  PetscInt          i,nranks;
  const PetscInt    *roffset,*rmine,*rremote;
  const PetscMPIInt *ranks;

  PetscFunctionBegin;
  /* Look for types in cache */
  for (link=w->link; link; link=link->next) {
    PetscBool match;
    ierr = MPIPetsc_Type_compare(unit,link->unit,&match);CHKERRQ(ierr);
    if (match) {
      /* Cache hit: hand back the previously built per-rank type arrays. */
      *localtypes  = link->mine;
      *remotetypes = link->remote;
      PetscFunctionReturn(0);
    }
  }

  /* Create new composite types for each send rank */
  ierr = PetscSFGetRanks(sf,&nranks,&ranks,&roffset,&rmine,&rremote);CHKERRQ(ierr);
  ierr = PetscMalloc(sizeof(*link),&link);CHKERRQ(ierr);
  /* Dup "unit" so the cache entry owns a handle independent of the caller's. */
  ierr = MPI_Type_dup(unit,&link->unit);CHKERRQ(ierr);
  ierr = PetscMalloc2(nranks,&link->mine,nranks,&link->remote);CHKERRQ(ierr);
  for (i=0; i<nranks; i++) {
    PETSC_UNUSED PetscInt rcount = roffset[i+1] - roffset[i];
    /* NOTE: these locals intentionally shadow the const outer rmine/rremote. */
    PetscMPIInt           *rmine,*rremote;
#if !defined(PETSC_USE_64BIT_INDICES)
    rmine   = sf->rmine + sf->roffset[i];
    rremote = sf->rremote + sf->roffset[i];
#else
    /* PetscInt is 64-bit here: copy indices into temporary MPI-int arrays. */
    PetscInt j;
    ierr = PetscMalloc2(rcount,&rmine,rcount,&rremote);CHKERRQ(ierr);
    for (j=0; j<rcount; j++) {
      ierr = PetscMPIIntCast(sf->rmine[sf->roffset[i]+j],rmine+j);CHKERRQ(ierr);
      ierr = PetscMPIIntCast(sf->rremote[sf->roffset[i]+j],rremote+j);CHKERRQ(ierr);
    }
#endif
    ierr = MPI_Type_create_indexed_block(rcount,1,rmine,link->unit,&link->mine[i]);CHKERRQ(ierr);
    ierr = MPI_Type_create_indexed_block(rcount,1,rremote,link->unit,&link->remote[i]);CHKERRQ(ierr);
#if defined(PETSC_USE_64BIT_INDICES)
    ierr = PetscFree2(rmine,rremote);CHKERRQ(ierr);
#endif
    ierr = MPI_Type_commit(&link->mine[i]);CHKERRQ(ierr);
    ierr = MPI_Type_commit(&link->remote[i]);CHKERRQ(ierr);
  }
  /* Push the new entry onto the head of the cache list. */
  link->next = w->link;
  w->link    = link;

  *localtypes  = link->mine;
  *remotetypes = link->remote;
  PetscFunctionReturn(0);
}
Exemple #5
0
/* Fortran binding for MPI_Type_dup: convert the Fortran handle to C,
 * duplicate it, and publish the new Fortran handle only on success. */
void mpi_type_dup_f(MPI_Fint *type, MPI_Fint *newtype, MPI_Fint *ierr)
{
    MPI_Datatype c_old = MPI_Type_f2c(*type);
    MPI_Datatype c_dup;
    int rc;

    rc = MPI_Type_dup(c_old, &c_dup);
    *ierr = OMPI_INT_2_FINT(rc);

    /* Leave *newtype untouched if the dup failed. */
    if (OMPI_FINT_2_INT(*ierr) == MPI_SUCCESS)
        *newtype = MPI_Type_c2f(c_dup);
}
Exemple #6
0
/* Test driver: verifies that a type attribute's copy callback fires exactly
 * once when a datatype is dup'ed, and the delete callback fires once per
 * MPI_Type_free (twice total: original + dup).  foo_keyval, copy_called,
 * delete_called, my_func and foo_* are defined elsewhere in this file. */
int main(int argc, char *argv[])
{
    int mpi_errno;  /* NOTE(review): assigned but never checked -- intentional for this test? */
    MPI_Datatype type, duptype;
    int rank;

    MPI_Init(&argc, &argv);
    
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    foo_initialize();

    mpi_errno = MPI_Type_contiguous(2, MPI_INT, &type);

    mpi_errno = MPI_Type_set_attr(type, foo_keyval, NULL);

    /* Dup should invoke the attribute copy function once. */
    mpi_errno = MPI_Type_dup(type, &duptype);

    my_func = "Free of type";
    mpi_errno = MPI_Type_free(&type);

    my_func = "free of duptype";
    mpi_errno = MPI_Type_free(&duptype);

    foo_finalize();

    /* Only rank 0 reports, to keep output clean under mpiexec. */
    if (rank == 0) {
      int errs = 0;
      if (copy_called != 1) {
	printf( "Copy called %d times; expected once\n", copy_called );
	errs++;
      }
      if (delete_called != 2) {
	printf( "Delete called %d times; expected twice\n", delete_called );
	errs++;
      }
      if (errs == 0) {
	printf( " No Errors\n" );
      }
      else {
	printf( " Found %d errors\n", errs );
      }
      fflush(stdout);
    }

    MPI_Finalize();
    return 0;
}
int main( int argc, char *argv[] )
{
    int errs = 0;
    int attrval;
    int i, key[32], keyval, saveKeyval;
    MPI_Datatype type, duptype;
    MTestDatatype mstype, mrtype;
    char typename[MPI_MAX_OBJECT_NAME];
    int tnlen;

    MTest_Init( &argc, &argv );

    while (MTestGetDatatypes( &mstype, &mrtype, 1 )) {
        type = mstype.datatype;
        MPI_Type_create_keyval( copy_fn, delete_fn, &keyval, (void *)0 );
        saveKeyval = keyval;   /* in case we need to free explicitly */
        attrval = 1;
        MPI_Type_set_attr( type, keyval, (void*)&attrval );
        /* See MPI-1, 5.7.1.  Freeing the keyval does not remove it if it
           is in use in an attribute */
        MPI_Type_free_keyval( &keyval );

        /* We create some dummy keyvals here in case the same keyval
           is reused */
        for (i=0; i<32; i++) {
            MPI_Type_create_keyval( MPI_NULL_COPY_FN, MPI_NULL_DELETE_FN,
                                    &key[i], (void *)0 );
        }

        if (attrval != 1) {
            errs++;
            MPI_Type_get_name( type, typename, &tnlen );
            printf( "attrval is %d, should be 1, before dup in type %s\n",
                    attrval, typename );
        }
        MPI_Type_dup( type, &duptype );
        /* Check that the attribute was copied */
        if (attrval != 2) {
            errs++;
            MPI_Type_get_name( type, typename, &tnlen );
            printf( "Attribute not incremented when type dup'ed (%s)\n",
                    typename );
        }
Exemple #8
0
/*
 * Populate an MTestDatatype describing a dup of "oldtype".
 *
 * Records the element size, duplicates the type, and installs the
 * contiguous init/free/check handlers.  Returns the last MPI error
 * code observed (0 on success); errors are also reported via
 * MTestPrintError.
 */
int MTestTypeDupCreate(MPI_Datatype oldtype, MTestDatatype * mtype)
{
    int rc = 0;

    MTestTypeReset(mtype);

    rc = MPI_Type_size(oldtype, &mtype->basesize);
    if (rc)
        MTestPrintError(rc);

    rc = MPI_Type_dup(oldtype, &mtype->datatype);
    if (rc)
        MTestPrintError(rc);

    /* dup'ed types are already committed if the original type
     * was committed (MPI-2, section 8.8) */

    mtype->InitBuf = MTestTypeContigInit;
    mtype->FreeBuf = MTestTypeFree;
    mtype->CheckBuf = MTestTypeContigCheckbuf;

    return rc;
}
Exemple #9
0
/* Fortran interface stub for MPI_Type_dup.
   NOTE(review): v2 (MPI_Fint*) is reinterpreted as MPI_Datatype* -- this is
   only valid where datatype handles are ints (e.g. MPICH); confirm before
   porting to an implementation with pointer-sized handles. */
FORT_DLL_SPEC void FORT_CALL mpi_type_dup_ ( MPI_Fint *v1, MPI_Fint *v2, MPI_Fint *ierr ){
    *ierr = MPI_Type_dup( (MPI_Datatype)(*v1), (MPI_Datatype *)(v2) );
}
Exemple #10
0
/* C-side half of a C/Fortran attribute interoperability test.
 * Exercises MPI-1 (MPI_Attr_put) and MPI-2 (MPI_Comm_set_attr,
 * MPI_Type_set_attr) attribute copy/delete callbacks across dup and free.
 * The callbacks (registered elsewhere) are expected to increment the
 * attribute int on copy and decrement it on delete.  The updated error
 * count is returned through *errs. */
void ctoctest_( MPI_Fint *errs )
{
    int errcnt = *errs;
    /* A distinctive value unlikely to collide with callback arithmetic. */
    int baseattrval = (1 << (sizeof(int)*8-2))-3;
    MPI_Datatype cduptype;
    MPI_Comm   cdup;

    /* MPI-1 function */
    ccomm1Attr = baseattrval;
    MPI_Attr_put( MPI_COMM_SELF, ccomm1Key, &ccomm1Attr );
    /* Test that we have the same value */
    errcnt += cmpi1read( MPI_COMM_SELF, ccomm1Key, &ccomm1Attr, "C to C" );

    /* Dup, check that the copy routine does what is expected */
    MPI_Comm_dup( MPI_COMM_SELF, &cdup );
    errcnt += cmpi1read( cdup, ccomm1Key, &ccomm1Attr, "C to C dup" );
    if (ccomm1Attr != baseattrval + 1) {
	printf( " Did not increment int in C to C dup: %d %d\n", 
		ccomm1Attr, baseattrval + 1 );
	errcnt ++;
    }

    /* Freeing the dup should fire the delete callback (decrement). */
    MPI_Comm_free( &cdup );
    if (ccomm1Attr != baseattrval) {
	printf( " Did not increment int in C to C delete: %d %d\n", 
		ccomm1Attr, baseattrval );
	errcnt ++;
    }

    /* MPI-2 functions */
    ccomm1Attr = 0;
    ccomm2Attr = baseattrval;
    MPI_Comm_set_attr( MPI_COMM_SELF, ccomm2Key, &ccomm2Attr );
    /* Test that we have the same value */
    errcnt += cmpi2read( MPI_COMM_SELF, ccomm2Key, &ccomm2Attr, "C to C (2)" );

    /* Dup, check that the copy routine does what is expected */
    MPI_Comm_dup( MPI_COMM_SELF, &cdup );
    errcnt += cmpi2read( cdup, ccomm2Key, &ccomm2Attr, "C to C dup (2)" );
    if (ccomm2Attr != baseattrval + 1) {
	printf( " Did not increment int in C to C dup: %d %d\n", 
		ccomm2Attr, baseattrval + 1 );
	errcnt ++;
    }

    MPI_Comm_free( &cdup );
    if (ccomm2Attr != baseattrval) {
	printf( " Did not increment int in C to C delete (2): %d %d\n", 
		ccomm2Attr, baseattrval );
	errcnt ++;
    }

    /* MPI-2 functions */
    ctype2Attr = baseattrval;
    MPI_Type_set_attr( MPI_INTEGER, ctype2Key, &ctype2Attr );
    /* Test that we have the same value */
    errcnt += cmpi2readtype( MPI_INTEGER, ctype2Key, &ctype2Attr, "C to C type (2)" );

    /* Dup, check that the copy routine does what is expected */
    MPI_Type_dup( MPI_INTEGER, &cduptype );
    errcnt += cmpi2readtype( cduptype, ctype2Key, &ctype2Attr, "C to C typedup (2)" );
    if (ctype2Attr != baseattrval + 1) {
	printf( " Did not increment int in C to C typedup: %d %d\n", 
		ctype2Attr, baseattrval + 1 );
	errcnt ++;
    }
    ccomm1Attr = 0;

    MPI_Type_free( &cduptype );
    if (ctype2Attr != baseattrval) {
	printf( " Did not increment int in C to C typedelete (2): %d %d\n", 
		ctype2Attr, baseattrval );
	errcnt ++;
    }

    
    *errs = errcnt;
}
Exemple #11
0
/* Starts a "random" operation on "comm" corresponding to "rndnum" and returns
 * in (*req) a request handle corresonding to that operation.  This call should
 * be considered collective over comm (with a consistent value for "rndnum"),
 * even though the operation may only be a point-to-point request. */
static void start_random_nonblocking(MPI_Comm comm, unsigned int rndnum, MPI_Request *req, struct laundry *l)
{
    int i, j;
    int rank, size;
    int *buf = NULL;
    int *recvbuf = NULL;
    int *sendcounts = NULL;
    int *recvcounts = NULL;
    int *sdispls = NULL;
    int *rdispls = NULL;
    /* NOTE(review): declared int* but allocated and used as MPI_Datatype
       arrays in the MPI_Ialltoallw case below -- only valid where datatype
       handles are ints (e.g. MPICH); confirm before porting to an MPI with
       pointer-sized handles. */
    int *sendtypes = NULL;
    int *recvtypes = NULL;
    signed char *buf_alias = NULL;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    *req = MPI_REQUEST_NULL;

    l->case_num = -1;
    l->comm = comm;

    /* All buffers are stashed in the laundry struct so the caller can free
       them after the request completes. */
    l->buf        = buf        = malloc(COUNT*size*sizeof(int));
    l->recvbuf    = recvbuf    = malloc(COUNT*size*sizeof(int));
    l->sendcounts = sendcounts = malloc(size*sizeof(int));
    l->recvcounts = recvcounts = malloc(size*sizeof(int));
    l->sdispls    = sdispls    = malloc(size*sizeof(int));
    l->rdispls    = rdispls    = malloc(size*sizeof(int));
    l->sendtypes  = sendtypes  = malloc(size*sizeof(MPI_Datatype));
    l->recvtypes  = recvtypes  = malloc(size*sizeof(MPI_Datatype));

#define NUM_CASES (21)
    /* Map rndnum deterministically onto one of the cases below. */
    l->case_num = rand_range(rndnum, 0, NUM_CASES);
    switch (l->case_num) {
        case 0: /* MPI_Ibcast */
            for (i = 0; i < COUNT; ++i) {
                if (rank == 0) {
                    buf[i] = i;
                }
                else {
                    buf[i] = 0xdeadbeef;
                }
            }
            MPI_Ibcast(buf, COUNT, MPI_INT, 0, comm, req);
            break;

        case 1: /* MPI_Ibcast (again, but designed to stress scatter/allgather impls) */
            /* FIXME fiddle with PRIME and buffer allocation s.t. PRIME is much larger (1021?) */
            buf_alias = (signed char *)buf;
            my_assert(COUNT*size*sizeof(int) > PRIME); /* sanity */
            for (i = 0; i < PRIME; ++i) {
                if (rank == 0)
                    buf_alias[i] = i;
                else
                    buf_alias[i] = 0xdb;
            }
            for (i = PRIME; i < COUNT * size * sizeof(int); ++i) {
                buf_alias[i] = 0xbf;
            }
            MPI_Ibcast(buf_alias, PRIME, MPI_SIGNED_CHAR, 0, comm, req);
            break;

        case 2: /* MPI_Ibarrier */
            MPI_Ibarrier(comm, req);
            break;

        case 3: /* MPI_Ireduce */
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, 0, comm, req);
            break;

        case 4: /* same again, use a user op and free it before the wait */
            {
                MPI_Op op = MPI_OP_NULL;
                MPI_Op_create(sum_fn, /*commute=*/1, &op);
                for (i = 0; i < COUNT; ++i) {
                    buf[i] = rank + i;
                    recvbuf[i] = 0xdeadbeef;
                }
                MPI_Ireduce(buf, recvbuf, COUNT, MPI_INT, op, 0, comm, req);
                /* Freeing before the wait stresses the implementation's
                   reference counting of user ops. */
                MPI_Op_free(&op);
            }
            break;

        case 5: /* MPI_Iallreduce */
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Iallreduce(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
            break;

        case 6: /* MPI_Ialltoallv (a weak test, neither irregular nor sparse) */
            for (i = 0; i < size; ++i) {
                sendcounts[i] = COUNT;
                recvcounts[i] = COUNT;
                sdispls[i] = COUNT * i;
                rdispls[i] = COUNT * i;
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + (i * j);
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ialltoallv(buf, sendcounts, sdispls, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
            break;

        case 7: /* MPI_Igather */
            for (i = 0; i < size*COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
            break;

        case 8: /* same test again, just use a dup'ed datatype and free it before the wait */
            {
                MPI_Datatype type = MPI_DATATYPE_NULL;
                MPI_Type_dup(MPI_INT, &type);
                for (i = 0; i < size*COUNT; ++i) {
                    buf[i] = rank + i;
                    recvbuf[i] = 0xdeadbeef;
                }
                MPI_Igather(buf, COUNT, MPI_INT, recvbuf, COUNT, type, 0, comm, req);
                MPI_Type_free(&type); /* should cause implementations that don't refcount
                                         correctly to blow up or hang in the wait */
            }
            break;

        case 9: /* MPI_Iscatter */
            for (i = 0; i < size; ++i) {
                for (j = 0; j < COUNT; ++j) {
                    if (rank == 0)
                        buf[i*COUNT+j] = i + j;
                    else
                        buf[i*COUNT+j] = 0xdeadbeef;
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Iscatter(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
            break;

        case 10: /* MPI_Iscatterv */
            for (i = 0; i < size; ++i) {
                /* weak test, just test the regular case where all counts are equal */
                sendcounts[i] = COUNT;
                sdispls[i] = i * COUNT;
                for (j = 0; j < COUNT; ++j) {
                    if (rank == 0)
                        buf[i*COUNT+j] = i + j;
                    else
                        buf[i*COUNT+j] = 0xdeadbeef;
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Iscatterv(buf, sendcounts, sdispls, MPI_INT, recvbuf, COUNT, MPI_INT, 0, comm, req);
            break;

        case 11: /* MPI_Ireduce_scatter */
            for (i = 0; i < size; ++i) {
                recvcounts[i] = COUNT;
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + i;
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ireduce_scatter(buf, recvbuf, recvcounts, MPI_INT, MPI_SUM, comm, req);
            break;

        case 12: /* MPI_Ireduce_scatter_block */
            for (i = 0; i < size; ++i) {
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + i;
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ireduce_scatter_block(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
            break;

        case 13: /* MPI_Igatherv */
            for (i = 0; i < size*COUNT; ++i) {
                buf[i] = 0xdeadbeef;
                recvbuf[i] = 0xdeadbeef;
            }
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
            }
            for (i = 0; i < size; ++i) {
                recvcounts[i] = COUNT;
                rdispls[i] = i * COUNT;
            }
            MPI_Igatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, 0, comm, req);
            break;

        case 14: /* MPI_Ialltoall */
            for (i = 0; i < size; ++i) {
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + (i * j);
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ialltoall(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
            break;

        case 15: /* MPI_Iallgather */
            for (i = 0; i < size*COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Iallgather(buf, COUNT, MPI_INT, recvbuf, COUNT, MPI_INT, comm, req);
            break;

        case 16: /* MPI_Iallgatherv */
            for (i = 0; i < size; ++i) {
                for (j = 0; j < COUNT; ++j) {
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
                recvcounts[i] = COUNT;
                rdispls[i] = i * COUNT;
            }
            for (i = 0; i < COUNT; ++i)
                buf[i] = rank + i;
            MPI_Iallgatherv(buf, COUNT, MPI_INT, recvbuf, recvcounts, rdispls, MPI_INT, comm, req);
            break;

        case 17: /* MPI_Iscan */
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Iscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
            break;

        case 18: /* MPI_Iexscan */
            for (i = 0; i < COUNT; ++i) {
                buf[i] = rank + i;
                recvbuf[i] = 0xdeadbeef;
            }
            MPI_Iexscan(buf, recvbuf, COUNT, MPI_INT, MPI_SUM, comm, req);
            break;

        case 19: /* MPI_Ialltoallw (a weak test, neither irregular nor sparse) */
            for (i = 0; i < size; ++i) {
                sendcounts[i] = COUNT;
                recvcounts[i] = COUNT;
                /* Ialltoallw displacements are in bytes, not elements. */
                sdispls[i] = COUNT * i * sizeof(int);
                rdispls[i] = COUNT * i * sizeof(int);
                sendtypes[i] = MPI_INT;
                recvtypes[i] = MPI_INT;
                for (j = 0; j < COUNT; ++j) {
                    buf[i*COUNT+j] = rank + (i * j);
                    recvbuf[i*COUNT+j] = 0xdeadbeef;
                }
            }
            MPI_Ialltoallw(buf, sendcounts, sdispls, sendtypes, recvbuf, recvcounts, rdispls, recvtypes, comm, req);
            break;

        case 20: /* basic pt2pt MPI_Isend/MPI_Irecv pairing */
            /* even ranks send to odd ranks, but only if we have a full pair */
            if ((rank % 2 != 0) || (rank != size-1)) {
                for (j = 0; j < COUNT; ++j) {
                    buf[j] = j;
                    recvbuf[j] = 0xdeadbeef;
                }
                if (rank % 2 == 0)
                    MPI_Isend(buf, COUNT, MPI_INT, rank+1, 5, comm, req);
                else
                    MPI_Irecv(recvbuf, COUNT, MPI_INT, rank-1, 5, comm, req);
            }
            break;

        default:
            fprintf(stderr, "unexpected value for l->case_num=%d)\n", (l->case_num));
            MPI_Abort(comm, 1);
            exit(1);
            break;
    }
}
Exemple #12
0
int main(int argc, char **argv)
{
  PetscInt    ierr;
  PetscSF     sf;
  Vec         A,Aout;
  PetscScalar *bufA;
  PetscScalar *bufAout;
  PetscMPIInt rank, size;
  PetscInt    nroots, nleaves;
  PetscInt    i;
  PetscInt    *ilocal;
  PetscSFNode *iremote;
  PetscBool   test_dupped_type;
  MPI_Datatype contig;

  ierr = PetscInitialize(&argc,&argv,NULL,help);if (ierr) return ierr;
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);

  if (size != 1) SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_USER, "Only coded for one MPI process");

  ierr             = PetscOptionsBegin(PETSC_COMM_WORLD,"","PetscSF type freeing options","none");CHKERRQ(ierr);
  test_dupped_type = PETSC_FALSE;
  ierr             = PetscOptionsBool("-test_dupped_type", "Test dupped input type","",test_dupped_type,&test_dupped_type,NULL);CHKERRQ(ierr);
  ierr             = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscSFCreate(PETSC_COMM_WORLD,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);

  nleaves = 1;
  nroots = 1;
  ierr = PetscMalloc1(nleaves,&ilocal);CHKERRQ(ierr);

  for (i = 0; i<nleaves; i++) {
    ilocal[i] = i;
  }

  ierr = PetscMalloc1(nleaves,&iremote);CHKERRQ(ierr);
  iremote[0].rank = 0;
  iremote[0].index = 0;
  ierr = PetscSFSetGraph(sf,nroots,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  ierr = PetscSFSetUp(sf);CHKERRQ(ierr);
  ierr = PetscSFView(sf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = VecCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = VecSetSizes(A,4,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(A);CHKERRQ(ierr);
  ierr = VecSetUp(A);CHKERRQ(ierr);

  ierr = VecDuplicate(A,&Aout);CHKERRQ(ierr);
  ierr = VecGetArray(A,&bufA);CHKERRQ(ierr);
  for (i=0; i<4; i++) {
    bufA[i] = (PetscScalar)i;
  }
  ierr = VecRestoreArray(A,&bufA);CHKERRQ(ierr);

  ierr = VecGetArrayRead(A,(const PetscScalar**)&bufA);CHKERRQ(ierr);
  ierr = VecGetArray(Aout,&bufAout);CHKERRQ(ierr);

  ierr = MPI_Type_contiguous(4, MPIU_SCALAR, &contig);CHKERRQ(ierr);
  ierr = MPI_Type_commit(&contig);CHKERRQ(ierr);

  if (test_dupped_type) {
    MPI_Datatype tmp;
    ierr = MPI_Type_dup(contig, &tmp);CHKERRQ(ierr);
    ierr = MPI_Type_free(&contig);CHKERRQ(ierr);
    contig = tmp;
  }
  for (i=0;i<10000;i++) {
    ierr = PetscSFBcastBegin(sf,contig,bufA,bufAout);CHKERRQ(ierr);
    ierr = PetscSFBcastEnd(sf,contig,bufA,bufAout);CHKERRQ(ierr);
  }
  ierr = VecRestoreArrayRead(A,(const PetscScalar**)&bufA);CHKERRQ(ierr);
  ierr = VecRestoreArray(Aout,&bufAout);CHKERRQ(ierr);

  ierr = VecView(Aout,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = VecDestroy(&A);CHKERRQ(ierr);
  ierr = VecDestroy(&Aout);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = MPI_Type_free(&contig);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
Exemple #13
0
/* 
   Create a range of datatypes with a given count elements.
   This uses a selection of types, rather than an exhaustive collection.
   It allocates both send and receive types so that they can have the same
   type signature (collection of basic types) but different type maps (layouts
   in memory) 

   Uses the file-scope cursor datatype_index: each call produces the next
   send/recv type pair and returns the (incremented) index, or 0 when the
   list is exhausted (the cursor then wraps to restart).
 */
int MTestGetDatatypes( MTestDatatype *sendtype, MTestDatatype *recvtype,
		       int count )
{
    int merr;
    int i;

    /* Reset both descriptors to a known-empty state before filling them. */
    sendtype->InitBuf	  = 0;
    sendtype->FreeBuf	  = 0;
    sendtype->CheckBuf	  = 0;
    sendtype->datatype	  = 0;
    sendtype->isBasic	  = 0;
    sendtype->printErrors = 0;
    recvtype->InitBuf	  = 0;
    recvtype->FreeBuf	  = 0;

    recvtype->CheckBuf	  = 0;
    recvtype->datatype	  = 0;
    recvtype->isBasic	  = 0;
    recvtype->printErrors = 0;

    sendtype->buf	  = 0;
    recvtype->buf	  = 0;

    /* Set the defaults for the message lengths */
    sendtype->count	  = count;
    recvtype->count	  = count;
    /* Use datatype_index to choose a datatype to use.  If at the end of the
       list, return 0 */
    switch (datatype_index) {
    case 0:
	sendtype->datatype = MPI_INT;
	sendtype->isBasic  = 1;
	recvtype->datatype = MPI_INT;
	recvtype->isBasic  = 1;
	break;
    case 1:
	sendtype->datatype = MPI_DOUBLE;
	sendtype->isBasic  = 1;
	recvtype->datatype = MPI_DOUBLE;
	recvtype->isBasic  = 1;
	break;
    case 2:
	sendtype->datatype = MPI_FLOAT_INT;
	sendtype->isBasic  = 1;
	recvtype->datatype = MPI_FLOAT_INT;
	recvtype->isBasic  = 1;
	break;
    case 3:
	/* Dup'ed MPI_INT on both sides; exercises derived-type paths with a
	   layout identical to the basic type. */
	merr = MPI_Type_dup( MPI_INT, &sendtype->datatype );
	if (merr) MTestPrintError( merr );
	merr = MPI_Type_set_name( sendtype->datatype,
                                  (char*)"dup of MPI_INT" );
	if (merr) MTestPrintError( merr );
	merr = MPI_Type_dup( MPI_INT, &recvtype->datatype );
	if (merr) MTestPrintError( merr );
	merr = MPI_Type_set_name( recvtype->datatype,
                                  (char*)"dup of MPI_INT" );
	if (merr) MTestPrintError( merr );
	/* dup'ed types are already committed if the original type 
	   was committed (MPI-2, section 8.8) */
	break;
    case 4:
	/* vector send type and contiguous receive type */
	/* These sizes are in bytes (see the VectorInit code) */
 	sendtype->stride   = 3 * sizeof(int);
	sendtype->blksize  = sizeof(int);
	sendtype->nelm     = recvtype->count;

	merr = MPI_Type_vector( recvtype->count, 1, 3, MPI_INT, 
				&sendtype->datatype );
	if (merr) MTestPrintError( merr );
        merr = MPI_Type_commit( &sendtype->datatype );
	if (merr) MTestPrintError( merr );
	merr = MPI_Type_set_name( sendtype->datatype,
                                  (char*)"int-vector" );
	if (merr) MTestPrintError( merr );
	sendtype->count    = 1;
 	recvtype->datatype = MPI_INT;
	recvtype->isBasic  = 1;
	sendtype->InitBuf  = MTestTypeVectorInit;
	recvtype->InitBuf  = MTestTypeContigInitRecv;
	sendtype->FreeBuf  = MTestTypeVectorFree;
	recvtype->FreeBuf  = MTestTypeContigFree;
	sendtype->CheckBuf = 0;
	recvtype->CheckBuf = MTestTypeContigCheckbuf;
	break;

    case 5:
	/* Indexed send using many small blocks and contig receive */
	sendtype->blksize  = sizeof(int);
	sendtype->nelm     = recvtype->count;
	sendtype->basesize = sizeof(int);
	sendtype->displs   = (int *)malloc( sendtype->nelm * sizeof(int) );
	sendtype->index    = (int *)malloc( sendtype->nelm * sizeof(int) );
	if (!sendtype->displs || !sendtype->index) {
	    MTestError( "Out of memory in type init\n" );
	}
	/* Make the sizes larger (4 ints) to help push the total
	   size to over 256k in some cases, as the MPICH code as of
	   10/1/06 used large internal buffers for packing non-contiguous
	   messages */
	for (i=0; i<sendtype->nelm; i++) {
	    sendtype->index[i]   = 4;
	    sendtype->displs[i]  = 5*i;
	}
	merr = MPI_Type_indexed( sendtype->nelm,
				 sendtype->index, sendtype->displs, 
				 MPI_INT, &sendtype->datatype );
	if (merr) MTestPrintError( merr );
        merr = MPI_Type_commit( &sendtype->datatype );
	if (merr) MTestPrintError( merr );
	merr = MPI_Type_set_name( sendtype->datatype,
                                  (char*)"int-indexed(4-int)" );
	if (merr) MTestPrintError( merr );
	sendtype->count    = 1;
	sendtype->InitBuf  = MTestTypeIndexedInit;
	sendtype->FreeBuf  = MTestTypeIndexedFree;
	sendtype->CheckBuf = 0;

 	recvtype->datatype = MPI_INT;
	recvtype->isBasic  = 1;
	recvtype->count    = count * 4;
	recvtype->InitBuf  = MTestTypeContigInitRecv;
	recvtype->FreeBuf  = MTestTypeContigFree;
	recvtype->CheckBuf = MTestTypeContigCheckbuf;
	break;

    case 6:
	/* Indexed send using 2 large blocks and contig receive */
	sendtype->blksize  = sizeof(int);
	sendtype->nelm     = 2;
	sendtype->basesize = sizeof(int);
	sendtype->displs   = (int *)malloc( sendtype->nelm * sizeof(int) );
	sendtype->index    = (int *)malloc( sendtype->nelm * sizeof(int) );
	if (!sendtype->displs || !sendtype->index) {
	    MTestError( "Out of memory in type init\n" );
	}
	/* index -> block size */
	sendtype->index[0]   = (recvtype->count + 1) / 2;
	sendtype->displs[0]  = 0;
	sendtype->index[1]   = recvtype->count - sendtype->index[0];
	sendtype->displs[1]  = sendtype->index[0] + 1; 
	/* There is a deliberate gap here */

	merr = MPI_Type_indexed( sendtype->nelm,
				 sendtype->index, sendtype->displs, 
				 MPI_INT, &sendtype->datatype );
	if (merr) MTestPrintError( merr );
        merr = MPI_Type_commit( &sendtype->datatype );
	if (merr) MTestPrintError( merr );
	merr = MPI_Type_set_name( sendtype->datatype,
                                  (char*)"int-indexed(2 blocks)" );
	if (merr) MTestPrintError( merr );
	sendtype->count    = 1;
	sendtype->InitBuf  = MTestTypeIndexedInit;
	sendtype->FreeBuf  = MTestTypeIndexedFree;
	sendtype->CheckBuf = 0;

 	recvtype->datatype = MPI_INT;
	recvtype->isBasic  = 1;
	recvtype->count    = sendtype->index[0] + sendtype->index[1];
	recvtype->InitBuf  = MTestTypeContigInitRecv;
	recvtype->FreeBuf  = MTestTypeContigFree;
	recvtype->CheckBuf = MTestTypeContigCheckbuf;
	break;

    case 7:
	/* Indexed receive using many small blocks and contig send */
	recvtype->blksize  = sizeof(int);
	recvtype->nelm     = recvtype->count;
	recvtype->basesize = sizeof(int);
	recvtype->displs   = (int *)malloc( recvtype->nelm * sizeof(int) );
	recvtype->index    = (int *)malloc( recvtype->nelm * sizeof(int) );
	if (!recvtype->displs || !recvtype->index) {
	    MTestError( "Out of memory in type recv init\n" );
	}
	/* Make the sizes larger (4 ints) to help push the total
	   size to over 256k in some cases, as the MPICH code as of
	   10/1/06 used large internal buffers for packing non-contiguous
	   messages */
	/* Note that there are gaps in the indexed type */
	for (i=0; i<recvtype->nelm; i++) {
	    recvtype->index[i]   = 4;
	    recvtype->displs[i]  = 5*i;
	}
	merr = MPI_Type_indexed( recvtype->nelm,
				 recvtype->index, recvtype->displs, 
				 MPI_INT, &recvtype->datatype );
	if (merr) MTestPrintError( merr );
        merr = MPI_Type_commit( &recvtype->datatype );
	if (merr) MTestPrintError( merr );
	merr = MPI_Type_set_name( recvtype->datatype,
                                  (char*)"recv-int-indexed(4-int)" );
	if (merr) MTestPrintError( merr );
	recvtype->count    = 1;
	recvtype->InitBuf  = MTestTypeIndexedInitRecv;
	recvtype->FreeBuf  = MTestTypeIndexedFree;
	recvtype->CheckBuf = MTestTypeIndexedCheckbuf;

 	sendtype->datatype = MPI_INT;
	sendtype->isBasic  = 1;
	sendtype->count    = count * 4;
	sendtype->InitBuf  = MTestTypeContigInit;
	sendtype->FreeBuf  = MTestTypeContigFree;
	sendtype->CheckBuf = 0;
	break;

	/* Less commonly used but still simple types */
    case 8:
	sendtype->datatype = MPI_SHORT;
	sendtype->isBasic  = 1;
	recvtype->datatype = MPI_SHORT;
	recvtype->isBasic  = 1;
	break;
    case 9:
	sendtype->datatype = MPI_LONG;
	sendtype->isBasic  = 1;
	recvtype->datatype = MPI_LONG;
	recvtype->isBasic  = 1;
	break;
    case 10:
	sendtype->datatype = MPI_CHAR;
	sendtype->isBasic  = 1;
	recvtype->datatype = MPI_CHAR;
	recvtype->isBasic  = 1;
	break;
    case 11:
	sendtype->datatype = MPI_UINT64_T;
	sendtype->isBasic  = 1;
	recvtype->datatype = MPI_UINT64_T;
	recvtype->isBasic  = 1;
	break;
    case 12:
	sendtype->datatype = MPI_FLOAT;
	sendtype->isBasic  = 1;
	recvtype->datatype = MPI_FLOAT;
	recvtype->isBasic  = 1;
	break;

#ifndef USE_STRICT_MPI
	/* MPI_BYTE may only be used with MPI_BYTE in strict MPI */
    case 13:
	sendtype->datatype = MPI_INT;
	sendtype->isBasic  = 1;
	recvtype->datatype = MPI_BYTE;
	recvtype->isBasic  = 1;
	recvtype->count    *= sizeof(int);
	break;
#endif
    default:
	/* End of list: -1 here becomes the 0 return value after the
	   increment below, and restarts the cursor for the next sweep. */
	datatype_index = -1;
    }

    /* Cases that set no handlers (the basic types) get the default
       contiguous init/free/check functions here. */
    if (!sendtype->InitBuf) {
	sendtype->InitBuf  = MTestTypeContigInit;
	recvtype->InitBuf  = MTestTypeContigInitRecv;
	sendtype->FreeBuf  = MTestTypeContigFree;
	recvtype->FreeBuf  = MTestTypeContigFree;
	sendtype->CheckBuf = MTestTypeContigCheckbuf;
	recvtype->CheckBuf = MTestTypeContigCheckbuf;
    }
    datatype_index++;

    if (dbgflag && datatype_index > 0) {
	int typesize;
	fprintf( stderr, "%d: sendtype is %s\n", wrank, MTestGetDatatypeName( sendtype ) );
	merr = MPI_Type_size( sendtype->datatype, &typesize );
	if (merr) MTestPrintError( merr );
	fprintf( stderr, "%d: sendtype size = %d\n", wrank, typesize );
	fprintf( stderr, "%d: recvtype is %s\n", wrank, MTestGetDatatypeName( recvtype ) );
	merr = MPI_Type_size( recvtype->datatype, &typesize );
	if (merr) MTestPrintError( merr );
	fprintf( stderr, "%d: recvtype size = %d\n", wrank, typesize );
	fflush( stderr );
	
    }
    else if (verbose && datatype_index > 0) {
	printf( "Get new datatypes: send = %s, recv = %s\n", 
		MTestGetDatatypeName( sendtype ), 
		MTestGetDatatypeName( recvtype ) );
	fflush( stdout );
    }

    return datatype_index;
}