static int dtcmp_select_local_randpartition_keys(
  void* buf,
  int num,
  int k,
  void* item,
  MPI_Datatype key,
  DTCMP_Op cmp,
  DTCMP_Flags hints)
{
  /* randomly pick a pivot value */
  int pivot = rand_r(&dtcmp_rand_seed) % num;

  /* partition around this value, and determine the rank of the pivot value */
  int pivot_rank;
  DTCMP_Partition_local_dtcpy(buf, num, pivot, &pivot_rank, key, key, cmp, hints);

  /* compare the rank of the pivot to the target rank we're looking for */
  if (k < pivot_rank) {
    /* the item is smaller than the pivot, so recurse into lower half of array */
    int num_left = pivot_rank;
    int rc = dtcmp_select_local_randpartition_keys(buf, num_left, k, item, key, cmp, hints);
    return rc;
  } else if (k > pivot_rank) {
    /* the item is larger than the pivot, so recurse into upper half of array */

    /* get lower bound and extent of key */
    MPI_Aint lb, extent;
    MPI_Type_get_extent(key, &lb, &extent);

    /* adjust pointer into array, rank, and number of remaining items */
    int after_pivot = pivot_rank + 1;
    char* offset = (char*)buf + after_pivot * extent;
    int num_left = num - after_pivot;
    int new_k    = k - after_pivot;
    int rc = dtcmp_select_local_randpartition_keys(offset, num_left, new_k, item, key, cmp, hints);
    return rc;
  } else { /* k == pivot_rank */
    /* in this case, the pivot rank is the target rank we're looking for,
     * copy the pivot item into the output item and return */

    /* get lower bound and extent of key */
    MPI_Aint lb, extent;
    MPI_Type_get_extent(key, &lb, &extent);

    /* copy the pivot value into item and return */
    char* pivot_offset = (char*)buf + pivot_rank * extent;
    DTCMP_Memcpy(item, 1, key, pivot_offset, 1, key);
    return DTCMP_SUCCESS;
  }
}
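This is a randomized quickselect specialized to MPI datatypes: partition around a random pivot, then recurse into whichever side contains rank k. A self-contained sketch of the same control flow over plain ints (hypothetical helper names, no DTCMP calls) may make the recursion easier to follow:

#include <stdlib.h>

/* partition a[0..n-1] around a randomly chosen pivot (Lomuto scheme) and
 * return the pivot's final rank, mirroring DTCMP_Partition_local_dtcpy */
static int partition_ints(int* a, int n, unsigned int* seed)
{
    int pivot_index = rand_r(seed) % n;
    int pivot = a[pivot_index];
    a[pivot_index] = a[n - 1];
    a[n - 1] = pivot;                /* park the pivot at the end */

    int store = 0;
    for (int i = 0; i < n - 1; i++) {
        if (a[i] < pivot) {
            int tmp = a[i]; a[i] = a[store]; a[store] = tmp;
            store++;
        }
    }
    a[n - 1] = a[store];
    a[store] = pivot;                /* pivot lands at its sorted rank */
    return store;
}

/* return the k-th smallest element (0-based), as in the code above */
static int select_kth(int* a, int n, int k, unsigned int* seed)
{
    int r = partition_ints(a, n, seed);
    if (k < r) return select_kth(a, r, k, seed);
    if (k > r) return select_kth(a + r + 1, n - r - 1, k - r - 1, seed);
    return a[r];
}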
Example #2
int ZMPI_Alltoallv_proclists(void* sendbuf, int *sendcounts, int *sdispls, MPI_Datatype sendtype, int nsendprocs, int *sendprocs, void* recvbuf, int *recvcounts, int *rdispls, MPI_Datatype recvtype, int nrecvprocs, int *recvprocs, MPI_Comm comm) /* zmpi_func ZMPI_Alltoallv_proclists */
{
  int i, j;

  const int tag = 0;

  int nreqs;
  MPI_Request *reqs;
  MPI_Status *stats;

  MPI_Aint sendtype_lb, sendtype_extent, recvtype_lb, recvtype_extent;


  reqs = z_alloc(nrecvprocs + nsendprocs, sizeof(MPI_Request));
  stats = z_alloc(nrecvprocs + nsendprocs, sizeof(MPI_Status));

  MPI_Type_get_extent(sendtype, &sendtype_lb, &sendtype_extent);
  MPI_Type_get_extent(recvtype, &recvtype_lb, &recvtype_extent);

  nreqs = 0;

  for (i = 0; i < nrecvprocs; ++i)
  {
    j = recvprocs[i];
    if (recvcounts[j] > 0)
    {
      MPI_Irecv(((char *) recvbuf) + (rdispls[j] * recvtype_extent), recvcounts[j], recvtype, j, tag, comm, &reqs[nreqs]);
      ++nreqs;
    }
  }

  for (i = 0; i < nsendprocs; ++i)
  {
    j = sendprocs[i];
    if (sendcounts[j] > 0)
    {
      MPI_Isend(((char *) sendbuf) + (sdispls[j] * sendtype_extent), sendcounts[j], sendtype, j, tag, comm, &reqs[nreqs]);
      ++nreqs;
    }
  }

  MPI_Waitall(nreqs, reqs, stats);

  z_free(reqs);
  z_free(stats);

  return MPI_SUCCESS;
}
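A hedged usage sketch, not taken from the library: each rank lists exactly one send peer and one receive peer to pass a value around a ring. Note that the count and displacement arrays are indexed by peer rank (j = sendprocs[i] above), so they need one slot per rank of the communicator even when only one peer is listed.

#include <mpi.h>
#include <stdlib.h>

void ring_exchange(MPI_Comm comm)
{
    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    int right = (rank + 1) % size;
    int left  = (rank + size - 1) % size;

    /* counts/displacements are indexed by peer rank, hence length comm_size */
    int *sendcounts = calloc(size, sizeof(int));
    int *sdispls    = calloc(size, sizeof(int));
    int *recvcounts = calloc(size, sizeof(int));
    int *rdispls    = calloc(size, sizeof(int));

    int sendval = rank, recvval = -1;
    sendcounts[right] = 1;     /* one int for the right neighbor */
    recvcounts[left]  = 1;     /* one int from the left neighbor */

    ZMPI_Alltoallv_proclists(&sendval, sendcounts, sdispls, MPI_INT, 1, &right,
                             &recvval, recvcounts, rdispls, MPI_INT, 1, &left,
                             comm);

    free(sendcounts); free(sdispls); free(recvcounts); free(rdispls);
}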
Example #3
/*
 * Initialize buffer of basic datatype
 */
static void *MTestTypeContigInit(MTestDatatype * mtype)
{
    MPI_Aint extent = 0, lb = 0, size;
    int merr;

    if (mtype->count > 0) {
        unsigned char *p;
        MPI_Aint i, totsize;
        merr = MPI_Type_get_extent(mtype->datatype, &lb, &extent);
        if (merr)
            MTestPrintError(merr);

        size = extent + lb;
        totsize = size * mtype->count;
        if (!mtype->buf) {
            mtype->buf = (void *) malloc(totsize);
        }
        p = (unsigned char *) (mtype->buf);
        if (!p) {
            char errmsg[128] = { 0 };
            sprintf(errmsg, "Out of memory in %s", __FUNCTION__);
            MTestError(errmsg);
        }
        for (i = 0; i < totsize; i++) {
            p[i] = (unsigned char) (0xff ^ (i & 0xff));
        }
    }
    else {
        if (mtype->buf) {
            free(mtype->buf);
        }
        mtype->buf = 0;
    }
    return mtype->buf;
}
Example #4
File: ex8.c Project: pombredanne/petsc
static PetscErrorCode MakeDatatype(MPI_Datatype *dtype)
{
  PetscErrorCode ierr;
  MPI_Datatype dtypes[3],tmptype;
  PetscMPIInt  lengths[3];
  MPI_Aint     displs[3];
  Unit         dummy;

  PetscFunctionBegin;
  dtypes[0] = MPIU_INT;
  dtypes[1] = MPIU_SCALAR;
  dtypes[2] = MPI_CHAR;
  lengths[0] = 1;
  lengths[1] = 1;
  lengths[2] = 3;
  /* Curse the evil beings that made std::complex a non-POD type. */
  displs[0] = (char*)&dummy.rank - (char*)&dummy;  /* offsetof(Unit,rank); */
  displs[1] = (char*)&dummy.value - (char*)&dummy; /* offsetof(Unit,value); */
  displs[2] = (char*)&dummy.ok - (char*)&dummy;    /* offsetof(Unit,ok); */
  ierr = MPI_Type_create_struct(3,lengths,displs,dtypes,&tmptype);CHKERRQ(ierr);
  ierr = MPI_Type_commit(&tmptype);CHKERRQ(ierr);
  ierr = MPI_Type_create_resized(tmptype,0,sizeof(Unit),dtype);CHKERRQ(ierr);
  ierr = MPI_Type_commit(dtype);CHKERRQ(ierr);
  ierr = MPI_Type_free(&tmptype);CHKERRQ(ierr);
  {
    MPI_Aint lb,extent;
    ierr = MPI_Type_get_extent(*dtype,&lb,&extent);CHKERRQ(ierr);
    if (extent != sizeof(Unit)) SETERRQ2(PETSC_COMM_WORLD,PETSC_ERR_LIB,"New type has extent %d != sizeof(Unit) %d",(int)extent,(int)sizeof(Unit));
  }
  PetscFunctionReturn(0);
}
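The Unit struct itself is not shown in this excerpt; a definition consistent with the three blocks the code lays out (one MPIU_INT, one MPIU_SCALAR, three MPI_CHAR) would be:

typedef struct {
  PetscInt    rank;   /* matched by MPIU_INT */
  PetscScalar value;  /* matched by MPIU_SCALAR; may be std::complex, hence no offsetof */
  char        ok[3];  /* matched by 3 x MPI_CHAR */
} Unit;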
Example #5
void mpi_type_get_extent_f(MPI_Fint *type, MPI_Aint *lb,
			   MPI_Aint *extent, MPI_Fint *ierr)
{
    MPI_Datatype c_type = MPI_Type_f2c(*type);

    *ierr = OMPI_INT_2_FINT(MPI_Type_get_extent(c_type, lb, extent));
}
Example #6
File: mpits.c Project: pombredanne/petsc
static PetscErrorCode PetscCommBuildTwoSided_Ibarrier(MPI_Comm comm,PetscMPIInt count,MPI_Datatype dtype,PetscMPIInt nto,const PetscMPIInt *toranks,const void *todata,PetscMPIInt *nfrom,PetscMPIInt **fromranks,void *fromdata)
{
  PetscErrorCode ierr;
  PetscMPIInt    nrecvs,tag,done,i;
  MPI_Aint       lb,unitbytes;
  char           *tdata;
  MPI_Request    *sendreqs,barrier;
  PetscSegBuffer segrank,segdata;

  PetscFunctionBegin;
  ierr = PetscCommDuplicate(comm,&comm,&tag);CHKERRQ(ierr);
  ierr = MPI_Type_get_extent(dtype,&lb,&unitbytes);CHKERRQ(ierr);
  if (lb != 0) SETERRQ1(comm,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
  tdata = (char*)todata;
  ierr  = PetscMalloc1(nto,&sendreqs);CHKERRQ(ierr);
  for (i=0; i<nto; i++) {
    ierr = MPI_Issend((void*)(tdata+count*unitbytes*i),count,dtype,toranks[i],tag,comm,sendreqs+i);CHKERRQ(ierr);
  }
  ierr = PetscSegBufferCreate(sizeof(PetscMPIInt),4,&segrank);CHKERRQ(ierr);
  ierr = PetscSegBufferCreate(unitbytes,4*count,&segdata);CHKERRQ(ierr);

  nrecvs  = 0;
  barrier = MPI_REQUEST_NULL;
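  /* consensus loop: keep servicing unexpected incoming messages until all of
   * our sends have completed and a non-blocking barrier, entered only after
   * the sends finish, has been reached by every rank */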
  for (done=0; !done; ) {
    PetscMPIInt flag;
    MPI_Status  status;
    ierr = MPI_Iprobe(MPI_ANY_SOURCE,tag,comm,&flag,&status);CHKERRQ(ierr);
    if (flag) {                 /* incoming message */
      PetscMPIInt *recvrank;
      void        *buf;
      ierr      = PetscSegBufferGet(segrank,1,&recvrank);CHKERRQ(ierr);
      ierr      = PetscSegBufferGet(segdata,count,&buf);CHKERRQ(ierr);
      *recvrank = status.MPI_SOURCE;
      ierr      = MPI_Recv(buf,count,dtype,status.MPI_SOURCE,tag,comm,MPI_STATUS_IGNORE);CHKERRQ(ierr);
      nrecvs++;
    }
    if (barrier == MPI_REQUEST_NULL) {
      PetscMPIInt sent,nsends;
      ierr = PetscMPIIntCast(nto,&nsends);CHKERRQ(ierr);
      ierr = MPI_Testall(nsends,sendreqs,&sent,MPI_STATUSES_IGNORE);CHKERRQ(ierr);
      if (sent) {
#if defined(PETSC_HAVE_MPI_IBARRIER)
        ierr = MPI_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#elif defined(PETSC_HAVE_MPIX_IBARRIER)
        ierr = MPIX_Ibarrier(comm,&barrier);CHKERRQ(ierr);
#endif
        ierr = PetscFree(sendreqs);CHKERRQ(ierr);
      }
    } else {
      ierr = MPI_Test(&barrier,&done,MPI_STATUS_IGNORE);CHKERRQ(ierr);
    }
  }
  *nfrom = nrecvs;
  ierr   = PetscSegBufferExtractAlloc(segrank,fromranks);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segrank);CHKERRQ(ierr);
  ierr   = PetscSegBufferExtractAlloc(segdata,fromdata);CHKERRQ(ierr);
  ierr   = PetscSegBufferDestroy(&segdata);CHKERRQ(ierr);
  ierr   = PetscCommDestroy(&comm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
Example #7
static int type_create_contiguous_x(MPI_Count count,
	MPI_Datatype oldtype, MPI_Datatype *newtype)
{
    /* to make 'count' fit MPI-3 type processing routines (which take integer
     * counts), we construct a type consisting of N INT_MAX chunks followed by
     * a remainder.  e.g for a count of 4000000000 bytes you would end up with
     * one 2147483647-byte chunk followed immediately by a 1852516353-byte
     * chunk */
    MPI_Datatype chunks, remainder;
    MPI_Aint lb, extent, disps[2];
    int blocklens[2];
    MPI_Datatype types[2];

    MPI_Count c = count/INT_MAX;
    MPI_Count r = count%INT_MAX;

    MPI_Type_vector(c, INT_MAX, INT_MAX, oldtype, &chunks);
    MPI_Type_contiguous(r, oldtype, &remainder);

    MPI_Type_get_extent(oldtype, &lb, &extent);

    blocklens[0] = 1;      blocklens[1] = 1;
    disps[0]     = 0;      disps[1]     = c*extent*INT_MAX;
    types[0]     = chunks; types[1]     = remainder;

    MPI_Type_create_struct(2, blocklens, disps, types, newtype);

    MPI_Type_free(&chunks);
    MPI_Type_free(&remainder);

    return MPI_SUCCESS;
}
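A hedged usage sketch using the 4000000000-byte count from the comment above; MPI_Type_size_x reports the full size as an MPI_Count:

MPI_Datatype big;
MPI_Count size;
type_create_contiguous_x((MPI_Count)4000000000, MPI_BYTE, &big);
MPI_Type_commit(&big);
MPI_Type_size_x(big, &size);   /* expect size == 4000000000 */
MPI_Type_free(&big);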
Example #8
/*
 * Check value of received basic datatype buffer.
 */
static int MTestTypeContigCheckbuf(MTestDatatype * mtype)
{
    unsigned char *p;
    unsigned char expected;
    int err = 0, merr;
    MPI_Aint i, totsize, size, extent = 0, lb = 0;

    p = (unsigned char *) mtype->buf;
    if (p) {
        merr = MPI_Type_get_extent(mtype->datatype, &lb, &extent);
        if (merr)
            MTestPrintError(merr);

        size = lb + extent;
        totsize = size * mtype->count;
        for (i = 0; i < totsize; i++) {
            expected = (unsigned char) (0xff ^ (i & 0xff));
            if (p[i] != expected) {
                err++;
                if (mtype->printErrors && err < 10) {
                    printf("Data expected = %x but got p[%ld] = %x\n", expected, i, p[i]);
                    fflush(stdout);
                }
            }
        }
    }
    return err;
}
Example #9
static int do_test_for_ddt( MPI_Datatype sddt, MPI_Datatype rddt, int length )
{
    int i;
    MPI_Aint lb, extent;
    char *sbuf, *rbuf;

    MPI_Type_get_extent( sddt, &lb, &extent );
    sbuf = (char*)malloc( length );
    rbuf = (char*)malloc( length );
    printf( "# Isend recv\n" );
    for( i = 1; i <= (length/extent); i *= 2 ) {
        isend_recv( 10, sddt, i, sbuf, rddt, i, rbuf );
    }
    printf( "# Isend Irecv Wait\n" );
    for( i = 1; i <= (length/extent); i *= 2 ) {
        isend_irecv_wait( 10, sddt, i, sbuf, rddt, i, rbuf );
    }
    printf( "# Irecv send\n" );
    for( i = 1; i <= (length/extent); i *= 2 ) {
        irecv_send( 10, sddt, i, sbuf, rddt, i, rbuf );
    }
    printf( "# Irecv Isend Wait\n" );
    for( i = 1; i <= (length/extent); i *= 2 ) {
        irecv_isend_wait( 10, sddt, i, sbuf, rddt, i, rbuf );
    }
    free( sbuf );
    free( rbuf );
    return 0;
}
Example #10
void copy_key_if_valid(
  void* invec,
  void* inoutvec,
  int* len,
  MPI_Datatype* type)
{
  /* get extent of user's datatype */
  MPI_Aint lb, extent;
  MPI_Type_get_extent(*type, &lb, &extent);

  /* get pointers to start of input and output buffers */
  char* inbuf  = (char*) invec;
  char* outbuf = (char*) inoutvec;

  /* loop over each element provided in call */
  int i = 0;
  while (i < *len) {
    /* if our current entry is valid, keep it,
     * otherwise just copy over whatever first value is */
    int valid2 = *(int*) inoutvec;
    if (!valid2) {
      /* TODO: if type is big, could optimize by avoiding copy
       * if inbuf is also not valid */
      /* copy value from inbuf to outbuf */
      DTCMP_Memcpy(outbuf, 1, *type, inbuf, 1, *type);
    }

    /* increment pointers to handle next element */
    inbuf  += extent;
    outbuf += extent;
    i++;
  }
}
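A minimal sketch of how a function with this signature is typically registered as a user-defined MPI reduction. The two-int element layout (a leading validity flag, then a payload) and the helper name are illustrative assumptions, not part of the source above:

#include <mpi.h>

void reduce_some_valid(int have_value, int value, int* out)
{
    /* element = {valid flag, payload}, matching the leading-int test above */
    MPI_Datatype pairtype;
    MPI_Type_contiguous(2, MPI_INT, &pairtype);
    MPI_Type_commit(&pairtype);

    MPI_Op op;
    MPI_Op_create(copy_key_if_valid, 0 /* not commutative */, &op);

    int local[2]  = { have_value ? 1 : 0, value };
    int result[2] = { 0, 0 };
    MPI_Allreduce(local, result, 1, pairtype, op, MPI_COMM_WORLD);
    *out = result[1];  /* payload from a rank whose flag was set */

    MPI_Op_free(&op);
    MPI_Type_free(&pairtype);
}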
Example #11
static void print_schedule(active_schedule_t *schedule)
{
    int global_self;
    int self;
    MPI_Aint lb;
    MPI_Aint extent;
    
    MPI_Comm_rank(MPI_COMM_WORLD, &global_self);
    MPI_Comm_rank(schedule->comm, &self);
    MPI_Type_get_extent(schedule->type, &lb, &extent);

    fprintf(stderr,
            "%d: schedule=%p, comm=%p, "
            "type=%p(lb=%ld, extent=%ld), count=%d, %d %s %d\n",
            global_self,
            schedule,
            (void *) schedule->comm,
            (void *) schedule->type,
            (long) lb,
            (long) extent,
            schedule->count,
            self,
            schedule->direction == ACTIVE_SCHEDULE_SEND ? " -> " : " <- ",
            schedule->peer);
}            
Example #12
/* Extract an m x n submatrix within an m x N matrix and transpose it.
   Assume storage by rows; the defined datatype accesses by columns */
MPI_Datatype transpose_type(int N, int m, int n, MPI_Datatype type)
/* computes a datatype for the transpose of an mxn matrix 
   with entries of type type */
{
  MPI_Datatype subrow, subrow1, submatrix;
  MPI_Aint lb, extent;
  
  MPI_Type_vector(m, 1, N, type, &subrow);
  MPI_Type_get_extent(type, &lb, &extent);
  MPI_Type_create_resized(subrow, 0, extent, &subrow1);
  MPI_Type_contiguous(n, subrow1, &submatrix); 
  MPI_Type_commit(&submatrix);
  MPI_Type_free( &subrow );
  MPI_Type_free( &subrow1 );

  /* Add a consistency test: the size of submatrix should be
     n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
  {
      int      tsize;
      MPI_Aint textent, llb;
      MPI_Type_size( type, &tsize );
      MPI_Type_get_true_extent( submatrix, &llb, &textent );
      
      if (textent != tsize * (N * (m-1)+n)) {
	  fprintf( stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
		   (long)textent, (long)(tsize * (N * (m-1)+n)), N, n, m );
      }
  }

  return(submatrix);
}
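A hedged usage sketch (the helper is hypothetical): because the resized column type reads the block in column-major order, sending one element of it to ourselves while receiving n*m plain elements delivers the transposed block contiguously:

void transpose_block(const double *a, double *at, int N, int m, int n)
{
    MPI_Datatype t = transpose_type(N, m, n, MPI_DOUBLE);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    /* one strided element out, n*m contiguous doubles in */
    MPI_Sendrecv(a, 1, t, rank, 0,
                 at, n * m, MPI_DOUBLE, rank, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    MPI_Type_free(&t);
}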
Example #13
/* Define an n x m submatrix in a n x M local matrix (this is the 
   destination in the transpose matrix */
MPI_Datatype submatrix_type(int M, int m, int n, MPI_Datatype type)
/* computes a datatype for an mxn submatrix within an MxN matrix 
   with entries of type type */
{
  /* MPI_Datatype subrow; */
  MPI_Datatype submatrix;

  /* The book, MPI: The Complete Reference, has the wrong type constructor 
     here.  Since the stride in the vector type is relative to the input 
     type, the stride in the book's code is n times as long as is intended. 
     Since n may not exactly divide N, it is better to simply use the 
     blocklength argument in Type_vector */
  /*
  MPI_Type_contiguous(n, type, &subrow);
  MPI_Type_vector(m, 1, N, subrow, &submatrix);  
  */
  MPI_Type_vector(n, m, M, type, &submatrix );
  MPI_Type_commit(&submatrix);

  /* Add a consistency test: the size of submatrix should be
     n * m * sizeof(type) and the extent should be ((n-1)*M+m) * sizeof(type) */
  {
      int      tsize;
      MPI_Aint textent, lb;
      MPI_Type_size( type, &tsize );
      MPI_Type_get_extent( submatrix, &lb, &textent );
      
      if (textent != tsize * (M * (n-1)+m)) {
	  fprintf( stderr, "Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
		   (long)textent, (long)(tsize * (M * (n-1)+m)), M, n, m );
      }
  }
  return(submatrix);
}
Example #14
File: utils.c Project: kgururaj/BigMPI
/*
 * Synopsis
 *
 * void convert_vectors(..)
 *
 *  Input Parameter
 *
 *  int          num                length of all vectors (unless splat true)
 *  int          splat_old_count    if non-zero, use oldcount instead of iterating over vector (v-to-w)
 *  MPI_Count    oldcount           single count (ignored if splat_old_count==0)
 *  MPI_Count    oldcounts          vector of counts
 *  int          splat_old_type     if non-zero, use oldtype instead of iterating over vector (v-to-w)
 *  MPI_Datatype oldtype            single type (MPI_DATATYPE_NULL if splat_old_type==0)
 *  MPI_Datatype oldtypes           vector of types (NULL if splat_old_type!=0)
 *  int          zero_new_displs    set the displacement to zero (scatter/gather)
 *  MPI_Aint     olddispls          vector of displacements (NULL if zero_new_displs!=0)
 *
 * Output Parameters
 *
 *  int          newcounts
 *  MPI_Datatype newtypes
 *  MPI_Aint     newdispls
 *
 */
void BigMPI_Convert_vectors(int                num,
                            int                splat_old_count,
                            const MPI_Count    oldcount,
                            const MPI_Count    oldcounts[],
                            int                splat_old_type,
                            const MPI_Datatype oldtype,
                            const MPI_Datatype oldtypes[],
                            int                zero_new_displs,
                            const MPI_Aint     olddispls[],
                                  int          newcounts[],
                                  MPI_Datatype newtypes[],
                                  MPI_Aint     newdispls[])
{
    assert(splat_old_count || (oldcounts!=NULL));
    assert(splat_old_type  || (oldtypes!=NULL));
    assert(zero_new_displs || (olddispls!=NULL));

    MPI_Aint lb /* unused */, oldextent;
    if (splat_old_type) {
        MPI_Type_get_extent(oldtype, &lb, &oldextent);
    } else {
        /* !splat_old_type implies ALLTOALLW, which implies no displacement zeroing. */
        assert(!zero_new_displs);
    }

    for (int i=0; i<num; i++) {
        /* counts */
        newcounts[i] = 1;

        /* types */
        MPIX_Type_contiguous_x(oldcounts[i], splat_old_type ? oldtype : oldtypes[i], &newtypes[i]);
        MPI_Type_commit(&newtypes[i]);

        /* displacements */
        MPI_Aint newextent;
        /* If we are not splatting old type, it implies ALLTOALLW,
         * which does not scale the displacement by the type extent,
         * nor would we ever zero the displacements. */
        if (splat_old_type) {
            MPI_Type_get_extent(newtypes[i], &lb, &newextent);
            newdispls[i] = (zero_new_displs ? 0 : olddispls[i]*oldextent/newextent);
        } else {
            newdispls[i] = olddispls[i];
        }
    }
    return;
}
Example #15
File: sysio.c Project: petsc/petsc
/*
      MPICH does not provide the external32 representation for MPI_File_set_view() so we need to provide the functions.
    These are set into MPI in PetscInitialize() via MPI_Register_datarep()

    Note I use PetscMPIInt for the MPI error codes since that is what MPI uses (instead of the standard PetscErrorCode)

    The next three routines are not used because MPICH does not support their use

*/
PETSC_EXTERN PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype datatype,MPI_Aint *file_extent,void *extra_state)
{
  MPI_Aint    lb;
  PetscMPIInt ierr;

  ierr = MPI_Type_get_extent(datatype,&lb,file_extent);
  return ierr;
}
Example #16
    void print_pictogram(std::ostream& os, unsigned int width = 8) {
        std::pair<MPI_Aint, MPI_Aint> ext;
        std::pair<MPI_Aint, MPI_Aint> true_ext;
        MPI_Type_get_extent(type, &ext.first, &ext.second);
        MPI_Type_get_true_extent(type, &true_ext.first, &true_ext.second);

        if (ext.first != 0) {
            os << "Pictogram not available for types with lb != 0" << std::endl;
            return;
        }

        MPI_Aint ex = ext.second;

        if (ext.second < true_ext.second) {
            os << "Pictogram not available for types with ub < true_ub" << std::endl;
            return;
        }

        // use single letter type
        unsigned int pos = 0;
        os << "[";
        for (unsigned int i = 0; i < width; ++i) {
            os << "-";
        }
        os << "]" << std::endl << "[";
        for (auto it = m.begin(); it != m.end(); ++it) {
            while (pos < it->first) {
                if (pos % width == 0 && pos != 0)
                    os << "]" << std::endl << "[";
                os << " ";
                ++pos;
            }
            // get type size and type char
            char type_char = builtin_typename_map::get_typeid_name(it->second)[0];
            int size;
            MPI_Type_size(it->second, &size);
            // print the character `size` times
            for (int i = 0; i < size; ++i) {
                if (pos % width == 0 && pos != 0)
                    os << "]" << std::endl << "[";
                os << type_char;
                ++pos;
            }
        }

        while (pos < ex) {
            if (pos % width == 0 && pos != 0)
                os << "]" << std::endl << "[";
            os << " ";
            ++pos;
        }

        os << "]" << std::endl << "[";
        for (unsigned int i = 0; i < width; ++i) {
            os << "-";
        }
        os << "]" << std::endl;
    }
Example #17
/*
 * Initialize buffer of indexed-block datatype
 */
static void *MTestTypeIndexedBlockInit(MTestDatatype * mtype)
{
    MPI_Aint extent = 0, lb = 0, size, totsize, offset, dt_offset;
    int merr;

    if (mtype->count > 0) {
        unsigned char *p;
        MPI_Aint k, j;
        int i, nc;

        /* Allocate the send/recv buffer */
        merr = MPI_Type_get_extent(mtype->datatype, &lb, &extent);
        if (merr)
            MTestPrintError(merr);
        size = extent + lb;
        totsize = size * mtype->count;

        if (!mtype->buf) {
            mtype->buf = (void *) malloc(totsize);
        }
        p = (unsigned char *) (mtype->buf);
        if (!p) {
            char errmsg[128] = { 0 };
            sprintf(errmsg, "Out of memory in %s", __FUNCTION__);
            MTestError(errmsg);
        }

        /* First, set to -1 */
        for (k = 0; k < totsize; k++)
            p[k] = 0xff;

        /* Now, set the actual elements to the successive values.
         * We require that the base type is a contiguous type */
        nc = 0;
        dt_offset = 0;
        /* For each datatype */
        for (k = 0; k < mtype->count; k++) {
            /* For each block */
            for (i = 0; i < mtype->nblock; i++) {
                offset = dt_offset + mtype->displ_in_bytes[i];
                /* For each byte in the block */
                for (j = 0; j < mtype->blksize; j++) {
                    p[offset + j] = (unsigned char) (0xff ^ (nc++ & 0xff));
                }
            }
            dt_offset += size;
        }
    }
    else {
        /* count == 0 */
        if (mtype->buf) {
            free(mtype->buf);
        }
        mtype->buf = 0;
    }
    return mtype->buf;
}
Example #18
static int getTypeExtent(JNIEnv *env, MPI_Datatype type)
{
    MPI_Aint lb, extent;
    int rc = MPI_Type_get_extent(type, &lb, &extent);
    ompi_java_exceptionCheck(env, rc);
    int value = extent;
    assert(((MPI_Aint)value) == extent);
    return value;
}
Example #19
void ompi_type_get_extent_f(MPI_Fint *type, MPI_Aint *lb,
			   MPI_Aint *extent, MPI_Fint *ierr)
{
    int c_ierr;
    MPI_Datatype c_type = MPI_Type_f2c(*type);

    c_ierr = MPI_Type_get_extent(c_type, lb, extent);
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
}
Example #20
int DTCMP_Select_local_randpartition(
  const void* buf,
  int num,
  int k,
  void* item,
  MPI_Datatype key,
  MPI_Datatype keysat,
  DTCMP_Op cmp,
  DTCMP_Flags hints)
{
  int rc = DTCMP_SUCCESS;

  /* get extent of keysat datatype */
  MPI_Aint lb, extent;
  MPI_Type_get_extent(keysat, &lb, &extent);

  /* get extent of key datatype */
  MPI_Aint key_lb, key_extent;
  MPI_Type_get_extent(key, &key_lb, &key_extent);

  /* allocate an array to hold keys, spaced by the key extent so that
   * dtcmp_select_local_randpartition_keys can index it the same way */
  size_t buf_size = key_extent * num;
  void* scratch = dtcmp_malloc(buf_size, 0, __FILE__, __LINE__);

  /* copy keys into buffer */
  int i;
  for (i = 0; i < num; i++) {
    char* pos1 = (char*)buf + i * extent;
    char* pos2 = (char*)scratch + i * key_extent;
    DTCMP_Memcpy(pos2, 1, key, pos1, 1, key);
  }

  /* find and copy target rank into item */
  dtcmp_select_local_randpartition_keys(scratch, num, k, item, key, cmp, hints);

  /* free memory */
  dtcmp_free(&scratch);

  return rc;
}
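A hedged usage sketch: selecting the median of a local int array. DTCMP_OP_INT_ASCEND and DTCMP_FLAG_NONE are assumed to be the library's predefined ascending-int comparison and empty hint set, and DTCMP_Init is assumed to have been called:

int vals[5] = { 40, 10, 50, 20, 30 };
int median;
DTCMP_Select_local_randpartition(vals, 5, 2, &median, MPI_INT, MPI_INT,
                                 DTCMP_OP_INT_ASCEND, DTCMP_FLAG_NONE);
/* k = 2 requests the third-smallest key, so median == 30 */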
Example #21
/*---------------------------------------------------------------------
 * Function:         Build_cyclic_mpi_type
 * Purpose:          Build an MPI derived datatype that can be used with
 *                   cyclically distributed data.
 * In arg:
 *    loc_n:         The number of elements assigned to each process
 * Global out:
 *    cyclic_mpi_t:  An MPI datatype that can be used with cyclically
 *                   distributed data
 */
void Build_cyclic_mpi_type(int loc_n) {
   MPI_Datatype temp_mpi_t;
   MPI_Aint lb, extent;

   MPI_Type_vector(loc_n, 1, comm_sz, MPI_INT, &temp_mpi_t);
   MPI_Type_get_extent(MPI_INT, &lb, &extent);
   MPI_Type_create_resized(temp_mpi_t, lb, extent, &cyclic_mpi_t);
   MPI_Type_commit(&cyclic_mpi_t);
   MPI_Type_free(&temp_mpi_t);   /* the intermediate vector type is no longer needed */

}  /* Build_cyclic_mpi_type */
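A hedged usage sketch, assuming the comm_sz and cyclic_mpi_t globals from this source file and a prior call to Build_cyclic_mpi_type(loc_n):

void Scatter_cyclic(int global[] /* significant on root only */, int local[],
                    int loc_n, MPI_Comm comm) {
   /* one cyclic_mpi_t element per rank: since the type was resized to the
    * extent of a single int, rank r's element starts at global[r] and picks
    * up global[r], global[r + comm_sz], global[r + 2*comm_sz], ... */
   MPI_Scatter(global, 1, cyclic_mpi_t, local, loc_n, MPI_INT, 0, comm);
}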
Example #22
/*
 * Check value of received subarray datatype buffer
 */
static int MTestTypeSubarrayCheckbuf(MTestDatatype * mtype)
{
    unsigned char *p;
    unsigned char expected;
    int err = 0, merr;
    MPI_Aint size, offset, dt_offset, byte_offset, lb = 0, extent = 0;

    p = (unsigned char *) mtype->buf;
    if (p) {
        MPI_Aint k;
        int j, b, i, nc;
        merr = MPI_Type_get_extent(mtype->datatype, &lb, &extent);
        if (merr)
            MTestPrintError(merr);

        size = lb + extent;

        int ncol, sub_ncol, sub_nrow, sub_col_start, sub_row_start;
        ncol = mtype->arr_sizes[1];
        sub_nrow = mtype->arr_subsizes[0];
        sub_ncol = mtype->arr_subsizes[1];
        sub_row_start = mtype->arr_starts[0];
        sub_col_start = mtype->arr_starts[1];

        nc = 0;
        dt_offset = 0;
        /* For each datatype */
        for (k = 0; k < mtype->count; k++) {
            /* For each row */
            for (i = 0; i < sub_nrow; i++) {
                offset = (sub_row_start + i) * ncol + sub_col_start;
                /* For each element in row */
                for (j = 0; j < sub_ncol; j++) {
                    byte_offset = dt_offset + (offset + j) * mtype->basesize;
                    /* For each byte in element */
                    for (b = 0; b < mtype->basesize; b++) {
                        expected = (unsigned char) (0xff ^ (nc++ & 0xff));
                        if (p[byte_offset + b] != expected) {
                            err++;
                            if (mtype->printErrors && err < 10) {
                                printf("Data expected = %x but got p[%d,%d,%d] = %x\n",
                                       expected, i, j, b, p[byte_offset + b]);
                                fflush(stdout);
                            }
                        }
                    }
                }
            }
            dt_offset += size;
        }
    }
    if (err)
        printf("%s error\n", __FUNCTION__);
    return err;
}
Example #23
static void typestats(MPI_Datatype type)
{
    MPI_Aint lb, extent;
    MPI_Count size;

    MPI_Type_get_extent(type, &lb, &extent);
    MPI_Type_size_x(type, &size);

    printf("dtype %d: lb = %ld extent = %ld size = %ld...",
	    type, (long)lb, (long)extent, size);

}
Example #24
/*
 * Initialize buffer of vector datatype
 */
static void *MTestTypeVectorInit(MTestDatatype * mtype)
{
    MPI_Aint extent = 0, lb = 0, size, totsize, dt_offset, byte_offset;
    int merr;

    if (mtype->count > 0) {
        unsigned char *p;
        MPI_Aint k, j;
        int i, nc;

        merr = MPI_Type_get_extent(mtype->datatype, &lb, &extent);
        if (merr)
            MTestPrintError(merr);

        size = extent + lb;
        totsize = mtype->count * size;
        if (!mtype->buf) {
            mtype->buf = (void *) malloc(totsize);
        }
        p = (unsigned char *) (mtype->buf);
        if (!p) {
            char errmsg[128] = { 0 };
            sprintf(errmsg, "Out of memory in %s", __FUNCTION__);
            MTestError(errmsg);
        }

        /* First, set to -1 */
        for (k = 0; k < totsize; k++)
            p[k] = 0xff;

        /* Now, set the actual elements to the successive values.
         * We require that the base type is a contiguous type */
        nc = 0;
        dt_offset = 0;
        /* For each datatype */
        for (k = 0; k < mtype->count; k++) {
            /* For each block */
            for (i = 0; i < mtype->nblock; i++) {
                byte_offset = dt_offset + i * mtype->stride;
                /* For each byte */
                for (j = 0; j < mtype->blksize; j++) {
                    p[byte_offset + j] = (unsigned char) (0xff ^ (nc & 0xff));
                    nc++;
                }
            }
            dt_offset += size;
        }
    }
    else {
        mtype->buf = 0;
    }
    return mtype->buf;
}
Example #25
JNIEXPORT void JNICALL Java_mpi_Datatype_getLbExtent(
        JNIEnv *env, jobject jthis, jlong type, jintArray jLbExt)
{
    MPI_Aint lb, extent;
    int rc = MPI_Type_get_extent((MPI_Datatype)type, &lb, &extent);
    if(ompi_java_exceptionCheck(env, rc))
        return;

    jint *lbExt = (*env)->GetIntArrayElements(env, jLbExt, NULL);
    lbExt[0] = (jint)lb;
    lbExt[1] = (jint)extent;
    (*env)->ReleaseIntArrayElements(env, jLbExt, lbExt, 0);
}
Example #26
File: dp_utils.c Project: acochrane/goma
void 
ddd_set_commit(DDD p)
{
#ifdef PARALLEL
  MPI_Type_create_struct(p->num_members, p->block_count, p->address,
		  p->data_type, &p->new_type);
  MPI_Type_commit(&p->new_type);
  MPI_Type_get_extent(p->new_type, &p->lb, &p->extent);
  MPI_Type_size(p->new_type, &p->size);
  /*  rtn = MPI_Type_count(p->new_type, &p->count); */
#endif

  return;
}
Example #27
int ZMPI_Alltoallv_sendrecv(void* sendbuf, int *sendcounts, int *sdispls, MPI_Datatype sendtype, void* recvbuf, int *recvcounts, int *rdispls, MPI_Datatype recvtype, MPI_Comm comm)
{
  int s, r;
  int comm_size, comm_rank;
  MPI_Status stat;
  const int tag = 0;

  MPI_Aint sendtype_lb, sendtype_extent, recvtype_lb, recvtype_extent;


  MPI_Type_get_extent(sendtype, &sendtype_lb, &sendtype_extent);
  MPI_Type_get_extent(recvtype, &recvtype_lb, &recvtype_extent);

  MPI_Comm_size(comm, &comm_size);
  MPI_Comm_rank(comm, &comm_rank);

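  /* every rank walks the full s x r loop; only the active pair passes real
   * ranks and everyone else passes MPI_PROC_NULL (a no-op send/receive),
   * which serializes the exchange one message at a time */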
  for (s = 0; s < comm_size; ++s)
  for (r = 0; r < comm_size; ++r)
  MPI_Sendrecv(((char *) sendbuf) + (sdispls[r] * sendtype_extent), sendcounts[r], sendtype, (comm_rank == s)?r:MPI_PROC_NULL, tag,
               ((char *) recvbuf) + (rdispls[s] * recvtype_extent), recvcounts[s], recvtype, (comm_rank == r)?s:MPI_PROC_NULL, tag, comm, &stat);

  return MPI_SUCCESS;
}
Example #28
/*
 * Synopsis
 *
 * int BigMPI_Type_contiguous(MPI_Aint offset,
 *                            MPI_Count count,
 *                            MPI_Datatype   oldtype,
 *                            MPI_Datatype * newtype)
 *
 *  Input Parameters
 *
 *   offset            byte offset of the start of the contiguous chunk
 *   count             replication count (nonnegative integer)
 *   oldtype           old datatype (handle)
 *
 * Output Parameter
 *
 *   newtype           new datatype (handle)
 *
 * Notes
 *
 *   Following the addition of the offset argument, this function no longer
 *   matches the signature of MPI_Type_contiguous.  This may constitute
 *   breaking user experience for some people.  However, the value of
 *   adding it simplies the primary purpose of this function, which is to
 *   do the heavy lifting _inside_ of BigMPI.  In particular, it allows
 *   us to use MPI_Alltoallw instead of MPI_Neighborhood_alltoallw.
 *
 */
int BigMPI_Type_contiguous(MPI_Aint offset, MPI_Count count, MPI_Datatype oldtype, MPI_Datatype * newtype)
{
    /* The count has to fit into MPI_Aint for BigMPI to work. */
    if ((uint64_t)count>(uint64_t)bigmpi_count_max) {
        printf("count (%llu) exceeds bigmpi_count_max (%llu)\n",
               (long long unsigned)count, (long long unsigned)bigmpi_count_max);
        fflush(stdout);
    }

#ifdef BIGMPI_AVOID_TYPE_CREATE_STRUCT
    if (offset==0) {
        /* There is no need for this code path in homogeneous execution,
         * but it is useful to exercise anyways. */
        int a, b;
        int prime = BigMPI_Factorize_count(count, &a, &b);
        if (!prime) {
            MPI_Type_vector(a, b, b, oldtype, newtype);
            return MPI_SUCCESS;
        }
    }
#endif
    MPI_Count c = count/bigmpi_int_max;
    MPI_Count r = count%bigmpi_int_max;

    assert(c<bigmpi_int_max);
    assert(r<bigmpi_int_max);

    MPI_Datatype chunks;
    MPI_Type_vector(c, bigmpi_int_max, bigmpi_int_max, oldtype, &chunks);

    MPI_Datatype remainder;
    MPI_Type_contiguous(r, oldtype, &remainder);

    MPI_Aint lb /* unused */, extent;
    MPI_Type_get_extent(oldtype, &lb, &extent);

    MPI_Aint remdisp          = (MPI_Aint)c*bigmpi_int_max*extent;
    int blocklengths[2]       = {1,1};
    MPI_Aint displacements[2] = {offset,offset+remdisp};
    MPI_Datatype types[2]     = {chunks,remainder};
    MPI_Type_create_struct(2, blocklengths, displacements, types, newtype);

    MPI_Type_free(&chunks);
    MPI_Type_free(&remainder);

    return MPI_SUCCESS;
}
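A hedged usage sketch (the wrapper name is hypothetical; BigMPI's public entry points are the MPIX_*_x routines): sending more than INT_MAX elements as a single element of a derived type built with offset 0:

int BigMPI_Send_sketch(const void *buf, MPI_Count count, MPI_Datatype dt,
                       int dest, int tag, MPI_Comm comm)
{
    MPI_Datatype bigtype;
    BigMPI_Type_contiguous(0, count, dt, &bigtype);
    MPI_Type_commit(&bigtype);
    int rc = MPI_Send(buf, 1, bigtype, dest, tag, comm);
    MPI_Type_free(&bigtype);
    return rc;
}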
Example #29
/*
 * Check value of received indexed datatype buffer
 */
static int MTestTypeIndexedCheckbuf(MTestDatatype * mtype)
{
    unsigned char *p;
    unsigned char expected;
    int err = 0, merr;
    MPI_Aint size = 0, offset, dt_offset, extent = 0, lb = 0;

    p = (unsigned char *) mtype->buf;
    if (p) {
        MPI_Aint k, b;
        int i, j, nc;
        merr = MPI_Type_get_extent(mtype->datatype, &lb, &extent);
        if (merr)
            MTestPrintError(merr);

        size = lb + extent;
        nc = 0;
        dt_offset = 0;
        /* For each datatype */
        for (k = 0; k < mtype->count; k++) {
            /* For each block */
            for (i = 0; i < mtype->nblock; i++) {
                /* For each element in the block */
                for (j = 0; j < mtype->index[i]; j++) {
                    offset = dt_offset + mtype->displ_in_bytes[i]
                        + j * mtype->basesize;
                    /* For each byte in the element */
                    for (b = 0; b < mtype->basesize; b++) {
                        expected = (unsigned char) (0xff ^ (nc++ & 0xff));
                        if (p[offset + b] != expected) {
                            err++;
                            if (mtype->printErrors && err < 10) {
                                printf("Data expected = %x but got p[%d,%d] = %x\n",
                                       expected, i, j, p[offset + b]);
                                fflush(stdout);
                            }
                        }
                    }
                }
            }
            dt_offset += size;
        }
    }
    return err;
}
Example #30
EXTERN_C_BEGIN
/*
      MPICH does not provide the external32 representation for MPI_File_set_view() so we need to provide the functions.
    These are set into MPI in PetscInitialize() via MPI_Register_datarep()

    Note I use PetscMPIInt for the MPI error codes since that is what MPI uses (instead of the standard PetscErrorCode)

    The next three routines are not used because MPICH does not support their use

*/
PetscMPIInt PetscDataRep_extent_fn(MPI_Datatype datatype,MPI_Aint *file_extent,void *extra_state) 
{
  MPI_Aint    lb;
  PetscMPIInt ierr;

  ierr = MPI_Type_get_extent(datatype,&lb,file_extent);
  return ierr;
}