Example 1
static void
_setup_mpsort_mpi(struct crmpistruct * o,
                  struct crstruct * d,
                  void * myoutbase, size_t myoutnmemb,
                  MPI_Comm comm)
{

    o->comm = comm;

    MPI_Comm_size(comm, &o->NTask);
    MPI_Comm_rank(comm, &o->ThisTask);

    o->mybase = d->base;
    o->mynmemb = d->nmemb;
    o->myoutbase = myoutbase;
    o->myoutnmemb = myoutnmemb;

    MPI_Allreduce(&o->mynmemb, &o->nmemb, 1, MPI_TYPE_PTRDIFF, MPI_SUM, comm);
    MPI_Allreduce(&o->myoutnmemb, &o->outnmemb, 1, MPI_TYPE_PTRDIFF, MPI_SUM, comm);

    if(o->outnmemb != o->nmemb) {
        fprintf(stderr, "total number of items in the item does not match the input %ld != %ld\n",
                o->outnmemb, o->nmemb);
        abort();
    }


    MPI_Type_contiguous(d->rsize, MPI_BYTE, &o->MPI_TYPE_RADIX);
    MPI_Type_commit(&o->MPI_TYPE_RADIX);

    MPI_Type_contiguous(d->size, MPI_BYTE, &o->MPI_TYPE_DATA);
    MPI_Type_commit(&o->MPI_TYPE_DATA);

}
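MPI_TYPE_PTRDIFF above is not a predefined MPI datatype; mpsort defines it elsewhere. One way to obtain such a type, shown here only as a sketch and not as mpsort's actual definition, is MPI_Type_match_size:

#include <stddef.h>

/* Hypothetical helper: look up a predefined MPI integer type whose size
 * matches ptrdiff_t.  Illustration only; mpsort's real MPI_TYPE_PTRDIFF may
 * be defined differently (e.g. as a compile-time macro). */
static MPI_Datatype
lookup_ptrdiff_type(void)
{
    MPI_Datatype t;
    MPI_Type_match_size(MPI_TYPECLASS_INTEGER, (int) sizeof(ptrdiff_t), &t);
    return t;    /* predefined named type: do not commit or free it */
}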
Example 2
static MPI_Datatype
create_indexed_gap_ddt( void )
{
    ddt_gap dt[2];
    MPI_Datatype dt1, dt2, dt3;
    int bLength[2] = { 2, 1 };
    MPI_Datatype types[2] = { MPI_INT, MPI_FLOAT };
    MPI_Aint displ[2];

    MPI_Get_address( &(dt[0].is[0].i[0]), &(displ[0]) );
    MPI_Get_address( &(dt[0].is[0].f), &(displ[1]) );
    displ[1] -= displ[0];
    displ[0] -= displ[0];
    MPI_Type_create_struct( 2, bLength, displ, types, &dt1 );
    /*MPI_DDT_DUMP( dt1 );*/
    MPI_Type_contiguous( 3, dt1, &dt2 );
    /*MPI_DDT_DUMP( dt2 );*/
    bLength[0] = 1;
    bLength[1] = 1;
    MPI_Get_address( &(dt[0].v1), &(displ[0]) );
    MPI_Get_address( &(dt[0].is[0]), &(displ[1]) );
    displ[1] -= displ[0];
    displ[0] -= displ[0];
    types[0] = MPI_INT;
    types[1] = dt2;
    MPI_Type_create_struct( 2, bLength, displ, types, &dt3 );
    /*MPI_DDT_DUMP( dt3 );*/
    MPI_Type_free( &dt1 );
    MPI_Type_free( &dt2 );
    MPI_Type_contiguous( 10, dt3, &dt1 );
    MPI_DDT_DUMP( dt1 );
    MPI_Type_free( &dt3 );
    MPI_Type_commit( &dt1 );
    return dt1;
}
Example 3
/* Returns MPI_SUCCESS on success, an MPI error code on failure.  Code above
 * needs to call MPIO_Err_return_xxx.
 */
int MPIOI_Type_block(int *array_of_gsizes, int dim, int ndims, int nprocs,
		     int rank, int darg, int order, MPI_Aint orig_extent,
		     MPI_Datatype type_old, MPI_Datatype *type_new,
		     MPI_Aint *st_offset) 
{
/* nprocs = no. of processes in dimension dim of grid
   rank = coordinate of this process in dimension dim */
    int blksize, global_size, mysize, i, j;
    MPI_Aint stride;
    
    global_size = array_of_gsizes[dim];

    if (darg == MPI_DISTRIBUTE_DFLT_DARG)
	blksize = (global_size + nprocs - 1)/nprocs;
    else {
	blksize = darg;

	/* --BEGIN ERROR HANDLING-- */
	if (blksize <= 0) {
	    return MPI_ERR_ARG;
	}

	if (blksize * nprocs < global_size) {
	    return MPI_ERR_ARG;
	}
	/* --END ERROR HANDLING-- */
    }

    j = global_size - blksize*rank;
    mysize = ADIOI_MIN(blksize, j);
    if (mysize < 0) mysize = 0;

    stride = orig_extent;
    if (order == MPI_ORDER_FORTRAN) {
	if (dim == 0) 
	    MPI_Type_contiguous(mysize, type_old, type_new);
	else {
	    for (i=0; i<dim; i++) stride *= array_of_gsizes[i];
	    MPI_Type_hvector(mysize, 1, stride, type_old, type_new);
	}
    }
    else {
	if (dim == ndims-1) 
	    MPI_Type_contiguous(mysize, type_old, type_new);
	else {
	    for (i=ndims-1; i>dim; i--) stride *= array_of_gsizes[i];
	    MPI_Type_hvector(mysize, 1, stride, type_old, type_new);
	}

    }

    *st_offset = blksize * rank;
     /* in terms of no. of elements of type oldtype in this dimension */
    if (mysize == 0) *st_offset = 0;

    return MPI_SUCCESS;
}
Example 4
/*@
    MPI_File_get_view - Returns the file view

Input Parameters:
. fh - file handle (handle)

Output Parameters:
. disp - displacement (nonnegative integer)
. etype - elementary datatype (handle)
. filetype - filetype (handle)
. datarep - data representation (string)

.N fortran
@*/
int MPI_File_get_view(MPI_File fh, MPI_Offset * disp, MPI_Datatype * etype,
                      MPI_Datatype * filetype, char *datarep)
{
    int error_code;
    ADIO_File adio_fh;
    static char myname[] = "MPI_FILE_GET_VIEW";
    int i, j, k, combiner;
    MPI_Datatype copy_etype, copy_filetype;

    ROMIO_THREAD_CS_ENTER();

    adio_fh = MPIO_File_resolve(fh);

    /* --BEGIN ERROR HANDLING-- */
    MPIO_CHECK_FILE_HANDLE(adio_fh, myname, error_code);

    if (datarep == NULL) {
        error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                          myname, __LINE__, MPI_ERR_ARG, "**iodatarepnomem", 0);
        error_code = MPIO_Err_return_file(adio_fh, error_code);
        goto fn_exit;
    }
    /* --END ERROR HANDLING-- */

    *disp = adio_fh->disp;
    ADIOI_Strncpy(datarep,
                  (adio_fh->is_external32 ? "external32" : "native"), MPI_MAX_DATAREP_STRING);

    MPI_Type_get_envelope(adio_fh->etype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED)
        *etype = adio_fh->etype;
    else {
        /* FIXME: It is wrong to use MPI_Type_contiguous; the user could choose to
         * re-implement MPI_Type_contiguous in an unexpected way.  Either use
         * MPID_Barrier as in MPICH or PMPI_Type_contiguous */
        MPI_Type_contiguous(1, adio_fh->etype, &copy_etype);

        /* FIXME: Ditto for MPI_Type_commit - use NMPI or PMPI */
        MPI_Type_commit(&copy_etype);
        *etype = copy_etype;
    }
    /* FIXME: Ditto for MPI_Type_xxx - use NMPI or PMPI */
    MPI_Type_get_envelope(adio_fh->filetype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED)
        *filetype = adio_fh->filetype;
    else {
        MPI_Type_contiguous(1, adio_fh->filetype, &copy_filetype);

        MPI_Type_commit(&copy_filetype);
        *filetype = copy_filetype;
    }

  fn_exit:
    ROMIO_THREAD_CS_EXIT();

    return MPI_SUCCESS;
}
Example 5
void CMPICommunicator::Init(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    
    MPI_Type_contiguous(sizeof(Cell), MPI_BYTE, &cellDatatype);
    MPI_Type_commit(&cellDatatype);
    
    MPI_Type_contiguous(sizeof(Status), MPI_BYTE, &statusDatatype);
    MPI_Type_commit(&statusDatatype);
}
Example 6
void Construct_MPI_Datatypes(int rows, int cols)
{
    // Contiguous memory vector
    MPI_Type_contiguous(cols, MPI_DOUBLE, &MPI_Vector);
    MPI_Type_commit(&MPI_Vector);

    // Contiguous memory matrix
    MPI_Type_contiguous(rows, MPI_Vector, &MPI_Matrix);
    MPI_Type_commit(&MPI_Matrix);

    return;
}
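MPI_Vector and MPI_Matrix are evidently file-scope globals. A minimal usage sketch under that assumption (hypothetical function name; a contiguous row-major rows*cols array of doubles) sends the whole block with a count of one:

/* Sketch only: assumes the globals and Construct_MPI_Datatypes() above. */
extern MPI_Datatype MPI_Vector, MPI_Matrix;

void send_whole_matrix(double *A, int dest)
{
    /* One element of MPI_Matrix covers the entire rows x cols block. */
    MPI_Send(A, 1, MPI_Matrix, dest, 0, MPI_COMM_WORLD);
}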
Example 7
void Build_matrix_type(
                       LOCAL_MATRIX_T*  local_A  /* in */) {
    MPI_Datatype  temp_mpi_t;
    int           block_lengths[2];
    MPI_Aint      displacements[2];
    MPI_Datatype  typelist[2];
    MPI_Aint      start_address;
    MPI_Aint      address;
    
    MPI_Type_contiguous(Order(local_A)*Order(local_A),
                        MPI_FLOAT, &temp_mpi_t);
    
    block_lengths[0] = block_lengths[1] = 1;
    
    typelist[0] = MPI_INT;
    typelist[1] = temp_mpi_t;
    
    MPI_Get_address(local_A, &start_address);
    MPI_Get_address(&(local_A->n_bar), &address);
    displacements[0] = address - start_address;
    
    MPI_Get_address(local_A->entries, &address);
    displacements[1] = address - start_address;
    
    MPI_Type_create_struct(2, block_lengths, displacements,
                           typelist, &local_matrix_mpi_t);
    MPI_Type_commit(&local_matrix_mpi_t);
}  /* Build_matrix_type */
Example 8
int
dgraphAllreduceMaxSum2 (
Gnum *                      reduloctab,           /* Pointer to array of local Gnum data   */
Gnum *                      reduglbtab,           /* Pointer to array of reduced Gnum data */
int                         redumaxsumnbr,        /* Number of max + sum Gnum operations   */
MPI_User_function *         redufuncptr,          /* Pointer to operator function          */
MPI_Comm                    proccomm)             /* Communicator to be used for reduction */
{
  MPI_Datatype      redutypedat;                  /* Data type for finding best separator              */
  MPI_Op            reduoperdat;                  /* Handle of MPI operator for finding best separator */

  if ((MPI_Type_contiguous (redumaxsumnbr, GNUM_MPI, &redutypedat) != MPI_SUCCESS) ||
      (MPI_Type_commit (&redutypedat)                              != MPI_SUCCESS) ||
      (MPI_Op_create (redufuncptr, 1, &reduoperdat)                != MPI_SUCCESS)) {
    errorPrint ("dgraphAllreduceMaxSum: communication error (1)");
    return     (1);
  }

  if (MPI_Allreduce (reduloctab, reduglbtab, 1, redutypedat, reduoperdat, proccomm) != MPI_SUCCESS) {
    errorPrint ("dgraphAllreduceMaxSum: communication error (2)");
    return     (1);
  }

  if ((MPI_Op_free   (&reduoperdat) != MPI_SUCCESS) ||
      (MPI_Type_free (&redutypedat) != MPI_SUCCESS)) {
    errorPrint ("dgraphAllreduceMaxSum: communication error (3)");
    return     (1);
  }

  return (0);
}
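The redufuncptr argument must have the MPI_User_function signature. Scotch supplies its own operator; the function below is only a sketch of that shape, with a purely illustrative split between max entries and sum entries (REDU_MAX_NBR and REDU_SUM_NBR are hypothetical):

#define REDU_MAX_NBR 2                            /* hypothetical layout: maxima first, */
#define REDU_SUM_NBR 2                            /* then the summed entries            */

static void
reduMaxSumOp (void * in, void * inout, int * len, MPI_Datatype * typedat)
{
  Gnum * inptr    = (Gnum *) in;
  Gnum * inoutptr = (Gnum *) inout;
  int    i, j;

  (void) typedat;                                 /* always the contiguous Gnum type built above */

  for (j = 0; j < *len; j ++) {                   /* for each element of the contiguous type */
    for (i = 0; i < REDU_MAX_NBR; i ++)           /* max part */
      if (inptr[i] > inoutptr[i])
        inoutptr[i] = inptr[i];
    for ( ; i < REDU_MAX_NBR + REDU_SUM_NBR; i ++) /* sum part */
      inoutptr[i] += inptr[i];
    inptr    += REDU_MAX_NBR + REDU_SUM_NBR;
    inoutptr += REDU_MAX_NBR + REDU_SUM_NBR;
  }
}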
Example 9
void gather(int rank, int size, const int gran, body *bodies){
	int i, j;
	int sendto = (rank + 1) % size;
	int recvfrom = ((rank + size) - 1) % size;
	
	MPI_Datatype bodytype;
	MPI_Type_contiguous(3, MPI_DOUBLE, &bodytype);
	MPI_Type_commit(&bodytype);
	MPI_Status status;
	
	body *outbuf = (body *) malloc(gran*sizeof(body));
	if (rank != 0) {
		//memcpy(outbuf, bodies, gran*sizeof(body));
		MPI_Send(bodies, gran, bodytype, recvfrom, 0, MPI_COMM_WORLD);
		for(i=0; i<size-rank-1; i++){
			MPI_Recv(outbuf, gran, bodytype, sendto, 0, MPI_COMM_WORLD, &status);
			MPI_Send(outbuf, gran, bodytype, recvfrom, 0, MPI_COMM_WORLD);
		}
	}
	else {
		FILE *oFile;
		oFile = fopen("peval_out.txt", "w");
		//memcpy(outbuf, bodies, gran*sizeof(body));
		for(j=0; j<gran; j++)
			fprintf(oFile, "%15.10f %15.10f %15.10f\n", bodies[j].x, bodies[j].y, bodies[j].m);
		for(i=0; i<size-rank-1; i++){
			MPI_Recv(outbuf, gran, bodytype, sendto, 0, MPI_COMM_WORLD, &status);
			for(j=0; j<gran; j++)
				fprintf(oFile, "%15.10f %15.10f %15.10f\n", outbuf[j].x, outbuf[j].y, outbuf[j].m);
		}	
		fclose(oFile);
	}
	free(outbuf);
}
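MPI_Type_contiguous(3, MPI_DOUBLE, ...) relies on struct body being exactly three contiguous doubles with no padding. A layout-safe sketch (assuming fields x, y and m, as the fprintf calls suggest; make_bodytype is a hypothetical name) derives the type from the actual field offsets instead:

#include <stddef.h>   /* offsetof */

static MPI_Datatype make_bodytype(void)
{
	int          blocklens[3] = { 1, 1, 1 };
	MPI_Aint     disps[3]     = { offsetof(body, x),
	                              offsetof(body, y),
	                              offsetof(body, m) };
	MPI_Datatype types[3]     = { MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE };
	MPI_Datatype tmp, bodytype;

	MPI_Type_create_struct(3, blocklens, disps, types, &tmp);
	/* Force the extent to sizeof(body) so arrays of body are strided correctly. */
	MPI_Type_create_resized(tmp, 0, (MPI_Aint) sizeof(body), &bodytype);
	MPI_Type_free(&tmp);
	MPI_Type_commit(&bodytype);
	return bodytype;
}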
Example 10
void mpiReduce_pickerV3(float       *resDataAbsMaxPaddedGlobal,
                        size_t      *resDataMaxIndPaddedGlobal,
                        size_t      resSize,
                        eXCorrMerge bAbs)
{
    resSizeMPI = resSize;

    MPI_Datatype mpiType;
    MPI_Type_contiguous((int) 2, MPI_FLOAT, &mpiType);
    MPI_Type_commit(&mpiType);

    float     *resDataGlobalNode = NULL;
    float     *resDataGlobalNodeReduce = NULL;

    array_new(resDataGlobalNode, 2*resSize);
    array_new(resDataGlobalNodeReduce, 2*resSize);

    memcpy(resDataGlobalNode,
           resDataAbsMaxPaddedGlobal,
           resSize*sizeof(float));
    mpiOp_array_typecast(resDataMaxIndPaddedGlobal,
                         resDataGlobalNode+resSize,
                         resSize);

    MPI_Op mpiOp;

    switch (bAbs) {
        case XCORR_MERGE_NEGATIVE:
            MPI_Op_create((MPI_User_function *) mpiOp_xcorrMergeResultGlobalV3Abs,
                          1,            // commutative
                          &mpiOp);
            break;
        case XCORR_MERGE_POSITIVE:
            MPI_Op_create((MPI_User_function *) mpiOp_xcorrMergeResultGlobalV3,
                          1,            // commutative
                          &mpiOp);
            break;
        default:
            ERROR("mpiReduce_pickerV3", "unsupported merging mode");
    }
    MPI_Reduce(resDataGlobalNode,
               resDataGlobalNodeReduce,
               (int) resSize,       // resSize elements of size 2*sizeof(float)
               mpiType,
               mpiOp,
               0,
               MPI_COMM_WORLD);
    MPI_Op_free(&mpiOp);

    memcpy(resDataAbsMaxPaddedGlobal,
           resDataGlobalNodeReduce,
           resSize*sizeof(float));
    mpiOp_array_typecast(resDataGlobalNodeReduce+resSize,
                         resDataMaxIndPaddedGlobal,
                         resSize);

    array_delete(resDataGlobalNode);
    array_delete(resDataGlobalNodeReduce);
    MPI_Type_free(&mpiType);
}
Example 11
void create_pattern(gchar* name, PatternType type, gint iter, gint elem, gint level, GroupBlock* group)
{
	Verbose("Creating pattern%d \"%s\" elem %d level %d\n", type, name, elem, level);
	Pattern* pattern = pattern_new(type, iter, elem, level);

	gint groupSize = (group? group->groupsize : size);
	gint groupRank;
	if (group)
		MPI_Comm_rank(group->mpicomm, &groupRank);
	else
		groupRank = rank;

	Verbose("GroupSize = %d, GroupRank = %d\n", groupSize, groupRank);

	MPI_Type_contiguous(elem, MPI_BYTE, &pattern->eType);
	MPI_Type_commit(&pattern->eType);
	pattern->type_size = 1;

	switch (type) {
		/* contiguous data */
		case PATTERN1: {
			int array_sizes[] = { groupSize };
			int array_subsizes[] = { 1 };
			int array_starts[] = { groupRank };

			MPI_Type_create_subarray(
				1,				/* number of array dimensions*/
				array_sizes,	/* number of eTypes in each dimension of the full array*/
				array_subsizes,	/* number of eTypes in each dimension of the subarray */
				array_starts,	/* starting coordinates of the subarray in each dimension*/
				MPI_ORDER_C,	/* array storage order flag (state) */
				pattern->eType,	/* eType (old datatype) */
				&pattern->datatype);
			MPI_Type_commit(&pattern->datatype);
			break;
		}

		/* non-contiguous data */
		case PATTERN2: {
			int array_sizes[] = { iter, groupSize };
			int array_subsizes[] = { iter, 1 };
			int array_starts[] = { 0, groupRank };

			MPI_Type_create_subarray(
				2,				/* number of array dimensions*/
				array_sizes,	/* number of eTypes in each dimension of the full array*/
				array_subsizes,	/* number of eTypes in each dimension of the subarray */
				array_starts,	/* starting coordinates of the subarray in each dimension*/
				MPI_ORDER_C,	/* array storage order flag (state) */
				pattern->eType,	/* eType (old datatype) */
				&pattern->datatype);
			MPI_Type_commit(&pattern->datatype);
			break;
		}

		default: Error("Pattern%d not yet supported!\n", type);
	}

	g_hash_table_insert(patternMap, name, pattern);
}
Example 12
HYPRE_Int
hypre_MPI_Type_contiguous( HYPRE_Int           count,
                           hypre_MPI_Datatype  oldtype,
                           hypre_MPI_Datatype *newtype )
{
   return (HYPRE_Int) MPI_Type_contiguous((hypre_int)count, oldtype, newtype);
}
Example 13
/* ----------------- Initialize complex data type and ops for MPI ----*/
void parms_InitComplex()
{
  MPI_Type_contiguous(2, MPI_DOUBLE, &MPI_CMPLX);
  MPI_Type_commit( &MPI_CMPLX );
  
  MPI_Op_create((MPI_User_function *)complex_sum, true, &MPI_CMPLX_SUM);
}
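complex_sum is defined elsewhere in pARMS. The sketch below shows a callback compatible with MPI_CMPLX, assuming the complex value is stored as two consecutive doubles (re, im); complex_sum_sketch is a hypothetical name illustrating the expected MPI_User_function shape, not necessarily pARMS's actual implementation:

static void complex_sum_sketch(void *in, void *inout, int *len, MPI_Datatype *dtype)
{
  double *a = (double *) in;
  double *b = (double *) inout;
  int i;

  (void) dtype;                      /* always MPI_CMPLX */
  for (i = 0; i < *len; i++) {       /* each element is one (re, im) pair */
    b[2*i]     += a[2*i];            /* real part      */
    b[2*i + 1] += a[2*i + 1];        /* imaginary part */
  }
}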
Example 14
int main(int argc, char *argv[])
{
	int rank;
	MPI_Status status;
	MPI_Datatype type;
	double buffer[10] = {
	1.11, 2.22, 3.33, 4.44, 5.55, 6.66, 7.77, 8.88, 9.99, 10.1010
	};
 
	MPI_Init(&argc, &argv);
 
	MPI_Type_contiguous(5, MPI_DOUBLE, &type);
	MPI_Type_commit(&type);    /* a datatype must be committed before it is used in communication */
 
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 
	if (rank == 0)
	{
		MPI_Send(buffer, 2, type, 1, 123, MPI_COMM_WORLD);
	}
	else if (rank == 1)
	{
	double b[10];
		MPI_Recv(b, 2, type, 0, 123, MPI_COMM_WORLD, &status);
	}
 
	MPI_Type_free(&type);
	MPI_Finalize();
	return 0;
}
Example 15
void BBLSGraph::createDatatypes() {
	// BBLSNode struct
	int block_lengths[5];
	block_lengths[0] = 1;
	block_lengths[1] = 1;
	block_lengths[2] = 1;
	block_lengths[3] = 1;
	block_lengths[4] = 1;
	
	MPI_Aint displacements[5];
	displacements[0] = offsetof(BBLSNode, type);
	displacements[1] = offsetof(BBLSNode, output);
	displacements[2] = offsetof(BBLSNode, inputLeft);
	displacements[3] = offsetof(BBLSNode, inputRight);
	displacements[4] = sizeof(BBLSNode);
	
	MPI_Datatype types[5];
	types[0] = MPI_INT;
	types[1] = MPI_UNSIGNED;
	types[2] = MPI_UNSIGNED;
	types[3] = MPI_UNSIGNED;
	types[4] = MPI_UB;
	
	MPI_Type_struct(5, block_lengths, displacements, types, &mpi_nodeType);
	MPI_Type_commit(&mpi_nodeType);
	
	// 3 BBLSNodes
	MPI_Type_contiguous(3, mpi_nodeType, &mpi_threeNodes);
	MPI_Type_commit(&mpi_threeNodes);
}
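MPI_Type_struct and the MPI_UB marker used above were deprecated in MPI-2.0 and removed in MPI-3.0. The sketch below (a hypothetical helper, same BBLSNode layout assumed) builds the same two datatypes with the current API, using MPI_Type_create_resized to express the extent that MPI_UB used to mark:

static void create_bbls_datatypes_mpi3(MPI_Datatype &nodeType, MPI_Datatype &threeNodes)
{
	int block_lengths[4] = { 1, 1, 1, 1 };
	MPI_Aint displacements[4] = {
		offsetof(BBLSNode, type),
		offsetof(BBLSNode, output),
		offsetof(BBLSNode, inputLeft),
		offsetof(BBLSNode, inputRight)
	};
	MPI_Datatype types[4] = { MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED, MPI_UNSIGNED };
	MPI_Datatype tmp;
	
	MPI_Type_create_struct(4, block_lengths, displacements, types, &tmp);
	// Setting the extent to sizeof(BBLSNode) is what the MPI_UB entry expressed.
	MPI_Type_create_resized(tmp, 0, (MPI_Aint) sizeof(BBLSNode), &nodeType);
	MPI_Type_free(&tmp);
	MPI_Type_commit(&nodeType);
	
	MPI_Type_contiguous(3, nodeType, &threeNodes);
	MPI_Type_commit(&threeNodes);
}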
Example 16
void escrita()
{	
	int i;
	MPI_Type_contiguous(TAMTUPLA, MPI_INT, &tupla);
	MPI_Type_commit(&tupla);

	ret = MPI_File_open(	MPI_COMM_WORLD, "arquivofinal.dat", 
							MPI_MODE_WRONLY | MPI_MODE_CREATE, 
							MPI_INFO_NULL, &arquivofinal);
	if (ret == MPI_SUCCESS)
		printf("Final file opened successfully on process %d\n", meu_ranque);
	else
	{
		printf("Error opening final file on process %d\n", meu_ranque);
		MPI_Abort(MPI_COMM_WORLD, 1);
	}
	    
	MPI_File_set_view(	arquivofinal, 0,
						MPI_INT, MPI_INT, 
						"native", MPI_INFO_NULL);

	for (i = 0; i < TAMBUF; i+=TAMTUPLA)
		MPI_File_write_ordered(	arquivofinal, buf_leitura + i, 1, tupla, MPI_STATUS_IGNORE);
	
	MPI_File_close(&arquivofinal);
}
Example 17
static int type_create_contiguous_x(MPI_Count count,
	MPI_Datatype oldtype, MPI_Datatype *newtype)
{
    /* to make 'count' fit MPI-3 type processing routines (which take integer
     * counts), we construct a type consisting of N INT_MAX chunks followed by
     * a remainder.  e.g for a count of 4000000000 bytes you would end up with
     * one 2147483647-byte chunk followed immediately by a 1852516353-byte
     * chunk */
    MPI_Datatype chunks, remainder;
    MPI_Aint lb, extent, disps[2];
    int blocklens[2];
    MPI_Datatype types[2];

    MPI_Count c = count/INT_MAX;
    MPI_Count r = count%INT_MAX;

    MPI_Type_vector(c, INT_MAX, INT_MAX, oldtype, &chunks);
    MPI_Type_contiguous(r, oldtype, &remainder);

    MPI_Type_get_extent(oldtype, &lb, &extent);

    blocklens[0] = 1;      blocklens[1] = 1;
    disps[0]     = 0;      disps[1]     = c*extent*INT_MAX;
    types[0]     = chunks; types[1]     = remainder;

    MPI_Type_create_struct(2, blocklens, disps, types, newtype);

    MPI_Type_free(&chunks);
    MPI_Type_free(&remainder);

    return MPI_SUCCESS;
}
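A minimal caller sketch (the 6 GiB figure and the helper name are hypothetical) commits the resulting type and frees it when done:

/* Hypothetical caller: describe ~6 GiB of bytes with a single committed type. */
static MPI_Datatype make_6gib_type(void)
{
    MPI_Datatype big;
    MPI_Count nbytes = (MPI_Count) 6 * 1024 * 1024 * 1024;

    type_create_contiguous_x(nbytes, MPI_BYTE, &big);
    MPI_Type_commit(&big);          /* free later with MPI_Type_free(&big) */
    return big;
}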
Example 18
/* Extract an m x n submatrix within an m x N matrix and transpose it.
   Assume storage by rows; the defined datatype accesses by columns */
MPI_Datatype transpose_type(int N, int m, int n, MPI_Datatype type)
/* computes a datatype for the transpose of an mxn matrix 
   with entries of type type */
{
  MPI_Datatype subrow, subrow1, submatrix;
  MPI_Aint lb, extent;
  
  MPI_Type_vector(m, 1, N, type, &subrow);
  MPI_Type_get_extent(type, &lb, &extent);
  MPI_Type_create_resized(subrow, 0, extent, &subrow1);
  MPI_Type_contiguous(n, subrow1, &submatrix); 
  MPI_Type_commit(&submatrix);
  MPI_Type_free( &subrow );
  MPI_Type_free( &subrow1 );

  /* Add a consistency test: the size of submatrix should be
     n * m * sizeof(type) and the extent should be ((m-1)*N+n) * sizeof(type) */
  {
      int      tsize;
      MPI_Aint textent, llb;
      MPI_Type_size( type, &tsize );
      MPI_Type_get_true_extent( submatrix, &llb, &textent );
      
      if (textent != tsize * (N * (m-1)+n)) {
	  fprintf( stderr, "Transpose Submatrix extent is %ld, expected %ld (%d,%d,%d)\n",
		   (long)textent, (long)(tsize * (N * (m-1)+n)), N, n, m );
      }
  }

  return(submatrix);
}
Example 19
int DefineMPITypes(void)
{
  Winspecs winspecs;
  Flags flags;
  rect rectangle;

  int len[3];
  MPI_Aint disp[3];               /* MPI_Type_struct expects MPI_Aint displacements */
  MPI_Datatype types[3];

  NUM_type = MPI_DOUBLE;

  MPI_Type_contiguous(6, MPI_INT, &winspecs_type);
  MPI_Type_commit(&winspecs_type);

  len[0] = 10;
  len[1] = 2;
  len[2] = 6;
  disp[0] = (MPI_Aint) ((char *) (&(flags.breakout)) - (char *) (&(flags)));
  disp[1] = (MPI_Aint) ((char *) (&(flags.boundary_sq)) - (char *) (&(flags)));
  disp[2] = (MPI_Aint) ((char *) (&(flags.rmin)) - (char *) (&(flags)));
  types[0] = MPI_INT;
  types[1] = MPI_DOUBLE;
  types[2] = NUM_type;
  MPI_Type_struct(3, len, disp, types, &flags_type);
  MPI_Type_commit(&flags_type);

  len[0] = 5;
  disp[0] = (MPI_Aint) ((char *) (&(rectangle.l)) - (char *) (&(rectangle)));
  types[0] = MPI_INT;
  MPI_Type_struct(1, len, disp, types, &rect_type);
  MPI_Type_commit(&rect_type);

  return 0;
}
Example 20
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size;
    int *buf, bufsize;
    int *result;
    int *rmabuf, rsize, rcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Request req;
    MPI_Datatype derived_dtp;

    MTest_Init(&argc, &argv);

    bufsize = 256 * sizeof(int);
    buf = (int *) malloc(bufsize);
    if (!buf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    result = (int *) malloc(bufsize);
    if (!result) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    rcount = 16;
    rsize = rcount * sizeof(int);
    rmabuf = (int *) malloc(rsize);
    if (!rmabuf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", rsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_contiguous(2, MPI_INT, &derived_dtp);
    MPI_Type_commit(&derived_dtp);

    /* The following loop is used to run through a series of communicators
     * that are subsets of MPI_COMM_WORLD, of size 1 or greater. */
    while (MTestGetIntracommGeneral(&comm, 1, 1)) {
        int count = 0;

        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        MPI_Win_create(buf, bufsize, 2 * sizeof(int), MPI_INFO_NULL, comm, &win);
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        /** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
        MPI_Win_fence(0, win);

        TEST_FENCE_OP("Put", MPI_Put(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);

        TEST_FENCE_OP("Get", MPI_Get(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);
Example 21
static MPI_Datatype
create_indexed_gap_optimized_ddt( void )
{
    MPI_Datatype dt1, dt2, dt3;
    int bLength[3];
    MPI_Datatype types[3];
    MPI_Aint displ[3];
   
    MPI_Type_contiguous( 40, MPI_BYTE, &dt1 );
    MPI_Type_create_resized( dt1, 0, 44, &dt2 );
   
    bLength[0] = 4;
    bLength[1] = 9;
    bLength[2] = 36;
   
    types[0] = MPI_BYTE;
    types[1] = dt2;
    types[2] = MPI_BYTE;

    displ[0] = 0;
    displ[1] = 8;
    displ[2] = 44 * 9 + 8;
   
    MPI_Type_create_struct( 3, bLength, displ, types, &dt3 );
   
    MPI_Type_free( &dt1 );
    MPI_Type_free( &dt2 );
    MPI_DDT_DUMP( dt3 );
    MPI_Type_commit( &dt3 );
    return dt3;
}
Example 22
int
main (int argc, char **argv)
{
  int nprocs = -1;
  int rank = -1;
  MPI_Comm comm = MPI_COMM_WORLD;
  char processor_name[128];
  int namelen = 128;
  MPI_Datatype newtype;

  /* init */
  MPI_Init (&argc, &argv);
  MPI_Comm_size (comm, &nprocs);
  MPI_Comm_rank (comm, &rank);
  MPI_Get_processor_name (processor_name, &namelen);
  printf ("(%d) is alive on %s\n", rank, processor_name);
  fflush (stdout);

  MPI_Barrier (comm);
  MPI_Type_contiguous (128, MPI_INT, &newtype);
  MPI_Type_commit (&newtype);
  MPI_Barrier (comm);

  printf ("(%d) Finished normally\n", rank);
  MPI_Finalize ();
  return 0;
}
Example 23
static void
reduce_results (perf_metric_t *s, VAC_t *a)
{
    VAC_t vac;
    MPI_Op var_op;
    MPI_Datatype vac_type;

    MPI_Type_contiguous (12, MPI_DOUBLE, &vac_type);    
    MPI_Type_commit (&vac_type);
    MPI_Op_create (reducer, 1, &var_op);
    
    vac.var = s->op_base.S;
    vac.accum = s->op_base.accum;
    vac.count = (double) s->op_base.op_count;
    vac.min = s->op_base.min;
    vac.max = s->op_base.max;
    vac.min_mean = s->op_base.accum/(double)s->op_base.op_count;
    vac.max_mean = s->op_base.accum/(double)s->op_base.op_count;
    vac.mean_accum = s->op_base.accum/(double)s->op_base.op_count;
    vac.mean_count = 1;
    vac.min_std = s->op_base.std;
    vac.max_std = s->op_base.std;
    vac.std_accum = s->op_base.std; 

    MPI_Allreduce (&vac, a, 1, vac_type, var_op, 
                   MPI_COMM_WORLD);

}
Example 24
/**
 * @brief  Get an MPI_DATATYPE representing binned complex double precision.
 *
 * Creates (if it has not already been created) and returns a datatype handle
 * for an MPI datatype that represents a binned complex double precision type.
 *
 * This method may call @c MPI_Type_contiguous() and @c MPI_Type_commit().
 * If there is an error, this method will call @c MPI_Abort().
 *
 * @param fold the fold of the binned types
 *
 * @author Peter Ahrens
 * @date   18 Jun 2016
 */
MPI_Datatype binnedMPI_DOUBLE_COMPLEX_BINNED(const int fold){
  int rc;
  if(!types_initialized[fold]){
    rc = MPI_Type_contiguous(binned_zbnum(fold), MPI_DOUBLE, types + fold);
    if(rc != MPI_SUCCESS){
      if (rc == MPI_ERR_TYPE) {
        fprintf(stderr, "[%s.%d] ReproBLAS error: MPI_Type_contiguous error: MPI_ERR_TYPE\n", __FILE__, __LINE__);
      } else if (rc == MPI_ERR_COUNT) {
        fprintf(stderr, "[%s.%d] ReproBLAS error: MPI_Type_contiguous error: MPI_ERR_COUNT\n", __FILE__, __LINE__);
      } else if (rc == MPI_ERR_INTERN) {
        fprintf(stderr, "[%s.%d] ReproBLAS error: MPI_Type_contiguous error: MPI_ERR_INTERN\n", __FILE__, __LINE__);
      } else {
        fprintf(stderr, "[%s.%d] ReproBLAS error: MPI_Type_contiguous error: %d\n", __FILE__, __LINE__, rc);
      }
      MPI_Abort(MPI_COMM_WORLD, rc);
      return 0;
    }
    rc = MPI_Type_commit(types + fold);
    if(rc != MPI_SUCCESS){
      if (rc == MPI_ERR_TYPE) {
        fprintf(stderr, "[%s.%d] ReproBLAS error: MPI_Type_commit error: MPI_ERR_TYPE\n", __FILE__, __LINE__);
      } else {
        fprintf(stderr, "[%s.%d] ReproBLAS error: MPI_Type_commit error: %d\n", __FILE__, __LINE__, rc);
      }
      MPI_Abort(MPI_COMM_WORLD, rc);
      return 0;
    }
    types_initialized[fold] = 1;
  }
  return types[fold];
}
Example 25
int makeHDF5type1(MPI_Datatype *type)
{
    MPI_Datatype ctg, vect, structype, vec2, structype2,
                 vec3, structype3, vec4, structype4, vec5;

    int b[3];
    MPI_Aint d[3];
    MPI_Datatype t[3];
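    /* Note: the MPI_LB/MPI_UB markers used below were removed in MPI-3.0;
     * MPI_Type_create_resized is the modern way to set a type's lb and extent. */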

    MPI_Type_contiguous(4, MPI_BYTE, &ctg);

    MPI_Type_vector(1, 5, 1, ctg, &vect);

    b[0] =         b[1] =       b[2] = 1;
    d[0] = 0;      d[1] = 20;    d[2] = 40;
    t[0] = MPI_LB; t[1] = vect; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, &structype);

    MPI_Type_vector(1, 5, 1, structype, &vec2);

    b[0] =         b[1] =        b[2] = 1;
    d[0] = 0;      d[1] = 0;     d[2] = 400;
    t[0] = MPI_LB; t[1] = vec2; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, &structype2);

    MPI_Type_vector(1, 5, 1, structype2, &vec3);

    b[0] =         b[1] =        b[2] = 1;
    d[0] = 0;      d[1] = 0;     d[2] = 4000;
    t[0] = MPI_LB; t[1] = vec3; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, &structype3);

    MPI_Type_vector(1, 5, 1, structype3, &vec4);

    b[0] =         b[1] =        b[2] = 1;
    d[0] = 0;      d[1] = 0;     d[2] = 40000;
    t[0] = MPI_LB; t[1] = vec4; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, &structype4);

    MPI_Type_vector(1, 1, 1, structype4, &vec5);

    b[0] =         b[1] =         b[2] = 1;
    d[0] = 0;      d[1] = 160000; d[2] = 200000;
    t[0] = MPI_LB; t[1] = vec5; t[2] = MPI_UB;
    MPI_Type_create_struct(3, b, d, t, type);

    MPI_Type_free(&ctg);
    MPI_Type_free(&vect);
    MPI_Type_free(&structype);
    MPI_Type_free(&vec2);
    MPI_Type_free(&structype2);
    MPI_Type_free(&vec3);
    MPI_Type_free(&structype3);
    MPI_Type_free(&vec4);
    MPI_Type_free(&structype4);
    MPI_Type_free(&vec5);
    MPI_Type_commit(type);

    return 0;
}
Example 26
 /* Create the triplet datatype and the two reduction ops used with it */
 void setup () {
   /* Set up the transmission channel */
   MPI_Type_contiguous (3, MPI_DOUBLE, &MPI_triplet);
   MPI_Type_commit (&MPI_triplet);
   MPI_Op_create (mpi_triplet_max,false,&MPI_MAX_OP);
   MPI_Op_create (mpi_triplet_min,false,&MPI_MIN_OP);
 }
Example 27
void exchange_boundary(field v, int Lx, int Ly)
{
  MPI_Status status[8];
  MPI_Request req[8];

  MPI_Datatype Row;
  MPI_Type_contiguous(Lx+2, MPI_DOUBLE, &Row);
  MPI_Type_commit(&Row);

  MPI_Isend(&v[Ly][0],   1, Row, decomp.north, 0, MPI_COMM_WORLD, &req[0]);
  MPI_Irecv(&v[Ly+1][0], 1, Row, decomp.north, 0, MPI_COMM_WORLD, &req[1]);

  MPI_Isend(&v[1][0], 1, Row, decomp.south, 0, MPI_COMM_WORLD, &req[2]);
  MPI_Irecv(&v[0][0], 1, Row, decomp.south, 0, MPI_COMM_WORLD, &req[3]);

  MPI_Datatype Column;
  MPI_Type_vector(Ly+2, 1, Lx+2, MPI_DOUBLE, &Column);
  MPI_Type_commit(&Column);

  MPI_Isend(&v[0][Lx],   1, Column, decomp.east, 0, MPI_COMM_WORLD, &req[4]);
  MPI_Irecv(&v[0][Lx+1], 1, Column, decomp.east, 0, MPI_COMM_WORLD, &req[5]);

  MPI_Isend(&v[0][1], 1, Column, decomp.west, 0, MPI_COMM_WORLD, &req[6]);
  MPI_Irecv(&v[0][0], 1, Column, decomp.west, 0, MPI_COMM_WORLD, &req[7]);

  /* MPI_Irecv yields a request, not a status; wait on all eight operations. */
  MPI_Waitall(8, req, status);

  MPI_Type_free(&Row);
  MPI_Type_free(&Column);
}
Example 28
 inline MPI_Datatype build_mpi_datatype_for_bool()
 {
   MPI_Datatype type;
   MPI_Type_contiguous(sizeof(bool), MPI_BYTE, &type);
   MPI_Type_commit(&type);
   return type;
 }
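Each call above creates and commits a fresh, never-freed datatype. A cached variant (a sketch with a hypothetical name, not the original helper) builds it only once; note it is not thread-safe as written:

 inline MPI_Datatype build_mpi_datatype_for_bool_cached()
 {
   static MPI_Datatype type = MPI_DATATYPE_NULL;
   if (type == MPI_DATATYPE_NULL) {
     MPI_Type_contiguous(sizeof(bool), MPI_BYTE, &type);
     MPI_Type_commit(&type);
   }
   return type;
 }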
Example 29
void scatter(const char *pName, int rank, int size, const int gran, body *bodies){
	int i, j;
	int sendto = (rank + 1) % size;
	int recvfrom = ((rank + size) - 1) % size;
	
	MPI_Datatype bodytype;
	MPI_Type_contiguous(3, MPI_DOUBLE, &bodytype);
	MPI_Type_commit(&bodytype);
	MPI_Status status;
	
	body *outbuf = (body *) malloc(gran*sizeof(body));
	if(rank==0){
		FILE *pFile;
		pFile = fopen(pName, "rb");
		for(j=0; j<gran; j++)
			fscanf(pFile,"%lf %lf %lf", &bodies[j].x, &bodies[j].y, &bodies[j].m);					
		for(i=0; i<size-rank-1; i++){
			for(j=0; j<gran; j++)
				fscanf(pFile,"%lf %lf %lf", &outbuf[j].x, &outbuf[j].y, &outbuf[j].m);
			MPI_Send(outbuf, gran, bodytype, sendto, 0, MPI_COMM_WORLD);
		}	
		fclose(pFile);
	}	
	else{
		MPI_Recv(bodies, gran, bodytype, recvfrom, 0, MPI_COMM_WORLD, &status);
		for(i=0; i<size-rank-1; i++){
			MPI_Recv(outbuf, gran, bodytype, recvfrom, 0, MPI_COMM_WORLD, &status);
			MPI_Send(outbuf, gran, bodytype, sendto, 0, MPI_COMM_WORLD);
		}	
	}
	free(outbuf);
}
Example 30
int main(int argc, char *argv[])
{
  int np, rank;
  int size = 0;
  MPI_Init(&argc, &argv);
  
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &np);
  
  int i = rank + 1, sum = 0;
  
  MPI_Datatype ctype;
  MPI_Type_contiguous(2,MPI_DOUBLE,&ctype);
  MPI_Type_commit(&ctype);
  
  complex src = {1, 1};
  complex dst = {0, 0};
  
  //MPI_Type_size(ctype, &size);
  MPI_TReduce(&i, &sum, 1, MPI_INT, (union_func)myadd, MPI_COMM_WORLD);
  //printf("leng cnt is %d\n", size);
  MPI_TReduce(&src, &dst, 1, ctype, (union_func)cprod, MPI_COMM_WORLD); 
  
  // output result
  if(rank == 0) { 
    printf("reduce result is %d\n", sum);
    printf("complex reduce result is %lf + %lf * i\n", dst.real, dst.imag);
  }
  MPI_Finalize();
  return 0;
}
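MPI_TReduce and union_func are project-specific helpers, not part of the MPI standard. A sketch of the same complex-product reduction using only standard MPI calls, assuming complex is a struct of two doubles (real, imag) and using a hypothetical cprod_op callback, would be:

static void cprod_op(void *in, void *inout, int *len, MPI_Datatype *dt)
{
  complex *a = (complex *) in;
  complex *b = (complex *) inout;
  int i;

  (void) dt;                                    /* always ctype */
  for (i = 0; i < *len; i++) {
    double re = a[i].real * b[i].real - a[i].imag * b[i].imag;
    double im = a[i].real * b[i].imag + a[i].imag * b[i].real;
    b[i].real = re;
    b[i].imag = im;
  }
}

static void reduce_complex_product(complex *src, complex *dst, MPI_Datatype ctype)
{
  MPI_Op prod_op;

  MPI_Op_create(cprod_op, 1 /* complex multiplication is commutative */, &prod_op);
  MPI_Reduce(src, dst, 1, ctype, prod_op, 0, MPI_COMM_WORLD);
  MPI_Op_free(&prod_op);
}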