Example #1
int DefineMPITypes()
{
  Winspecs winspecs;
  Flags flags;
  rect rectangle;

  int len[3];
  MPI_Aint disp[3];
  MPI_Datatype types[3];

  NUM_type = MPI_DOUBLE;

  MPI_Type_contiguous(6, MPI_INT, &winspecs_type);
  MPI_Type_commit(&winspecs_type);

  len[0] = 10;
  len[1] = 2;
  len[2] = 6;
  disp[0] = (char *) (&(flags.breakout)) - (char *) (&(flags));
  disp[1] = (char *) (&(flags.boundary_sq)) - (char *) (&(flags));
  disp[2] = (char *) (&(flags.rmin)) - (char *) (&(flags));
  types[0] = MPI_INT;
  types[1] = MPI_DOUBLE;
  types[2] = NUM_type;
  MPI_Type_struct(3, len, disp, types, &flags_type);
  MPI_Type_commit(&flags_type);

  len[0] = 5;
  disp[0] = (char *) (&(rectangle.l)) - (char *) (&(rectangle));
  types[0] = MPI_INT;
  MPI_Type_struct(1, len, disp, types, &rect_type);
  MPI_Type_commit(&rect_type);

  return 0;
}
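Note: MPI_Type_struct and int-valued displacements are the old MPI-1 interface; MPI_Type_struct was deprecated and removed in MPI-3.0 in favor of MPI_Type_create_struct, which takes MPI_Aint displacements. Below is a minimal sketch of the flags_type construction in that style, using offsetof and a hypothetical Flags layout (the real Winspecs/Flags definitions are not shown in this example).

#include <mpi.h>
#include <stddef.h>

typedef struct {              /* hypothetical stand-in for the real Flags */
    int    breakout;          /* first of the 10 int fields               */
    int    other_ints[9];
    double boundary_sq;       /* first of the 2 double fields             */
    double other_double;
    double rmin;              /* first of the 6 NUM_type fields           */
    double other_nums[5];
} Flags;

static MPI_Datatype flags_type;

static void define_flags_type(void)
{
    int          len[3]   = { 10, 2, 6 };
    MPI_Aint     disp[3]  = { offsetof(Flags, breakout),
                              offsetof(Flags, boundary_sq),
                              offsetof(Flags, rmin) };
    MPI_Datatype types[3] = { MPI_INT, MPI_DOUBLE, MPI_DOUBLE };

    MPI_Type_create_struct(3, len, disp, types, &flags_type);
    MPI_Type_commit(&flags_type);
}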
Example #2
int main(int argc, char *argv[])
{
    MPI_Datatype mystruct, vecs[3];
    MPI_Aint stride = 5, displs[3];
    int i=0, blockcount[3];
    int errs=0;

    MTest_Init( &argc, &argv );

    for(i = 0; i < 3; i++)
    {
        MPI_Type_hvector(i, 1, stride, MPI_INT, &vecs[i]);
        MPI_Type_commit(&vecs[i]);
        blockcount[i]=1;
    }
    displs[0]=0; displs[1]=-100; displs[2]=-200; /* irrelevant */

    MPI_Type_struct(3, blockcount, displs, vecs, &mystruct);
    MPI_Type_commit(&mystruct);

    MPI_Type_free(&mystruct);
    for(i = 0; i < 3; i++)
    {
        MPI_Type_free(&vecs[i]);
    }

    /* this time with the first argument always 0 */
    for(i = 0; i < 3; i++)
    {
        MPI_Type_hvector(0, 1, stride, MPI_INT, &vecs[i]);
        MPI_Type_commit(&vecs[i]);
        blockcount[i]=1;
    }
    displs[0]=0; displs[1]=-100; displs[2]=-200; /* irrelevant */

    MPI_Type_struct(3, blockcount, displs, vecs, &mystruct);
    MPI_Type_commit(&mystruct);

    MPI_Type_free(&mystruct);
    for(i = 0; i < 3; i++)
    {
        MPI_Type_free(&vecs[i]);
    }

    MTest_Finalize( errs );
    MPI_Finalize();

    return 0;
}
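The displacements in this test are marked "irrelevant" because an hvector with count 0 describes no data, so those struct members contribute nothing. A small self-contained sketch (using the non-deprecated MPI_Type_create_hvector / MPI_Type_create_struct calls) that prints the size and extent of such a type, so the effect of a zero-count member can be inspected:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    MPI_Datatype vec, mystruct;
    MPI_Aint     stride = 5, lb, extent, displ = -100;      /* displacement is irrelevant here */
    int          blockcount = 1, size;

    MPI_Init(&argc, &argv);

    MPI_Type_create_hvector(0, 1, stride, MPI_INT, &vec);   /* count 0: an empty type */
    MPI_Type_create_struct(1, &blockcount, &displ, &vec, &mystruct);
    MPI_Type_commit(&mystruct);

    MPI_Type_size(mystruct, &size);                          /* expected: 0 bytes of data */
    MPI_Type_get_extent(mystruct, &lb, &extent);
    printf("size=%d lb=%ld extent=%ld\n", size, (long)lb, (long)extent);

    MPI_Type_free(&mystruct);
    MPI_Type_free(&vec);
    MPI_Finalize();
    return 0;
}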
Example #3
 void peano::applications::poisson::jacobitutorial::records::RegularGridCell::initDatatype() {
    const int Attributes = 1;
    MPI_Datatype subtypes[Attributes] = {
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridCell dummyRegularGridCell[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridCell[0]))), &base);
    // End/displacement entry: without this call, disp[0] would be used uninitialized below.
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridCell[1]))), &disp[0] );
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridCell::Datatype );
    MPI_Type_commit( &RegularGridCell::Datatype );
    
 }
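The MPI_UB entry in this generated Peano code exists only to set the extent of the datatype to the size of one RegularGridCell, so that arrays of records can be exchanged. MPI_UB (and MPI_LB) were deprecated and removed in MPI-3.0; the replacement is MPI_Type_create_resized. A minimal sketch of the same idea with a hypothetical record, since the fields of RegularGridCell are not shown here:

#include <mpi.h>
#include <stddef.h>

typedef struct {              /* hypothetical record standing in for RegularGridCell */
    double value;
    int    flag;
} Record;

static MPI_Datatype record_type;

static void init_record_type(void)
{
    int          blocklen[2] = { 1, 1 };
    MPI_Aint     disp[2]     = { offsetof(Record, value), offsetof(Record, flag) };
    MPI_Datatype types[2]    = { MPI_DOUBLE, MPI_INT };
    MPI_Datatype tmp;

    MPI_Type_create_struct(2, blocklen, disp, types, &tmp);
    /* Set the extent to sizeof(Record) -- the role the MPI_UB entry plays above. */
    MPI_Type_create_resized(tmp, 0, (MPI_Aint)sizeof(Record), &record_type);
    MPI_Type_free(&tmp);
    MPI_Type_commit(&record_type);
}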
Example #4
File: BBLSGraph.cpp  Project: Jnesselr/BBLS
void BBLSGraph::createDatatypes() {
	// BBLSNode struct
	int block_lengths[5];
	block_lengths[0] = 1;
	block_lengths[1] = 1;
	block_lengths[2] = 1;
	block_lengths[3] = 1;
	block_lengths[4] = 1;
	
	MPI_Aint displacements[5];
	displacements[0] = offsetof(BBLSNode, type);
	displacements[1] = offsetof(BBLSNode, output);
	displacements[2] = offsetof(BBLSNode, inputLeft);
	displacements[3] = offsetof(BBLSNode, inputRight);
	displacements[4] = sizeof(BBLSNode);
	
	MPI_Datatype types[5];
	types[0] = MPI_INT;
	types[1] = MPI_UNSIGNED;
	types[2] = MPI_UNSIGNED;
	types[3] = MPI_UNSIGNED;
	types[4] = MPI_UB;
	
	MPI_Type_struct(5, block_lengths, displacements, types, &mpi_nodeType);
	MPI_Type_commit(&mpi_nodeType);
	
	// 3 BBLSNodes
	MPI_Type_contiguous(3, mpi_nodeType, &mpi_threeNodes);
	MPI_Type_commit(&mpi_threeNodes);
}
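The MPI_UB entry at sizeof(BBLSNode) is what makes mpi_threeNodes line up with a C array of three BBLSNode structs. Below is a sketch of the same construction with MPI_Type_create_struct and MPI_Type_create_resized, including a check that the committed type's extent really equals sizeof(BBLSNode); the struct layout is an assumption based on the fields used above (the enum field is mapped to MPI_INT, as in the original).

#include <mpi.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {                   /* assumed layout mirroring the fields above */
    int      type;                 /* an enum in the real code                  */
    unsigned output, inputLeft, inputRight;
} BBLSNode;

int main(int argc, char *argv[])
{
    int          blen[4]  = { 1, 1, 1, 1 };
    MPI_Aint     disp[4]  = { offsetof(BBLSNode, type),      offsetof(BBLSNode, output),
                              offsetof(BBLSNode, inputLeft), offsetof(BBLSNode, inputRight) };
    MPI_Datatype types[4] = { MPI_INT, MPI_UNSIGNED, MPI_UNSIGNED, MPI_UNSIGNED };
    MPI_Datatype node_t, mpi_nodeType, mpi_threeNodes;
    MPI_Aint     lb, extent;

    MPI_Init(&argc, &argv);

    MPI_Type_create_struct(4, blen, disp, types, &node_t);
    MPI_Type_create_resized(node_t, 0, (MPI_Aint)sizeof(BBLSNode), &mpi_nodeType);
    MPI_Type_commit(&mpi_nodeType);
    MPI_Type_contiguous(3, mpi_nodeType, &mpi_threeNodes);
    MPI_Type_commit(&mpi_threeNodes);

    MPI_Type_get_extent(mpi_nodeType, &lb, &extent);
    printf("extent=%ld sizeof(BBLSNode)=%zu\n", (long)extent, sizeof(BBLSNode));

    MPI_Type_free(&mpi_threeNodes);
    MPI_Type_free(&mpi_nodeType);
    MPI_Type_free(&node_t);
    MPI_Finalize();
    return 0;
}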
Example #5
File: type.c  Project: mfvalin/serial-mpi
FC_FUNC( mpi_type_struct, MPI_TYPE_STRUCT )
         (int * count,       int * blocklens, long * displacements,
          int *oldtypes_ptr, int *newtype,    int *ierror)
{
  *ierror=MPI_Type_struct(*count, blocklens, displacements, 
                                    oldtypes_ptr, newtype);
}
Example #6
 void peano::applications::navierstokes::prototype1::repositories::PrototypeRepositoryStatePacked::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_CHAR,		 //reduceState
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1,		 //reduceState
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    PrototypeRepositoryStatePacked dummyPrototypeRepositoryStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._reduceState))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[1]._persistentRecords._action))), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &PrototypeRepositoryStatePacked::Datatype );
    MPI_Type_commit( &PrototypeRepositoryStatePacked::Datatype );
    
 }
Example #7
 void peano::kernel::regulargrid::tests::records::TestCell::initDatatype() {
    const int Attributes = 1;
    MPI_Datatype subtypes[Attributes] = {
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    TestCell dummyTestCell[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyTestCell[0]))), &base);
    // End/displacement entry: without this call, disp[0] would be used uninitialized below.
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyTestCell[1]))), &disp[0] );
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &TestCell::Datatype );
    MPI_Type_commit( &TestCell::Datatype );
    
 }
Example #8
int main( int argc, char *argv[] )
{
    int    rank, size;
    double dbuff = 0x0;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    if ( rank != size-1 ) {
        /* create pathological case */
        MPI_Datatype types[2] = { MPI_INT, MPI_FLOAT };
        int          blks[2]  = { 1, 1};
        MPI_Aint     displs[2] = {0, sizeof(float) };
        MPI_Datatype flt_int_type;
        MPI_Type_struct( 2, blks, displs, types, &flt_int_type );
        MPI_Type_commit( &flt_int_type );
        MPI_Bcast( &dbuff, 1, flt_int_type, 0, MPI_COMM_WORLD );
        MPI_Type_free( &flt_int_type );
    }
    else
        MPI_Bcast( &dbuff, 1, MPI_FLOAT_INT, 0, MPI_COMM_WORLD );

    MPI_Finalize();
    return 0;
}
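This test deliberately builds a "pathological" type whose first entry is an int while the last rank uses the predefined pair type MPI_FLOAT_INT, so the two sides disagree on the layout. The MPI standard describes MPI_FLOAT_INT as if it were built from a struct holding a float followed by an int; a minimal sketch of a user-constructed type that does match that layout:

#include <mpi.h>
#include <stddef.h>

struct float_int { float value; int index; };   /* the layout MPI_FLOAT_INT is described with */

static void build_float_int(MPI_Datatype *newtype)
{
    int          blks[2]   = { 1, 1 };
    MPI_Aint     displs[2] = { offsetof(struct float_int, value),
                               offsetof(struct float_int, index) };
    MPI_Datatype types[2]  = { MPI_FLOAT, MPI_INT };

    MPI_Type_create_struct(2, blks, displs, types, newtype);
    MPI_Type_commit(newtype);
}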
Example #9
 void peano::applications::latticeboltzmann::blocklatticeboltzmann::forcerecords::BlockPositionPacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //_blockPosition
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       DIMENSIONS,		 //_blockPosition
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    BlockPositionPacked dummyBlockPositionPacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]._persistentRecords._blockPosition[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyBlockPositionPacked[1]._persistentRecords._blockPosition[0])), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockPositionPacked::Datatype );
    MPI_Type_commit( &BlockPositionPacked::Datatype );
    
 }
Example #10
File: derived.c  Project: ajdecon/play
void Build_type( float* a, float* b, int* n, MPI_Datatype* point_t ) {

    int block_lengths[3];
    MPI_Aint displacements[3];
    MPI_Datatype typelist[3];
    MPI_Aint start_address;
    MPI_Aint address;

    block_lengths[0] = block_lengths[1] = block_lengths[2] = 1;
    typelist[0] = MPI_FLOAT;
    typelist[1] = MPI_FLOAT;
    typelist[2] = MPI_INT;

    displacements[0] = 0;
    MPI_Address(a, &start_address);
    MPI_Address(b, &address);
    displacements[1] = address - start_address;
    
    MPI_Address(n, &address);
    displacements[2] = address - start_address;

    MPI_Type_struct(3, block_lengths, displacements, typelist, point_t);
    MPI_Type_commit(point_t);

}
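MPI_Address was deprecated along with MPI_Type_struct and removed in MPI-3.0; the replacements are MPI_Get_address and MPI_Type_create_struct (MPI-3.1 additionally offers MPI_Aint_diff for the subtraction). A sketch of the same Build_type routine written with those calls:

#include <mpi.h>

/* Sketch: same construction as Build_type above, using the MPI-3 interface. */
void Build_type_modern(float *a, float *b, int *n, MPI_Datatype *point_t)
{
    int          block_lengths[3] = { 1, 1, 1 };
    MPI_Aint     displacements[3];
    MPI_Datatype typelist[3]      = { MPI_FLOAT, MPI_FLOAT, MPI_INT };
    MPI_Aint     start_address, address;

    MPI_Get_address(a, &start_address);
    displacements[0] = 0;

    MPI_Get_address(b, &address);
    displacements[1] = address - start_address;

    MPI_Get_address(n, &address);
    displacements[2] = address - start_address;

    MPI_Type_create_struct(3, block_lengths, displacements, typelist, point_t);
    MPI_Type_commit(point_t);
}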
Example #11
 void peano::applications::latticeboltzmann::blocklatticeboltzmann::repositories::BlockLatticeBoltzmannBatchJobRepositoryState::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_CHAR,		 //reduceState
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1,		 //reduceState
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    BlockLatticeBoltzmannBatchJobRepositoryState dummyBlockLatticeBoltzmannBatchJobRepositoryState[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._reduceState))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[1]._persistentRecords._action))), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
    MPI_Type_commit( &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
    
 }
Example #12
 void peano::integration::partitioncoupling::builtin::records::ForceTorquePacked::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //_translationalForce
       MPI_DOUBLE,		 //_torque
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       3,		 //_translationalForce
       3,		 //_torque
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    ForceTorquePacked dummyForceTorquePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._translationalForce[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._torque[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyForceTorquePacked[1]._persistentRecords._translationalForce[0])), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &ForceTorquePacked::Datatype );
    MPI_Type_commit( &ForceTorquePacked::Datatype );
    
 }
Example #13
File: rnemd.c  Project: sabeiro/Allink
void rnemd_init(struct beads *b)
{
	inuse = 1;

	if (N % 2 == 1) 
		fatal(EINVAL, "rnemd: N must be even");
	if (dd < 0 || dd >= 3 || dg < 0 || dg >= 3)
		fatal(EINVAL, "rnemd: gradient / velocity invalid");

	printf("rnemd: slabs=%ld swaps=%ld gradient=%ld velocity=%ld\n",
								N, sw, dg, dd);
	assert(N && sw);
	int blocklens[2] = {1, 2};
	struct rnemd_list q;
	ptrdiff_t indices[2] = {(ptrdiff_t)&q.v - (ptrdiff_t)&q,
		(ptrdiff_t)&q.pos - (ptrdiff_t)&q};
	MPI_Datatype old_types[2] = {MPI_DOUBLE, MPI_INT};
	MPI_Type_struct(ARRAY_SIZE(blocklens), blocklens, indices, old_types, &rnemd_type);
	MPI_Type_commit(&rnemd_type);
	
	MPI_Comm_rank(comm_grid, &rank);
	MPI_Comm_size(comm_grid, &size);
				
	max = MAX(0x10000, 2 * sw * size);
	list = calloc(max, sizeof(*list));
	if (list == NULL) novm("rnemd: list");

	t0 = b->time;
}
Example #14
void Build_derived_type(border* indata, MPI_Datatype* message_type_ptr){
  int block_lengths[3];

  MPI_Aint displacements[3];
  MPI_Aint addresses[4];
  MPI_Datatype typelist[3];

  /* Create a derived datatype containing three ints */

  /* First, specify the types of the elements */

  typelist[0]=MPI_INT;
  typelist[1]=MPI_INT; 
  typelist[2]=MPI_INT;

 

  /* Specify the number of elements of each type */
  block_lengths[0]=block_lengths[1]=block_lengths[2] = 1;
  
  /* Compute the displacements of the elements relative to indata */
  MPI_Address(indata, &addresses[0]);
  MPI_Address(&(indata->left), &addresses[1]);
  MPI_Address(&(indata->right), &addresses[2]);
  MPI_Address(&(indata->length), &addresses[3]);

  displacements[0]=addresses[1]-addresses[0];
  displacements[1]=addresses[2]-addresses[0];
  displacements[2]=addresses[3]-addresses[0];
  
  /* Create the derived type */
  MPI_Type_struct(3, block_lengths, displacements,typelist, message_type_ptr);
  /* Commit it for use */
  MPI_Type_commit(message_type_ptr);
} /* Build_derived_type */
Example #15
File: b.c  Project: akihiko-fujii/mpi1
void mytype_commit(struct mystruct value){

  MPI_Aint indices[3];
  int blocklens[3];
  MPI_Datatype old_types[3];

  old_types[0] = MPI_CHAR;
  old_types[1] = MPI_INT;
  old_types[2] = MPI_DOUBLE;

  blocklens[0] = 1;
  blocklens[1] = 3;
  blocklens[2] = 5;

  MPI_Address(&value.ch, &indices[0]);
  MPI_Address(&value.a, &indices[1]);
  MPI_Address(&value.x, &indices[2]);

  indices[2] = indices[2] - indices[0];
  indices[1] = indices[1] - indices[0];
  indices[0] = 0;

  MPI_Type_struct(3,blocklens,indices,old_types,&mpistruct);

  MPI_Type_commit(&mpistruct);
}
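A caveat with this kind of type: the extent of the struct datatype built above is derived from its entries (rounded up only by an alignment epsilon), so it does not necessarily equal sizeof(struct mystruct), and a send with count > 1 may not walk a C array of the struct correctly. A sketch that resizes the type to the C struct size, assuming a layout of char ch; int a[3]; double x[5]; as suggested by the block lengths:

#include <mpi.h>
#include <stddef.h>

struct mystruct {                 /* assumed layout matching the block lengths above */
    char   ch;
    int    a[3];
    double x[5];
};

static MPI_Datatype mpistruct;

static void mytype_commit_resized(void)
{
    int          blocklens[3] = { 1, 3, 5 };
    MPI_Aint     indices[3]   = { offsetof(struct mystruct, ch),
                                  offsetof(struct mystruct, a),
                                  offsetof(struct mystruct, x) };
    MPI_Datatype old_types[3] = { MPI_CHAR, MPI_INT, MPI_DOUBLE };
    MPI_Datatype tmp;

    MPI_Type_create_struct(3, blocklens, indices, old_types, &tmp);
    /* Stretch the extent to sizeof(struct mystruct) so that a send with
       count > 1 steps through a C array of the struct, padding included. */
    MPI_Type_create_resized(tmp, 0, (MPI_Aint)sizeof(struct mystruct), &mpistruct);
    MPI_Type_free(&tmp);
    MPI_Type_commit(&mpistruct);
}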
Example #16
 void peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    FaxenBatchJobRepositoryStatePacked dummyFaxenBatchJobRepositoryStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[1]._persistentRecords._action))), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &FaxenBatchJobRepositoryStatePacked::Datatype );
    MPI_Type_commit( &FaxenBatchJobRepositoryStatePacked::Datatype );
    
 }
Example #17
static void 
InitializeMPIStuff(void)
{
    const int n = 5;
    int          lengths[n]       = {1, 1, 1, 1, 1};
    MPI_Aint     displacements[n] = {0, 0, 0, 0, 0};
    MPI_Datatype types[n] = {MPI_FLOAT,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR};

    // create the MPI data type for Pixel
    Pixel onePixel;
    MPI_Address(&onePixel.z, &displacements[0]);
    MPI_Address(&onePixel.r, &displacements[1]);
    MPI_Address(&onePixel.g, &displacements[2]);
    MPI_Address(&onePixel.b, &displacements[3]);
    MPI_Address(&onePixel.a, &displacements[4]);
    for (int i = n-1; i >= 0; i--)
        displacements[i] -= displacements[0];
    MPI_Type_struct(n, lengths, displacements, types,
                    &mpiTypePixel);
    MPI_Type_commit(&mpiTypePixel);

    // and the merge operation for a reduction
    MPI_Op_create((MPI_User_function *)MergePixelBuffersOp, 1,
                  &mpiOpMergePixelBuffers);
}
Example #18
 void tarch::parallel::messages::RegisterAtNodePoolMessagePacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_SHORT,		 //nodeName
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       MPI_MAX_NAME_STRING_ADDED_ONE,		 //nodeName
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegisterAtNodePoolMessagePacked dummyRegisterAtNodePoolMessagePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]._persistentRecords._nodeName[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyRegisterAtNodePoolMessagePacked[1]._persistentRecords._nodeName[0])), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegisterAtNodePoolMessagePacked::Datatype );
    MPI_Type_commit( &RegisterAtNodePoolMessagePacked::Datatype );
    
 }
Example #19
File: fish.c  Project: blickly/ptii
void
make_fishtype (MPI_Datatype* fishtype)
{
  int err, i;

  /* QQQ: How does the data type affect performance? */
#if 1
  MPI_Aint disp[8];
  MPI_Datatype types[8] = { MPI_LB, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE,
			    MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE, MPI_UB };
  int blocklen[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };
  fish_t example[2];

  MPI_Address (&example[0], &disp[0]);
  MPI_Address (&example[0].x, &disp[1]);
  MPI_Address (&example[0].y, &disp[2]);
  MPI_Address (&example[0].vx, &disp[3]);
  MPI_Address (&example[0].vy, &disp[4]);
  MPI_Address (&example[0].ax, &disp[5]);
  MPI_Address (&example[0].ay, &disp[6]);
  MPI_Address (&example[1], &disp[7]);
  for (i = 7; i >= 0; --i) disp[i] -= disp[0];

  err = MPI_Type_struct (8, &blocklen[0], &disp[0], &types[0], fishtype);
#elif 0
  MPI_Aint disp[2];
  MPI_Aint base;
  MPI_Datatype types[2] = { MPI_DOUBLE, MPI_UB };
  int blocklen[2] = { 6, 1 };
  fish_t example[2];

  MPI_Address (&example[0], &base);
  MPI_Address (&example[0].x, &disp[0]);
  MPI_Address (&example[1], &disp[1]);
  disp[0] -= base;
  disp[1] -= base;
  err = MPI_Type_struct (2, blocklen, disp, types, fishtype);
#else
  err = MPI_Type_contiguous (6, MPI_DOUBLE, fishtype);
#endif

  if (err) {
    fprintf (stderr, "Error creating type: %d\n", err);
    MPI_Abort (MPI_COMM_WORLD, -29);
  }
  MPI_Type_commit (fishtype);
}
Example #20
File: structType.c  Project: bertuccio/ASP
int main (int argc, char *argv[]){

	int   i,
		  numtasks, rank;
	int tag=1;
	float a[16] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0};
	float b[SIZE];
	int blockcounts[2] ={4, 2};
	MPI_Datatype oldtypes[2] = {MPI_FLOAT, MPI_INT};
	int offsets[2];
	MPI_Aint extent;
	MPI_Status status;
	MPI_Datatype particletype;
	Particle particles[NELEMENTS], p[NELEMENTS];

	MPI_Init(&argc,&argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank); 
	MPI_Comm_size(MPI_COMM_WORLD, &numtasks);


	MPI_Type_extent(MPI_FLOAT,&extent);
	offsets[0]=0;
	offsets[1]=4*extent;
	MPI_Type_struct(2,blockcounts,offsets,oldtypes,&particletype);
	MPI_Type_commit(&particletype);



	if (rank == RANK_MASTER) {

		for(i=0;i<NELEMENTS;i++){

			particles[i].x=1;
     		particles[i].y=2;
     		particles[i].z=3;
     		particles[i].velocity=4;
     		particles[i].n=5;
     		particles[i].type=6;

		}


		for (i=0; i<numtasks; i++)
			MPI_Send(particles, NELEMENTS, particletype, i, tag, MPI_COMM_WORLD);
	}

	MPI_Recv(p, NELEMENTS, particletype, 0, tag, MPI_COMM_WORLD, &status);

	for(i=0;i<NELEMENTS;i++)
		printf("RANK #%d: %.1f %.1f %.1f %.1f %d %d\n", rank,p[i].x,
     p[i].y,p[i].z,p[i].velocity,p[i].n,p[i].type);


	MPI_Type_free(&particletype);

	MPI_Finalize();
	return 0;

}  /* end of main */
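MPI_Type_extent, used above, is deprecated and was removed in MPI-3.0; MPI_Type_get_extent is the replacement. Note also that offsets[1] = 4*extent(MPI_FLOAT) assumes the two ints start immediately after the four floats with no padding; with the actual Particle definition at hand, offsetof would be the safer way to compute that offset. A sketch of the same offset computation with the MPI-3 call:

#include <mpi.h>

/* Sketch: the particletype offsets computed with MPI_Type_get_extent.
   (With the real Particle definition available, offsetof(Particle, n)
   would be the more robust choice for offsets[1].) */
static void build_particletype(MPI_Datatype *particletype)
{
    int          blockcounts[2] = { 4, 2 };
    MPI_Datatype oldtypes[2]    = { MPI_FLOAT, MPI_INT };
    MPI_Aint     offsets[2], lb, float_extent;

    MPI_Type_get_extent(MPI_FLOAT, &lb, &float_extent);
    offsets[0] = 0;
    offsets[1] = 4 * float_extent;

    MPI_Type_create_struct(2, blockcounts, offsets, oldtypes, particletype);
    MPI_Type_commit(particletype);
}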
Example #21
File: pm_genproc.c  Project: pkestene/mpe
void 
DefineMPITypes()
{
  Flags flags;
  rect rectangle;
  MPI_Aint a, b;

  int len[4];
  MPI_Aint disp[4];
  MPI_Datatype types[4];

  NUM_type = MPI_DOUBLE;

  MPI_Type_contiguous( 8, MPI_INT, &winspecs_type );
  MPI_Type_commit( &winspecs_type );

  /* Skip the initial 4 pointers in flags, these should not
     be exchanged between processes.
   */
  len[0] = 12; /* 12 ints */
  len[1] = 2;  /* 2 doubles */
  len[2] = 6;  /* 6 NUM_types */

  MPI_Address( (void*)&flags.breakout, &a );
  MPI_Address( (void*)&flags, &b );
  disp[0] = a - b;
  MPI_Address( (void*)&flags.boundary_sq, &a );
  disp[1] = a - b;
  MPI_Address( (void*)&flags.rmin, &a );
  disp[2] = a - b;
  types[0] = MPI_INT;
  types[1] = MPI_DOUBLE;
  types[2] = NUM_type;
  MPI_Type_struct( 3, len, disp, types, &flags_type );
  MPI_Type_commit( &flags_type );

  len[0] = 5;
  MPI_Address( (void*)&rectangle.l, &a );
  MPI_Address( (void*)&rectangle, &b );
  disp[0] = a - b;
  types[0] = MPI_INT;
  MPI_Type_struct( 1, len, disp, types, &rect_type );
  MPI_Type_commit( &rect_type );

}
Example #22
MPI_Datatype avtImgCommunicator::createMetaDataType(){
  MPI_Datatype _imgMeta_mpi;
  const int numItems = 7;
  int blockLengths[numItems] = {1, 1, 1, 2, 2, 2, 1};
  MPI_Datatype type[numItems] = { MPI_INT, MPI_INT, MPI_INT, MPI_INT, MPI_INT, MPI_INT, MPI_FLOAT };
  MPI_Aint offsets[numItems] = {0, sizeof(int), sizeof(int)*2, sizeof(int)*3, sizeof(int)*5, sizeof(int)*7, sizeof(int)*9 };
  MPI_Type_struct(numItems, blockLengths,  offsets, type, &_imgMeta_mpi);
  
  return _imgMeta_mpi;   // note: not committed here; the caller must MPI_Type_commit before use
}
Example #23
MPI_Datatype createImgDataType(){ 
  MPI_Datatype _img_mpi;
  const int numItems = 8;
  int blockLengths[numItems] = {1, 1,   1, 1,   2, 2, 2, 1};
  MPI_Datatype type[numItems] = { MPI_INT, MPI_INT,    MPI_INT, MPI_INT,   MPI_INT, MPI_INT, MPI_INT,   MPI_FLOAT};
  MPI_Aint offsets[numItems] = {0, sizeof(int), sizeof(int)*2, sizeof(int)*3, sizeof(int)*4, sizeof(int)*6, sizeof(int)*8, sizeof(int)*10};
  MPI_Type_struct(numItems, blockLengths,  offsets, type, &_img_mpi);
  
  return _img_mpi;   // note: not committed here; the caller must MPI_Type_commit before use
}
Example #24
// define a MPI struct for communication
MPI_Datatype createPlayerMatchStruct(){
	int blocklen[1] = {11} ;
	MPI_Datatype oldtype[1] = {MPI_INT}, newtype ;
	MPI_Aint disp[1]; 	
	disp[0] = 0;

	MPI_Type_struct(1, blocklen, disp, oldtype, &newtype); 

	return newtype;   // note: not committed here; the caller must MPI_Type_commit before use

}
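Since this struct type consists of a single block of 11 MPI_INTs at displacement 0, MPI_Type_contiguous builds an equivalent datatype more directly; a minimal sketch (as in the original, the handle is returned uncommitted and the caller must commit it):

#include <mpi.h>

/* Equivalent construction: one contiguous block of 11 ints. */
MPI_Datatype createPlayerMatchStructContiguous(void)
{
    MPI_Datatype newtype;
    MPI_Type_contiguous(11, MPI_INT, &newtype);
    return newtype;
}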
Example #25
File: coll1.c  Project: Shurakai/SimGrid
int main( int argc, char **argv )
{
    int              rank, size, i;
    int             *table;
    int              errors=0;
    MPI_Aint         address;
    MPI_Datatype     type, newtype;
    int              lens;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    /* Make data table */
    table = (int *) calloc (size, sizeof(int));
    table[rank] = rank + 1;

    MPI_Barrier ( MPI_COMM_WORLD );
    /* Broadcast the data */
    for ( i=0; i<size; i++ ) 
      MPI_Bcast( &table[i], 1, MPI_INT, i, MPI_COMM_WORLD );

    /* See if we have the correct answers */
    for ( i=0; i<size; i++ )
      if (table[i] != i+1) errors++;

    MPI_Barrier ( MPI_COMM_WORLD );

    /* Try the same thing, but with a derived datatype */
    for ( i=0; i<size; i++ ) 
	table[i] = 0;
    table[rank] = rank + 1;
    for ( i=0; i<size; i++ ) {
        /* MPI_Address( &table[i], &address ); */
        address = 0;
        type    = MPI_INT;
        lens    = 1;
        MPI_Type_struct( 1, &lens, &address, &type, &newtype );
        MPI_Type_commit( &newtype );
        MPI_Bcast( &table[i], 1, newtype, i, MPI_COMM_WORLD );
        MPI_Type_free( &newtype );
    }
    /* See if we have the correct answers */
    for ( i=0; i<size; i++ )
      if (table[i] != i+1) errors++;

    MPI_Barrier ( MPI_COMM_WORLD );

    Test_Waitforall( );
    MPI_Finalize();
    if (errors)
      printf( "[%d] done with ERRORS!\n", rank );
    return errors;
}
Example #26
/*
 * This function sends the results collected by one worker back to
 * the master. It first assembles a result_buf in order to send
 * everything in one single operation.
 */
void p_vegasrewrite(binAccu r_Ab[FNMX], double r_d[NDMX][MXDIM], double r_di[NDMX][MXDIM])
{
  int i, j;
  
  /* assemble the send-buffer */
  for (j=0; j<functions; j++) {
    result_buf[j] = r_Ab[j].ti;
  }
  for (j=0; j<functions; j++) {
    result_buf[j+functions] = r_Ab[j].tsi;
  } 
  for (j=0; j<gndim; j++) {
    for (i=0; i<nd; i++) 
      result_buf[2*functions + j*nd + i] = r_d[i][j];
    for (i=0; i<nd; i++) 
      result_buf[2*functions + gndim*nd + j*nd + i] = r_di[i][j];
  }
  MPI_Send(result_buf, (2*functions + 2*(gndim*nd)), MPI_DOUBLE, 0, 1, MPI_COMM_WORLD);
  
  
// MARKUS: define MPI_HISTO for transmitting C histogram structs
  MPI_Datatype MPI_HISTO, oldtypes[2]; 
  int blockcounts[2];
  MPI_Aint offsets[2], extent;
  offsets[0] = 0;
  oldtypes[0] = MPI_DOUBLE;
  blockcounts[0] = 2*MXHISTOBINS;
  MPI_Type_extent(MPI_DOUBLE, &extent);
  offsets[1] = blockcounts[0] * extent;
  oldtypes[1] = MPI_INT;
  blockcounts[1] = 1*MXHISTOBINS; 
  MPI_Type_struct(2, blockcounts, offsets, oldtypes, &MPI_HISTO);
  MPI_Type_commit(&MPI_HISTO);   
  
  
  ReducedCHistogram CHisto[NUMHISTO];   /* MARKUS: declare C histograms */
  
  int SelectHisto;
  for(SelectHisto=1; SelectHisto<=NUMHISTO; SelectHisto++) {     /* MARKUS: loop over all histograms and copy fortran type into C struct */
       modkinematics_mp_getredhisto_(&CHisto[SelectHisto-1],&SelectHisto);
  };
//   printf("Printing final CHistograms \n");
//   for (j=0; j<NUMHISTO; j++) { 
//   for (i=0; i<MXHISTOBINS; i++) {
//       printf(" %i %i %20.8e \n",j+1,i+1,CHisto[j].Value[i]);
//   };
//   };
  
      
//   printf("worker sending histo struct: %20.6e \n",CHisto.Value[0]);
  MPI_Send(CHisto,NUMHISTO,MPI_HISTO,0,2,MPI_COMM_WORLD);    /* MARKUS: send the array of C histogram structs back to the master  */


}
Example #27
File: Comm.cpp  Project: eheien/vq
/*!
 Register the block ID/value MPI datatype and block sweep datatype.
 This must exactly match the contents of BlockVal and BlockSweepVals.
 */
VCComm::VCComm(void) {
#ifdef MPI_C_FOUND
    int             block_lengths[3];
    MPI_Aint        displacements[3];
    MPI_Datatype    datatypes[3];

    updateFieldCounts = updateFieldDisps = NULL;
    updateFieldSendBuf = updateFieldRecvBuf = NULL;
    updateFieldSendIDs = updateFieldRecvIDs = NULL;
    failBlockSendBuf = failBlockRecvBuf = NULL;

    // Register BlockVal datatype
    block_lengths[0] = block_lengths[1] = 1;    // 1 member for each block
    displacements[0] = 0;
    displacements[1] = sizeof(double);
    datatypes[0] = MPI_DOUBLE;
    datatypes[1] = MPI_INT;

    MPI_Type_struct(2, block_lengths, displacements, datatypes, &block_val_type);
    MPI_Type_commit(&block_val_type);

    // Register BlockVal related operations
    MPI_Op_create(BlockValMinimum, true, &bv_min_op);
    MPI_Op_create(BlockValMaximum, true, &bv_max_op);
    MPI_Op_create(BlockValSum, true, &bv_sum_op);

    // Register BlockSweepVals datatype
    block_lengths[0] = 5;
    block_lengths[1] = 1;
    block_lengths[2] = 1;
    displacements[0] = 0;
    displacements[1] = 5*sizeof(double);
    displacements[2] = 5*sizeof(double)+sizeof(unsigned int);
    datatypes[0] = MPI_DOUBLE;
    datatypes[1] = MPI_UNSIGNED;
    datatypes[2] = MPI_INT;

    MPI_Type_struct(3, block_lengths, displacements, datatypes, &element_sweep_type);
    MPI_Type_commit(&element_sweep_type);
#endif
}
Example #28
int DefineMPITypes()
{
	Flags flags;
	rect rectangle;
	MPI_Aint a, b;
	
	int len[3];
	MPI_Aint disp[3];
	MPI_Datatype types[3];
	
	NUM_type = MPI_DOUBLE;
	
	MPI_Type_contiguous(6, MPI_INT, &winspecs_type);
	MPI_Type_commit(&winspecs_type);
	
	len[0] = 10;
	len[1] = 2;
	len[2] = 6;
	MPI_Address((void*)&flags.breakout, &a);
	MPI_Address((void*)&flags, &b);
	disp[0] = a - b;
	MPI_Address((void*)&flags.boundary_sq, &a);
	disp[1] = a - b;
	MPI_Address((void*)&flags.rmin, &a);
	disp[2] = a - b;
	types[0] = MPI_INT;
	types[1] = MPI_DOUBLE;
	types[2] = NUM_type;
	MPI_Type_struct(3, len, disp, types, &flags_type);
	MPI_Type_commit(&flags_type);
	
	len[0] = 5;
	MPI_Address((void*)&rectangle.l, &a);
	MPI_Address((void*)&rectangle, &b);
	disp[0] = a - b;
	types[0] = MPI_INT;
	MPI_Type_struct(1, len, disp, types, &rect_type);
	MPI_Type_commit(&rect_type);
	
	return 0;
}
Example #29
 void peano::applications::poisson::multigrid::records::RegularGridState::initDatatype() {
    const int Attributes = 9;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //omega
       MPI_DOUBLE,		 //meshWidth
       MPI_DOUBLE,		 //numberOfInnerVertices
       MPI_DOUBLE,		 //numberOfBoundaryVertices
       MPI_DOUBLE,		 //numberOfOuterVertices
       MPI_DOUBLE,		 //numberOfInnerCells
       MPI_DOUBLE,		 //numberOfOuterCells
       MPI_CHAR,		 //gridIsStationary
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //omega
       DIMENSIONS,		 //meshWidth
       1,		 //numberOfInnerVertices
       1,		 //numberOfBoundaryVertices
       1,		 //numberOfOuterVertices
       1,		 //numberOfInnerCells
       1,		 //numberOfOuterCells
       1,		 //gridIsStationary
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridState dummyRegularGridState[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._omega))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._meshWidth[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfInnerVertices))), 		&disp[2] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfBoundaryVertices))), 		&disp[3] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfOuterVertices))), 		&disp[4] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfInnerCells))), 		&disp[5] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfOuterCells))), 		&disp[6] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._gridIsStationary))), 		&disp[7] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[1]._persistentRecords._omega))), 		&disp[8] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridState::Datatype );
    MPI_Type_commit( &RegularGridState::Datatype );
    
 }
Example #30
 void peano::applications::puregrid::records::RegularGridStatePacked::initDatatype() {
    const int Attributes = 9;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //maxRefinementsPerIteration
       MPI_DOUBLE,		 //meshWidth
       MPI_DOUBLE,		 //numberOfInnerVertices
       MPI_DOUBLE,		 //numberOfBoundaryVertices
       MPI_DOUBLE,		 //numberOfOuterVertices
       MPI_DOUBLE,		 //numberOfInnerCells
       MPI_DOUBLE,		 //numberOfOuterCells
       MPI_SHORT,		 //_packedRecords0
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //maxRefinementsPerIteration
       DIMENSIONS,		 //meshWidth
       1,		 //numberOfInnerVertices
       1,		 //numberOfBoundaryVertices
       1,		 //numberOfOuterVertices
       1,		 //numberOfInnerCells
       1,		 //numberOfOuterCells
       1,		 //_packedRecords0
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridStatePacked dummyRegularGridStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._maxRefinementsPerIteration))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._meshWidth[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfInnerVertices))), 		&disp[2] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfBoundaryVertices))), 		&disp[3] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfOuterVertices))), 		&disp[4] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfInnerCells))), 		&disp[5] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfOuterCells))), 		&disp[6] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._packedRecords0))), 		&disp[7] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[1]._persistentRecords._maxRefinementsPerIteration))), 		&disp[8] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridStatePacked::Datatype );
    MPI_Type_commit( &RegularGridStatePacked::Datatype );
    
 }