void peano::applications::faxen::repositories::FaxenBatchJobRepositoryStatePacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    FaxenBatchJobRepositoryStatePacked dummyFaxenBatchJobRepositoryStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyFaxenBatchJobRepositoryStatePacked[1]._persistentRecords._action))), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &FaxenBatchJobRepositoryStatePacked::Datatype );
    MPI_Type_commit( &FaxenBatchJobRepositoryStatePacked::Datatype );
    
 }
 void peano::applications::latticeboltzmann::blocklatticeboltzmann::repositories::BlockLatticeBoltzmannBatchJobRepositoryState::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_CHAR,		 //reduceState
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1,		 //reduceState
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    BlockLatticeBoltzmannBatchJobRepositoryState dummyBlockLatticeBoltzmannBatchJobRepositoryState[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[0]._persistentRecords._reduceState))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockLatticeBoltzmannBatchJobRepositoryState[1]._persistentRecords._action))), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
    MPI_Type_commit( &BlockLatticeBoltzmannBatchJobRepositoryState::Datatype );
    
 }
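The Peano-generated initDatatype() bodies in this listing all follow the same recipe: take the addresses of two consecutive dummy records, turn the field addresses into displacements relative to the first record, and close the type with an MPI_UB marker so its extent equals the record size. MPI_Address, MPI_Type_struct and MPI_UB were deprecated in MPI-2 and removed in MPI-3, so for comparison here is a minimal sketch of the same recipe with the current API (MPI_Get_address, MPI_Type_create_struct, MPI_Type_create_resized); the Record type and its fields are illustrative placeholders, not part of any example in this listing.

#include <mpi.h>

// Hypothetical record with the same shape as the generated repository-state types.
struct Record {
  int  _action;
  char _reduceState;
};

static MPI_Datatype RecordDatatype;

static void initRecordDatatype() {
  const int    Attributes  = 2;
  int          blocklen[2] = { 1, 1 };
  MPI_Datatype subtypes[2] = { MPI_INT, MPI_CHAR };
  MPI_Aint     disp[2], base, upperBound;
  Record       dummy[2];

  // MPI_Get_address replaces the deprecated MPI_Address.
  MPI_Get_address(&dummy[0],              &base);
  MPI_Get_address(&dummy[0]._action,      &disp[0]);
  MPI_Get_address(&dummy[0]._reduceState, &disp[1]);
  MPI_Get_address(&dummy[1],              &upperBound);
  for (int i = 0; i < Attributes; i++) {
    disp[i] -= base;   // MPI_Aint_diff() is the strictly conforming alternative
  }

  // MPI_Type_create_resized replaces the MPI_UB end marker: the extent is
  // pinned to the distance between two consecutive records.
  MPI_Datatype raw;
  MPI_Type_create_struct(Attributes, blocklen, disp, subtypes, &raw);
  MPI_Type_create_resized(raw, 0, upperBound - base, &RecordDatatype);
  MPI_Type_free(&raw);
  MPI_Type_commit(&RecordDatatype);
}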
Example #3
void mytype_commit(struct mystruct value){

  MPI_Aint indices[3];
  int blocklens[3];
  MPI_Datatype old_types[3];

  old_types[0] = MPI_CHAR;
  old_types[1] = MPI_INT;
  old_types[2] = MPI_DOUBLE;

  blocklens[0] = 1;
  blocklens[1] = 3;
  blocklens[2] = 5;

  MPI_Address(&value.ch, &indices[0]);
  MPI_Address(&value.a, &indices[1]);
  MPI_Address(&value.x, &indices[2]);

  indices[2] = indices[2] - indices[0];
  indices[1] = indices[1] - indices[0];
  indices[0] = 0;

  MPI_Type_struct(3,blocklens,indices,old_types,&mpistruct);

  MPI_Type_commit(&mpistruct);
}
Example #4
void Build_type( float* a, float* b, int* n, MPI_Datatype* point_t ) {

    int block_lengths[3];
    MPI_Aint displacements[3];
    MPI_Datatype typelist[3];
    MPI_Aint start_address;
    MPI_Aint address;

    block_lengths[0] = block_lengths[1] = block_lengths[2] = 1;
    typelist[0] = MPI_FLOAT;
    typelist[1] = MPI_FLOAT;
    typelist[2] = MPI_INT;

    displacements[0] = 0;
    MPI_Address(a, &start_address);
    MPI_Address(b, &address);
    displacements[1] = address - start_address;
    
    MPI_Address(n, &address);
    displacements[2] = address - start_address;

    MPI_Type_struct(3, block_lengths, displacements, typelist, point_t);
    MPI_Type_commit(point_t);

}
Example #5
static void 
InitializeMPIStuff(void)
{
    const int n = 5;
    int          lengths[n]       = {1, 1, 1, 1, 1};
    MPI_Aint     displacements[n] = {0, 0, 0, 0, 0};
    MPI_Datatype types[n] = {MPI_FLOAT,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR,
                             MPI_UNSIGNED_CHAR};

    // create the MPI data type for Pixel
    Pixel onePixel;
    MPI_Address(&onePixel.z, &displacements[0]);
    MPI_Address(&onePixel.r, &displacements[1]);
    MPI_Address(&onePixel.g, &displacements[2]);
    MPI_Address(&onePixel.b, &displacements[3]);
    MPI_Address(&onePixel.a, &displacements[4]);
    for (int i = n-1; i >= 0; i--)
        displacements[i] -= displacements[0];
    MPI_Type_struct(n, lengths, displacements, types,
                    &mpiTypePixel);
    MPI_Type_commit(&mpiTypePixel);

    // and the merge operation for a reduction
    MPI_Op_create((MPI_User_function *)MergePixelBuffersOp, 1,
                  &mpiOpMergePixelBuffers);
}
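Assuming each rank holds a Pixel buffer of equal length, the committed type and the user-defined merge operation would typically be consumed by a reduction. A hedged usage sketch follows; the function name, buffer arguments and root rank are placeholders, not part of the original snippet.

// Sketch only: composite per-rank pixel buffers onto rank 0 using the
// datatype and reduction operation built in InitializeMPIStuff().
static void CompositePixels(Pixel *local, Pixel *composited, int numPixels)
{
    MPI_Reduce(local, composited, numPixels, mpiTypePixel,
               mpiOpMergePixelBuffers, 0, MPI_COMM_WORLD);
}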
Example #6
 void peano::applications::latticeboltzmann::blocklatticeboltzmann::forcerecords::BlockPositionPacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //_blockPosition
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       DIMENSIONS,		 //_blockPosition
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    BlockPositionPacked dummyBlockPositionPacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyBlockPositionPacked[0]._persistentRecords._blockPosition[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyBlockPositionPacked[1]._persistentRecords._blockPosition[0])), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &BlockPositionPacked::Datatype );
    MPI_Type_commit( &BlockPositionPacked::Datatype );
    
 }
Example #7
 void peano::applications::navierstokes::prototype1::repositories::PrototypeRepositoryStatePacked::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //action
       MPI_CHAR,		 //reduceState
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //action
       1,		 //reduceState
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    PrototypeRepositoryStatePacked dummyPrototypeRepositoryStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._action))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[0]._persistentRecords._reduceState))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyPrototypeRepositoryStatePacked[1]._persistentRecords._action))), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &PrototypeRepositoryStatePacked::Datatype );
    MPI_Type_commit( &PrototypeRepositoryStatePacked::Datatype );
    
 }
Example #8
void Build_derived_type(border* indata, MPI_Datatype* message_type_ptr){
  int block_lengths[3];

  MPI_Aint displacements[3];
  MPI_Aint addresses[4];
  MPI_Datatype typelist[3];

  /* Create a derived datatype containing three ints */

  /* First, specify the element types */

  typelist[0]=MPI_INT;
  typelist[1]=MPI_INT; 
  typelist[2]=MPI_INT;

 

  /* Specify the number of elements of each type */
  block_lengths[0]=block_lengths[1]=block_lengths[2] = 1;
  
  /* Compute the element displacements relative to indata */
  MPI_Address(indata, &addresses[0]);
  MPI_Address(&(indata->left), &addresses[1]);
  MPI_Address(&(indata->right), &addresses[2]);
  MPI_Address(&(indata->length), &addresses[3]);

  displacements[0]=addresses[1]-addresses[0];
  displacements[1]=addresses[2]-addresses[0];
  displacements[2]=addresses[3]-addresses[0];
  
  /* Build the derived type */
  MPI_Type_struct(3, block_lengths, displacements,typelist, message_type_ptr);
  /* Commit it for use */
  MPI_Type_commit(message_type_ptr);
} /* Build_derived_type */
Example #9
 void peano::integration::partitioncoupling::builtin::records::ForceTorquePacked::initDatatype() {
    const int Attributes = 3;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //_translationalForce
       MPI_DOUBLE,		 //_torque
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       3,		 //_translationalForce
       3,		 //_torque
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    ForceTorquePacked dummyForceTorquePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._translationalForce[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyForceTorquePacked[0]._persistentRecords._torque[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyForceTorquePacked[1]._persistentRecords._translationalForce[0])), 		&disp[2] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &ForceTorquePacked::Datatype );
    MPI_Type_commit( &ForceTorquePacked::Datatype );
    
 }
Example #10
 void tarch::parallel::messages::RegisterAtNodePoolMessagePacked::initDatatype() {
    const int Attributes = 2;
    MPI_Datatype subtypes[Attributes] = {
       MPI_SHORT,		 //nodeName
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       MPI_MAX_NAME_STRING_ADDED_ONE,		 //nodeName
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegisterAtNodePoolMessagePacked dummyRegisterAtNodePoolMessagePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegisterAtNodePoolMessagePacked[0]._persistentRecords._nodeName[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&dummyRegisterAtNodePoolMessagePacked[1]._persistentRecords._nodeName[0])), 		&disp[1] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegisterAtNodePoolMessagePacked::Datatype );
    MPI_Type_commit( &RegisterAtNodePoolMessagePacked::Datatype );
    
 }
Example #11
/**
 * Initialises the command package MPI type, we use this to illustrate how additional information (this case
 * the parent rank) can be associated with commands
 */
static void initialiseType() {
    struct PP_Control_Package package;
    MPI_Aint pckAddress, dataAddress;
    MPI_Address(&package, &pckAddress);
    MPI_Address(&package.data, &dataAddress);
    int blocklengths[2] = {1,1}, nitems=2;
    MPI_Datatype types[2] = {MPI_CHAR, MPI_INT};
    MPI_Aint offsets[2] = {0, dataAddress - pckAddress};
    MPI_Type_create_struct(nitems, blocklengths, offsets, types, &PP_COMMAND_TYPE);
    MPI_Type_commit(&PP_COMMAND_TYPE);
}
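One plausible way to use the committed PP_COMMAND_TYPE is an ordinary point-to-point send of a single command package; the helper name, destination rank and tag below are placeholders, not part of the original snippet.

// Sketch only: ship one command package to a worker rank.
static void sendCommand(struct PP_Control_Package package, int destRank)
{
    MPI_Send(&package, 1, PP_COMMAND_TYPE, destRank, 0, MPI_COMM_WORLD);
}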
Example #12
 void peano::applications::puregrid::records::RegularGridStatePacked::initDatatype() {
    const int Attributes = 9;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //maxRefinementsPerIteration
       MPI_DOUBLE,		 //meshWidth
       MPI_DOUBLE,		 //numberOfInnerVertices
       MPI_DOUBLE,		 //numberOfBoundaryVertices
       MPI_DOUBLE,		 //numberOfOuterVertices
       MPI_DOUBLE,		 //numberOfInnerCells
       MPI_DOUBLE,		 //numberOfOuterCells
       MPI_SHORT,		 //_packedRecords0
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //maxRefinementsPerIteration
       DIMENSIONS,		 //meshWidth
       1,		 //numberOfInnerVertices
       1,		 //numberOfBoundaryVertices
       1,		 //numberOfOuterVertices
       1,		 //numberOfInnerCells
       1,		 //numberOfOuterCells
       1,		 //_packedRecords0
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridStatePacked dummyRegularGridStatePacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._maxRefinementsPerIteration))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._meshWidth[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfInnerVertices))), 		&disp[2] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfBoundaryVertices))), 		&disp[3] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfOuterVertices))), 		&disp[4] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfInnerCells))), 		&disp[5] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._numberOfOuterCells))), 		&disp[6] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[0]._persistentRecords._packedRecords0))), 		&disp[7] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridStatePacked[1]._persistentRecords._maxRefinementsPerIteration))), 		&disp[8] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridStatePacked::Datatype );
    MPI_Type_commit( &RegularGridStatePacked::Datatype );
    
 }
Example #13
 void peano::applications::poisson::multigrid::records::RegularGridState::initDatatype() {
    const int Attributes = 9;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //omega
       MPI_DOUBLE,		 //meshWidth
       MPI_DOUBLE,		 //numberOfInnerVertices
       MPI_DOUBLE,		 //numberOfBoundaryVertices
       MPI_DOUBLE,		 //numberOfOuterVertices
       MPI_DOUBLE,		 //numberOfInnerCells
       MPI_DOUBLE,		 //numberOfOuterCells
       MPI_CHAR,		 //gridIsStationary
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //omega
       DIMENSIONS,		 //meshWidth
       1,		 //numberOfInnerVertices
       1,		 //numberOfBoundaryVertices
       1,		 //numberOfOuterVertices
       1,		 //numberOfInnerCells
       1,		 //numberOfOuterCells
       1,		 //gridIsStationary
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridState dummyRegularGridState[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._omega))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._meshWidth[0]))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfInnerVertices))), 		&disp[2] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfBoundaryVertices))), 		&disp[3] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfOuterVertices))), 		&disp[4] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfInnerCells))), 		&disp[5] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._numberOfOuterCells))), 		&disp[6] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[0]._persistentRecords._gridIsStationary))), 		&disp[7] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridState[1]._persistentRecords._omega))), 		&disp[8] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridState::Datatype );
    MPI_Type_commit( &RegularGridState::Datatype );
    
 }
Example #14
void make_maskbase_struct(void)
{
    int blockcounts[2] = { 6, 5 };
    MPI_Datatype types[2] = { MPI_DOUBLE, MPI_INT };
    MPI_Aint displs[2];
    maskbase mbase;

    MPI_Address(&mbase.timesigma, &displs[0]);
    MPI_Address(&mbase.numchan, &displs[1]);
    displs[1] -= displs[0];
    displs[0] = 0;
    MPI_Type_struct(2, blockcounts, displs, types, &maskbase_type);
    MPI_Type_commit(&maskbase_type);
}
Example #15
 //!
 //! \brief Appends this cell's composition and scalar fields to the outgoing message layout.
 //!
 void append_to_message_real(
   std::vector< MPI_Aint >& displ
   , std::vector< int >& count
   )
 {
   MPI_Aint addr;
   // Append cell composition
   MPI_Address(&phi[0],&addr);
   displ.push_back(addr);
   count.push_back(phi.size());
   // Append other properties
   MPI_Address(&scalars,&addr);
   displ.push_back(addr);
   count.push_back(scalars.size());
 }
Example #16
 void peano::kernel::regulargrid::tests::records::TestCell::initDatatype() {
    const int Attributes = 1;
    MPI_Datatype subtypes[Attributes] = {
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    TestCell dummyTestCell[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyTestCell[0]))), &base);
    // without this, disp[0] for the MPI_UB marker would stay uninitialised
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyTestCell[1]))), &disp[0] );
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &TestCell::Datatype );
    MPI_Type_commit( &TestCell::Datatype );
    
 }
Example #17
 void peano::applications::poisson::jacobitutorial::records::RegularGridCell::initDatatype() {
    const int Attributes = 1;
    MPI_Datatype subtypes[Attributes] = {
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridCell dummyRegularGridCell[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridCell[0]))), &base);
    // without this, disp[0] for the MPI_UB marker would stay uninitialised
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridCell[1]))), &disp[0] );
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridCell::Datatype );
    MPI_Type_commit( &RegularGridCell::Datatype );
    
 }
Example #18
VT_MPI_INT VTUnify_MPI_Address( void * location, VTUnify_MPI_Aint * address )
{
   VT_MPI_INT error;

   error = CALL_MPI( MPI_Address( location, (MPI_Aint*)address ) );

   return (error == MPI_SUCCESS) ? 1 : 0;
}
Example #19
 void peano::applications::navierstokes::prototype2::records::RegularGridFluidStateEnhancedDivFreeEulerExplicit::initDatatype() {
    const int Attributes = 8;
    MPI_Datatype subtypes[Attributes] = {
       MPI_DOUBLE,		 //meshWidth
       MPI_DOUBLE,		 //numberOfInnerVertices
       MPI_DOUBLE,		 //numberOfBoundaryVertices
       MPI_DOUBLE,		 //numberOfOuterVertices
       MPI_DOUBLE,		 //numberOfInnerCells
       MPI_DOUBLE,		 //numberOfOuterCells
       MPI_CHAR,		 //gridIsStationary
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       DIMENSIONS,		 //meshWidth
       1,		 //numberOfInnerVertices
       1,		 //numberOfBoundaryVertices
       1,		 //numberOfOuterVertices
       1,		 //numberOfInnerCells
       1,		 //numberOfOuterCells
       1,		 //gridIsStationary
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    RegularGridFluidStateEnhancedDivFreeEulerExplicit dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._meshWidth[0]))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfInnerVertices))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfBoundaryVertices))), 		&disp[2] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfOuterVertices))), 		&disp[3] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfInnerCells))), 		&disp[4] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._numberOfOuterCells))), 		&disp[5] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[0]._persistentRecords._gridIsStationary))), 		&disp[6] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(dummyRegularGridFluidStateEnhancedDivFreeEulerExplicit[1]._persistentRecords._meshWidth.data())), 		&disp[7] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &RegularGridFluidStateEnhancedDivFreeEulerExplicit::Datatype );
    MPI_Type_commit( &RegularGridFluidStateEnhancedDivFreeEulerExplicit::Datatype );
    
 }
Example #20
void 
avtWholeImageCompositerWithZ::InitializeMPIStuff(void)
{

#define UCH MPI_UNSIGNED_CHAR
#define FLT MPI_FLOAT
   int                lengths[] = {  1,   1,   1,   1};
   MPI_Aint     displacements[] = {  0,   0,   0,   0};
   MPI_Datatype         types[] = {FLT, UCH, UCH, UCH};
   ZFPixel_t    onePixel;
#undef UCH
#undef FLT

   // create the MPI data type for ZFPixel
   MPI_Address(&onePixel.z, &displacements[0]);
   MPI_Address(&onePixel.r, &displacements[1]);
   MPI_Address(&onePixel.g, &displacements[2]);
   MPI_Address(&onePixel.b, &displacements[3]);

   for (int i = 3; i >= 0; --i)
      displacements[i] -= displacements[0];

   MPI_Type_create_struct(4, lengths, displacements, types,
      &avtWholeImageCompositerWithZ::mpiTypeZFPixel);

   // check that the datatype has the correct extent
   MPI_Aint ext;
   MPI_Type_extent(avtWholeImageCompositerWithZ::mpiTypeZFPixel, &ext);
   if (ext != sizeof(onePixel))
   {
       MPI_Datatype tmp = avtWholeImageCompositerWithZ::mpiTypeZFPixel;
       MPI_Type_create_resized(tmp, 0, sizeof(ZFPixel_t),
           &avtWholeImageCompositerWithZ::mpiTypeZFPixel);
       MPI_Type_free(&tmp);
   }

   MPI_Type_commit(&avtWholeImageCompositerWithZ::mpiTypeZFPixel);

   MPI_Op_create((MPI_User_function *)MergeZFPixelBuffers, 1,
      &avtWholeImageCompositerWithZ::mpiOpMergeZFPixelBuffers);
}
Example #21
int main( int argc, char **argv )
{
    int              rank, size, i;
    int             *table;
    int              errors=0;
    MPI_Aint         address;
    MPI_Datatype     type, newtype;
    int              lens;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    /* Make data table */
    table = (int *) calloc (size, sizeof(int));
    table[rank] = rank + 1;

    MPI_Barrier ( MPI_COMM_WORLD );
    /* Broadcast the data */
    for ( i=0; i<size; i++ )
        MPI_Bcast( &table[i], 1, MPI_INT, i, MPI_COMM_WORLD );

    /* See if we have the correct answers */
    for ( i=0; i<size; i++ )
        if (table[i] != i+1) errors++;

    MPI_Barrier ( MPI_COMM_WORLD );

    /* Try the same thing, but with a derived datatype */
    for ( i=0; i<size; i++ )
        table[i] = 0;
    table[rank] = rank + 1;
    for ( i=0; i<size; i++ ) {
        MPI_Address( &table[i], &address );
        type = MPI_INT;
        lens = 1;
        MPI_Type_struct( 1, &lens, &address, &type, &newtype );
        MPI_Type_commit( &newtype );
        MPI_Bcast( MPI_BOTTOM, 1, newtype, i, MPI_COMM_WORLD );
        MPI_Type_free( &newtype );
    }
    /* See if we have the correct answers */
    for ( i=0; i<size; i++ )
        if (table[i] != i+1) errors++;

    MPI_Barrier ( MPI_COMM_WORLD );

    Test_Waitforall( );
    MPI_Finalize();
    if (errors)
        printf( "[%d] done with ERRORS!\n", rank );
    return errors;
}
Example #22
static MPI_Datatype make_stats_type(ADIO_File fd) {
    int lens[STAT_ITEMS];
    MPI_Aint offsets[STAT_ITEMS];
    MPI_Datatype types[STAT_ITEMS];
    MPI_Datatype newtype;

    lens[BLOCKSIZE] = 1;
    MPI_Address(&fd->blksize, &offsets[BLOCKSIZE]);
    types[BLOCKSIZE] = MPI_LONG;

    lens[STRIPE_SIZE]= lens[STRIPE_FACTOR]= lens[START_IODEVICE] = 1;
    types[STRIPE_SIZE] = types[STRIPE_FACTOR] =
	types[START_IODEVICE] = MPI_INT;
    MPI_Address(&fd->hints->striping_unit, &offsets[STRIPE_SIZE]);
    MPI_Address(&fd->hints->striping_factor, &offsets[STRIPE_FACTOR]);
    MPI_Address(&fd->hints->start_iodevice, &offsets[START_IODEVICE]);


    MPI_Type_create_struct(STAT_ITEMS, lens, offsets, types, &newtype);
    MPI_Type_commit(&newtype);
    return newtype;

}
Example #23
void broadcast_proposal_info(void)
{
	int i;
	MPI_Aint indices[3];
	MPI_Datatype proposal_struct;

	int blocklens[3] = {1, Props.ndim, Props.ndim};
	MPI_Datatype old_types[3] = {MPI_INT, MPI_INT, MPI_DOUBLE};

	MPI_Address( &Props.ndim,     &indices[0] );
	MPI_Address( &Props.code[0],  &indices[1] );
	MPI_Address( &Props.width[0], &indices[2] );

	MPI_Aint base = indices[0];
	for (i=0; i <3; i++) indices[i] -= base;

	//for (i=0; i<3; i++) printf("%ld \n", indices[i]);

	MPI_Type_struct( 3, blocklens, indices, old_types, &proposal_struct);
	MPI_Type_commit( &proposal_struct );

	MPI_Bcast( &Props, 1, proposal_struct, 0, MPI_COMM_WORLD );
}
Example #24
int main(int argc, char* argv[])
{
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);
    MPI_Get_processor_name(processor_name,&namelen);

    /* Define a custom MPI datatype */
    MPI_Type_contiguous(2, MPI_DOUBLE, &MPI_CITY);
    MPI_Type_commit(&MPI_CITY);

    MPI_Datatype types[4] = {MPI_CITY, MPI_DOUBLE, MPI_DOUBLE, MPI_DOUBLE};
    int lengths[4] = {MAXCITIES, 1, 1, 1};

    MPI_Aint disp[4];
    MPI_Aint base;
    MPI_Address(population, disp);
    MPI_Address(&population[0].fitness, disp+1);
    MPI_Address(&population[0].rfitness, disp+2);
    MPI_Address(&population[0].ifitness, disp+3);
    base = disp[0];
    for(int i = 0; i < 4; i++) disp[i] -= base;

    MPI_Type_struct(4, lengths, disp, types, &MPI_GENETYPE);
    MPI_Type_commit(&MPI_GENETYPE);
    /* End of the custom datatype definition */

    init();

    if(myid == 0)
        coordinator();
    else
        worker();

    MPI_Finalize();
    return 0;
}
Example #25
void mytype_commit2(struct mystruct value){

  MPI_Aint indices[1];
  int blocklens[1];
  MPI_Datatype old_types[1];

  old_types[0] = MPI_CHAR;
  blocklens[0] = sizeof(struct mystruct);
  MPI_Address(&value.ch, &indices[0]);
  indices[0] = 0;

  MPI_Type_struct(1,blocklens,indices,old_types,&mpistruct2);

  MPI_Type_commit(&mpistruct2);
}
Example #26
void make_list_content_type(std::list<int>& l, MPI_Datatype& type)
{
  std::vector<int> lengths(l.size(), 1);
  std::vector<MPI_Aint> displacements;
  std::vector<MPI_Datatype> types(l.size(), MPI_INT);
  for (std::list<int>::iterator i = l.begin(); i != l.end(); ++i) {
    MPI_Aint addr;
    MPI_Address(&*i, &addr);
    displacements.push_back(addr);
  }

  MPI_Type_struct(l.size(), &lengths.front(), &displacements.front(),
                  &types.front(), &type);
  MPI_Type_commit(&type);
}
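Because make_list_content_type records absolute addresses rather than offsets, the resulting datatype is tied to that specific std::list instance and must be communicated relative to MPI_BOTTOM. A hedged usage sketch follows; the helper name and root rank are placeholders, and it assumes every rank's list has the same length.

// Sketch only: broadcast the payload of the very list whose element
// addresses were captured above; other list objects need their own type.
void broadcast_list_content(std::list<int>& l, int root)
{
  MPI_Datatype type;
  make_list_content_type(l, type);
  MPI_Bcast(MPI_BOTTOM, 1, type, root, MPI_COMM_WORLD);
  MPI_Type_free(&type);
}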
Example #27
 void peano::integration::dataqueries::DataQueryPacked::initDatatype() {
    const int Attributes = 7;
    MPI_Datatype subtypes[Attributes] = {
       MPI_INT,		 //id
       MPI_INT,		 //recordsPerEntry
       MPI_INT,		 //scope
       MPI_DOUBLE,		 //boundingBoxOffset
       MPI_DOUBLE,		 //boundingBox
       MPI_INT,		 //resolution
       MPI_UB		 // end/displacement flag
    };
    
    int blocklen[Attributes] = {
       1,		 //id
       1,		 //recordsPerEntry
       1,		 //scope
       DIMENSIONS,		 //boundingBoxOffset
       DIMENSIONS,		 //boundingBox
       DIMENSIONS,		 //resolution
       1		 // end/displacement flag
    };
    
    MPI_Aint     disp[Attributes];
    DataQueryPacked dummyDataQueryPacked[2];
    
    MPI_Aint base;
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyDataQueryPacked[0]))), &base);
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyDataQueryPacked[0]._persistentRecords._id))), 		&disp[0] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyDataQueryPacked[0]._persistentRecords._recordsPerEntry))), 		&disp[1] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyDataQueryPacked[0]._persistentRecords._scope))), 		&disp[2] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyDataQueryPacked[0]._persistentRecords._boundingBoxOffset[0]))), 		&disp[3] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyDataQueryPacked[0]._persistentRecords._boundingBox[0]))), 		&disp[4] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyDataQueryPacked[0]._persistentRecords._resolution[0]))), 		&disp[5] );
    MPI_Address( const_cast<void*>(static_cast<const void*>(&(dummyDataQueryPacked[1]._persistentRecords._id))), 		&disp[6] );
    
    for (int i=1; i<Attributes; i++) {
       assertion1( disp[i] > disp[i-1], i );
    }
    for (int i=0; i<Attributes; i++) {
       disp[i] -= base;
    }
    MPI_Type_struct( Attributes, blocklen, disp, subtypes, &DataQueryPacked::Datatype );
    MPI_Type_commit( &DataQueryPacked::Datatype );
    
 }
Example #28
/*mpi_fileattr_define: Defines the MPI Datatype corresponding to the FileAttr
                       type. Returns MPI_SUCCESS on success, MPI_ERR_OTHER otherwise */
int mpi_fileattr_define() {    
    
    struct FileAttr file1 ;
    /* length, displacement, and type arrays used to describe an MPI derived type */
    /* their size reflects the number of components in SparseElt */
    int          lena[6]; 
    MPI_Aint     loca[6]; 
    MPI_Datatype typa[6];

    MPI_Aint     baseaddress;
   
    MPI_Address(&file1, &baseaddress);
    lena[0] = PATH_MAX;    /* file1.pathname has length of PATH_MAX chars*/
    if(MPI_Address(&file1.pathname,&loca[0]) != MPI_SUCCESS) return MPI_ERR_OTHER ; 
    loca[0] -= baseaddress;  /* byte address relative to start of structure */
    typa[0] = MPI_UNSIGNED_CHAR;
   
    lena[1] = 1;    /* file1.mode has length of 1 unsigned int*/
    if(MPI_Address(&file1.mode,&loca[1]) != MPI_SUCCESS) return MPI_ERR_OTHER ; 
    loca[1] -= baseaddress;  /* byte address relative to start of structure */
    typa[1] = MPI_UNSIGNED;
    
    lena[2] = 1;    /* file1.filesize has length of 1 unsigned long int*/
    if(MPI_Address(&file1.filesize,&loca[2]) != MPI_SUCCESS) return MPI_ERR_OTHER; 
    loca[2] -= baseaddress;  /* byte address relative to start of structure */
    typa[2] = MPI_UNSIGNED_LONG;
   
    lena[3] = 1;    /* file1.atime has length of 1 unsigned long int*/
    if(MPI_Address(&file1.atime,&loca[3]) != MPI_SUCCESS) return MPI_ERR_OTHER; 
    loca[3] -= baseaddress;  /* byte address relative to start of structure */
    typa[3] = MPI_UNSIGNED_LONG;
    
    lena[4] = 1;    /* file1.mtime has length of 1 unsigned long int*/
    if(MPI_Address(&file1.mtime,&loca[4]) != MPI_SUCCESS) return MPI_ERR_OTHER; 
    loca[4] -= baseaddress;  /* byte address relative to start of structure */
    typa[4] = MPI_UNSIGNED_LONG;
    
    lena[5] = 1;    /* file1.ctime has length of 1 unsigned long int*/
    if(MPI_Address(&file1.ctime,&loca[5]) != MPI_SUCCESS) return MPI_ERR_OTHER; 
    loca[5] -= baseaddress;  /* byte address relative to start of structure */
    typa[5] = MPI_UNSIGNED_LONG;    
    
    if(MPI_Type_struct(6, lena, loca, typa, &MPI_FileAttr) != MPI_SUCCESS) return MPI_ERR_OTHER;
    if(MPI_Type_commit(&MPI_FileAttr) != MPI_SUCCESS) return MPI_ERR_OTHER;

    return MPI_SUCCESS ;
}
Example #29
FORT_DLL_SPEC void FORT_CALL mpi_address_ ( void*v1, MPI_Fint *v2, MPI_Fint *ierr ){
    MPI_Aint a, b;
    *ierr = MPI_Address( v1, &a );

#ifndef HAVE_MPI_F_INIT_WORKS_WITH_C
    if (MPIR_F_NeedInit){ mpirinitf_(); MPIR_F_NeedInit = 0; }
#endif

    b = a;
    *v2 = (MPI_Fint)( b );
#ifdef HAVE_AINT_LARGER_THAN_FINT
    /* Check for truncation */
    if ((MPI_Aint)*v2 - b != 0) {
        *ierr = MPIR_Err_create_code( MPI_SUCCESS, MPIR_ERR_RECOVERABLE, 
			  "MPI_Address", __LINE__, MPI_ERR_ARG, "**inttoosmall", 0 );
	(void)MPIR_Err_return_comm( 0, "MPI_Address",  *ierr );
    }
#endif
}
Example #30
void 
DefineMPITypes()
{
  Flags flags;
  rect rectangle;
  MPI_Aint a, b;

  int len[4];
  MPI_Aint disp[4];
  MPI_Datatype types[4];

  NUM_type = MPI_DOUBLE;

  MPI_Type_contiguous( 8, MPI_INT, &winspecs_type );
  MPI_Type_commit( &winspecs_type );

  /* Skip the initial 4 pointers in flags, these should not
     be exchanged between processes.
   */
  len[0] = 12; /* 12 ints */
  len[1] = 2;  /* 2 doubles */
  len[2] = 6;  /* 6 NUM_types */

  MPI_Address( (void*)&flags.breakout, &a );
  MPI_Address( (void*)&flags, &b );
  disp[0] = a - b;
  MPI_Address( (void*)&flags.boundary_sq, &a );
  disp[1] = a - b;
  MPI_Address( (void*)&flags.rmin, &a );
  disp[2] = a - b;
  types[0] = MPI_INT;
  types[1] = MPI_DOUBLE;
  types[2] = NUM_type;
  MPI_Type_struct( 3, len, disp, types, &flags_type );
  MPI_Type_commit( &flags_type );

  len[0] = 5;
  MPI_Address( (void*)&rectangle.l, &a );
  MPI_Address( (void*)&rectangle, &b );
  disp[0] = a - b;
  types[0] = MPI_INT;
  MPI_Type_struct( 1, len, disp, types, &rect_type );
  MPI_Type_commit( &rect_type );

}