Example no. 1
int PMPI_Type_get_contents(
        MPI_Datatype datatype, 
        int max_integers, 
        int max_addresses, 
        int max_datatypes, 
        int *array_of_integers, 
        MPI_Aint *array_of_addresses, 
        MPI_Datatype *array_of_datatypes)
{
  int index, position;
  position = _MPI_FindType(datatype);
  if (position == _MPI_NOT_OK)
  {
    position = _MPI_BasicType (datatype);
    if (position == MPI_SUCCESS)
    {
      array_of_integers[0] = 1;
      array_of_addresses[0] = (MPI_Aint) 0;
      array_of_datatypes[0] = datatype;
      return MPI_SUCCESS;
    } else {
      _MPI_ERR_ROUTINE (MPI_ERR_TYPE, "MPI_TYPE_GET_CONTENTS: datatype error");
      MPI_Abort (MPI_COMM_NULL, MPI_ERR_TYPE);
    }
  }
  if ( (_MPI_TYPE_LIST[position].info->count < max_addresses) || (_MPI_TYPE_LIST[position].info->count < max_integers) ||
       (_MPI_TYPE_LIST[position].info->count < max_datatypes) )
  {
    _MPI_ERR_ROUTINE (MPI_ERR_TYPE, "MPI_TYPE_GET_CONTENTS: invalid max_* error");
    MPI_Abort (MPI_COMM_NULL, MPI_ERR_TYPE);    
  }
  if (_MPI_TYPE_LIST[position].sendType == _MPI_STRUCT)
  {
    for (index=0; index<max_integers; index++)
    {
      array_of_integers[index] = _MPI_TYPE_LIST[position].info->blocklen[index];
    }
    for (index=0; index<max_addresses; index++)
    {
      array_of_addresses[index] = _MPI_TYPE_LIST[position].info->stride[index];
    }
    for (index=0; index<max_datatypes; index++)
    {
      array_of_datatypes[index] = _MPI_TYPE_LIST[position].info->types[index];
    }
  } else {
    _MPI_ERR_ROUTINE (MPI_ERR_TYPE, "MPI_TYPE_GET_CONTENTS: Not Struct datatype error");
    MPI_Abort (MPI_COMM_NULL, MPI_ERR_TYPE);
  }
  return MPI_SUCCESS;
}
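
A minimal caller-side sketch of how this routine could be exercised. It assumes a struct datatype `stype` previously built with MPI_Type_struct using three entries (two data fields plus the MPI_UB terminator this library expects) and that the MPI_Type_get_contents wrapper forwards to the PMPI routine above; the helper name is hypothetical.

#include <stdio.h>
#include <mpi.h>

/* Hypothetical helper: print the layout recorded for a 3-entry struct type. */
static void dump_struct_type(MPI_Datatype stype)
{
  int          blocklens[3];
  MPI_Aint     displs[3];
  MPI_Datatype types[3];
  int          i;

  /* In this implementation the arrays receive blocklen/stride/types from */
  /* the internal _MPI_TYPE_INFO record of the struct type.               */
  MPI_Type_get_contents(stype, 3, 3, 3, blocklens, displs, types);

  for (i = 0; i < 3; i++)
    printf("entry %d: blocklen=%d displacement=%ld\n",
           i, blocklens[i], (long)displs[i]);
}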
Example no. 2
/*=============================================================================================*/
int PMPI_Msend (void* message, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)
{
  int size, retval;
  char* p;

  /* option 1: Assert( _MPI_IS_INITIALIZED() ); */
 _MPI_COVERAGE();
  
  _MPI_CHECK_STATUS(&comm);
  retval = _MPI_checks(message, count, datatype, dest, tag, comm);
  /* option 2: STANDARD_MPI_CHECK(comm); */
  if (retval == MPI_SUCCESS)
  {
    size = _MPI_calculateSize(count, datatype);  
    p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer."); 
    p = memcpy(p, message, size);
    retval =_MPI_Buff_Insert(p, count, datatype, tag, comm);
    return retval;
  } else { 
     _MPI_ERR_ROUTINE (retval, "MPI_SEND / MPI_ISEND: argument error");
     MPI_Abort (comm, retval);
  }

 _MPI_COVERAGE();
  return _MPI_NOT_OK;
}
Example no. 3
int PMPI_Finalize (void) {
  int i;  
  _MPI_FINALIZED_FLAG = _MPI_TRUE; 
  
  if ( (_MPI_INIT_STATUS)!=(_MPI_STARTED) ) {
    _MPI_ERR_ROUTINE(MPI_ERR_IN_STATUS, "MPI_FINALIZE: MPI has not been initialized.");    
    MPI_Abort (MPI_COMM_NULL, MPI_ERR_IN_STATUS);
    return _MPI_NOT_OK; 
  } else {
    _MPI_INIT_STATUS = _MPI_ENDED; 
    for (i = 1; i < _MPI_PREALLOCATION_SIZE; i++) {
      _MPI_Comm_Invalid(i);
      _MPI_Data_Invalid(i);
      _MPI_Type_Invalid(i);
    }
    free(_MPI_COMM_LIST); _MPI_COMM_LIST = 0;
    free(_MPI_DATA_BUFF);_MPI_DATA_BUFF = 0;
    free(_MPI_TYPE_LIST);_MPI_TYPE_LIST = 0;
    free(_MPI_OP_LIST);_MPI_OP_LIST = 0;
    for(i=0;i<_MPI_REQ_ARRAY_SIZE;++i) {
      free(_MPI_REQ_LIST_OF_LISTS[i]);
      _MPI_REQ_LIST_OF_LISTS[i] = 0;
    }
    free(_MPI_REQ_LIST_OF_LISTS);_MPI_REQ_LIST_OF_LISTS = 0;
    free(_MPI_REQNULL);_MPI_REQNULL = 0;
    return MPI_SUCCESS;
  } 
}
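
A minimal lifecycle sketch, assuming the usual MPI_Init/MPI_Finalize/MPI_Finalized wrappers forward to the PMPI_* routines shown in these examples.

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv)
{
  int finalized = 0;

  MPI_Init(&argc, &argv);      /* allocates the internal COMM/DATA/TYPE/OP/REQ tables */
  /* ... point-to-point and collective calls ... */
  MPI_Finalize();              /* invalidates the entries and frees the same tables   */

  MPI_Finalized(&finalized);   /* reports true only after a successful finalize       */
  printf("finalized = %d\n", finalized);
  return 0;
}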
Example no. 4
/* Same behavior as PMPI_Irsend.c */
int PMPI_Issend (void* message, int count, MPI_Datatype datatype, int dest,
        int tag, MPI_Comm comm, MPI_Request* request)
{
  int size, retval, index;
  char* p;

 _MPI_COVERAGE();
  _MPI_CHECK_STATUS(&comm);
  retval = _MPI_checkRequest(*request);
  if ( retval!=MPI_SUCCESS ) {
    _MPI_Set_Request(request, message, count, datatype, _MPI_TRUE, tag, comm);
  }
  retval = _MPI_checks(message, count, datatype, dest, tag, comm);

  if (retval == MPI_SUCCESS) {
    index = _MPI_Req_Find(tag, comm);
    if ( index >= 0 ) {
      size = _MPI_calculateSize(count, datatype);
      p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer.");
      p = memcpy(p, message, size);
      retval =_MPI_Buff_Insert(p, count, datatype, tag, comm);
      return retval;
    }
    return MPI_ERR_PENDING;
  } else {
     _MPI_ERR_ROUTINE (retval, "MPI_ISSEND / MPI_IRSEND: argument error");
     MPI_Abort (comm, retval);
  }

 _MPI_COVERAGE();
  return retval; 
}
Example no. 5
/*==========================================================================*/
int PMPI_Irecv (void* message, int count, MPI_Datatype datatype, int source,
        int tag, MPI_Comm comm, MPI_Request* request)
{
  int retval, req;
  retval=_MPI_checks(message, count, datatype, source, tag, comm);

  if (retval != MPI_SUCCESS) {
    _MPI_ERR_ROUTINE (retval, "MPI_IRECV: argument error");
    MPI_Abort (comm, retval);
    return retval;
  }

#if 0
  /* Not sure what this is doing.... */
  if (*request == MPI_REQUEST_NULL) {
    _MPI_Set_Request(request, message, count, datatype, _MPI_FALSE, tag, comm);/* KDDKDD Passing request pointer */
  }
#endif

  if (retval == MPI_SUCCESS) {
    /* Check for New Request and Insert it into the Request queue */

#if 0
    /* TODO: WHAT IS THIS DOING? */
    req = _MPI_Req_Find(tag, comm);
#else
    req = -1;
#endif

    if ( req < 0 ) { /* It is a new request */
      req = _MPI_Req_Post(message, count, datatype, tag, comm, _MPI_FALSE);
      *request = &_MPI_REQ_LIST[req];
    }

#if 0
    /* TODO: This should wait until the "wait" */
    size = _MPI_calculateSize(count, datatype);
    index = _MPI_Buff_Ifind(tag, comm);

    if (index == _MPI_NOT_OK) {
       return MPI_ERR_PENDING;
    }
 
    /*MESSAGE IS THERE*/
    _MPI_Req_Invalid(req);
    if (size < _MPI_DATA_BUFF[index].size) { /*MESSAGE IS TRUNCATED */
      message = memcpy(message, _MPI_DATA_BUFF[index].buffer, size);
      printf("MPI_RECV : Message truncated.\n");
      MPI_Abort(comm, MPI_ERR_COUNT);
    } else {
      message = memcpy(message, _MPI_DATA_BUFF[index].buffer, size);
    }
    _MPI_Data_Invalid(index);
    return MPI_SUCCESS;
#endif
  }
  return retval;
}
Example no. 6
/* STUB */
int PMPI_Reduce_scatter ( void *sendbuf, void *recvbuf, int *recvcnts, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm ) {
  if ( !recvcnts ) {
    _MPI_ERR_ROUTINE (MPI_ERR_COUNT, "MPI_ERR_COUNT : Invalid recv count argument");
    MPI_Abort(comm, MPI_ERR_OTHER);
    return _MPI_NOT_OK;
  }

  return PMPI_Reduce(sendbuf,recvbuf,*recvcnts,datatype,op,0,comm);
}
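
Since this stub forwards everything to PMPI_Reduce with root 0 and the first receive count, a usage sketch on the single rank looks like the following; the helper name is illustrative and the commented values assume the single-rank reduce degenerates to a copy.

#include <mpi.h>

/* Hypothetical helper demonstrating the stub's behavior. */
void reduce_scatter_demo(void)
{
  int send[4]     = {1, 2, 3, 4};
  int recv[4]     = {0, 0, 0, 0};
  int recvcnts[1] = {4};       /* one entry per rank; only one rank here */

  MPI_Reduce_scatter(send, recv, recvcnts, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  /* recv should now hold {1, 2, 3, 4}: with one rank the reduce is a copy */
}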
Example no. 7
int PMPI_Type_lb ( MPI_Datatype datatype, MPI_Aint *displacement )
{
  int index;                                      
  index = _MPI_FindType(datatype);
  if (index == _MPI_NOT_OK)
  {
    _MPI_ERR_ROUTINE (MPI_ERR_TYPE, "MPI_TYPE_LB: invalid datatype.");
    MPI_Abort (MPI_COMM_NULL, MPI_ERR_TYPE);
  }
  *displacement = _MPI_TYPE_LIST[index].lb;
  return MPI_SUCCESS;
}
Example no. 8
int PMPI_Comm_rank(MPI_Comm comm, int* rank)
{
  int index;
  if (_MPI_CHECK_STATUS(&comm) == _MPI_OK)
  {
    if (_MPI_Comm_check_legal(comm, &index) == MPI_SUCCESS)
    {
      *rank = _MPI_RANK;
      return MPI_SUCCESS;
    }
    _MPI_ERR_ROUTINE(MPI_ERR_COMM, "MPI_COMM_RANK: Null communicator.");
    MPI_Abort (comm, MPI_ERR_COMM); 
    return MPI_ERR_COMM;
  }
  else
  {
    _MPI_ERR_ROUTINE(MPI_ERR_IN_STATUS, "MPI_COMM_RANK: MPI initialization error.");
    MPI_Abort (comm, MPI_ERR_IN_STATUS); 
    return MPI_ERR_ARG;
  }
}
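
A trivial usage sketch; in this single-process library the reported rank is whatever _MPI_RANK holds (presumably 0). The helper name is illustrative only.

#include <stdio.h>
#include <mpi.h>

void who_am_i(void)
{
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  printf("rank = %d\n", rank);   /* expected: 0 on the single process */
}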
Example no. 9
int PMPI_Finalized( int *flag )
{
  int retval;
  retval = _MPI_checkIntP (flag);
  if (retval!=MPI_SUCCESS)
  {
    _MPI_ERR_ROUTINE(MPI_ERR_IN_STATUS, "MPI_FINALIZED: Invalid pointer."); 
    MPI_Abort((MPI_Comm)0, MPI_ERR_OTHER);
    return retval;
  }
  if ( (_MPI_INIT_STATUS == _MPI_ENDED) && (_MPI_FINALIZED_FLAG) )
    *flag = _MPI_TRUE;
  else 
    *flag = _MPI_FALSE;
  return MPI_SUCCESS; 
}
Example no. 10
int PMPI_Type_size ( MPI_Datatype datatype, int *size )
{
  int index;
  *size = 0;
  if ( _MPI_BasicType(datatype) == MPI_SUCCESS ) {
    *size = _MPI_getSize(datatype);
  }
  else
  {
    index = _MPI_FindType (datatype);
    if (index == _MPI_NOT_OK)
    {
      _MPI_ERR_ROUTINE (MPI_ERR_TYPE, "MPI_TYPE_SIZE: datatype error");
      MPI_Abort (MPI_COMM_NULL, MPI_ERR_TYPE); 
    }
    *size = _MPI_TYPE_LIST[index].extent;
  }
  return MPI_SUCCESS;
}
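
A usage sketch. Note that for derived types this implementation reports the stored extent rather than a true size; for basic types it reports the element size.

#include <stdio.h>
#include <mpi.h>

void show_sizes(void)
{
  int s;

  MPI_Type_size(MPI_INT, &s);
  printf("MPI_INT size = %d\n", s);      /* typically 4 */

  MPI_Type_size(MPI_DOUBLE, &s);
  printf("MPI_DOUBLE size = %d\n", s);   /* typically 8 */
}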
Example no. 11
/*==========================================================================*/
int PMPI_Recv (void* message, int count, MPI_Datatype datatype, int source,
        int tag, MPI_Comm comm, MPI_Status* status)
{
  int retval, receiver_size, message_size, index;

  retval=_MPI_checks(message, count, datatype, source, tag, comm);
  if (retval == MPI_SUCCESS) {
    receiver_size = _MPI_calculateSize(count, datatype);

    /* ----------------------------------------------- */
    /* Look up the send that matches this receive      */
    /* ----------------------------------------------- */
    index = _MPI_Buff_Find(tag, comm);
    if (index == _MPI_NOT_OK) {
      return MPI_ERR_TAG;  /* this return value is relied upon by some    */
    }                      /* internal calls to indicate no matching send */
    
    message_size = _MPI_calculateSize(_MPI_DATA_BUFF[index].count,
                                      _MPI_DATA_BUFF[index].type);

    if (status != MPI_STATUS_IGNORE) {
      status->MPI_SOURCE = _MPI_RANK;
      status->MPI_TAG = _MPI_DATA_BUFF[index].tag;
      status->__count = message_size;
    }
    if (message_size > receiver_size) {
      _MPI_ERR_ROUTINE(MPI_ERR_COUNT, "MPI_RECV : Message buffer too small for message");
      if (status != MPI_STATUS_IGNORE) status->MPI_ERROR = MPI_ERR_COUNT;
      _MPI_Data_Invalid(index);
      return MPI_ERR_COUNT;
    }

    memcpy(message, _MPI_DATA_BUFF[index].buffer, message_size);

    if (status != MPI_STATUS_IGNORE) status->MPI_ERROR = MPI_SUCCESS;
    _MPI_Data_Invalid(index);
    return MPI_SUCCESS;
  }

  return retval;  
}
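
A self-message sketch of the eager model these routines implement: the send buffers the data in _MPI_DATA_BUFF, and the matching receive copies it back out. The helper is hypothetical.

#include <stdio.h>
#include <string.h>
#include <mpi.h>

void self_message(void)
{
  char out[16] = "hello";
  char in[16];
  MPI_Status st;

  MPI_Send(out, (int)strlen(out) + 1, MPI_CHAR, 0, /*tag=*/7, MPI_COMM_WORLD);
  MPI_Recv(in, (int)sizeof in, MPI_CHAR, 0, /*tag=*/7, MPI_COMM_WORLD, &st);

  printf("received \"%s\" (tag %d)\n", in, st.MPI_TAG);
}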
Example no. 12
/*=============================================================================================*/
int PMPI_Send (void* message, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) {
  int size, retval, sendType, index;
  MPI_Aint position, copiedPointer;
  _MPI_TYPE_INFO *info;
  char* p;

  _MPI_COVERAGE();

  _MPI_CHECK_STATUS(&comm);
  retval = _MPI_checks(message, count, datatype, dest, tag, comm);
  if (retval == MPI_SUCCESS) {
    sendType = _MPI_checkSendType(datatype);
    switch (sendType) {
    case _MPI_DEFAULT:
      {
        size = _MPI_calculateSize(count, datatype);  
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer."); 
        p = memcpy(p, message, size);
        retval =_MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
    case _MPI_CONTIG:
      {
        sendType = _MPI_FindType(datatype);
        size = _MPI_TYPE_LIST[sendType].extent;
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer."); 
        p = memcpy(p, message, size);
        retval =_MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
    case _MPI_INDEXED:
      {
        sendType = _MPI_FindType(datatype);
        size = _MPI_TYPE_LIST[sendType].extent;
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer."); 

        /* ================================================== */
        /* Determine the correct parts to save to the buffers */
        info = _MPI_TYPE_LIST[sendType].info;
        position = (MPI_Aint) 0;
        copiedPointer = (MPI_Aint) 0;
        for (index = 0; index < info->count; index++)
          {
            position = info->stride[index]*sizeof(info->types[0]);
            p = memcpy(p+copiedPointer, ((char*)message)+position, info->blocklen[index]*sizeof(info->types[0])); 
            copiedPointer += info->blocklen[index]*sizeof(info->types[0]);
          }
        retval =_MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
    case _MPI_VECTOR:
      {
        sendType = _MPI_FindType(datatype);
        size = _MPI_TYPE_LIST[sendType].extent;
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer."); 
        /* =================================== */
        /* Figure out the correct ones to pass */
        retval =_MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
    case _MPI_STRUCT:
      {
        sendType = _MPI_FindType(datatype);
        size = _MPI_TYPE_LIST[sendType].extent;
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer."); 
        /* =================================== */
        /* Figure out the correct ones to pass */
        retval =_MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
    default:
      {
        fprintf(stderr,"SEND: mpi_Hindexed or mpi_Hvector not implemented\n");
        MPI_Abort (comm, _MPI_NOT_OK);
      }
    }
  } else { 
    _MPI_ERR_ROUTINE (retval, "MPI_SEND / MPI_ISEND: argument error");
    MPI_Abort (comm, retval);
  }

  _MPI_COVERAGE();
  return _MPI_NOT_OK;
}
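
A sketch of the _MPI_CONTIG branch above, assuming the library also provides the usual MPI_Type_contiguous, MPI_Type_commit, and MPI_Type_free wrappers (not shown in these examples); the helper name is hypothetical.

#include <mpi.h>

/* Hypothetical helper: send ten doubles as one element of a contiguous type. */
void send_contig_block(double *v)   /* v must point to at least 10 doubles */
{
  MPI_Datatype block;

  MPI_Type_contiguous(10, MPI_DOUBLE, &block);
  MPI_Type_commit(&block);

  /* Takes the _MPI_CONTIG case: the type's extent is buffered eagerly. */
  MPI_Send(v, 1, block, 0, /*tag=*/5, MPI_COMM_WORLD);

  MPI_Type_free(&block);
}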
Example no. 13
/*==========================================================================*/
int PMPI_Reduce ( void *sendbuf, void *recvbuf, int count, 
   MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm )
{
  int size;
  int retval;

 _MPI_COVERAGE();

  /* ----------------------------------------------- */
  /* We shortcut on length 0 sends since there is no */
  /* work to do...                                   */
  /* ----------------------------------------------- */
  if ( count == 0 ) return MPI_SUCCESS;

  /* ----------------------------------------------- */
  /* First we verify that the sendbuf, recvbuf, and  */
  /* operator are OK.                                */
  /* ----------------------------------------------- */
  retval = _MPI_checks(sendbuf, count, datatype, root, 1, comm);
  if ( retval != MPI_SUCCESS ) {
    _MPI_ERR_ROUTINE (MPI_ERR_OTHER, "MPI_REDUCE : Invalid argument");
    MPI_Abort(comm, retval);
    return _MPI_NOT_OK;
  }
      
  if ( _MPI_checkBuffer(recvbuf) != MPI_SUCCESS ) {
    _MPI_ERR_ROUTINE (MPI_ERR_BUFFER, "MPI_REDUCE : Invalid buffer pointer");
    MPI_Abort(comm, MPI_ERR_BUFFER);
    return _MPI_NOT_OK;
  }
   
  if ( _MPI_checkOp(op) != MPI_SUCCESS ) {
    _MPI_ERR_ROUTINE(MPI_ERR_OP, "MPI_REDUCE : Invalid MPI_Op");
    MPI_Abort(comm, MPI_ERR_OP);
    return _MPI_NOT_OK;
  }

  /* ----------------------------------------------- */
  /* Guard against buffer overlap...                 */
  /* ----------------------------------------------- */
  size = _MPI_calculateSize(count, datatype);
  if (  _MPI_check_overlap(sendbuf, recvbuf, size) != MPI_SUCCESS ) {
    _MPI_ERR_ROUTINE (MPI_ERR_BUFFER, "MPI_REDUCE : Invalid buffer pointer: Arguments must specify different buffers (no aliasing)");
    MPI_Abort(comm, MPI_ERR_BUFFER);
    return _MPI_NOT_OK;
  }

#if 0
  /* KDDKDD:  This initialization isn't right, particularly for 
   * KDDKDD:  user-defined functions that do comparisons of in with inout.
   * KDDKDD:  I'm not sure what the correct initialization is, though.
   * KDDKDD:  Nor am I sure what the semantics of MPI require; that is,
   * KDDKDD:  should the user initialize inout (and we just got lucky
   * KDDKDD:  on other platforms)?  
   * KDDKDD:  Anyway, this initialization to zero causes 
   * KDDKDD:  ch_simple/zdrive.inp.rcb-partlocal2 to go infinite due to
   * KDDKDD:  faulty box merge.
   */
  /* ----------------------------------------------- */
  /* We zero out the buffer since some users expect  */
  /* it to be initialized.                           */
  /* ----------------------------------------------- */
  memset(recvbuf,0,size);
#endif

  /* ----------------------------------------------- */
  /* Now, we call the function                       */
  /* ----------------------------------------------- */
#if 0
  /* KDDKDD:  On one processor, MPICH and LAM both do only a copy of 
   * KDDKDD:  sendbuf to recvbuf.  They do not call user-defined functions.
   * KDDKDD:  This choice may not be correct on their part, but siMPI should
   * KDDKDD:  probably do the same (for consistency).
   */
  if ( op>MPI_MAXLOC ) {
    (_MPI_OP_LIST[op-_MPI_OP_OFFSET].function)(sendbuf, recvbuf, &count, &datatype); 
  } else 
#endif
  { 
    _MPI_Default_Op(sendbuf, recvbuf, &count, &datatype);
  }
  return MPI_SUCCESS;
}
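
A usage sketch; with a single rank the reduce collapses to copying sendbuf into recvbuf (see the KDDKDD notes above), and aliased buffers are rejected. The helper name is illustrative.

#include <mpi.h>

void reduce_demo(void)
{
  double local[3]  = {1.0, 2.0, 3.0};
  double global[3] = {0.0, 0.0, 0.0};

  /* Buffers must not overlap or the overlap check above aborts the run. */
  MPI_Reduce(local, global, 3, MPI_DOUBLE, MPI_SUM, /*root=*/0, MPI_COMM_WORLD);
  /* global should now equal local on the single rank */
}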
Example no. 14
int PMPI_Init ( int *argc, char **argv[])
{
  int i;
  int retval = MPI_ERR_IN_STATUS;
 _MPI_COVERAGE();
  _MPI_INITIALIZED_FLAG = _MPI_TRUE;
  
  /* -------------------------------------*/ 
  /*  Check for the current status of MPI */
  if ( (_MPI_INIT_STATUS == _MPI_ENDED) || (_MPI_INIT_STATUS == _MPI_STARTED) ) 
  {   
    if (_MPI_INIT_STATUS == _MPI_ENDED)
    {
     _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_IN_STATUS, "MPI was already finalized");
    } else {
     _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_IN_STATUS, "MPI was already initialized");	
    }
    MPI_ERRORS_ARE_FATAL((MPI_Comm)NULL, &retval);
    return MPI_ERR_OTHER;
  } 
       /* -------------------------------------------------------------- */ 
  else /* If the status is fine, initialize the internal data structures */
  {
   _MPI_COVERAGE();

    /* --------------------------- */ 
    /* Create the MPI_REQUEST_NULL */
    /* _MPI_REQNULL = (MPI_Request *)_MPI_safeMalloc(sizeof(MPI_Request),"Error with malloc for MPI_REQUEST_NULL");
     * _MPI_REQNULL = MPI_REQUEST_NULL; 
     * MPI_REQUEST_NULL = _MPI_REQNULL;
     */
  
    #ifndef PREALLOCATION_SIZE
    #define PREALLOCATION_SIZE 10
    #endif
    
    _MPI_INIT_STATUS = _MPI_STARTED; 

    /* ---------- */ 
    /* Allocation */
    _MPI_COMM_ARRAY_SIZE = PREALLOCATION_SIZE;
    _MPI_DATA_ARRAY_SIZE = PREALLOCATION_SIZE;
    _MPI_TYPE_ARRAY_SIZE = PREALLOCATION_SIZE;
    _MPI_OP_ARRAY_SIZE = PREALLOCATION_SIZE;
    _MPI_REQ_ARRAY_SIZE = PREALLOCATION_SIZE;


    _MPI_COMM_LIST = (_MPI_COMM_IMPL *) _MPI_safeMalloc (_MPI_COMM_ARRAY_SIZE*sizeof(_MPI_COMM_IMPL), "Error with malloc of COMM_LIST");
    _MPI_DATA_BUFF = (_MPI_DATA_ENTRY *) _MPI_safeMalloc (_MPI_DATA_ARRAY_SIZE*sizeof(_MPI_DATA_ENTRY), "Error with malloc of DATA_BUFF");
    _MPI_TYPE_LIST = (_MPI_TYPE_DES *) _MPI_safeMalloc (_MPI_TYPE_ARRAY_SIZE*sizeof(_MPI_TYPE_DES), "Error with malloc of TYPE_LIST");
    _MPI_OP_LIST = (_MPI_OP_TYPE *) _MPI_safeMalloc (_MPI_OP_ARRAY_SIZE*sizeof(_MPI_OP_TYPE), "Error with malloc of OP_LIST");
    _MPI_REQ_LIST = (_MPI_REQUEST_OBJECT *) _MPI_safeMalloc (_MPI_REQ_ARRAY_SIZE*sizeof(_MPI_REQUEST_OBJECT), "Error with malloc of REQ_LIST");

    /* ----------------------------------------------- */
    /* Communicators are not set up                    */
    /* ----------------------------------------------- */
    for (i=1; i<_MPI_COMM_ARRAY_SIZE; i++) {
      _MPI_COMM_LIST[i].valid = _MPI_NOT_VALID;
    }
    for (i=1; i<_MPI_DATA_ARRAY_SIZE; i++) {
      _MPI_DATA_BUFF[i].valid = _MPI_NOT_VALID;
    }
    for (i=1; i<_MPI_OP_ARRAY_SIZE; i++) {
      _MPI_OP_LIST[i].valid = _MPI_NOT_VALID;
    }
    for (i=1; i<_MPI_TYPE_ARRAY_SIZE; i++) {
      _MPI_TYPE_LIST[i].id = _MPI_NOT_VALID;
      _MPI_TYPE_LIST[i].info = 0;
      _MPI_TYPE_LIST[i].next = 0;
    }

    _MPI_COMM_COUNT = 0;
    _MPI_DATA_BUFF_COUNT = 0;
    _MPI_TYPE_COUNT = 0;
    _MPI_OP_COUNT = 0;
    _MPI_REQ_COUNT = 0;
    
    /* ------------------------- */
    /* Set entries all to "null" */ 
    for (i=0; i<PREALLOCATION_SIZE; i++)
    {
     _MPI_COVERAGE();
      _MPI_Data_Invalid(i);
      _MPI_Comm_Invalid(i);
      _MPI_Type_Invalid(i);
      _MPI_Req_Invalid(i);
    }                                                           /* --------------------------------------------------- */
    _MPI_Comm_Insert0(0, MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL); /* This inserts MPI_COMM_WORLD as the 1st communicator */
    return MPI_SUCCESS;
  } 

}
Example no. 15
/*==========================================================================*/
int PMPI_Wait(MPI_Request* request, MPI_Status* status) {
  int retval;
  MPI_Status recv_status;

  if ( !request ) {
    _MPI_ERR_ROUTINE(MPI_ERR_REQUEST,"Request pointer is null");
    return _MPI_NOT_OK;
  }

  /* ----------------------------------------------- */
  /* A null request requires no wait                 */
  /* ----------------------------------------------- */
  if ((*request) == MPI_REQUEST_NULL) return MPI_SUCCESS;

  /* ----------------------------------------------- */
  /* Send requests are always ready (eager)          */
  /* We may have to actually do a recv() here if it  */
  /* is not a send request.                          */
  /* ----------------------------------------------- */
  if (!(*request)->send) {
    retval = PMPI_Recv((*request)->buffer,
                       (*request)->count,
                       (*request)->type,
                       _MPI_RANK, 
                       (*request)->tag,
                       (*request)->comm,
                       &recv_status); 
    if ( retval == MPI_ERR_TAG && (*request)->cancel )
    {
      /* no matching send and the recv request has been cancelled */
      _MPI_Req_Invalid((*request));
      *request = MPI_REQUEST_NULL;
      return MPI_SUCCESS;
    }
    else if (retval != MPI_SUCCESS) {
      return retval;
    }
  }
  
  /* Copy in the status */
  if ( status && status != MPI_STATUS_IGNORE) {
    status->MPI_SOURCE = _MPI_RANK; 
    status->MPI_TAG = (*request)->tag;
    status->MPI_ERROR = MPI_SUCCESS;
    if ((*request)->send) {
      status->__count = _MPI_calculateSize((*request)->count, (*request)->type);
    } else {
      status->__count = recv_status.__count;
    }
  }
  
  /* ----------------------------------------------- */
  /* Mark the request available in the pool and then */
  /* write REQUEST_NULL back into the original req   */
  /* so that subsequent requests will immediately    */
  /* succeed.                                        */
  /* ----------------------------------------------- */
  _MPI_Req_Invalid((*request));
  *request = MPI_REQUEST_NULL;
  return MPI_SUCCESS;
}
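
A completion sketch: post a nonblocking receive, satisfy it with an eager send, then finish it in MPI_Wait, which is where the actual PMPI_Recv happens for receive requests. The helper name is hypothetical.

#include <stdio.h>
#include <mpi.h>

void irecv_wait_demo(void)
{
  int         payload = 42, result = 0;
  MPI_Request req;
  MPI_Status  st;

  MPI_Irecv(&result, 1, MPI_INT, 0, /*tag=*/3, MPI_COMM_WORLD, &req);
  MPI_Send(&payload, 1, MPI_INT, 0, /*tag=*/3, MPI_COMM_WORLD);
  MPI_Wait(&req, &st);           /* copies the buffered message into result */

  printf("result = %d\n", result);
}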
Example no. 16
void MPI_ERRORS_RETURN (MPI_Comm* comm, int* error_code, ...)
{
  _MPI_COVERAGE();
  switch(*error_code)
  {
    case MPI_ERR_BUFFER:          
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_BUFFER, "Error with Buffer.");
      break;
    case MPI_ERR_COUNT:          
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_COUNT, "Error with count value.");
      break;
    case MPI_ERR_TYPE:            
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_TYPE, "Error with datatype.");
      break;
    case MPI_ERR_TAG:       
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_TAG, "Error with tag.");
      break;
    case MPI_ERR_COMM:     
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_COMM, "Error with communicator.");
      break;
    case MPI_ERR_RANK:   
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_RANK, "Error with rank.");
      break;
    case MPI_ERR_ROOT:           
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_ROOT, "Error with root.");
      break;
    case MPI_ERR_GROUP:         
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_GROUP, "Error with group.");
      break;
    case MPI_ERR_OP:         
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_OP, "Error with Op.");
      break;
    case MPI_ERR_TOPOLOGY:      
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_TOPOLOGY, "Error with topology.");
      break;
    case MPI_ERR_DIMS:         
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_DIMS, "Error with Dims.");
      break;
    case MPI_ERR_ARG:         
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_ARG, "Error with argument.");
      break;
    case MPI_ERR_UNKNOWN:    
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_UNKNOWN, "Error unknown.");
      break;
    case MPI_ERR_TRUNCATE:      
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_TRUNCATE, "Error with truncate.");
      break;
    case MPI_ERR_OTHER:        
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_OTHER, "Error with other."); 
      break;
    case MPI_ERR_IN_STATUS:   
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_IN_STATUS, "Error code is in status.");
      break;
    case MPI_ERR_PENDING:    
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_PENDING, "Error pending.");
      break;
    case MPI_ERR_REQUEST:       
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_REQUEST, "Error with request.");
      break;
    case MPI_ERR_LASTCODE:   
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_LASTCODE, "Error with Last code.");
      break;
    case MPI_ERR_INTERN:   
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_INTERN, "Error with Internal.");
      break;
    default:
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(0, "Unknown Error.");
      break;
  }
  return;
}
Example no. 17
int PMPI_Type_struct( 
        int count, 
        int blocklens[], 
        MPI_Aint indices[], 
        MPI_Datatype old_types[], 
        MPI_Datatype *newtype )
{
  int index, i;
  int nfields;
  MPI_Aint size;
  _MPI_TYPE_DES* currType;
  _MPI_TYPE_DES* prevType;
  _MPI_TYPE_INFO* info;

  /* KDD 6/2/16 Last entry of old_types must be MPI_UB to 
     get the padding correct. */
  nfields = count - 1;
  if (old_types[nfields] != MPI_UB) {
    _MPI_ERR_ROUTINE (MPI_ERR_TYPE, 
                      "MPI_Type_struct:  Must terminate type list with MPI_UB");
    MPI_Abort(MPI_COMM_WORLD, MPI_ERR_TYPE);
    return _MPI_NOT_OK;
  }

  index = _MPI_Find_free();
  /* ===================================== */
  /* Calculate total size of parts to copy */
  /* KDD 6/2/16  This size calc doesn't account for padding in the struct. 
     KDD 6/2/16  Instead, we'll compute size based on the indices.
     KDD 6/2/16  We assume that the indices are terminated with MPI_UB,
     KDD 6/2/16  as recommended in MPI-1 documentation, so that indices
     KDD 6/2/16  is an offsets array in CRS sense.
  for (i=0; i<nfields; i++)
  {
     size += (MPI_Aint)_MPI_calculateSize(blocklens[i], old_types[i]);
  }
  */
  size = indices[nfields] - indices[0];

  /* ============================== */
  /* Give new id/unique of datatype */
  *newtype = _MPI_TYPE_LIST[index].id = _MPI_TYPE_COUNT+_MPI_TYPE_OFFSET;
  _MPI_TYPE_COUNT++;

  /* ====================== */
  /* Save Query information */
  /* KDD 6/2/16  To account for padding in the structure, the 
   * KDD 6/2/16  extent and ub should be related to the calculated size 
  _MPI_TYPE_LIST[index].extent = indices[count-1]+(blocklens[count-1]*_MPI_getSize(old_types[count-1]));
  _MPI_TYPE_LIST[index].ub = indices[count-1]+(blocklens[count-1]*_MPI_getSize(old_types[count-1]));
  _MPI_TYPE_LIST[index].lb = indices[0];
  */
  _MPI_TYPE_LIST[index].extent = size;
  _MPI_TYPE_LIST[index].sendType = _MPI_STRUCT;
  _MPI_TYPE_LIST[index].next = 0;
  _MPI_TYPE_LIST[index].info = (_MPI_TYPE_INFO *) _MPI_safeMalloc(sizeof(_MPI_TYPE_INFO), "MPI_TYPE_INDEXED: Error with malloc");

  /* ========================================================== */
  /* Save information for packing and unpacking to/from buffers */
  info = _MPI_TYPE_LIST[index].info;
  info->count = count;
  info->blocklen = (int *) _MPI_safeMalloc(sizeof(int)*count, "MPI_TYPE_STRUCT: Error with malloc");
  info->blocklen = memcpy(info->blocklen, blocklens, sizeof(int)*count);
  info->stride = (int *) _MPI_safeMalloc(sizeof(int)*count, "MPI_TYPE_STRUCT: Error with malloc");
  for (i = 0; i < count; i++)
    info->stride[i] = (int) indices[i];  /* indices are MPI_Aint; copy element-wise rather than memcpy'ing raw bytes */
  info->types = (MPI_Datatype *) _MPI_safeMalloc(sizeof(MPI_Datatype)*count, "MPI_TYPE_STRUCT: Error with malloc");
  info->types = memcpy(info->types, old_types, sizeof(MPI_Datatype)*count);

  /* ================================ */
  /* Create linked list of structures */
  prevType = &_MPI_TYPE_LIST[index];
  for (i=0; i<nfields; i++)
  {
    currType = (_MPI_TYPE_DES *) _MPI_safeMalloc(sizeof(_MPI_TYPE_DES), "MPI_TYPE_STRUCT: Error with malloc");
    prevType->next = currType;
    currType->id = old_types[i];
    /* KDD 6/2/16 use the actual extent provided by the indices 
    currType->extent = indices[i]+currType->size;
    */
    currType->extent = indices[i+1]-indices[i];
    currType->next = 0;
    prevType = currType;
  }
  /* =============================================== */
  /* Add the MPI_UB at the end of the structure list */
  currType = (_MPI_TYPE_DES *) _MPI_safeMalloc(sizeof(_MPI_TYPE_DES), "MPI_TYPE_STRUCT: Error with malloc.");
  prevType->next = currType;
  currType->id = MPI_UB;
  /* KDD 6/2/16  Not sure why MPI_UB should have a size or extent.
  currType->size = _MPI_TYPE_LIST[index].size;
  currType->extent = _MPI_TYPE_LIST[index].extent;
  */
  currType->extent = 0;
  currType->next = 0;

  return MPI_SUCCESS;  
}
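
A construction sketch matching the KDD requirement above: the field list must end with MPI_UB and a displacement equal to the padded structure size, so a two-field struct is described with count == 3. MPI_Type_commit and the MPI_Type_struct wrapper are assumed to be available, and the struct/helper names are hypothetical.

#include <stddef.h>
#include <mpi.h>

struct particle { int id; double x; };

/* Hypothetical helper: describe struct particle as an MPI datatype. */
void build_particle_type(MPI_Datatype *ptype)
{
  int          blocklens[3] = {1, 1, 1};
  MPI_Aint     indices[3]   = {offsetof(struct particle, id),
                               offsetof(struct particle, x),
                               sizeof(struct particle)};  /* MPI_UB marks the padded end */
  MPI_Datatype types[3]     = {MPI_INT, MPI_DOUBLE, MPI_UB};

  MPI_Type_struct(3, blocklens, indices, types, ptype);
  MPI_Type_commit(ptype);
}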