/* Same behavior as PMPI_Irsend.c */
int PMPI_Issend (void* message, int count, MPI_Datatype datatype, int dest,
                 int tag, MPI_Comm comm, MPI_Request* request)
{
  int size, retval, index;
  char* p;
  _MPI_COVERAGE();
  _MPI_CHECK_STATUS(&comm);
  retval = _MPI_checkRequest(*request);
  if ( retval != MPI_SUCCESS ) {
    _MPI_Set_Request(request, message, count, datatype, _MPI_TRUE, tag, comm);
  }
  retval = _MPI_checks(message, count, datatype, dest, tag, comm);
  if (retval == MPI_SUCCESS) {
    index = _MPI_Req_Find(tag, comm);
    if ( index >= 0 ) {
      size = _MPI_calculateSize(count, datatype);
      p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer.");
      memcpy(p, message, size);
      retval = _MPI_Buff_Insert(p, count, datatype, tag, comm);
      return retval;
    }
    return MPI_ERR_PENDING;
  } else {
    _MPI_ERR_ROUTINE (retval, "MPI_ISSEND / MPI_IRSEND: argument error");
    MPI_Abort (comm, retval);
  }
  _MPI_COVERAGE();
  return retval;
}
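/* Usage sketch (illustrative only, not part of the library): in this serial
 * implementation PMPI_Issend buffers the message only when a matching receive
 * request is already posted (_MPI_Req_Find); otherwise it returns
 * MPI_ERR_PENDING, so posting the Irecv first is the safe ordering. The
 * _MPI_USAGE_EXAMPLE guard and the example_* function name are hypothetical,
 * added so the sketch stays out of a normal build. */
#ifdef _MPI_USAGE_EXAMPLE
static void example_issend(void)
{
  int value = 42, echo = 0;
  MPI_Request sreq, rreq;
  MPI_Status status;

  MPI_Irecv(&echo, 1, MPI_INT, 0 /*src: self*/, 99, MPI_COMM_WORLD, &rreq);
  MPI_Issend(&value, 1, MPI_INT, 0 /*dest: self*/, 99, MPI_COMM_WORLD, &sreq);
  MPI_Wait(&rreq, &status);
  MPI_Wait(&sreq, &status);
}
#endif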
/*=============================================================================================*/
int PMPI_Msend (void* message, int count, MPI_Datatype datatype, int dest,
                int tag, MPI_Comm comm)
{
  int size, retval;
  char* p;
  /* option 1: Assert( _MPI_IS_INITIALIZED() ); */
  _MPI_COVERAGE();
  _MPI_CHECK_STATUS(&comm);
  retval = _MPI_checks(message, count, datatype, dest, tag, comm);
  /* option 2: STANDARD_MPI_CHECK(comm); */
  if (retval == MPI_SUCCESS) {
    size = _MPI_calculateSize(count, datatype);
    p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer.");
    memcpy(p, message, size);
    retval = _MPI_Buff_Insert(p, count, datatype, tag, comm);
    return retval;
  } else {
    _MPI_ERR_ROUTINE (retval, "MPI_SEND / MPI_ISEND: argument error");
    MPI_Abort (comm, retval);
  }
  _MPI_COVERAGE();
  return _MPI_NOT_OK;
}
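/* Usage sketch (illustrative only): PMPI_Msend deep-copies the user buffer
 * into the global message queue, so a later matching receive on this single
 * rank completes from that copy. Calling it directly is unusual (it is an
 * internal entry point); the guard and function name are hypothetical. */
#ifdef _MPI_USAGE_EXAMPLE
static void example_msend(void)
{
  double x[4] = {1.0, 2.0, 3.0, 4.0};
  double y[4];
  MPI_Status status;

  PMPI_Msend(x, 4, MPI_DOUBLE, 0 /*dest: self*/, 7 /*tag*/, MPI_COMM_WORLD);
  MPI_Recv(y, 4, MPI_DOUBLE, 0, 7, MPI_COMM_WORLD, &status);  /* y == x */
}
#endif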
/*============================================================================*/
int _MPI_Set_Request(MPI_Request* request, void* message, int count,
                     MPI_Datatype datatype, int send, int tag, MPI_Comm comm)
{
  _MPI_COVERAGE();
  /* KDDKDD I think this routine should be called only for sends. Recvs search
     KDDKDD the global request array. */
  *request = (_MPI_REQUEST_OBJECT *) _MPI_safeMalloc(sizeof(_MPI_REQUEST_OBJECT),
                                                     "MPI_Recv malloc of MPI_Request");
  (*request)->buffer = message;
  (*request)->size   = count;
  (*request)->tag    = tag;
  (*request)->type   = datatype;
  (*request)->comm   = comm;
  (*request)->send   = send;
  (*request)->valid  = _MPI_VALID;
  return MPI_SUCCESS;
}
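/* Sketch (illustrative only) of how the nonblocking send paths above use
 * _MPI_Set_Request to materialize a request object; application code would
 * normally reach this through MPI_Isend/MPI_Issend rather than directly.
 * The guard and function name are hypothetical. */
#ifdef _MPI_USAGE_EXAMPLE
static void example_set_request(void)
{
  int payload = 5;
  MPI_Request req = MPI_REQUEST_NULL;

  /* send flag _MPI_TRUE marks this as a send-side request */
  _MPI_Set_Request(&req, &payload, 1, MPI_INT, _MPI_TRUE, 3 /*tag*/, MPI_COMM_WORLD);
  /* ...completion (e.g. MPI_Wait) later consumes and invalidates it */
}
#endif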
int PMPI_Type_contiguous( int count, MPI_Datatype old_type, MPI_Datatype *newtype)
{
  int index;
  MPI_Aint size;
  _MPI_TYPE_DES* currType;
  _MPI_TYPE_DES* prevType;

  index = _MPI_Find_free();
  size = (MPI_Aint)_MPI_calculateSize(count, old_type);
  *newtype = _MPI_TYPE_LIST[index].id = _MPI_TYPE_COUNT+_MPI_TYPE_OFFSET;
  _MPI_TYPE_COUNT++;
  _MPI_TYPE_LIST[index].size = size;
  _MPI_TYPE_LIST[index].extent = size;
  _MPI_TYPE_LIST[index].ub = size;
  _MPI_TYPE_LIST[index].lb = (MPI_Aint) 0;
  _MPI_TYPE_LIST[index].sendType = _MPI_CONTIG;
  _MPI_TYPE_LIST[index].next = 0;
  _MPI_TYPE_LIST[index].info = 0;

  prevType = &_MPI_TYPE_LIST[index];
  size = 0;
  /* ================================================================= */
  /* DON'T THINK WE ACTUALLY NEED THIS =============================== */
  /* WE CAN JUST USE THE INFO STRUCT - same with vector and contiguous */
  /* Note: "index" is reused as the loop counter; the list slot is no
     longer needed once prevType points at it. */
  for (index=0; index<count; index++) {
    currType = (_MPI_TYPE_DES *) _MPI_safeMalloc(sizeof(_MPI_TYPE_DES),
                 "MPI_TYPE_CONTIGUOUS: Error with malloc.");
    prevType->next = currType;
    currType->id = old_type;
    currType->size = _MPI_getSize(old_type);
    size += currType->size;
    currType->extent = size;
    currType->next = 0;
    prevType = currType;
  }
  return MPI_SUCCESS;
}
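/* Usage sketch (illustrative only): building and using a contiguous type.
 * Per the code above, size, extent, and ub all equal count times the old
 * type's size, and lb is 0. The guard and function name are hypothetical. */
#ifdef _MPI_USAGE_EXAMPLE
static void example_type_contiguous(void)
{
  MPI_Datatype three_ints;
  int send_buf[3] = {1, 2, 3}, recv_buf[3];
  MPI_Status status;

  MPI_Type_contiguous(3, MPI_INT, &three_ints);
  MPI_Type_commit(&three_ints);
  MPI_Send(send_buf, 1, three_ints, 0, 11, MPI_COMM_WORLD);
  MPI_Recv(recv_buf, 1, three_ints, 0, 11, MPI_COMM_WORLD, &status);
  MPI_Type_free(&three_ints);
}
#endif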
int PMPI_Type_indexed( int count, int blocklens[], int indices[],
                       MPI_Datatype old_type, MPI_Datatype *newtype )
{
  int index, i;
  MPI_Aint size;
  _MPI_TYPE_DES* currType;
  _MPI_TYPE_DES* prevType;
  _MPI_TYPE_INFO* info;

  index = _MPI_Find_free();

  /* ===================================== */
  /* Calculate total size of parts to copy */
  size = 0;
  for (i=0; i<count; i++) {
    size += (MPI_Aint)_MPI_calculateSize(blocklens[i], old_type);
  }

  /* ================================= */
  /* Give the datatype a new unique id */
  *newtype = _MPI_TYPE_LIST[index].id = _MPI_TYPE_COUNT+_MPI_TYPE_OFFSET;
  _MPI_TYPE_COUNT++;

  /* ====================== */
  /* Save query information */
  _MPI_TYPE_LIST[index].size = size;
  _MPI_TYPE_LIST[index].extent = size;
  _MPI_TYPE_LIST[index].ub = size;
  _MPI_TYPE_LIST[index].lb = (MPI_Aint) 0;
  _MPI_TYPE_LIST[index].sendType = _MPI_INDEXED;
  _MPI_TYPE_LIST[index].next = 0;
  _MPI_TYPE_LIST[index].info = (_MPI_TYPE_INFO *)
    _MPI_safeMalloc(sizeof(_MPI_TYPE_INFO), "MPI_TYPE_INDEXED: Error with malloc");

  /* ========================================================== */
  /* Save information for packing and unpacking to/from buffers */
  info = _MPI_TYPE_LIST[index].info;
  info->count = count;
  info->blocklen = (int *) _MPI_safeMalloc(sizeof(int)*count,
                     "MPI_TYPE_INDEXED: Error with malloc");
  memcpy(info->blocklen, blocklens, sizeof(int)*count);
  info->stride = (int *) _MPI_safeMalloc(sizeof(int)*count,
                   "MPI_TYPE_INDEXED: Error with malloc");
  memcpy(info->stride, indices, sizeof(int)*count);
  info->types = (MPI_Datatype *) _MPI_safeMalloc(sizeof(MPI_Datatype),
                  "MPI_TYPE_INDEXED: Error with malloc");
  info->types[0] = old_type;

  /* ================================ */
  /* Create linked list of structures */
  prevType = &_MPI_TYPE_LIST[index];
  size = indices[0]*_MPI_getSize(old_type);
  for (index=0; index<count; index++) {
    currType = (_MPI_TYPE_DES *) _MPI_safeMalloc(sizeof(_MPI_TYPE_DES),
                 "MPI_TYPE_INDEXED: Error with malloc");
    prevType->next = currType;
    currType->id = old_type;
    currType->size = blocklens[index]*_MPI_getSize(old_type);
    size += currType->size;
    currType->extent = size;
    currType->next = 0;
    prevType = currType;
  }
  return MPI_SUCCESS;
}
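/* Usage sketch (illustrative only): the displacements passed to
 * MPI_Type_indexed are in multiples of the old type's extent, matching the
 * indices[0]*_MPI_getSize(old_type) arithmetic above. The guard and function
 * name are hypothetical. */
#ifdef _MPI_USAGE_EXAMPLE
static void example_type_indexed(void)
{
  MPI_Datatype picked;
  int blocklens[2] = {2, 1};
  int indices[2]   = {0, 4};   /* element offsets: selects elements 0,1 and 4 */

  MPI_Type_indexed(2, blocklens, indices, MPI_INT, &picked);
  MPI_Type_commit(&picked);    /* info->blocklen/stride now hold these arrays */
  MPI_Type_free(&picked);
}
#endif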
int PMPI_Init ( int *argc, char **argv[])
{
  int i;
  int retval = MPI_ERR_IN_STATUS;
  _MPI_COVERAGE();
  _MPI_INITIALIZED_FLAG = _MPI_TRUE;

  /* ------------------------------------ */
  /* Check for the current status of MPI  */
  if ( (_MPI_INIT_STATUS == _MPI_ENDED) || (_MPI_INIT_STATUS == _MPI_STARTED) ) {
    if (_MPI_INIT_STATUS == _MPI_ENDED) {
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_IN_STATUS, "MPI was already finalized");
    } else {
      _MPI_COVERAGE();
      _MPI_ERR_ROUTINE(MPI_ERR_IN_STATUS, "MPI was already initialized");
    }
    MPI_ERRORS_ARE_FATAL((MPI_Comm)NULL, &retval);
    return MPI_ERR_OTHER;
  }
  /* -------------------------------------------------------------- */
  /* If the status is fine, initialize the internal data structures */
  else {
    _MPI_COVERAGE();
    /* --------------------------- */
    /* Create the MPI_REQUEST_NULL */
    /* _MPI_REQNULL = (MPI_Request *)_MPI_safeMalloc(sizeof(MPI_Request),
     *                  "Error with malloc for MPI_REQUEST_NULL");
     * _MPI_REQNULL = MPI_REQUEST_NULL;
     * MPI_REQUEST_NULL = _MPI_REQNULL;
     */
#ifndef PREALLOCATION_SIZE
#define PREALLOCATION_SIZE 10
#endif
    _MPI_INIT_STATUS = _MPI_STARTED;

    /* ---------- */
    /* Allocation */
    _MPI_COMM_ARRAY_SIZE = PREALLOCATION_SIZE;
    _MPI_DATA_ARRAY_SIZE = PREALLOCATION_SIZE;
    _MPI_TYPE_ARRAY_SIZE = PREALLOCATION_SIZE;
    _MPI_OP_ARRAY_SIZE   = PREALLOCATION_SIZE;
    _MPI_REQ_ARRAY_SIZE  = PREALLOCATION_SIZE;
    _MPI_COMM_LIST = (_MPI_COMM_IMPL *) _MPI_safeMalloc(
        _MPI_COMM_ARRAY_SIZE*sizeof(_MPI_COMM_IMPL), "Error with malloc of COMM_LIST");
    _MPI_DATA_BUFF = (_MPI_DATA_ENTRY *) _MPI_safeMalloc(
        _MPI_DATA_ARRAY_SIZE*sizeof(_MPI_DATA_ENTRY), "Error with malloc of DATA_BUFF");
    _MPI_TYPE_LIST = (_MPI_TYPE_DES *) _MPI_safeMalloc(
        _MPI_TYPE_ARRAY_SIZE*sizeof(_MPI_TYPE_DES), "Error with malloc of TYPE_LIST");
    _MPI_OP_LIST = (_MPI_OP_TYPE *) _MPI_safeMalloc(
        _MPI_OP_ARRAY_SIZE*sizeof(_MPI_OP_TYPE), "Error with malloc of OP_LIST");
    _MPI_REQ_LIST = (_MPI_REQUEST_OBJECT *) _MPI_safeMalloc(
        _MPI_REQ_ARRAY_SIZE*sizeof(_MPI_REQUEST_OBJECT), "Error with malloc of REQ_LIST");

    /* ----------------------------------------------- */
    /* Communicators are not set up                    */
    /* ----------------------------------------------- */
    for (i=1; i<_MPI_COMM_ARRAY_SIZE; i++) {
      _MPI_COMM_LIST[i].valid = _MPI_NOT_VALID;
    }
    for (i=1; i<_MPI_DATA_ARRAY_SIZE; i++) {
      _MPI_DATA_BUFF[i].valid = _MPI_NOT_VALID;
    }
    for (i=1; i<_MPI_OP_ARRAY_SIZE; i++) {
      _MPI_OP_LIST[i].valid = _MPI_NOT_VALID;
    }
    for (i=1; i<_MPI_TYPE_ARRAY_SIZE; i++) {
      _MPI_TYPE_LIST[i].id = _MPI_NOT_VALID;
      _MPI_TYPE_LIST[i].info = 0;
      _MPI_TYPE_LIST[i].next = 0;
    }
    _MPI_COMM_COUNT = 0;
    _MPI_DATA_BUFF_COUNT = 0;
    _MPI_TYPE_COUNT = 0;
    _MPI_OP_COUNT = 0;
    _MPI_REQ_COUNT = 0;

    /* ------------------------- */
    /* Set entries all to "null" */
    for (i=0; i<PREALLOCATION_SIZE; i++) {
      _MPI_COVERAGE();
      _MPI_Data_Invalid(i);
      _MPI_Comm_Invalid(i);
      _MPI_Type_Invalid(i);
      _MPI_Req_Invalid(i);
    }

    /* ---------------------------------------------------- */
    /* This inserts MPI_COMM_WORLD as the 1st communicator  */
    _MPI_Comm_Insert0(0, MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
    return MPI_SUCCESS;
  }
}
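/* Usage sketch (illustrative only): the standard init/finalize skeleton this
 * routine supports. In this serial library the rank is always 0 and the size
 * is always 1; calling MPI_Init again after MPI_Finalize fails with
 * MPI_ERR_OTHER, as coded above. The guard and function name are
 * hypothetical; <stdio.h> is assumed to be in scope. */
#ifdef _MPI_USAGE_EXAMPLE
int example_main(int argc, char *argv[])
{
  int rank, nprocs;

  MPI_Init(&argc, &argv);                  /* allocates the global tables */
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);    /* always 0 here */
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);  /* always 1 here */
  printf("rank %d of %d\n", rank, nprocs);
  MPI_Finalize();
  return 0;
}
#endif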
int PMPI_Type_struct( int count, int blocklens[], MPI_Aint indices[],
                      MPI_Datatype old_types[], MPI_Datatype *newtype )
{
  int index, i;
  int nfields;
  MPI_Aint size;
  _MPI_TYPE_DES* currType;
  _MPI_TYPE_DES* prevType;
  _MPI_TYPE_INFO* info;

  /* KDD 6/2/16  Last entry of old_types must be MPI_UB to get the padding correct. */
  nfields = count - 1;
  if (old_types[nfields] != MPI_UB) {
    _MPI_ERR_ROUTINE (MPI_ERR_TYPE,
                      "MPI_Type_struct: Must terminate type list with MPI_UB");
    MPI_Abort(MPI_COMM_WORLD, MPI_ERR_TYPE);
    return _MPI_NOT_OK;
  }

  index = _MPI_Find_free();

  /* ===================================== */
  /* Calculate total size of parts to copy */
  /* KDD 6/2/16  This size calc doesn't account for padding in the struct.
     KDD 6/2/16  Instead, we'll compute size based on the indices.
     KDD 6/2/16  We assume that the indices are terminated with MPI_UB,
     KDD 6/2/16  as recommended in MPI-1 documentation, so that indices
     KDD 6/2/16  is an offsets array in the CRS sense.
  for (i=0; i<nfields; i++) {
    size += (MPI_Aint)_MPI_calculateSize(blocklens[i], old_types[i]);
  }
  */
  size = indices[nfields] - indices[0];

  /* ================================= */
  /* Give the datatype a new unique id */
  *newtype = _MPI_TYPE_LIST[index].id = _MPI_TYPE_COUNT+_MPI_TYPE_OFFSET;
  _MPI_TYPE_COUNT++;

  /* ====================== */
  /* Save query information */
  /* KDD 6/2/16  To account for padding in the structure, the
   * KDD 6/2/16  extent and ub should be related to the calculated size
  _MPI_TYPE_LIST[index].extent = indices[count-1]+(blocklens[count-1]*_MPI_getSize(old_types[count-1]));
  _MPI_TYPE_LIST[index].ub = indices[count-1]+(blocklens[count-1]*_MPI_getSize(old_types[count-1]));
  _MPI_TYPE_LIST[index].lb = indices[0];
  */
  _MPI_TYPE_LIST[index].extent = size;
  _MPI_TYPE_LIST[index].sendType = _MPI_STRUCT;
  _MPI_TYPE_LIST[index].next = 0;
  _MPI_TYPE_LIST[index].info = (_MPI_TYPE_INFO *)
    _MPI_safeMalloc(sizeof(_MPI_TYPE_INFO), "MPI_TYPE_STRUCT: Error with malloc");

  /* ========================================================== */
  /* Save information for packing and unpacking to/from buffers */
  info = _MPI_TYPE_LIST[index].info;
  info->count = count;
  info->blocklen = (int *) _MPI_safeMalloc(sizeof(int)*count,
                     "MPI_TYPE_STRUCT: Error with malloc");
  memcpy(info->blocklen, blocklens, sizeof(int)*count);
  info->stride = (int *) _MPI_safeMalloc(sizeof(int)*count,
                   "MPI_TYPE_STRUCT: Error with malloc");
  for (i=0; i<count; i++) {
    /* copy element-wise: indices[] holds MPI_Aint, stride[] holds int */
    info->stride[i] = (int) indices[i];
  }
  info->types = (MPI_Datatype *) _MPI_safeMalloc(sizeof(MPI_Datatype)*count,
                  "MPI_TYPE_STRUCT: Error with malloc");
  memcpy(info->types, old_types, sizeof(MPI_Datatype)*count);

  /* ================================ */
  /* Create linked list of structures */
  prevType = &_MPI_TYPE_LIST[index];
  for (i=0; i<nfields; i++) {
    currType = (_MPI_TYPE_DES *) _MPI_safeMalloc(sizeof(_MPI_TYPE_DES),
                 "MPI_TYPE_STRUCT: Error with malloc");
    prevType->next = currType;
    currType->id = old_types[i];
    /* KDD 6/2/16  use the actual extent provided by the indices
    currType->extent = indices[i]+currType->size;
    */
    currType->extent = indices[i+1]-indices[i];
    currType->next = 0;
    prevType = currType;
  }

  /* =============================================== */
  /* Add the MPI_UB at the end of the structure list */
  currType = (_MPI_TYPE_DES *) _MPI_safeMalloc(sizeof(_MPI_TYPE_DES),
               "MPI_TYPE_STRUCT: Error with malloc.");
  prevType->next = currType;
  currType->id = MPI_UB;
  /* KDD 6/2/16  Not sure why MPI_UB should have a size or extent.
  currType->size = _MPI_TYPE_LIST[index].size;
  currType->extent = _MPI_TYPE_LIST[index].extent;
  */
  currType->extent = 0;
  currType->next = 0;

  return MPI_SUCCESS;
}
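/* Usage sketch (illustrative only): this implementation requires the last
 * old_types[] entry to be MPI_UB so that indices[] acts as an offsets array
 * (see the KDD notes above); the final index is then the true struct size,
 * padding included. The guard and names are hypothetical; offsetof assumes
 * <stddef.h> is in scope. */
#ifdef _MPI_USAGE_EXAMPLE
struct example_particle { int id; double pos; };

static void example_type_struct(void)
{
  MPI_Datatype ptype;
  int          blocklens[3] = {1, 1, 1};
  MPI_Aint     indices[3]   = {offsetof(struct example_particle, id),
                               offsetof(struct example_particle, pos),
                               sizeof(struct example_particle)}; /* MPI_UB slot */
  MPI_Datatype old_types[3] = {MPI_INT, MPI_DOUBLE, MPI_UB};

  MPI_Type_struct(3, blocklens, indices, old_types, &ptype);
  MPI_Type_commit(&ptype);  /* extent == sizeof(struct example_particle) */
  MPI_Type_free(&ptype);
}
#endif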
/*=============================================================================================*/
int PMPI_Send (void* message, int count, MPI_Datatype datatype, int dest,
               int tag, MPI_Comm comm)
{
  int size, retval, sendType, index;
  MPI_Aint position, copiedPointer;
  _MPI_TYPE_INFO *info;
  char* p;

  _MPI_COVERAGE();
  _MPI_CHECK_STATUS(&comm);
  retval = _MPI_checks(message, count, datatype, dest, tag, comm);
  if (retval == MPI_SUCCESS) {
    sendType = _MPI_checkSendType(datatype);
    switch (sendType) {
      case _MPI_DEFAULT: {
        size = _MPI_calculateSize(count, datatype);
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer.");
        memcpy(p, message, size);
        retval = _MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
      case _MPI_CONTIG: {
        sendType = _MPI_FindType(datatype);
        size = _MPI_TYPE_LIST[sendType].extent;
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer.");
        memcpy(p, message, size);
        retval = _MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
      case _MPI_INDEXED: {
        sendType = _MPI_FindType(datatype);
        size = _MPI_TYPE_LIST[sendType].extent;
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer.");
        /* ================================================== */
        /* Determine the correct parts to save to the buffers */
        info = _MPI_TYPE_LIST[sendType].info;
        position = (MPI_Aint) 0;
        copiedPointer = (MPI_Aint) 0;
        for (index = 0; index < info->count; index++) {
          /* Pack each block; the element size comes from the datatype's
             size (_MPI_getSize), not from sizeof() of its handle. */
          position = info->stride[index]*_MPI_getSize(info->types[0]);
          memcpy(p+copiedPointer, ((char*)message)+position,
                 info->blocklen[index]*_MPI_getSize(info->types[0]));
          copiedPointer += info->blocklen[index]*_MPI_getSize(info->types[0]);
        }
        retval = _MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
      case _MPI_VECTOR: {
        sendType = _MPI_FindType(datatype);
        size = _MPI_TYPE_LIST[sendType].extent;
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer.");
        /* =================================== */
        /* Figure out the correct ones to pass */
        retval = _MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
      case _MPI_STRUCT: {
        sendType = _MPI_FindType(datatype);
        size = _MPI_TYPE_LIST[sendType].extent;
        p = (char *)_MPI_safeMalloc(size, "Error with malloc for send buffer.");
        /* =================================== */
        /* Figure out the correct ones to pass */
        retval = _MPI_Buff_Insert(p, count, datatype, tag, comm);
        return retval;
      }
      default: {
        fprintf(stderr, "SEND: MPI_Hindexed and MPI_Hvector are not implemented\n");
        MPI_Abort (comm, _MPI_NOT_OK);
      }
    }
  } else {
    _MPI_ERR_ROUTINE (retval, "MPI_SEND / MPI_ISEND: argument error");
    MPI_Abort (comm, retval);
  }
  _MPI_COVERAGE();
  return _MPI_NOT_OK;
}
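/* Usage sketch (illustrative only): because PMPI_Send deep-copies the user
 * buffer into the queue before returning, the sender may reuse the buffer
 * immediately, and a later matching receive still sees the original values.
 * The guard and function name are hypothetical. */
#ifdef _MPI_USAGE_EXAMPLE
static void example_send(void)
{
  int msg[2] = {1, 2}, out[2] = {0, 0};
  MPI_Status status;

  MPI_Send(msg, 2, MPI_INT, 0 /*dest: self*/, 5, MPI_COMM_WORLD);
  msg[0] = 99;  /* safe: the copy was taken at send time */
  MPI_Recv(out, 2, MPI_INT, 0, 5, MPI_COMM_WORLD, &status);  /* out == {1, 2} */
}
#endif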