/*
 * Fortran binding for MPI_TYPE_STRUCT: convert the Fortran arguments to
 * their C equivalents and invoke PMPI_Type_struct.
 *
 * A single allocation backs two arrays: the first *count slots hold the
 * MPI_Datatype handles, and the region immediately after them holds the
 * *count MPI_Aint displacements.
 *
 * On allocation failure the registered error handler is invoked and
 * *ierr carries the resulting code back to Fortran.  *newtype is only
 * written when the underlying call succeeds.
 */
void ompi_type_struct_f(MPI_Fint *count, MPI_Fint *array_of_blocklengths,
                        MPI_Fint *array_of_displacements,
                        MPI_Fint *array_of_types,
                        MPI_Fint *newtype, MPI_Fint *ierr)
{
    MPI_Aint *c_disp_array;
    MPI_Datatype *c_type_old_array;
    MPI_Datatype c_new;
    int i, c_ierr;
    OMPI_ARRAY_NAME_DECL(array_of_blocklengths);

    c_type_old_array = (MPI_Datatype *)
        malloc(*count * (sizeof(MPI_Datatype) + sizeof(MPI_Aint)));
    if (NULL == c_type_old_array) {
        c_ierr = OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_NO_MEM,
                                        FUNC_NAME);
        if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
        return;
    }

    /* BUGFIX: step over the *count MPI_Datatype handles *before* casting
     * to MPI_Aint*.  The previous "(MPI_Aint*) c_type_old_array + *count"
     * advanced in units of sizeof(MPI_Aint), which only lands on the
     * intended address when sizeof(MPI_Datatype) == sizeof(MPI_Aint). */
    c_disp_array = (MPI_Aint *)(c_type_old_array + *count);

    for (i = 0; i < *count; i++) {
        c_disp_array[i] = (MPI_Aint) array_of_displacements[i];
        c_type_old_array[i] = PMPI_Type_f2c(array_of_types[i]);
    }

    OMPI_ARRAY_FINT_2_INT(array_of_blocklengths, *count);

    c_ierr = PMPI_Type_struct(OMPI_FINT_2_INT(*count),
                              OMPI_ARRAY_NAME_CONVERT(array_of_blocklengths),
                              c_disp_array,
                              c_type_old_array, &c_new);
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);

    OMPI_ARRAY_FINT_2_INT_CLEANUP(array_of_blocklengths);
    free(c_type_old_array);

    /* Only hand the new handle back to Fortran on success. */
    if (MPI_SUCCESS == c_ierr) {
        *newtype = PMPI_Type_c2f(c_new);
    }
}
/* * mpiPi_collect_basics() - all tasks send their basic info to the * collectorRank. */ void mpiPi_collect_basics () { int i = 0; double app_time = mpiPi.cumulativeTime; int cnt; mpiPi_task_info_t mti; int blockcounts[4] = { 1, 1, 1, MPIPI_HOSTNAME_LEN_MAX }; MPI_Datatype types[4] = { MPI_DOUBLE, MPI_DOUBLE, MPI_INT, MPI_CHAR }; MPI_Aint displs[4]; MPI_Datatype mti_type; MPI_Request *recv_req_arr; mpiPi_msg_debug ("Collect Basics\n"); cnt = 0; PMPI_Address (&mti.mpi_time, &displs[cnt++]); PMPI_Address (&mti.app_time, &displs[cnt++]); PMPI_Address (&mti.rank, &displs[cnt++]); PMPI_Address (&mti.hostname, &displs[cnt++]); for (i = (cnt - 1); i >= 0; i--) { displs[i] -= displs[0]; } PMPI_Type_struct (cnt, blockcounts, displs, types, &mti_type); PMPI_Type_commit (&mti_type); if (mpiPi.rank == mpiPi.collectorRank) { /* In the case where multiple reports are generated per run, only allocate memory for global_task_info once */ if (mpiPi.global_task_info == NULL) { mpiPi.global_task_info = (mpiPi_task_info_t *) calloc (mpiPi.size, sizeof (mpiPi_task_info_t)); if (mpiPi.global_task_info == NULL) mpiPi_abort ("Failed to allocate memory for global_task_info"); mpiPi_msg_debug ("MEMORY : Allocated for global_task_info : %13ld\n", mpiPi.size * sizeof (mpiPi_task_info_t)); } bzero (mpiPi.global_task_info, mpiPi.size * sizeof (mpiPi_task_info_t)); recv_req_arr = (MPI_Request *) malloc (sizeof (MPI_Request) * mpiPi.size); for (i = 0; i < mpiPi.size; i++) { mpiPi_task_info_t *p = &mpiPi.global_task_info[i]; if (i != mpiPi.collectorRank) { PMPI_Irecv (p, 1, mti_type, i, mpiPi.tag, mpiPi.comm, &(recv_req_arr[i])); } else { strcpy (p->hostname, mpiPi.hostname); p->app_time = app_time; p->rank = mpiPi.rank; recv_req_arr[i] = MPI_REQUEST_NULL; } } PMPI_Waitall (mpiPi.size, recv_req_arr, MPI_STATUSES_IGNORE); free (recv_req_arr); /* task MPI time is calculated from callsites data in mpiPi_insert_callsite_records. 
*/ for (i = 0; i < mpiPi.size; i++) mpiPi.global_task_info[i].mpi_time = 0.0; } else { strcpy (mti.hostname, mpiPi.hostname); mti.app_time = app_time; mti.rank = mpiPi.rank; PMPI_Send (&mti, 1, mti_type, mpiPi.collectorRank, mpiPi.tag, mpiPi.comm); } PMPI_Type_free (&mti_type); return; }
/*
 * Profiling entry point for MPI_Type_struct: pass every argument through
 * to the PMPI layer unmodified and return its result.
 */
int
MPI_Type_struct (int count, int *blocklens, MPI_Aint *indices,
                 MPI_Datatype *old_types, MPI_Datatype *newtype)
{
  int rc;

  rc = PMPI_Type_struct (count, blocklens, indices, old_types, newtype);
  return rc;
}
/*
 * Compatibility shim: implement the MPI-2 PMPI_Type_create_struct on top
 * of the older PMPI_Type_struct, which has identical semantics.
 *
 * BUGFIX: PMPI_Type_struct (see its prototype elsewhere in this file)
 * takes non-const array parameters, so passing the const-qualified
 * arguments through directly is a C constraint violation.  The const is
 * cast away explicitly; PMPI_Type_struct only reads these arrays, so the
 * cast is safe.
 */
int
PMPI_Type_create_struct (int count, const int *blocklens,
                         const MPI_Aint *indices,
                         const MPI_Datatype *old_types,
                         MPI_Datatype *new_type)
{
  return PMPI_Type_struct (count,
                           (int *) blocklens,
                           (MPI_Aint *) indices,
                           (MPI_Datatype *) old_types, new_type);
}