int mca_fcoll_dynamic_gen2_file_read_all (mca_io_ompio_file_t *fh, void *buf, int count, struct ompi_datatype_t *datatype, ompi_status_public_t *status) { MPI_Aint position = 0; MPI_Aint total_bytes = 0; /* total bytes to be read */ MPI_Aint bytes_to_read_in_cycle = 0; /* left to be read in a cycle*/ MPI_Aint bytes_per_cycle = 0; /* total read in each cycle by each process*/ int index = 0, ret=OMPI_SUCCESS; int cycles = 0; int i=0, j=0, l=0; int n=0; /* current position in total_bytes_per_process array */ MPI_Aint bytes_remaining = 0; /* how many bytes have been read from the current value from total_bytes_per_process */ int *sorted_file_offsets=NULL, entries_per_aggregator=0; int bytes_received = 0; int blocks = 0; /* iovec structure and count of the buffer passed in */ uint32_t iov_count = 0; struct iovec *decoded_iov = NULL; int iov_index = 0; size_t current_position = 0; struct iovec *local_iov_array=NULL, *global_iov_array=NULL; char *receive_buf = NULL; MPI_Aint *memory_displacements=NULL; /* global iovec at the readers that contain the iovecs created from file_set_view */ uint32_t total_fview_count = 0; int local_count = 0; int *fview_count = NULL, *disp_index=NULL, *temp_disp_index=NULL; int current_index=0, temp_index=0; int **blocklen_per_process=NULL; MPI_Aint **displs_per_process=NULL; char *global_buf = NULL; MPI_Aint global_count = 0; mca_io_ompio_local_io_array *file_offsets_for_agg=NULL; /* array that contains the sorted indices of the global_iov */ int *sorted = NULL; int *displs = NULL; int dynamic_gen2_num_io_procs; size_t max_data = 0; MPI_Aint *total_bytes_per_process = NULL; ompi_datatype_t **sendtype = NULL; MPI_Request *send_req=NULL, recv_req=NULL; int my_aggregator =-1; bool recvbuf_is_contiguous=false; size_t ftype_size; OPAL_PTRDIFF_TYPE ftype_extent, lb; #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0; double rcomm_time = 0.0, start_rcomm_time = 0.0, end_rcomm_time = 0.0; double read_exch = 0.0, start_rexch = 0.0, end_rexch = 0.0; mca_io_ompio_print_entry nentry; #endif /************************************************************************** ** 1. In case the data is not contigous in memory, decode it into an iovec **************************************************************************/ opal_datatype_type_size ( &datatype->super, &ftype_size ); opal_datatype_get_extent ( &datatype->super, &lb, &ftype_extent ); if ( (ftype_extent == (OPAL_PTRDIFF_TYPE) ftype_size) && opal_datatype_is_contiguous_memory_layout(&datatype->super,1) && 0 == lb ) { recvbuf_is_contiguous = true; } if (! recvbuf_is_contiguous ) { ret = fh->f_decode_datatype ((struct mca_io_ompio_file_t *)fh, datatype, count, buf, &max_data, &decoded_iov, &iov_count); if (OMPI_SUCCESS != ret){ goto exit; } } else { max_data = count * datatype->super.size; } if ( MPI_STATUS_IGNORE != status ) { status->_ucount = max_data; } fh->f_get_num_aggregators ( &dynamic_gen2_num_io_procs); ret = fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh, dynamic_gen2_num_io_procs, max_data); if (OMPI_SUCCESS != ret){ goto exit; } my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index]; /************************************************************************** ** 2. 
Determine the total amount of data to be written **************************************************************************/ total_bytes_per_process = (MPI_Aint*)malloc(fh->f_procs_per_group*sizeof(MPI_Aint)); if (NULL == total_bytes_per_process) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rcomm_time = MPI_Wtime(); #endif ret = fcoll_base_coll_allgather_array (&max_data, 1, MPI_LONG, total_bytes_per_process, 1, MPI_LONG, fh->f_aggregator_index, fh->f_procs_in_group, fh->f_procs_per_group, fh->f_comm); if (OMPI_SUCCESS != ret){ goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif for (i=0 ; i<fh->f_procs_per_group ; i++) { total_bytes += total_bytes_per_process[i]; } if (NULL != total_bytes_per_process) { free (total_bytes_per_process); total_bytes_per_process = NULL; } /********************************************************************* *** 3. Generate the File offsets/lengths corresponding to this write ********************************************************************/ ret = fh->f_generate_current_file_view ((struct mca_io_ompio_file_t *) fh, max_data, &local_iov_array, &local_count); if (ret != OMPI_SUCCESS){ goto exit; } /************************************************************* *** 4. Allgather the File View information at all processes *************************************************************/ fview_count = (int *) malloc (fh->f_procs_per_group * sizeof (int)); if (NULL == fview_count) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rcomm_time = MPI_Wtime(); #endif ret = fcoll_base_coll_allgather_array (&local_count, 1, MPI_INT, fview_count, 1, MPI_INT, fh->f_aggregator_index, fh->f_procs_in_group, fh->f_procs_per_group, fh->f_comm); if (OMPI_SUCCESS != ret){ goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif displs = (int*)malloc (fh->f_procs_per_group*sizeof(int)); if (NULL == displs) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } displs[0] = 0; total_fview_count = fview_count[0]; for (i=1 ; i<fh->f_procs_per_group ; i++) { total_fview_count += fview_count[i]; displs[i] = displs[i-1] + fview_count[i-1]; } #if DEBUG_ON if (my_aggregator == fh->f_rank) { for (i=0 ; i<fh->f_procs_per_group ; i++) { printf ("%d: PROCESS: %d ELEMENTS: %d DISPLS: %d\n", fh->f_rank, i, fview_count[i], displs[i]); } } #endif /* allocate the global iovec */ if (0 != total_fview_count) { global_iov_array = (struct iovec*)malloc (total_fview_count * sizeof(struct iovec)); if (NULL == global_iov_array) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rcomm_time = MPI_Wtime(); #endif ret = fcoll_base_coll_allgatherv_array (local_iov_array, local_count, fh->f_iov_type, global_iov_array, fview_count, displs, fh->f_iov_type, fh->f_aggregator_index, fh->f_procs_in_group, fh->f_procs_per_group, fh->f_comm); if (OMPI_SUCCESS != ret){ goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif /**************************************************************************************** *** 5. Sort the global offset/lengths list based on the offsets. 
*** The result of the sort operation is the 'sorted', an integer array, *** which contains the indexes of the global_iov_array based on the offset. *** For example, if global_iov_array[x].offset is followed by global_iov_array[y].offset *** in the file, and that one is followed by global_iov_array[z].offset, than *** sorted[0] = x, sorted[1]=y and sorted[2]=z; ******************************************************************************************/ if (0 != total_fview_count) { sorted = (int *)malloc (total_fview_count * sizeof(int)); if (NULL == sorted) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } fh->f_sort_iovec (global_iov_array, total_fview_count, sorted); } if (NULL != local_iov_array) { free (local_iov_array); local_iov_array = NULL; } #if DEBUG_ON if (my_aggregator == fh->f_rank) { for (i=0 ; i<total_fview_count ; i++) { printf("%d: OFFSET: %p LENGTH: %d\n", fh->f_rank, global_iov_array[sorted[i]].iov_base, global_iov_array[sorted[i]].iov_len); } } #endif /************************************************************* *** 6. Determine the number of cycles required to execute this *** operation *************************************************************/ fh->f_get_bytes_per_agg ( (int *) &bytes_per_cycle); cycles = ceil((double)total_bytes/bytes_per_cycle); if ( my_aggregator == fh->f_rank) { disp_index = (int *)malloc (fh->f_procs_per_group * sizeof (int)); if (NULL == disp_index) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } blocklen_per_process = (int **)malloc (fh->f_procs_per_group * sizeof (int*)); if (NULL == blocklen_per_process) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } displs_per_process = (MPI_Aint **)malloc (fh->f_procs_per_group * sizeof (MPI_Aint*)); if (NULL == displs_per_process){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } for (i=0;i<fh->f_procs_per_group;i++){ blocklen_per_process[i] = NULL; displs_per_process[i] = NULL; } send_req = (MPI_Request *) malloc (fh->f_procs_per_group * sizeof(MPI_Request)); if (NULL == send_req){ opal_output ( 1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } global_buf = (char *) malloc (bytes_per_cycle); if (NULL == global_buf){ opal_output(1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } sendtype = (ompi_datatype_t **) malloc (fh->f_procs_per_group * sizeof(ompi_datatype_t *)); if (NULL == sendtype) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } for(l=0;l<fh->f_procs_per_group;l++){ sendtype[l] = MPI_DATATYPE_NULL; } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rexch = MPI_Wtime(); #endif n = 0; bytes_remaining = 0; current_index = 0; for (index = 0; index < cycles; index++) { /********************************************************************** *** 7a. 
Getting ready for next cycle: initializing and freeing buffers
        **********************************************************************/
        if (my_aggregator == fh->f_rank) {
            if (NULL != fh->f_io_array) {
                free (fh->f_io_array);
                fh->f_io_array = NULL;
            }
            fh->f_num_of_io_entries = 0;

            if (NULL != sendtype){
                for (i = 0; i < fh->f_procs_per_group; i++) {
                    if ( MPI_DATATYPE_NULL != sendtype[i] ) {
                        ompi_datatype_destroy(&sendtype[i]);
                        sendtype[i] = MPI_DATATYPE_NULL;
                    }
                }
            }

            for (l = 0; l < fh->f_procs_per_group; l++){
                disp_index[l] = 1;

                if (NULL != blocklen_per_process[l]){
                    free(blocklen_per_process[l]);
                    blocklen_per_process[l] = NULL;
                }
                if (NULL != displs_per_process[l]){
                    free(displs_per_process[l]);
                    displs_per_process[l] = NULL;
                }
                blocklen_per_process[l] = (int *) calloc (1, sizeof(int));
                if (NULL == blocklen_per_process[l]) {
                    opal_output (1, "OUT OF MEMORY for blocklen\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
                displs_per_process[l] = (MPI_Aint *) calloc (1, sizeof(MPI_Aint));
                if (NULL == displs_per_process[l]){
                    opal_output (1, "OUT OF MEMORY for displs\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
            }

            if (NULL != sorted_file_offsets){
                free(sorted_file_offsets);
                sorted_file_offsets = NULL;
            }
            if (NULL != file_offsets_for_agg){
                free(file_offsets_for_agg);
                file_offsets_for_agg = NULL;
            }
            if (NULL != memory_displacements){
                free(memory_displacements);
                memory_displacements = NULL;
            }
        } /* (my_aggregator == fh->f_rank) */

        /**************************************************************************
         *** 7b. Determine the number of bytes to be actually read in this cycle
         **************************************************************************/
        if (cycles-1 == index) {
            bytes_to_read_in_cycle = total_bytes - bytes_per_cycle*index;
        }
        else {
            bytes_to_read_in_cycle = bytes_per_cycle;
        }

#if DEBUG_ON
        if (my_aggregator == fh->f_rank) {
            printf ("****%d: CYCLE %d Bytes %ld**********\n",
                    fh->f_rank, index, (long) bytes_to_read_in_cycle);
        }
#endif

        /*****************************************************************
         *** 7c.
Calculate how much data will be contributed in this cycle *** by each process *****************************************************************/ bytes_received = 0; while (bytes_to_read_in_cycle) { /* This next block identifies which process is the holder ** of the sorted[current_index] element; */ blocks = fview_count[0]; for (j=0 ; j<fh->f_procs_per_group ; j++) { if (sorted[current_index] < blocks) { n = j; break; } else { blocks += fview_count[j+1]; } } if (bytes_remaining) { /* Finish up a partially used buffer from the previous cycle */ if (bytes_remaining <= bytes_to_read_in_cycle) { /* Data fits completely into the block */ if (my_aggregator == fh->f_rank) { blocklen_per_process[n][disp_index[n] - 1] = bytes_remaining; displs_per_process[n][disp_index[n] - 1] = (OPAL_PTRDIFF_TYPE)global_iov_array[sorted[current_index]].iov_base + (global_iov_array[sorted[current_index]].iov_len - bytes_remaining); blocklen_per_process[n] = (int *) realloc ((void *)blocklen_per_process[n], (disp_index[n]+1)*sizeof(int)); displs_per_process[n] = (MPI_Aint *) realloc ((void *)displs_per_process[n], (disp_index[n]+1)*sizeof(MPI_Aint)); blocklen_per_process[n][disp_index[n]] = 0; displs_per_process[n][disp_index[n]] = 0; disp_index[n] += 1; } if (fh->f_procs_in_group[n] == fh->f_rank) { bytes_received += bytes_remaining; } current_index ++; bytes_to_read_in_cycle -= bytes_remaining; bytes_remaining = 0; continue; } else { /* the remaining data from the previous cycle is larger than the bytes_to_write_in_cycle, so we have to segment again */ if (my_aggregator == fh->f_rank) { blocklen_per_process[n][disp_index[n] - 1] = bytes_to_read_in_cycle; displs_per_process[n][disp_index[n] - 1] = (OPAL_PTRDIFF_TYPE)global_iov_array[sorted[current_index]].iov_base + (global_iov_array[sorted[current_index]].iov_len - bytes_remaining); } if (fh->f_procs_in_group[n] == fh->f_rank) { bytes_received += bytes_to_read_in_cycle; } bytes_remaining -= bytes_to_read_in_cycle; bytes_to_read_in_cycle = 0; break; } } else { /* No partially used entry available, have to start a new one */ if (bytes_to_read_in_cycle < (MPI_Aint) global_iov_array[sorted[current_index]].iov_len) { /* This entry has more data than we can sendin one cycle */ if (my_aggregator == fh->f_rank) { blocklen_per_process[n][disp_index[n] - 1] = bytes_to_read_in_cycle; displs_per_process[n][disp_index[n] - 1] = (OPAL_PTRDIFF_TYPE)global_iov_array[sorted[current_index]].iov_base ; } if (fh->f_procs_in_group[n] == fh->f_rank) { bytes_received += bytes_to_read_in_cycle; } bytes_remaining = global_iov_array[sorted[current_index]].iov_len - bytes_to_read_in_cycle; bytes_to_read_in_cycle = 0; break; } else { /* Next data entry is less than bytes_to_write_in_cycle */ if (my_aggregator == fh->f_rank) { blocklen_per_process[n][disp_index[n] - 1] = global_iov_array[sorted[current_index]].iov_len; displs_per_process[n][disp_index[n] - 1] = (OPAL_PTRDIFF_TYPE) global_iov_array[sorted[current_index]].iov_base; blocklen_per_process[n] = (int *) realloc ((void *)blocklen_per_process[n], (disp_index[n]+1)*sizeof(int)); displs_per_process[n] = (MPI_Aint *)realloc ((void *)displs_per_process[n], (disp_index[n]+1)*sizeof(MPI_Aint)); blocklen_per_process[n][disp_index[n]] = 0; displs_per_process[n][disp_index[n]] = 0; disp_index[n] += 1; } if (fh->f_procs_in_group[n] == fh->f_rank) { bytes_received += global_iov_array[sorted[current_index]].iov_len; } bytes_to_read_in_cycle -= global_iov_array[sorted[current_index]].iov_len; current_index ++; continue; } } } /* end while 
(bytes_to_read_in_cycle) */

        /*************************************************************************
         *** 7d. Calculate the displacement on where to put the data and allocate
         ***     the receive buffer (global_buf)
         *************************************************************************/
        if (my_aggregator == fh->f_rank) {
            entries_per_aggregator = 0;
            for (i = 0; i < fh->f_procs_per_group; i++){
                for (j = 0; j < disp_index[i]; j++){
                    if (blocklen_per_process[i][j] > 0)
                        entries_per_aggregator++ ;
                }
            }
            if (entries_per_aggregator > 0){
                file_offsets_for_agg = (mca_io_ompio_local_io_array *)
                    malloc(entries_per_aggregator*sizeof(mca_io_ompio_local_io_array));
                if (NULL == file_offsets_for_agg) {
                    opal_output (1, "OUT OF MEMORY\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
                sorted_file_offsets = (int *)
                    malloc (entries_per_aggregator*sizeof(int));
                if (NULL == sorted_file_offsets){
                    opal_output (1, "OUT OF MEMORY\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
                /* Moving file offsets to an IO array! */
                temp_index = 0;
                global_count = 0;
                for (i = 0; i < fh->f_procs_per_group; i++){
                    for (j = 0; j < disp_index[i]; j++){
                        if (blocklen_per_process[i][j] > 0){
                            file_offsets_for_agg[temp_index].length =
                                blocklen_per_process[i][j];
                            global_count += blocklen_per_process[i][j];
                            file_offsets_for_agg[temp_index].process_id = i;
                            file_offsets_for_agg[temp_index].offset =
                                displs_per_process[i][j];
                            temp_index++;
                        }
                    }
                }
            }
            else{
                continue;
            }

            /* Sort the displacements for each aggregator */
            read_heap_sort (file_offsets_for_agg, entries_per_aggregator,
                            sorted_file_offsets);

            memory_displacements = (MPI_Aint *) malloc
                (entries_per_aggregator * sizeof(MPI_Aint));
            if (NULL == memory_displacements){
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }
            memory_displacements[sorted_file_offsets[0]] = 0;
            for (i = 1; i < entries_per_aggregator; i++){
                memory_displacements[sorted_file_offsets[i]] =
                    memory_displacements[sorted_file_offsets[i-1]] +
                    file_offsets_for_agg[sorted_file_offsets[i-1]].length;
            }

            /**********************************************************
             *** 7e.
Create the io array, and pass it to fbtl *********************************************************/ fh->f_io_array = (mca_io_ompio_io_array_t *) malloc (entries_per_aggregator * sizeof (mca_io_ompio_io_array_t)); if (NULL == fh->f_io_array) { opal_output(1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } fh->f_num_of_io_entries = 0; fh->f_io_array[0].offset = (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[0]].offset; fh->f_io_array[0].length = file_offsets_for_agg[sorted_file_offsets[0]].length; fh->f_io_array[0].memory_address = global_buf+memory_displacements[sorted_file_offsets[0]]; fh->f_num_of_io_entries++; for (i=1;i<entries_per_aggregator;i++){ if (file_offsets_for_agg[sorted_file_offsets[i-1]].offset + file_offsets_for_agg[sorted_file_offsets[i-1]].length == file_offsets_for_agg[sorted_file_offsets[i]].offset){ fh->f_io_array[fh->f_num_of_io_entries - 1].length += file_offsets_for_agg[sorted_file_offsets[i]].length; } else{ fh->f_io_array[fh->f_num_of_io_entries].offset = (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[i]].offset; fh->f_io_array[fh->f_num_of_io_entries].length = file_offsets_for_agg[sorted_file_offsets[i]].length; fh->f_io_array[fh->f_num_of_io_entries].memory_address = global_buf+memory_displacements[sorted_file_offsets[i]]; fh->f_num_of_io_entries++; } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_read_time = MPI_Wtime(); #endif if (fh->f_num_of_io_entries) { if ( 0 > fh->f_fbtl->fbtl_preadv (fh)) { opal_output (1, "READ FAILED\n"); ret = OMPI_ERROR; goto exit; } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_read_time = MPI_Wtime(); read_time += end_read_time - start_read_time; #endif /********************************************************** ******************** DONE READING ************************ *********************************************************/ temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int)); if (NULL == temp_disp_index) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } for (i=0; i<entries_per_aggregator; i++){ temp_index = file_offsets_for_agg[sorted_file_offsets[i]].process_id; displs_per_process[temp_index][temp_disp_index[temp_index]] = memory_displacements[sorted_file_offsets[i]]; if (temp_disp_index[temp_index] < disp_index[temp_index]){ temp_disp_index[temp_index] += 1; } else{ printf("temp_disp_index[%d]: %d is greater than disp_index[%d]: %d\n", temp_index, temp_disp_index[temp_index], temp_index, disp_index[temp_index]); } } if (NULL != temp_disp_index){ free(temp_disp_index); temp_disp_index = NULL; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rcomm_time = MPI_Wtime(); #endif for (i=0;i<fh->f_procs_per_group;i++){ send_req[i] = MPI_REQUEST_NULL; if ( 0 < disp_index[i] ) { ompi_datatype_create_hindexed(disp_index[i], blocklen_per_process[i], displs_per_process[i], MPI_BYTE, &sendtype[i]); ompi_datatype_commit(&sendtype[i]); ret = MCA_PML_CALL (isend(global_buf, 1, sendtype[i], fh->f_procs_in_group[i], 123, MCA_PML_BASE_SEND_STANDARD, fh->f_comm, &send_req[i])); if(OMPI_SUCCESS != ret){ goto exit; } } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif } /********************************************************** *** 7f. 
Scatter the Data from the readers *********************************************************/ if ( recvbuf_is_contiguous ) { receive_buf = &((char*)buf)[position]; } else if (bytes_received) { /* allocate a receive buffer and copy the data that needs to be received into it in case the data is non-contigous in memory */ receive_buf = malloc (bytes_received); if (NULL == receive_buf) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rcomm_time = MPI_Wtime(); #endif ret = MCA_PML_CALL(irecv(receive_buf, bytes_received, MPI_BYTE, my_aggregator, 123, fh->f_comm, &recv_req)); if (OMPI_SUCCESS != ret){ goto exit; } if (my_aggregator == fh->f_rank){ ret = ompi_request_wait_all (fh->f_procs_per_group, send_req, MPI_STATUS_IGNORE); if (OMPI_SUCCESS != ret){ goto exit; } } ret = ompi_request_wait (&recv_req, MPI_STATUS_IGNORE); if (OMPI_SUCCESS != ret){ goto exit; } position += bytes_received; /* If data is not contigous in memory, copy the data from the receive buffer into the buffer passed in */ if (!recvbuf_is_contiguous ) { OPAL_PTRDIFF_TYPE mem_address; size_t remaining = 0; size_t temp_position = 0; remaining = bytes_received; while (remaining) { mem_address = (OPAL_PTRDIFF_TYPE) (decoded_iov[iov_index].iov_base) + current_position; if (remaining >= (decoded_iov[iov_index].iov_len - current_position)) { memcpy ((IOVBASE_TYPE *) mem_address, receive_buf+temp_position, decoded_iov[iov_index].iov_len - current_position); remaining = remaining - (decoded_iov[iov_index].iov_len - current_position); temp_position = temp_position + (decoded_iov[iov_index].iov_len - current_position); iov_index = iov_index + 1; current_position = 0; } else { memcpy ((IOVBASE_TYPE *) mem_address, receive_buf+temp_position, remaining); current_position = current_position + remaining; remaining = 0; } } if (NULL != receive_buf) { free (receive_buf); receive_buf = NULL; } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif } /* end for (index=0; index < cycles; index ++) */ #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rexch = MPI_Wtime(); read_exch += end_rexch - start_rexch; nentry.time[0] = read_time; nentry.time[1] = rcomm_time; nentry.time[2] = read_exch; if (my_aggregator == fh->f_rank) nentry.aggregator = 1; else nentry.aggregator = 0; nentry.nprocs_for_coll = dynamic_gen2_num_io_procs; if (!fh->f_full_print_queue(READ_PRINT_QUEUE)){ fh->f_register_print_entry(READ_PRINT_QUEUE, nentry); } #endif exit: if (!recvbuf_is_contiguous) { if (NULL != receive_buf) { free (receive_buf); receive_buf = NULL; } } if (NULL != global_buf) { free (global_buf); global_buf = NULL; } if (NULL != sorted) { free (sorted); sorted = NULL; } if (NULL != global_iov_array) { free (global_iov_array); global_iov_array = NULL; } if (NULL != fview_count) { free (fview_count); fview_count = NULL; } if (NULL != decoded_iov) { free (decoded_iov); decoded_iov = NULL; } if (NULL != local_iov_array){ free(local_iov_array); local_iov_array=NULL; } if (NULL != displs) { free (displs); displs = NULL; } if (my_aggregator == fh->f_rank) { if (NULL != sorted_file_offsets){ free(sorted_file_offsets); sorted_file_offsets = NULL; } if (NULL != file_offsets_for_agg){ free(file_offsets_for_agg); file_offsets_for_agg = NULL; } if (NULL != memory_displacements){ free(memory_displacements); memory_displacements= NULL; } if (NULL != sendtype){ for (i = 0; i < fh->f_procs_per_group; i++) { if ( MPI_DATATYPE_NULL != sendtype[i] 
) {
                    ompi_datatype_destroy(&sendtype[i]);
                }
            }
            free(sendtype);
            sendtype = NULL;
        }
        if (NULL != disp_index){
            free(disp_index);
            disp_index = NULL;
        }
        if (NULL != blocklen_per_process){
            for (l = 0; l < fh->f_procs_per_group; l++){
                if (NULL != blocklen_per_process[l]){
                    free(blocklen_per_process[l]);
                    blocklen_per_process[l] = NULL;
                }
            }
            free(blocklen_per_process);
            blocklen_per_process = NULL;
        }
        if (NULL != displs_per_process){
            for (l = 0; l < fh->f_procs_per_group; l++){
                if (NULL != displs_per_process[l]){
                    free(displs_per_process[l]);
                    displs_per_process[l] = NULL;
                }
            }
            free(displs_per_process);
            displs_per_process = NULL;
        }
        if ( NULL != send_req ) {
            free ( send_req );
            send_req = NULL;
        }
    }

    return ret;
}
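
/*
 * Illustrative sketch only, not part of this component: in step 7e above the
 * aggregator walks the offset-sorted chunks and coalesces any chunk whose file
 * offset starts exactly where the previous one ends into a single f_io_array
 * entry before calling fbtl_preadv. The stand-alone helper below shows the same
 * merging rule on plain offset/length pairs; the struct and function names are
 * hypothetical and exist only for illustration.
 */
struct example_chunk {
    long   offset;   /* file offset of the chunk                  */
    size_t length;   /* number of contiguous bytes starting there */
};

/* Merge adjacent chunks in place; 'chunks' must already be sorted by offset.
 * Returns the number of coalesced entries. */
static inline int example_coalesce_chunks (struct example_chunk *chunks, int count)
{
    int i, out = 0;
    if (0 == count) {
        return 0;
    }
    for (i = 1; i < count; i++) {
        if (chunks[out].offset + (long) chunks[out].length == chunks[i].offset) {
            /* contiguous in the file: extend the previous entry */
            chunks[out].length += chunks[i].length;
        }
        else {
            /* gap in the file: start a new entry */
            chunks[++out] = chunks[i];
        }
    }
    return out + 1;
}
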
int mca_io_ompio_prepare_to_group(mca_io_ompio_file_t *fh, OMPI_MPI_OFFSET_TYPE **start_offsets_lens, OMPI_MPI_OFFSET_TYPE **end_offsets, // need it? OMPI_MPI_OFFSET_TYPE **aggr_bytes_per_group, OMPI_MPI_OFFSET_TYPE *bytes_per_group, int **decision_list, size_t bytes_per_proc, int *is_aggregator, int *ompio_grouping_flag) { OMPI_MPI_OFFSET_TYPE start_offset_len[3] = {0}; OMPI_MPI_OFFSET_TYPE *aggr_bytes_per_group_tmp = NULL; OMPI_MPI_OFFSET_TYPE *start_offsets_lens_tmp = NULL; OMPI_MPI_OFFSET_TYPE *end_offsets_tmp = NULL; int *decision_list_tmp = NULL; int i = 0; int j = 0; int k = 0; int merge_count = 0; int split_count = 0; //not req? int retain_as_is_count = 0; //not req? int ret=OMPI_SUCCESS; //Store start offset and length in an array //also add bytes per process if(NULL == fh->f_decoded_iov){ start_offset_len[0] = 0; start_offset_len[1] = 0; } else{ start_offset_len[0] = (OMPI_MPI_OFFSET_TYPE) fh->f_decoded_iov[0].iov_base; start_offset_len[1] = fh->f_decoded_iov[0].iov_len; } start_offset_len[2] = bytes_per_proc; start_offsets_lens_tmp = (OMPI_MPI_OFFSET_TYPE* )malloc (3 * fh->f_init_procs_per_group * sizeof(OMPI_MPI_OFFSET_TYPE)); if (NULL == start_offsets_lens_tmp) { opal_output (1, "OUT OF MEMORY\n"); return OMPI_ERR_OUT_OF_RESOURCE; } end_offsets_tmp = (OMPI_MPI_OFFSET_TYPE* )malloc (fh->f_init_procs_per_group * sizeof(OMPI_MPI_OFFSET_TYPE)); if (NULL == end_offsets_tmp) { opal_output (1, "OUT OF MEMORY\n"); free(start_offsets_lens_tmp); return OMPI_ERR_OUT_OF_RESOURCE; } //Gather start offsets across processes in a group on aggregator ret = fcoll_base_coll_allgather_array (start_offset_len, 3, OMPI_OFFSET_DATATYPE, start_offsets_lens_tmp, 3, OMPI_OFFSET_DATATYPE, 0, fh->f_init_procs_in_group, fh->f_init_procs_per_group, fh->f_comm); if ( OMPI_SUCCESS != ret ) { opal_output (1, "mca_io_ompio_prepare_to_grou[: error in fcoll_base_coll_allgather_array\n"); return ret; } for( k = 0 ; k < fh->f_init_procs_per_group; k++){ end_offsets_tmp[k] = start_offsets_lens_tmp[3*k] + start_offsets_lens_tmp[3*k+1]; } //Every process has the total bytes written in its group for(j = 0; j < fh->f_init_procs_per_group; j++){ *bytes_per_group = *bytes_per_group + start_offsets_lens_tmp[3*j+2]; } *start_offsets_lens = &start_offsets_lens_tmp[0]; *end_offsets = &end_offsets_tmp[0]; for( j = 0 ; j < fh->f_init_num_aggrs ; j++){ if(fh->f_rank == fh->f_init_aggr_list[j]) *is_aggregator = 1; } //Decide groups going in for a merge or a split //Merge only if the groups are consecutive if(*is_aggregator == 1){ aggr_bytes_per_group_tmp = (OMPI_MPI_OFFSET_TYPE*)malloc (fh->f_init_num_aggrs * sizeof(OMPI_MPI_OFFSET_TYPE)); if (NULL == aggr_bytes_per_group_tmp) { opal_output (1, "OUT OF MEMORY\n"); return OMPI_ERR_OUT_OF_RESOURCE; } decision_list_tmp = (int* )malloc (fh->f_init_num_aggrs * sizeof(int)); if (NULL == decision_list_tmp) { opal_output (1, "OUT OF MEMORY\n"); free(aggr_bytes_per_group_tmp); free(start_offsets_lens_tmp); free(end_offsets_tmp); return OMPI_ERR_OUT_OF_RESOURCE; } //Communicate bytes per group between all aggregators ret = fcoll_base_coll_allgather_array (bytes_per_group, 1, OMPI_OFFSET_DATATYPE, aggr_bytes_per_group_tmp, 1, OMPI_OFFSET_DATATYPE, 0, fh->f_init_aggr_list, fh->f_init_num_aggrs, fh->f_comm); if ( OMPI_SUCCESS != ret ) { opal_output (1, "mca_io_ompio_prepare_to_grou[: error in fcoll_base_coll_allgather_array 2\n"); return ret; } for( i = 0; i < fh->f_init_num_aggrs; i++){ if((size_t)(aggr_bytes_per_group_tmp[i])> (size_t)mca_io_ompio_bytes_per_agg){ decision_list_tmp[i] = 
OMPIO_SPLIT; split_count++; } else if((size_t)(aggr_bytes_per_group_tmp[i])< (size_t)mca_io_ompio_bytes_per_agg){ decision_list_tmp[i] = OMPIO_MERGE; merge_count++; } else{ decision_list_tmp[i] = OMPIO_RETAIN; retain_as_is_count++; } } *aggr_bytes_per_group = &aggr_bytes_per_group_tmp[0]; //Go through the decision list to see if non consecutive //processes intend to merge, if yes retain original grouping for( i = 0; i < fh->f_init_num_aggrs ; i++){ if(decision_list_tmp[i] == OMPIO_MERGE){ if( (i == 0) && (decision_list_tmp[i+1] != OMPIO_MERGE)){ //first group decision_list_tmp[i] = OMPIO_RETAIN; } else if( (i == fh->f_init_num_aggrs-1) && (decision_list_tmp[i-1] != OMPIO_MERGE)){ decision_list_tmp[i] = OMPIO_RETAIN; } else if(!((decision_list_tmp[i-1] == OMPIO_MERGE) || (decision_list_tmp[i+1] == OMPIO_MERGE))){ decision_list_tmp[i] = OMPIO_RETAIN; } } } //Set the flag as per the decision list for( i = 0 ; i < fh->f_init_num_aggrs; i++){ if((decision_list_tmp[i] == OMPIO_MERGE)&& (fh->f_rank == fh->f_init_aggr_list[i])) *ompio_grouping_flag = OMPIO_MERGE; if((decision_list_tmp[i] == OMPIO_SPLIT)&& (fh->f_rank == fh->f_init_aggr_list[i])) *ompio_grouping_flag = OMPIO_SPLIT; if((decision_list_tmp[i] == OMPIO_RETAIN)&& (fh->f_rank == fh->f_init_aggr_list[i])) *ompio_grouping_flag = OMPIO_RETAIN; } //print decision list of aggregators /*printf("RANK%d : Printing decsion list : \n",fh->f_rank); for( i = 0; i < fh->f_init_num_aggrs; i++){ if(decision_list_tmp[i] == OMPIO_MERGE) printf("MERGE,"); else if(decision_list_tmp[i] == OMPIO_SPLIT) printf("SPLIT, "); else if(decision_list_tmp[i] == OMPIO_RETAIN) printf("RETAIN, " ); } printf("\n\n"); */ *decision_list = &decision_list_tmp[0]; } //Communicate flag to all group members ret = fcoll_base_coll_bcast_array (ompio_grouping_flag, 1, MPI_INT, 0, fh->f_init_procs_in_group, fh->f_init_procs_per_group, fh->f_comm); return ret; }
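
/*
 * Illustrative sketch only, not part of this component: the decision pass in
 * mca_io_ompio_prepare_to_group above classifies every aggregator group by
 * comparing its aggregated byte count against the bytes-per-aggregator limit
 * (larger -> split, smaller -> merge, equal -> retain) and then downgrades any
 * MERGE decision that has no merging neighbour to RETAIN, so that only
 * consecutive groups are merged. The stand-alone helper below mirrors that rule
 * on a plain array; the enum and function names are hypothetical.
 */
enum example_decision { EXAMPLE_MERGE, EXAMPLE_SPLIT, EXAMPLE_RETAIN };

static inline void example_classify_groups (const long *bytes_per_group, int ngroups,
                                            long bytes_per_agg,
                                            enum example_decision *decision)
{
    int i;
    /* first pass: size-based classification of each group */
    for (i = 0; i < ngroups; i++) {
        if (bytes_per_group[i] > bytes_per_agg) {
            decision[i] = EXAMPLE_SPLIT;
        }
        else if (bytes_per_group[i] < bytes_per_agg) {
            decision[i] = EXAMPLE_MERGE;
        }
        else {
            decision[i] = EXAMPLE_RETAIN;
        }
    }
    /* second pass: a group keeps MERGE only if a direct neighbour also merges */
    for (i = 0; i < ngroups; i++) {
        if (EXAMPLE_MERGE == decision[i]) {
            int prev_merges = (i > 0)           && (EXAMPLE_MERGE == decision[i-1]);
            int next_merges = (i < ngroups - 1) && (EXAMPLE_MERGE == decision[i+1]);
            if (!prev_merges && !next_merges) {
                decision[i] = EXAMPLE_RETAIN;
            }
        }
    }
}
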
int mca_io_ompio_merge_groups(mca_io_ompio_file_t *fh, int *merge_aggrs, int num_merge_aggrs) { int i = 0; int *sizes_old_group; int ret = OMPI_SUCCESS; int *displs; sizes_old_group = (int*)malloc(num_merge_aggrs * sizeof(int)); if (NULL == sizes_old_group) { opal_output (1, "OUT OF MEMORY\n"); return OMPI_ERR_OUT_OF_RESOURCE; } displs = (int*)malloc(num_merge_aggrs * sizeof(int)); if (NULL == displs) { opal_output (1, "OUT OF MEMORY\n"); free(sizes_old_group); return OMPI_ERR_OUT_OF_RESOURCE; } //merge_aggrs[0] is considered the new aggregator //New aggregator collects group sizes of the groups to be merged ret = fcoll_base_coll_allgather_array (&fh->f_init_procs_per_group, 1, MPI_INT, sizes_old_group, 1, MPI_INT, 0, merge_aggrs, num_merge_aggrs, fh->f_comm); if ( OMPI_SUCCESS != ret ) { free (displs); free (sizes_old_group); return ret; } fh->f_procs_per_group = 0; for( i = 0; i < num_merge_aggrs; i++){ fh->f_procs_per_group = fh->f_procs_per_group + sizes_old_group[i]; } displs[0] = 0; for(i = 1; i < num_merge_aggrs; i++){ displs[i] = displs[i-1] + sizes_old_group[i-1]; } fh->f_procs_in_group = (int*)malloc (fh->f_procs_per_group * sizeof(int)); if (NULL == fh->f_procs_in_group) { opal_output (1, "OUT OF MEMORY\n"); free(sizes_old_group); free(displs); return OMPI_ERR_OUT_OF_RESOURCE; } //New aggregator also collects the grouping distribution //This is the actual merge //use allgatherv array ret = fcoll_base_coll_allgatherv_array (fh->f_init_procs_in_group, fh->f_init_procs_per_group, MPI_INT, fh->f_procs_in_group, sizes_old_group, displs, MPI_INT, 0, merge_aggrs, num_merge_aggrs, fh->f_comm); free (displs); free (sizes_old_group); return ret; }
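
/*
 * Illustrative sketch only, not part of this component: mca_io_ompio_merge_groups
 * above, like the file-view gathers in the read paths, converts per-process
 * counts into receive displacements with an exclusive prefix sum before calling
 * the allgatherv routine. The hypothetical helper below shows just that
 * computation and returns the total number of gathered elements.
 */
static inline int example_counts_to_displs (const int *counts, int nprocs, int *displs)
{
    int i, total = 0;
    for (i = 0; i < nprocs; i++) {
        displs[i] = total;      /* where process i's contribution starts */
        total    += counts[i];  /* running sum of all contributions      */
    }
    return total;               /* size required for the gathered array  */
}
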
int mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh, void *buf, int count, struct ompi_datatype_t *datatype, ompi_status_public_t *status) { int ret = OMPI_SUCCESS, iov_size=0, *bytes_remaining=NULL; int i, j, l,cycles=0, local_cycles=0, *current_index=NULL; int index, *disp_index=NULL, *bytes_per_process=NULL, current_position=0; int **blocklen_per_process=NULL, *iovec_count_per_process=NULL; int *displs=NULL, *sorted=NULL ,entries_per_aggregator=0; int *sorted_file_offsets=NULL, temp_index=0, position=0, *temp_disp_index=NULL; MPI_Aint **displs_per_process=NULL, global_iov_count=0, global_count=0; MPI_Aint *memory_displacements=NULL; int bytes_to_read_in_cycle=0; size_t max_data=0, bytes_per_cycle=0; uint32_t iov_count=0, iov_index=0; struct iovec *decoded_iov=NULL, *iov=NULL; mca_fcoll_static_local_io_array *local_iov_array=NULL, *global_iov_array=NULL; mca_fcoll_static_local_io_array *file_offsets_for_agg=NULL; char *global_buf=NULL, *receive_buf=NULL; int blocklen[3] = {1, 1, 1}; int static_num_io_procs=1; OPAL_PTRDIFF_TYPE d[3], base; ompi_datatype_t *types[3]; ompi_datatype_t *io_array_type=MPI_DATATYPE_NULL; ompi_datatype_t **sendtype = NULL; MPI_Request *send_req=NULL, recv_req=NULL; int my_aggregator=-1; bool recvbuf_is_contiguous=false; size_t ftype_size; OPAL_PTRDIFF_TYPE ftype_extent, lb; #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0; double rcomm_time = 0.0, start_rcomm_time = 0.0, end_rcomm_time = 0.0; double read_exch = 0.0, start_rexch = 0.0, end_rexch = 0.0; mca_common_ompio_print_entry nentry; #endif #if DEBUG_ON MPI_Aint gc_in; #endif opal_datatype_type_size ( &datatype->super, &ftype_size ); opal_datatype_get_extent ( &datatype->super, &lb, &ftype_extent ); /************************************************************************** ** 1. 
In case the data is not contigous in memory, decode it into an iovec **************************************************************************/ if ( ( ftype_extent == (OPAL_PTRDIFF_TYPE) ftype_size) && opal_datatype_is_contiguous_memory_layout(&datatype->super,1) && 0 == lb ) { recvbuf_is_contiguous = true; } /* In case the data is not contigous in memory, decode it into an iovec */ if (!recvbuf_is_contiguous ) { fh->f_decode_datatype ( (struct mca_io_ompio_file_t *)fh, datatype, count, buf, &max_data, &decoded_iov, &iov_count); } else { max_data = count * datatype->super.size; } if ( MPI_STATUS_IGNORE != status ) { status->_ucount = max_data; } fh->f_get_num_aggregators ( &static_num_io_procs ); fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh, static_num_io_procs, max_data); my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index]; /* printf("max_data %ld\n", max_data); */ ret = fh->f_generate_current_file_view((struct mca_io_ompio_file_t *)fh, max_data, &iov, &iov_size); if (ret != OMPI_SUCCESS){ goto exit; } if ( iov_size > 0 ) { local_iov_array = (mca_fcoll_static_local_io_array *)malloc (iov_size * sizeof(mca_fcoll_static_local_io_array)); if ( NULL == local_iov_array){ ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } for (j=0; j < iov_size; j++){ local_iov_array[j].offset = (OMPI_MPI_OFFSET_TYPE)(intptr_t) iov[j].iov_base; local_iov_array[j].length = (size_t)iov[j].iov_len; local_iov_array[j].process_id = fh->f_rank; } } else { /* Allocate at least one element to correctly create the derived data type */ local_iov_array = (mca_fcoll_static_local_io_array *)malloc (sizeof(mca_fcoll_static_local_io_array)); if ( NULL == local_iov_array){ ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } local_iov_array[0].offset = (OMPI_MPI_OFFSET_TYPE)(intptr_t) 0; local_iov_array[0].length = (size_t) 0; local_iov_array[0].process_id = fh->f_rank; } d[0] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0]; d[1] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].length; d[2] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].process_id; base = d[0]; for (i=0 ; i<3 ; i++) { d[i] -= base; } /* io_array datatype for using in communication*/ types[0] = &ompi_mpi_long.dt; types[1] = &ompi_mpi_long.dt; types[2] = &ompi_mpi_int.dt; ompi_datatype_create_struct (3, blocklen, d, types, &io_array_type); ompi_datatype_commit (&io_array_type); /* #########################################################*/ fh->f_get_bytes_per_agg ( (int*) &bytes_per_cycle); local_cycles = ceil((double)max_data*fh->f_procs_per_group/bytes_per_cycle); #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rexch = MPI_Wtime(); #endif ret = fh->f_comm->c_coll.coll_allreduce (&local_cycles, &cycles, 1, MPI_INT, MPI_MAX, fh->f_comm, fh->f_comm->c_coll.coll_allreduce_module); if (OMPI_SUCCESS != ret){ goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif if (my_aggregator == fh->f_rank) { disp_index = (int *) malloc (fh->f_procs_per_group * sizeof(int)); if (NULL == disp_index) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } bytes_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int )); if (NULL == bytes_per_process){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } bytes_remaining = (int *) calloc (fh->f_procs_per_group, sizeof(int)); if (NULL == bytes_remaining){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } current_index = (int *) calloc (fh->f_procs_per_group, sizeof(int)); if 
(NULL == current_index){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } blocklen_per_process = (int **)calloc (fh->f_procs_per_group, sizeof (int*)); if (NULL == blocklen_per_process) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } displs_per_process = (MPI_Aint **)calloc (fh->f_procs_per_group, sizeof (MPI_Aint*)); if (NULL == displs_per_process) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } } iovec_count_per_process = (int *) calloc (fh->f_procs_per_group, sizeof(int)); if (NULL == iovec_count_per_process){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } displs = (int *) calloc (fh->f_procs_per_group, sizeof(int)); if (NULL == displs){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rexch = MPI_Wtime(); #endif ret = fcoll_base_coll_allgather_array (&iov_size, 1, MPI_INT, iovec_count_per_process, 1, MPI_INT, fh->f_aggregator_index, fh->f_procs_in_group, fh->f_procs_per_group, fh->f_comm); if( OMPI_SUCCESS != ret){ goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif if (my_aggregator == fh->f_rank) { displs[0] = 0; global_iov_count = iovec_count_per_process[0]; for (i=1 ; i<fh->f_procs_per_group ; i++) { global_iov_count += iovec_count_per_process[i]; displs[i] = displs[i-1] + iovec_count_per_process[i-1]; } } if ( (my_aggregator == fh->f_rank) && (global_iov_count > 0 )) { global_iov_array = (mca_fcoll_static_local_io_array *) malloc (global_iov_count * sizeof(mca_fcoll_static_local_io_array)); if (NULL == global_iov_array){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rexch = MPI_Wtime(); #endif ret = fcoll_base_coll_gatherv_array (local_iov_array, iov_size, io_array_type, global_iov_array, iovec_count_per_process, displs, io_array_type, fh->f_aggregator_index, fh->f_procs_in_group, fh->f_procs_per_group, fh->f_comm); if (OMPI_SUCCESS != ret){ fprintf(stderr,"global_iov_array gather error!\n"); goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif if (NULL != local_iov_array){ free(local_iov_array); local_iov_array = NULL; } if ( ( my_aggregator == fh->f_rank) && ( global_iov_count > 0 )) { sorted = (int *)malloc (global_iov_count * sizeof(int)); if (NULL == sorted) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } read_local_heap_sort (global_iov_array, global_iov_count, sorted); send_req = (MPI_Request *) malloc (fh->f_procs_per_group * sizeof(MPI_Request)); if (NULL == send_req){ opal_output ( 1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } sendtype = (ompi_datatype_t **) malloc (fh->f_procs_per_group * sizeof(ompi_datatype_t *)); if (NULL == sendtype) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } for ( i=0; i<fh->f_procs_per_group; i++ ) { sendtype[i] = MPI_DATATYPE_NULL; } if (NULL == bytes_per_process){ bytes_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int)); if (NULL == bytes_per_process){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } } } #if DEBUG_ON if (my_aggregator == fh->f_rank) { for (gc_in=0; gc_in<global_iov_count; gc_in++){ printf("%d: Offset[%ld]: %lld, Length[%ld]: %ld\n", 
global_iov_array[sorted[gc_in]].process_id, gc_in, global_iov_array[sorted[gc_in]].offset, gc_in, global_iov_array[sorted[gc_in]].length); } } #endif #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rexch = MPI_Wtime(); #endif for (index = 0; index < cycles; index++){ if (my_aggregator == fh->f_rank) { fh->f_num_of_io_entries = 0; if (NULL != fh->f_io_array) { free (fh->f_io_array); fh->f_io_array = NULL; } if (NULL != global_buf) { free (global_buf); global_buf = NULL; } if (NULL != sorted_file_offsets){ free(sorted_file_offsets); sorted_file_offsets = NULL; } if (NULL != file_offsets_for_agg){ free(file_offsets_for_agg); file_offsets_for_agg = NULL; } if (NULL != memory_displacements){ free(memory_displacements); memory_displacements= NULL; } if ( NULL != sendtype ) { for ( i=0; i<fh->f_procs_per_group; i++ ) { if ( MPI_DATATYPE_NULL != sendtype[i] ) { ompi_datatype_destroy (&sendtype[i] ); sendtype[i] = MPI_DATATYPE_NULL; } } } for(l=0;l<fh->f_procs_per_group;l++){ disp_index[l] = 1; if (NULL != blocklen_per_process[l]){ free(blocklen_per_process[l]); blocklen_per_process[l] = NULL; } if (NULL != displs_per_process[l]){ free(displs_per_process[l]); displs_per_process[l] = NULL; } blocklen_per_process[l] = (int *) calloc (1, sizeof(int)); if (NULL == blocklen_per_process[l]) { opal_output (1, "OUT OF MEMORY for blocklen\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } displs_per_process[l] = (MPI_Aint *) calloc (1, sizeof(MPI_Aint)); if (NULL == displs_per_process[l]){ opal_output (1, "OUT OF MEMORY for displs\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } } } if (index < local_cycles ) { if ((index == local_cycles-1) && (max_data % (bytes_per_cycle/fh->f_procs_per_group))) { bytes_to_read_in_cycle = max_data - position; } else if (max_data <= bytes_per_cycle/fh->f_procs_per_group) { bytes_to_read_in_cycle = max_data; } else { bytes_to_read_in_cycle = bytes_per_cycle/fh->f_procs_per_group; } } else { bytes_to_read_in_cycle = 0; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rexch = MPI_Wtime(); #endif fcoll_base_coll_gather_array (&bytes_to_read_in_cycle, 1, MPI_INT, bytes_per_process, 1, MPI_INT, fh->f_aggregator_index, fh->f_procs_in_group, fh->f_procs_per_group, fh->f_comm); #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif if (recvbuf_is_contiguous ) { receive_buf = &((char*)buf)[position]; } else if (bytes_to_read_in_cycle) { receive_buf = (char *) malloc (bytes_to_read_in_cycle * sizeof(char)); if ( NULL == receive_buf){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rcomm_time = MPI_Wtime(); #endif ret = MCA_PML_CALL(irecv(receive_buf, bytes_to_read_in_cycle, MPI_BYTE, my_aggregator, 123, fh->f_comm, &recv_req)); if (OMPI_SUCCESS != ret){ goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif if (my_aggregator == fh->f_rank) { for (i=0;i<fh->f_procs_per_group; i++){ while (bytes_per_process[i] > 0){ /*printf("%d: bytes_per_process[%d]: %d, bytes_remaining[%d]: %d\n", index, i, bytes_per_process[i], i, bytes_remaining[i]);*/ if (read_get_process_id(global_iov_array[sorted[current_index[i]]].process_id, fh) == i){ /* current id owns this entry!*/ if (bytes_remaining[i]){ /*Remaining bytes in the current entry of the global offset array*/ if (bytes_remaining[i] <= bytes_per_process[i]){ blocklen_per_process[i][disp_index[i] - 1] = bytes_remaining[i]; 
displs_per_process[i][disp_index[i] - 1] = global_iov_array[sorted[current_index[i]]].offset + (global_iov_array[sorted[current_index[i]]].length - bytes_remaining[i]); blocklen_per_process[i] = (int *) realloc ((void *)blocklen_per_process[i], (disp_index[i]+1)*sizeof(int)); displs_per_process[i] = (MPI_Aint *)realloc ((void *)displs_per_process[i], (disp_index[i]+1)*sizeof(MPI_Aint)); bytes_per_process[i] -= bytes_remaining[i]; blocklen_per_process[i][disp_index[i]] = 0; displs_per_process[i][disp_index[i]] = 0; disp_index[i] += 1; bytes_remaining[i] = 0; /* This entry has been used up, we need to move to the next entry of this process and make current_index point there*/ current_index[i] = read_find_next_index(i, current_index[i], fh, global_iov_array, global_iov_count, sorted); if (current_index[i] == -1){ break; } continue; } else{ blocklen_per_process[i][disp_index[i] - 1] = bytes_per_process[i]; displs_per_process[i][disp_index[i] - 1] = global_iov_array[sorted[current_index[i]]].offset + (global_iov_array[sorted[current_index[i]]].length - bytes_remaining[i]); bytes_remaining[i] -= bytes_per_process[i]; bytes_per_process[i] = 0; break; } } else{ if (bytes_per_process[i] < global_iov_array[sorted[current_index[i]]].length){ blocklen_per_process[i][disp_index[i] - 1] = bytes_per_process[i]; displs_per_process[i][disp_index[i] - 1] = global_iov_array[sorted[current_index[i]]].offset; bytes_remaining[i] = global_iov_array[sorted[current_index[i]]].length - bytes_per_process[i]; bytes_per_process[i] = 0; break; } else { blocklen_per_process[i][disp_index[i] - 1] = global_iov_array[sorted[current_index[i]]].length; displs_per_process[i][disp_index[i] - 1] = global_iov_array[sorted[current_index[i]]].offset; blocklen_per_process[i] = (int *) realloc ((void *)blocklen_per_process[i], (disp_index[i]+1)*sizeof(int)); displs_per_process[i] = (MPI_Aint *)realloc ((void *)displs_per_process[i], (disp_index[i]+1)*sizeof(MPI_Aint)); blocklen_per_process[i][disp_index[i]] = 0; displs_per_process[i][disp_index[i]] = 0; disp_index[i] += 1; bytes_per_process[i] -= global_iov_array[sorted[current_index[i]]].length; current_index[i] = read_find_next_index(i, current_index[i], fh, global_iov_array, global_iov_count, sorted); if (current_index[i] == -1){ break; } } } } else{ current_index[i] = read_find_next_index(i, current_index[i], fh, global_iov_array, global_iov_count, sorted); if (current_index[i] == -1){ bytes_per_process[i] = 0; /* no more entries left to service this request*/ continue; } } } } entries_per_aggregator=0; for (i=0;i<fh->f_procs_per_group;i++){ for (j=0;j<disp_index[i];j++){ if (blocklen_per_process[i][j] > 0){ entries_per_aggregator++; #if DEBUG_ON printf("%d sends blocklen[%d]: %d, disp[%d]: %ld to %d\n", fh->f_procs_in_group[i],j, blocklen_per_process[i][j],j, displs_per_process[i][j], fh->f_rank); #endif } } } if (entries_per_aggregator > 0){ file_offsets_for_agg = (mca_fcoll_static_local_io_array *) malloc(entries_per_aggregator*sizeof(mca_fcoll_static_local_io_array)); if (NULL == file_offsets_for_agg) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } sorted_file_offsets = (int *) malloc (entries_per_aggregator * sizeof(int)); if (NULL == sorted_file_offsets){ opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } temp_index=0; global_count = 0; for (i=0;i<fh->f_procs_per_group; i++){ for(j=0;j<disp_index[i]; j++){ if (blocklen_per_process[i][j] > 0){ file_offsets_for_agg[temp_index].length = 
blocklen_per_process[i][j]; global_count += blocklen_per_process[i][j]; file_offsets_for_agg[temp_index].process_id = i; file_offsets_for_agg[temp_index].offset = displs_per_process[i][j]; temp_index++; } } } } else{ continue; } read_local_heap_sort (file_offsets_for_agg, entries_per_aggregator, sorted_file_offsets); memory_displacements = (MPI_Aint *) malloc (entries_per_aggregator * sizeof(MPI_Aint)); memory_displacements[sorted_file_offsets[0]] = 0; for (i=1; i<entries_per_aggregator; i++){ memory_displacements[sorted_file_offsets[i]] = memory_displacements[sorted_file_offsets[i-1]] + file_offsets_for_agg[sorted_file_offsets[i-1]].length; } global_buf = (char *) malloc (global_count * sizeof(char)); if (NULL == global_buf){ opal_output(1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } #if DEBUG_ON printf("************Cycle: %d, Aggregator: %d ***************\n", index+1,fh->f_rank); for (i=0; i<entries_per_aggregator;i++){ printf("%d: OFFSET: %lld LENGTH: %ld, Mem-offset: %ld, disp_index :%d\n", file_offsets_for_agg[sorted_file_offsets[i]].process_id, file_offsets_for_agg[sorted_file_offsets[i]].offset, file_offsets_for_agg[sorted_file_offsets[i]].length, memory_displacements[sorted_file_offsets[i]], disp_index[i]); } #endif fh->f_io_array = (mca_io_ompio_io_array_t *) malloc (entries_per_aggregator * sizeof (mca_io_ompio_io_array_t)); if (NULL == fh->f_io_array) { opal_output(1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto exit; } fh->f_num_of_io_entries = 0; fh->f_io_array[0].offset = (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[0]].offset; fh->f_io_array[0].length = file_offsets_for_agg[sorted_file_offsets[0]].length; fh->f_io_array[0].memory_address = global_buf+memory_displacements[sorted_file_offsets[0]]; fh->f_num_of_io_entries++; for (i=1;i<entries_per_aggregator;i++){ if (file_offsets_for_agg[sorted_file_offsets[i-1]].offset + file_offsets_for_agg[sorted_file_offsets[i-1]].length == file_offsets_for_agg[sorted_file_offsets[i]].offset){ fh->f_io_array[fh->f_num_of_io_entries - 1].length += file_offsets_for_agg[sorted_file_offsets[i]].length; } else{ fh->f_io_array[fh->f_num_of_io_entries].offset = (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[i]].offset; fh->f_io_array[fh->f_num_of_io_entries].length = file_offsets_for_agg[sorted_file_offsets[i]].length; fh->f_io_array[fh->f_num_of_io_entries].memory_address = global_buf+memory_displacements[sorted_file_offsets[i]]; fh->f_num_of_io_entries++; } } #if DEBUG_ON printf("*************************** %d\n", fh->f_num_of_io_entries); for (i=0 ; i<fh->f_num_of_io_entries ; i++) { printf(" ADDRESS: %p OFFSET: %ld LENGTH: %ld\n", fh->f_io_array[i].memory_address, (OPAL_PTRDIFF_TYPE)fh->f_io_array[i].offset, fh->f_io_array[i].length); } #endif #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_read_time = MPI_Wtime(); #endif if (fh->f_num_of_io_entries) { if ( 0 > fh->f_fbtl->fbtl_preadv (fh)) { opal_output (1, "READ FAILED\n"); ret = OMPI_ERROR; goto exit; } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_read_time = MPI_Wtime(); read_time += end_read_time - start_read_time; #endif #if DEBUG_ON printf("************Cycle: %d, Aggregator: %d ***************\n", index+1,fh->f_rank); if (my_aggregator == fh->f_rank){ for (i=0 ; i<global_count/4 ; i++) printf (" READ %d \n",((int *)global_buf)[i]); } #endif temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int)); if (NULL == temp_disp_index) { opal_output (1, "OUT OF MEMORY\n"); ret = OMPI_ERR_OUT_OF_RESOURCE; goto 
exit; } for (i=0; i<entries_per_aggregator; i++){ temp_index = file_offsets_for_agg[sorted_file_offsets[i]].process_id; displs_per_process[temp_index][temp_disp_index[temp_index]] = memory_displacements[sorted_file_offsets[i]]; if (temp_disp_index[temp_index] < disp_index[temp_index]){ temp_disp_index[temp_index] += 1; } else{ printf("temp_disp_index[%d]: %d is greater than disp_index[%d]: %d\n", temp_index, temp_disp_index[temp_index], temp_index, disp_index[temp_index]); } } if (NULL != temp_disp_index){ free(temp_disp_index); temp_disp_index = NULL; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN start_rcomm_time = MPI_Wtime(); #endif for (i=0;i<fh->f_procs_per_group; i++){ send_req[i] = MPI_REQUEST_NULL; ompi_datatype_create_hindexed(disp_index[i], blocklen_per_process[i], displs_per_process[i], MPI_BYTE, &sendtype[i]); ompi_datatype_commit(&sendtype[i]); ret = MCA_PML_CALL (isend(global_buf, 1, sendtype[i], fh->f_procs_in_group[i], 123, MCA_PML_BASE_SEND_STANDARD, fh->f_comm, &send_req[i])); if(OMPI_SUCCESS != ret){ goto exit; } } ret = ompi_request_wait_all (fh->f_procs_per_group, send_req, MPI_STATUS_IGNORE); if (OMPI_SUCCESS != ret){ goto exit; } } /* if ( my_aggregator == fh->f_rank ) */ ret = ompi_request_wait (&recv_req, MPI_STATUS_IGNORE); if (OMPI_SUCCESS != ret){ goto exit; } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rcomm_time = MPI_Wtime(); rcomm_time += end_rcomm_time - start_rcomm_time; #endif position += bytes_to_read_in_cycle; if (!recvbuf_is_contiguous) { OPAL_PTRDIFF_TYPE mem_address; size_t remaining = 0; size_t temp_position = 0; remaining = bytes_to_read_in_cycle; while (remaining && (iov_count > iov_index)){ mem_address = (OPAL_PTRDIFF_TYPE) (decoded_iov[iov_index].iov_base) + current_position; if (remaining >= (decoded_iov[iov_index].iov_len - current_position)) { memcpy ((IOVBASE_TYPE *) mem_address, receive_buf+temp_position, decoded_iov[iov_index].iov_len - current_position); remaining = remaining - (decoded_iov[iov_index].iov_len - current_position); temp_position = temp_position + (decoded_iov[iov_index].iov_len - current_position); iov_index = iov_index + 1; current_position = 0; } else{ memcpy ((IOVBASE_TYPE *) mem_address, receive_buf+temp_position, remaining); current_position = current_position + remaining; remaining = 0; } } if (NULL != receive_buf) { free (receive_buf); receive_buf = NULL; } } } #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN end_rexch = MPI_Wtime(); read_exch += end_rexch - start_rexch; nentry.time[0] = read_time; nentry.time[1] = rcomm_time; nentry.time[2] = read_exch; if (my_aggregator == fh->f_rank) nentry.aggregator = 1; else nentry.aggregator = 0; nentry.nprocs_for_coll = static_num_io_procs; if (!mca_common_ompio_full_print_queue(fh->f_coll_read_time)){ mca_common_ompio_register_print_entry(fh->f_coll_read_time, nentry); } #endif exit: if (NULL != decoded_iov){ free(decoded_iov); decoded_iov = NULL; } if (NULL != displs){ free(displs); displs = NULL; } if (NULL != iovec_count_per_process){ free(iovec_count_per_process); iovec_count_per_process=NULL; } if (NULL != local_iov_array){ free(local_iov_array); local_iov_array=NULL; } if (NULL != global_iov_array){ free(global_iov_array); global_iov_array=NULL; } if (my_aggregator == fh->f_rank) { for(l=0;l<fh->f_procs_per_group;l++){ if (blocklen_per_process) { free(blocklen_per_process[l]); } if (NULL != displs_per_process[l]){ free(displs_per_process[l]); displs_per_process[l] = NULL; } } } if (NULL != bytes_per_process){ free(bytes_per_process); bytes_per_process =NULL; } if (NULL != disp_index){ 
free(disp_index);
        disp_index = NULL;
    }
    if (NULL != displs_per_process){
        free(displs_per_process);
        displs_per_process = NULL;
    }
    if (NULL != bytes_remaining){
        free(bytes_remaining);
        bytes_remaining = NULL;
    }
    if (NULL != current_index){
        free(current_index);
        current_index = NULL;
    }
    if (NULL != blocklen_per_process){
        free(blocklen_per_process);
        blocklen_per_process = NULL;
    }
    if (NULL != memory_displacements){
        free(memory_displacements);
        memory_displacements = NULL;
    }
    if (NULL != file_offsets_for_agg){
        free(file_offsets_for_agg);
        file_offsets_for_agg = NULL;
    }
    if (NULL != sorted_file_offsets){
        free(sorted_file_offsets);
        sorted_file_offsets = NULL;
    }
    if (NULL != sendtype){
        free(sendtype);
        sendtype = NULL;
    }
    if ( !recvbuf_is_contiguous ) {
        if (NULL != receive_buf){
            free(receive_buf);
            receive_buf = NULL;
        }
    }
    if (NULL != global_buf) {
        free(global_buf);
        global_buf = NULL;
    }
    if (NULL != sorted) {
        free(sorted);
        sorted = NULL;
    }
    if (NULL != send_req){
        free(send_req);
        send_req = NULL;
    }

    return ret;
}
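
/*
 * Illustrative sketch only, not part of either read_all implementation above:
 * when the user buffer is non-contiguous, both variants unpack the bytes
 * received from the aggregator by walking the decoded iovec and carrying a
 * cursor (iov_index, current_position) across cycles. The hypothetical helper
 * below shows the same copy loop with the cursor passed in explicitly; it
 * assumes the usual <string.h>, <stdint.h> and <sys/uio.h> declarations that
 * these files already pull in.
 */
static inline void example_unpack_into_iov (const char *recv_buf, size_t nbytes,
                                            const struct iovec *iov, uint32_t iov_count,
                                            uint32_t *iov_index, size_t *pos_in_entry)
{
    size_t consumed = 0;
    while (nbytes > 0 && *iov_index < iov_count) {
        char   *dst   = (char *) iov[*iov_index].iov_base + *pos_in_entry;
        size_t  space = iov[*iov_index].iov_len - *pos_in_entry;
        if (nbytes >= space) {
            /* fill the rest of this iovec entry and move to the next one */
            memcpy (dst, recv_buf + consumed, space);
            consumed      += space;
            nbytes        -= space;
            *pos_in_entry  = 0;
            (*iov_index)++;
        }
        else {
            /* partial fill: remember how far into this entry we got */
            memcpy (dst, recv_buf + consumed, nbytes);
            *pos_in_entry += nbytes;
            nbytes         = 0;
        }
    }
}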