Example #1
/*
 * alltoall_intra_linear_sync
 *
 * Function:       Linear implementation of alltoall with limited number
 *                 of outstanding requests.
 * Accepts:        Same as MPI_Alltoall(), and the maximum number of
 *                 outstanding requests (actual number is 2 * max, since
 *                 we count receive and send requests separately).
 * Returns:        MPI_SUCCESS or error code
 *
 * Description:    The algorithm is the following:
 *                 1) post K irecvs, K <= N
 *                 2) post K isends, K <= N
 *                 3) while not done
 *                    - wait for any request to complete
 *                    - replace that request with a new one of the same type.
 */
int ompi_coll_base_alltoall_intra_linear_sync(const void *sbuf, int scount,
                                               struct ompi_datatype_t *sdtype,
                                               void* rbuf, int rcount,
                                               struct ompi_datatype_t *rdtype,
                                               struct ompi_communicator_t *comm,
                                               mca_coll_base_module_t *module,
                                               int max_outstanding_reqs)
{
    int line, error, ri, si, rank, size, nreqs, nrreqs, nsreqs, total_reqs;
    char *psnd, *prcv;
    ptrdiff_t slb, sext, rlb, rext;

    ompi_request_t **reqs = NULL;

    if (MPI_IN_PLACE == sbuf) {
        return mca_coll_base_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
                                                            comm, module);
    }

    /* Initialize. */

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
                 "ompi_coll_base_alltoall_intra_linear_sync rank %d", rank));

    error = ompi_datatype_get_extent(sdtype, &slb, &sext);
    if (OMPI_SUCCESS != error) {
        return error;
    }
    sext *= scount;

    error = ompi_datatype_get_extent(rdtype, &rlb, &rext);
    if (OMPI_SUCCESS != error) {
        return error;
    }
    rext *= rcount;

    /* simple optimization */

    psnd = ((char *) sbuf) + (ptrdiff_t)rank * sext;
    prcv = ((char *) rbuf) + (ptrdiff_t)rank * rext;

    error = ompi_datatype_sndrcv(psnd, scount, sdtype, prcv, rcount, rdtype);
    if (MPI_SUCCESS != error) {
        return error;
    }

    /* If only one process, we're done. */

    if (1 == size) {
        return MPI_SUCCESS;
    }

    /* Initiate send/recv to/from others. */
    total_reqs =  (((max_outstanding_reqs > (size - 1)) ||
                    (max_outstanding_reqs <= 0)) ?
                   (size - 1) : (max_outstanding_reqs));
    reqs = coll_base_comm_get_reqs(module->base_data, 2 * total_reqs);
    if (NULL == reqs) { error = -1; line = __LINE__; goto error_hndl; }

    prcv = (char *) rbuf;
    psnd = (char *) sbuf;

    /* Post first batch of irecv and isend requests  */
    for (nreqs = 0, nrreqs = 0, ri = (rank + 1) % size; nreqs < total_reqs;
         ri = (ri + 1) % size, ++nreqs, ++nrreqs) {
        error =
            MCA_PML_CALL(irecv
                         (prcv + (ptrdiff_t)ri * rext, rcount, rdtype, ri,
                          MCA_COLL_BASE_TAG_ALLTOALL, comm, &reqs[nreqs]));
        if (MPI_SUCCESS != error) { line = __LINE__; goto error_hndl; }
    }
    for ( nsreqs = 0, si =  (rank + size - 1) % size; nreqs < 2 * total_reqs;
          si = (si + size - 1) % size, ++nreqs, ++nsreqs) {
        error =
            MCA_PML_CALL(isend
                         (psnd + (ptrdiff_t)si * sext, scount, sdtype, si,
                          MCA_COLL_BASE_TAG_ALLTOALL,
                          MCA_PML_BASE_SEND_STANDARD, comm, &reqs[nreqs]));
        if (MPI_SUCCESS != error) { line = __LINE__; goto error_hndl; }
    }

    /* Wait for requests to complete */
    if (nreqs == 2 * (size - 1)) {
        /* Optimization for the case when all requests have been posted  */
        error = ompi_request_wait_all(nreqs, reqs, MPI_STATUSES_IGNORE);
        if (MPI_SUCCESS != error) { line = __LINE__; goto error_hndl; }

    } else {
        /* As requests complete, replace them with corresponding requests:
           - wait for any request to complete, mark the request as
           MPI_REQUEST_NULL
           - If it was a receive request, replace it with new irecv request
           (if any)
           - if it was a send request, replace it with new isend request (if any)
        */
        int ncreqs = 0;
        while (ncreqs < 2 * (size - 1)) {
            int completed;
            error = ompi_request_wait_any(2 * total_reqs, reqs, &completed,
                                          MPI_STATUS_IGNORE);
            if (MPI_SUCCESS != error) { line = __LINE__; goto error_hndl; }
            reqs[completed] = MPI_REQUEST_NULL;
            ncreqs++;
            if (completed < total_reqs) {
                if (nrreqs < (size - 1)) {
                    error =
                        MCA_PML_CALL(irecv
                                     (prcv + (ptrdiff_t)ri * rext, rcount, rdtype, ri,
                                      MCA_COLL_BASE_TAG_ALLTOALL, comm,
                                      &reqs[completed]));
                    if (MPI_SUCCESS != error) { line = __LINE__; goto error_hndl; }
                    ++nrreqs;
                    ri = (ri + 1) % size;
                }
            } else {
                if (nsreqs < (size - 1)) {
                    error = MCA_PML_CALL(isend
                                         (psnd + (ptrdiff_t)si * sext, scount, sdtype, si,
                                          MCA_COLL_BASE_TAG_ALLTOALL,
                                          MCA_PML_BASE_SEND_STANDARD, comm,
                                          &reqs[completed]));
                    if (MPI_SUCCESS != error) { line = __LINE__; goto error_hndl; }
                    ++nsreqs;
                    si = (si + size - 1) % size;
                }
            }
        }
    }

    /* All done */
    return MPI_SUCCESS;

 error_hndl:
    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
                 "%s:%4d\tError occurred %d, rank %2d", __FILE__, line, error,
                 rank));
    if (NULL != reqs) {
        ompi_coll_base_free_reqs(reqs, 2 * total_reqs);
    }
    return error;
}
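For comparison, here is a minimal stand-alone sketch of the same sliding-window idea written against the public MPI API rather than Open MPI internals. The byte-based buffer layout, the tag value, and the helper name alltoall_limited are assumptions for illustration only, and error handling is omitted:

#include <mpi.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical illustration, not part of Open MPI: an alltoall over raw bytes
 * that keeps at most max_reqs receives and max_reqs sends in flight and
 * refills a slot as soon as MPI_Waitany reports it complete. */
static void alltoall_limited(const char *sbuf, char *rbuf, int count,
                             int max_reqs, MPI_Comm comm)
{
    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    /* copy our own contribution locally */
    memcpy(rbuf + (size_t)rank * count, sbuf + (size_t)rank * count, count);
    if (1 == size) return;

    int total = (max_reqs <= 0 || max_reqs > size - 1) ? size - 1 : max_reqs;
    MPI_Request *reqs = malloc(2 * total * sizeof(MPI_Request));

    int ri = (rank + 1) % size;         /* next peer to receive from */
    int si = (rank + size - 1) % size;  /* next peer to send to      */
    int nr = 0, ns = 0;                 /* receives/sends posted so far */

    for (int k = 0; k < total; k++, ri = (ri + 1) % size, nr++)
        MPI_Irecv(rbuf + (size_t)ri * count, count, MPI_BYTE, ri, 0, comm, &reqs[k]);
    for (int k = 0; k < total; k++, si = (si + size - 1) % size, ns++)
        MPI_Isend(sbuf + (size_t)si * count, count, MPI_BYTE, si, 0, comm, &reqs[total + k]);

    int done = 0;
    while (done < 2 * (size - 1)) {
        int idx;
        MPI_Waitany(2 * total, reqs, &idx, MPI_STATUS_IGNORE);
        reqs[idx] = MPI_REQUEST_NULL;
        done++;
        if (idx < total && nr < size - 1) {            /* refill a receive slot */
            MPI_Irecv(rbuf + (size_t)ri * count, count, MPI_BYTE, ri, 0, comm, &reqs[idx]);
            ri = (ri + 1) % size; nr++;
        } else if (idx >= total && ns < size - 1) {    /* refill a send slot */
            MPI_Isend(sbuf + (size_t)si * count, count, MPI_BYTE, si, 0, comm, &reqs[idx]);
            si = (si + size - 1) % size; ns++;
        }
    }
    free(reqs);
}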
Example #2
int
mca_fcoll_dynamic_file_write_all (ompio_file_t *fh,
                                  const void *buf,
                                  int count,
                                  struct ompi_datatype_t *datatype,
                                  ompi_status_public_t *status)
{
    MPI_Aint total_bytes_written = 0;  /* total bytes that have been written*/
    MPI_Aint total_bytes = 0;          /* total bytes to be written */
    MPI_Aint bytes_to_write_in_cycle = 0; /* left to be written in a cycle*/
    MPI_Aint bytes_per_cycle = 0;      /* total written in each cycle by each process*/
    int index = 0;
    int cycles = 0;
    int i=0, j=0, l=0;
    int n=0; /* current position in total_bytes_per_process array */
    MPI_Aint bytes_remaining = 0; /* how many bytes have been written from the current
                                     value from total_bytes_per_process */
    int bytes_sent = 0, ret =0;
    int blocks=0, entries_per_aggregator=0;

    /* iovec structure and count of the buffer passed in */
    uint32_t iov_count = 0;
    struct iovec *decoded_iov = NULL;
    int iov_index = 0;
    char *send_buf = NULL;
    size_t current_position = 0;
    struct iovec *local_iov_array=NULL, *global_iov_array=NULL;
    mca_io_ompio_local_io_array *file_offsets_for_agg=NULL;
    /* global iovec at the writers that contain the iovecs created from
       file_set_view */
    uint32_t total_fview_count = 0;
    int local_count = 0, temp_pindex;
    int *fview_count = NULL, *disp_index=NULL, *temp_disp_index=NULL;
    int current_index = 0, temp_index=0;

    char *global_buf = NULL;
    MPI_Aint global_count = 0;


    /* array that contains the sorted indices of the global_iov */
    int *sorted = NULL, *sorted_file_offsets=NULL;
    int *displs = NULL;
    int dynamic_num_io_procs;
    size_t max_data = 0, datatype_size = 0;
    int **blocklen_per_process=NULL;
    MPI_Aint **displs_per_process=NULL, *memory_displacements=NULL;
    ompi_datatype_t **recvtype = NULL;
    MPI_Aint *total_bytes_per_process = NULL;
    MPI_Request send_req=NULL, *recv_req=NULL;
    int my_aggregator=-1;
    bool sendbuf_is_contiguous = false;
    size_t ftype_size;
    ptrdiff_t ftype_extent, lb;


#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    double write_time = 0.0, start_write_time = 0.0, end_write_time = 0.0;
    double comm_time = 0.0, start_comm_time = 0.0, end_comm_time = 0.0;
    double exch_write = 0.0, start_exch = 0.0, end_exch = 0.0;
    mca_common_ompio_print_entry nentry;
#endif

    opal_datatype_type_size ( &datatype->super, &ftype_size );
    opal_datatype_get_extent ( &datatype->super, &lb, &ftype_extent );

    /**************************************************************************
     ** 1.  In case the data is not contiguous in memory, decode it into an iovec
     **************************************************************************/
    if ( ( ftype_extent == (ptrdiff_t) ftype_size)             &&
         opal_datatype_is_contiguous_memory_layout(&datatype->super,1) &&
         0 == lb ) {
        sendbuf_is_contiguous = true;
    }



    if (! sendbuf_is_contiguous ) {
        ret =   mca_common_ompio_decode_datatype ((struct ompio_file_t *) fh,
                                                  datatype,
                                                  count,
                                                  buf,
                                                  &max_data,
                                                  &decoded_iov,
                                                  &iov_count);
        if (OMPI_SUCCESS != ret ){
            goto exit;
        }
    }
    else {
        max_data = count * datatype->super.size;
    }

    if ( MPI_STATUS_IGNORE != status ) {
	status->_ucount = max_data;
    }

    dynamic_num_io_procs = fh->f_get_mca_parameter_value ( "num_aggregators", strlen ("num_aggregators"));
    if ( OMPI_ERR_MAX == dynamic_num_io_procs ) {
        ret = OMPI_ERROR;
        goto exit;
    }
    ret = mca_common_ompio_set_aggregator_props ((struct ompio_file_t *) fh,
				                 dynamic_num_io_procs,
				                 max_data);

    if (OMPI_SUCCESS != ret){
	goto exit;
    }
    my_aggregator = fh->f_procs_in_group[0];
    /**************************************************************************
     ** 2. Determine the total amount of data to be written
     **************************************************************************/
    total_bytes_per_process = (MPI_Aint*)malloc
        (fh->f_procs_per_group*sizeof(MPI_Aint));
    if (NULL == total_bytes_per_process) {
        opal_output (1, "OUT OF MEMORY\n");
        ret = OMPI_ERR_OUT_OF_RESOURCE;
	goto exit;
    }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    start_comm_time = MPI_Wtime();
#endif
    ret = ompi_fcoll_base_coll_allgather_array (&max_data,
                                           1,
                                           MPI_LONG,
                                           total_bytes_per_process,
                                           1,
                                           MPI_LONG,
                                           0,
                                           fh->f_procs_in_group,
                                           fh->f_procs_per_group,
                                           fh->f_comm);
    
    if( OMPI_SUCCESS != ret){
	goto exit;
    }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    end_comm_time = MPI_Wtime();
    comm_time += (end_comm_time - start_comm_time);
#endif

    for (i=0 ; i<fh->f_procs_per_group ; i++) {
        total_bytes += total_bytes_per_process[i];
    }

    if (NULL != total_bytes_per_process) {
        free (total_bytes_per_process);
        total_bytes_per_process = NULL;
    }

    /*********************************************************************
     *** 3. Generate the local offsets/lengths array corresponding to
     ***    this write operation
     ********************************************************************/
    ret = fh->f_generate_current_file_view( (struct ompio_file_t *) fh,
					    max_data,
					    &local_iov_array,
					    &local_count);
    if (ret != OMPI_SUCCESS){
	goto exit;
    }

#if DEBUG_ON
    for (i=0 ; i<local_count ; i++) {

        printf("%d: OFFSET: %d   LENGTH: %ld\n",
               fh->f_rank,
               local_iov_array[i].iov_base,
               local_iov_array[i].iov_len);

    }
#endif

    /*************************************************************
     *** 4. Allgather the offset/lengths array from all processes
     *************************************************************/
    fview_count = (int *) malloc (fh->f_procs_per_group * sizeof (int));
    if (NULL == fview_count) {
        opal_output (1, "OUT OF MEMORY\n");
        ret = OMPI_ERR_OUT_OF_RESOURCE;
	goto exit;
    }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    start_comm_time = MPI_Wtime();
#endif
    ret = ompi_fcoll_base_coll_allgather_array (&local_count,
                                           1,
                                           MPI_INT,
                                           fview_count,
                                           1,
                                           MPI_INT,
                                           0,
                                           fh->f_procs_in_group,
                                           fh->f_procs_per_group,
                                           fh->f_comm);
    
    if( OMPI_SUCCESS != ret){
	goto exit;
    }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    end_comm_time = MPI_Wtime();
    comm_time += (end_comm_time - start_comm_time);
#endif

    displs = (int*) malloc (fh->f_procs_per_group * sizeof (int));
    if (NULL == displs) {
        opal_output (1, "OUT OF MEMORY\n");
        ret = OMPI_ERR_OUT_OF_RESOURCE;
	goto exit;
    }

    displs[0] = 0;
    total_fview_count = fview_count[0];
    for (i=1 ; i<fh->f_procs_per_group ; i++) {
        total_fview_count += fview_count[i];
        displs[i] = displs[i-1] + fview_count[i-1];
    }

#if DEBUG_ON
    printf("total_fview_count : %d\n", total_fview_count);
    if (my_aggregator == fh->f_rank) {
        for (i=0 ; i<fh->f_procs_per_group ; i++) {
            printf ("%d: PROCESS: %d  ELEMENTS: %d  DISPLS: %d\n",
                    fh->f_rank,
                    i,
                    fview_count[i],
                    displs[i]);
        }
    }
#endif

    /* allocate the global iovec  */

    if (0 != total_fview_count) {
        global_iov_array = (struct iovec*) malloc (total_fview_count *
                                                   sizeof(struct iovec));
        if (NULL == global_iov_array){
            opal_output(1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

    }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    start_comm_time = MPI_Wtime();
#endif
    ret = ompi_fcoll_base_coll_allgatherv_array (local_iov_array,
                                            local_count,
                                            fh->f_iov_type,
                                            global_iov_array,
                                            fview_count,
                                            displs,
                                            fh->f_iov_type,
                                            0,
                                            fh->f_procs_in_group,
                                            fh->f_procs_per_group,
                                            fh->f_comm);
    if (OMPI_SUCCESS != ret){
	goto exit;
    }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    end_comm_time = MPI_Wtime();
    comm_time += (end_comm_time - start_comm_time);
#endif

    /****************************************************************************************
    *** 5. Sort the global offset/lengths list based on the offsets.
    *** The result of the sort operation is the 'sorted', an integer array,
    *** which contains the indexes of the global_iov_array based on the offset.
    *** For example, if global_iov_array[x].offset is followed by global_iov_array[y].offset
    *** in the file, and that one is followed by global_iov_array[z].offset, than
    *** sorted[0] = x, sorted[1]=y and sorted[2]=z;
    ******************************************************************************************/
    if (0 != total_fview_count) {
        sorted = (int *)malloc (total_fview_count * sizeof(int));
        if (NULL == sorted) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
	    goto exit;
        }
	ompi_fcoll_base_sort_iovec (global_iov_array, total_fview_count, sorted);
    }

    if (NULL != local_iov_array){
	free(local_iov_array);
	local_iov_array = NULL;
    }

    if (NULL != displs){
	free(displs);
	displs=NULL;
    }


#if DEBUG_ON
    if (my_aggregator == fh->f_rank) {
        uint32_t tv=0;
        for (tv=0 ; tv<total_fview_count ; tv++) {
            printf("%d: OFFSET: %lld   LENGTH: %ld\n",
                   fh->f_rank,
                   global_iov_array[sorted[tv]].iov_base,
                   global_iov_array[sorted[tv]].iov_len);
        }
    }
#endif
    /*************************************************************
     *** 6. Determine the number of cycles required to execute this
     ***    operation
     *************************************************************/
    bytes_per_cycle = fh->f_bytes_per_agg;
    cycles = ceil((double)total_bytes/bytes_per_cycle);
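    /* Worked example with illustrative numbers (not taken from the code):
     * f_bytes_per_agg = 32 MB and total_bytes = 100 MB give
     * cycles = ceil(100/32) = 4; the first three cycles each move 32 MB and
     * the last one moves the remaining 4 MB (see step 7b below). */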

    if (my_aggregator == fh->f_rank) {
        disp_index = (int *)malloc (fh->f_procs_per_group * sizeof (int));
        if (NULL == disp_index) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
	    goto exit;
        }

	blocklen_per_process = (int **)calloc (fh->f_procs_per_group, sizeof (int*));
        if (NULL == blocklen_per_process) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
	    goto exit;
        }

	displs_per_process = (MPI_Aint **)calloc (fh->f_procs_per_group, sizeof (MPI_Aint*));
	if (NULL == displs_per_process) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
	    goto exit;
        }

	recv_req = (MPI_Request *)malloc ((fh->f_procs_per_group)*sizeof(MPI_Request));
	if ( NULL == recv_req ) {
	    opal_output (1, "OUT OF MEMORY\n");
	    ret = OMPI_ERR_OUT_OF_RESOURCE;
	    goto exit;
	}

	global_buf  = (char *) malloc (bytes_per_cycle);
	if (NULL == global_buf){
	    opal_output(1, "OUT OF MEMORY");
	    ret = OMPI_ERR_OUT_OF_RESOURCE;
	    goto exit;
	}

	recvtype = (ompi_datatype_t **) malloc (fh->f_procs_per_group  * sizeof(ompi_datatype_t *));
	if (NULL == recvtype) {
	    opal_output (1, "OUT OF MEMORY\n");
	    ret = OMPI_ERR_OUT_OF_RESOURCE;
	    goto exit;
	}
	for(l=0;l<fh->f_procs_per_group;l++){
            recvtype[l] = MPI_DATATYPE_NULL;
	}
    }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    start_exch = MPI_Wtime();
#endif
    n = 0;
    bytes_remaining = 0;
    current_index = 0;

    for (index = 0; index < cycles; index++) {
        /**********************************************************************
         ***  7a. Getting ready for next cycle: initializing and freeing buffers
	 **********************************************************************/
        if (my_aggregator == fh->f_rank) {
            if (NULL != fh->f_io_array) {
                free (fh->f_io_array);
                fh->f_io_array = NULL;
            }
	    fh->f_num_of_io_entries = 0;

            if (NULL != recvtype){
                for (i =0; i< fh->f_procs_per_group; i++) {
                    if ( MPI_DATATYPE_NULL != recvtype[i] ) {
                        ompi_datatype_destroy(&recvtype[i]);
			recvtype[i] = MPI_DATATYPE_NULL;
                    }
                }
            }

            for(l=0;l<fh->f_procs_per_group;l++){
                disp_index[l] =  1;

                free(blocklen_per_process[l]);
                free(displs_per_process[l]);

                blocklen_per_process[l] = (int *) calloc (1, sizeof(int));
                displs_per_process[l] = (MPI_Aint *) calloc (1, sizeof(MPI_Aint));
                if (NULL == displs_per_process[l] || NULL == blocklen_per_process[l]){
                    opal_output (1, "OUT OF MEMORY for displs\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
            }

            if (NULL != sorted_file_offsets){
                free(sorted_file_offsets);
                sorted_file_offsets = NULL;
            }

            if(NULL != file_offsets_for_agg){
                free(file_offsets_for_agg);
                file_offsets_for_agg = NULL;
            }

            if (NULL != memory_displacements){
                free(memory_displacements);
                memory_displacements = NULL;
            }

        } /* end if (my_aggregator == fh->f_rank) */

        /**************************************************************************
         ***  7b. Determine the number of bytes to be actually written in this cycle
	 **************************************************************************/
        if (cycles-1 == index) {
            bytes_to_write_in_cycle = total_bytes - bytes_per_cycle*index;
        }
        else {
            bytes_to_write_in_cycle = bytes_per_cycle;
        }

#if DEBUG_ON
        if (my_aggregator == fh->f_rank) {
            printf ("****%d: CYCLE %d   Bytes %lld**********\n",
                    fh->f_rank,
                    index,
                    bytes_to_write_in_cycle);
        }
#endif
        /**********************************************************
         **Gather the Data from all the processes at the writers **
         *********************************************************/

#if DEBUG_ON
        printf("bytes_to_write_in_cycle: %ld, cycle : %d\n", bytes_to_write_in_cycle,
	       index);
#endif

        /*****************************************************************
         *** 7c. Calculate how much data will be contributed in this cycle
	 ***     by each process
         *****************************************************************/
        bytes_sent = 0;

        /* The blocklen and displs calculation is only done at the aggregators! */
        while (bytes_to_write_in_cycle) {

	    /* This next block identifies which process is the holder
	    ** of the sorted[current_index] element;
	    */
            blocks = fview_count[0];
            for (j=0 ; j<fh->f_procs_per_group ; j++) {
                if (sorted[current_index] < blocks) {
                    n = j;
                    break;
                }
                else {
                    blocks += fview_count[j+1];
                }
            }

            if (bytes_remaining) {
                /* Finish up a partially used buffer from the previous  cycle */

                if (bytes_remaining <= bytes_to_write_in_cycle) {
                    /* The data fits completely into the block */
                    if (my_aggregator == fh->f_rank) {
                        blocklen_per_process[n][disp_index[n] - 1] = bytes_remaining;
                        displs_per_process[n][disp_index[n] - 1] =
                            (ptrdiff_t)global_iov_array[sorted[current_index]].iov_base +
                            (global_iov_array[sorted[current_index]].iov_len
                             - bytes_remaining);

                        /* In this case the length is consumed, so allocate the
                           next displacement and blocklength entries */
                        blocklen_per_process[n] = (int *) realloc
                            ((void *)blocklen_per_process[n], (disp_index[n]+1)*sizeof(int));
                        displs_per_process[n] = (MPI_Aint *) realloc
                            ((void *)displs_per_process[n], (disp_index[n]+1)*sizeof(MPI_Aint));
                        blocklen_per_process[n][disp_index[n]] = 0;
                        displs_per_process[n][disp_index[n]] = 0;
                        disp_index[n] += 1;
                    }
                    if (fh->f_procs_in_group[n] == fh->f_rank) {
                        bytes_sent += bytes_remaining;
                    }
                    current_index ++;
                    bytes_to_write_in_cycle -= bytes_remaining;
                    bytes_remaining = 0;
                    continue;
                }
                else {
                    /* the remaining data from the previous cycle is larger than the
		       bytes_to_write_in_cycle, so we have to segment again */
                    if (my_aggregator == fh->f_rank) {
                        blocklen_per_process[n][disp_index[n] - 1] = bytes_to_write_in_cycle;
                        displs_per_process[n][disp_index[n] - 1] =
                            (ptrdiff_t)global_iov_array[sorted[current_index]].iov_base +
                            (global_iov_array[sorted[current_index]].iov_len
                             - bytes_remaining);
                    }

                    if (fh->f_procs_in_group[n] == fh->f_rank) {
                        bytes_sent += bytes_to_write_in_cycle;
                    }
                    bytes_remaining -= bytes_to_write_in_cycle;
                    bytes_to_write_in_cycle = 0;
                    break;
                }
            }
            else {
                 /* No partially used entry available, have to start a new one */
                if (bytes_to_write_in_cycle <
                    (MPI_Aint) global_iov_array[sorted[current_index]].iov_len) {
                     /* This entry has more data than we can send in one cycle */
                    if (my_aggregator == fh->f_rank) {
                        blocklen_per_process[n][disp_index[n] - 1] = bytes_to_write_in_cycle;
                        displs_per_process[n][disp_index[n] - 1] =
                            (ptrdiff_t)global_iov_array[sorted[current_index]].iov_base ;
                    }
                    if (fh->f_procs_in_group[n] == fh->f_rank) {
                        bytes_sent += bytes_to_write_in_cycle;

                    }
                    bytes_remaining = global_iov_array[sorted[current_index]].iov_len -
                        bytes_to_write_in_cycle;
                    bytes_to_write_in_cycle = 0;
                    break;
                }
                else {
                    /* Next data entry is less than bytes_to_write_in_cycle */
                    if (my_aggregator == fh->f_rank) {
                        blocklen_per_process[n][disp_index[n] - 1] =
                            global_iov_array[sorted[current_index]].iov_len;
                        displs_per_process[n][disp_index[n] - 1] = (ptrdiff_t)
                            global_iov_array[sorted[current_index]].iov_base;

                        /*realloc for next blocklength
                          and assign this displacement and check for next displs as
                          the total length of this entry has been consumed!*/
                        blocklen_per_process[n] =
                            (int *) realloc ((void *)blocklen_per_process[n], (disp_index[n]+1)*sizeof(int));
                        displs_per_process[n] = (MPI_Aint *)realloc
                            ((void *)displs_per_process[n], (disp_index[n]+1)*sizeof(MPI_Aint));
                        blocklen_per_process[n][disp_index[n]] = 0;
                        displs_per_process[n][disp_index[n]] = 0;
                        disp_index[n] += 1;
                    }
                    if (fh->f_procs_in_group[n] == fh->f_rank) {
                        bytes_sent += global_iov_array[sorted[current_index]].iov_len;
                    }
                    bytes_to_write_in_cycle -=
                        global_iov_array[sorted[current_index]].iov_len;
                    current_index ++;
                    continue;
                }
            }
        }


        /*************************************************************************
	 *** 7d. Calculate the displacement on where to put the data and allocate
         ***     the receive buffer (global_buf)
	 *************************************************************************/
        if (my_aggregator == fh->f_rank) {
            entries_per_aggregator=0;
            for (i=0;i<fh->f_procs_per_group; i++){
                for (j=0;j<disp_index[i];j++){
                    if (blocklen_per_process[i][j] > 0)
                        entries_per_aggregator++ ;
                }
            }

#if DEBUG_ON
            printf("%d: cycle: %d, bytes_sent: %d\n ",fh->f_rank,index,
                   bytes_sent);
            printf("%d : Entries per aggregator : %d\n",fh->f_rank,entries_per_aggregator);
#endif

            if (entries_per_aggregator > 0){
                file_offsets_for_agg = (mca_io_ompio_local_io_array *)
                    malloc(entries_per_aggregator*sizeof(mca_io_ompio_local_io_array));
                if (NULL == file_offsets_for_agg) {
                    opal_output (1, "OUT OF MEMORY\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }

                sorted_file_offsets = (int *)
                    malloc (entries_per_aggregator*sizeof(int));
                if (NULL == sorted_file_offsets){
                    opal_output (1, "OUT OF MEMORY\n");
                    ret =  OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }

                /*Moving file offsets to an IO array!*/
                temp_index = 0;

                for (i=0;i<fh->f_procs_per_group; i++){
                    for(j=0;j<disp_index[i];j++){
                        if (blocklen_per_process[i][j] > 0){
                            file_offsets_for_agg[temp_index].length =
                                blocklen_per_process[i][j];
                            file_offsets_for_agg[temp_index].process_id = i;
                            file_offsets_for_agg[temp_index].offset =
                                displs_per_process[i][j];
                            temp_index++;

#if DEBUG_ON
                            printf("************Cycle: %d,  Aggregator: %d ***************\n",
                                   index+1,fh->f_rank);

                            printf("%d sends blocklen[%d]: %d, disp[%d]: %ld to %d\n",
                                   fh->f_procs_in_group[i],j,
                                   blocklen_per_process[i][j],j,
                                   displs_per_process[i][j],
                                   fh->f_rank);
#endif
                        }
                    }
                }
            }
            else{
                continue;
            }
            /* Sort the displacements for each aggregator*/
            local_heap_sort (file_offsets_for_agg,
                             entries_per_aggregator,
                             sorted_file_offsets);

            /*create contiguous memory displacements
              based on blocklens on the same displs array
              and map it to this aggregator's actual
              file-displacements (this is in the io-array created above)*/
            memory_displacements = (MPI_Aint *) malloc
                (entries_per_aggregator * sizeof(MPI_Aint));
            if (NULL == memory_displacements) {
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }

            memory_displacements[sorted_file_offsets[0]] = 0;
            for (i=1; i<entries_per_aggregator; i++){
                memory_displacements[sorted_file_offsets[i]] =
                    memory_displacements[sorted_file_offsets[i-1]] +
                    file_offsets_for_agg[sorted_file_offsets[i-1]].length;
            }

            temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int));
            if (NULL == temp_disp_index) {
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }

            /*Now update the displacements array  with memory offsets*/
            global_count = 0;
            for (i=0;i<entries_per_aggregator;i++){
                temp_pindex =
                    file_offsets_for_agg[sorted_file_offsets[i]].process_id;
                displs_per_process[temp_pindex][temp_disp_index[temp_pindex]] =
                    memory_displacements[sorted_file_offsets[i]];
                if (temp_disp_index[temp_pindex] < disp_index[temp_pindex])
                    temp_disp_index[temp_pindex] += 1;
                else{
                    printf("temp_disp_index[%d]: %d is greater than disp_index[%d]: %d\n",
                           temp_pindex, temp_disp_index[temp_pindex],
                           temp_pindex, disp_index[temp_pindex]);
                }
                global_count +=
                    file_offsets_for_agg[sorted_file_offsets[i]].length;
            }

            if (NULL != temp_disp_index){
                free(temp_disp_index);
                temp_disp_index = NULL;
            }

#if DEBUG_ON

            printf("************Cycle: %d,  Aggregator: %d ***************\n",
                   index+1,fh->f_rank);
            for (i=0;i<fh->f_procs_per_group; i++){
                for(j=0;j<disp_index[i];j++){
                    if (blocklen_per_process[i][j] > 0){
                        printf("%d sends blocklen[%d]: %d, disp[%d]: %ld to %d\n",
                               fh->f_procs_in_group[i],j,
                               blocklen_per_process[i][j],j,
                               displs_per_process[i][j],
                               fh->f_rank);

                    }
                }
            }
            printf("************Cycle: %d,  Aggregator: %d ***************\n",
                   index+1,fh->f_rank);
            for (i=0; i<entries_per_aggregator;i++){
                printf("%d: OFFSET: %lld   LENGTH: %ld, Mem-offset: %ld\n",
                       file_offsets_for_agg[sorted_file_offsets[i]].process_id,
                       file_offsets_for_agg[sorted_file_offsets[i]].offset,
                       file_offsets_for_agg[sorted_file_offsets[i]].length,
                       memory_displacements[sorted_file_offsets[i]]);
            }
            printf("%d : global_count : %ld, bytes_sent : %d\n",
                   fh->f_rank,global_count, bytes_sent);
#endif
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
            start_comm_time = MPI_Wtime();
#endif
        /*************************************************************************
	 *** 7e. Perform the actual communication
	 *************************************************************************/
            for (i=0;i<fh->f_procs_per_group; i++) {
                recv_req[i] = MPI_REQUEST_NULL;
                if ( 0 < disp_index[i] ) {
                    ompi_datatype_create_hindexed(disp_index[i],
                                                  blocklen_per_process[i],
                                                  displs_per_process[i],
                                                  MPI_BYTE,
                                                  &recvtype[i]);
                    ompi_datatype_commit(&recvtype[i]);
                    opal_datatype_type_size(&recvtype[i]->super, &datatype_size);

                    if (datatype_size){
                        ret = MCA_PML_CALL(irecv(global_buf,
                                                 1,
                                                 recvtype[i],
                                                 fh->f_procs_in_group[i],
                                                 123,
                                                 fh->f_comm,
                                                 &recv_req[i]));
                        if (OMPI_SUCCESS != ret){
                            goto exit;
                        }
                    }
                }
            }
        } /* end if (my_aggregator == fh->f_rank ) */


        if ( sendbuf_is_contiguous ) {
            send_buf = &((char*)buf)[total_bytes_written];
        }
        else if (bytes_sent) {
            /* allocate a send buffer and copy the data that needs
               to be sent into it in case the data is non-contiguous
               in memory */
            ptrdiff_t mem_address;
            size_t remaining = 0;
            size_t temp_position = 0;

            send_buf = malloc (bytes_sent);
            if (NULL == send_buf) {
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }

            remaining = bytes_sent;

            while (remaining) {
                mem_address = (ptrdiff_t)
                    (decoded_iov[iov_index].iov_base) + current_position;

                if (remaining >=
                    (decoded_iov[iov_index].iov_len - current_position)) {
                    memcpy (send_buf+temp_position,
                            (IOVBASE_TYPE *)mem_address,
                            decoded_iov[iov_index].iov_len - current_position);
                    remaining = remaining -
                        (decoded_iov[iov_index].iov_len - current_position);
                    temp_position = temp_position +
                        (decoded_iov[iov_index].iov_len - current_position);
                    iov_index = iov_index + 1;
                    current_position = 0;
                }
                else {
                    memcpy (send_buf+temp_position,
                            (IOVBASE_TYPE *) mem_address,
                            remaining);
                    current_position = current_position + remaining;
                    remaining = 0;
                }
            }
	}
	total_bytes_written += bytes_sent;

	/* Gather the sendbuf from each process at the appropriate locations in
           the aggregators */

	if (bytes_sent){
            ret = MCA_PML_CALL(isend(send_buf,
                                     bytes_sent,
                                     MPI_BYTE,
                                     my_aggregator,
                                     123,
                                     MCA_PML_BASE_SEND_STANDARD,
                                     fh->f_comm,
                                     &send_req));


	    if ( OMPI_SUCCESS != ret ){
		goto exit;
	    }

	    ret = ompi_request_wait(&send_req, MPI_STATUS_IGNORE);
	    if (OMPI_SUCCESS != ret){
		goto exit;
	    }
	}

	if (my_aggregator == fh->f_rank) {
            ret = ompi_request_wait_all (fh->f_procs_per_group,
                                         recv_req,
                                         MPI_STATUSES_IGNORE);

            if (OMPI_SUCCESS != ret){
                goto exit;
            }
	}

#if DEBUG_ON
	if (my_aggregator == fh->f_rank){
            printf("************Cycle: %d,  Aggregator: %d ***************\n",
                   index+1,fh->f_rank);
            for (i=0 ; i<global_count/4 ; i++)
                printf (" RECV %d \n",((int *)global_buf)[i]);
	}
#endif

        if (! sendbuf_is_contiguous) {
            if (NULL != send_buf) {
                free (send_buf);
                send_buf = NULL;
            }
        }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
        end_comm_time = MPI_Wtime();
        comm_time += (end_comm_time - start_comm_time);
#endif
        /**********************************************************
         *** 7f. Create the io array, and pass it to fbtl
         *********************************************************/

	if (my_aggregator == fh->f_rank) {

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
	    start_write_time = MPI_Wtime();
#endif

            fh->f_io_array = (mca_common_ompio_io_array_t *) malloc
                (entries_per_aggregator * sizeof (mca_common_ompio_io_array_t));
            if (NULL == fh->f_io_array) {
                opal_output(1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }

            fh->f_num_of_io_entries = 0;
            /*First entry for every aggregator*/
            fh->f_io_array[0].offset =
                (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[0]].offset;
            fh->f_io_array[0].length =
                file_offsets_for_agg[sorted_file_offsets[0]].length;
            fh->f_io_array[0].memory_address =
                global_buf+memory_displacements[sorted_file_offsets[0]];
            fh->f_num_of_io_entries++;

            for (i=1;i<entries_per_aggregator;i++){
                /* If the entries are contiguous, merge them;
                   otherwise make a new entry */
                if (file_offsets_for_agg[sorted_file_offsets[i-1]].offset +
                    file_offsets_for_agg[sorted_file_offsets[i-1]].length ==
                    file_offsets_for_agg[sorted_file_offsets[i]].offset){
                    fh->f_io_array[fh->f_num_of_io_entries - 1].length +=
                        file_offsets_for_agg[sorted_file_offsets[i]].length;
                }
                else {
                    fh->f_io_array[fh->f_num_of_io_entries].offset =
                        (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[i]].offset;
                    fh->f_io_array[fh->f_num_of_io_entries].length =
                        file_offsets_for_agg[sorted_file_offsets[i]].length;
                    fh->f_io_array[fh->f_num_of_io_entries].memory_address =
                        global_buf+memory_displacements[sorted_file_offsets[i]];
                    fh->f_num_of_io_entries++;
                }

            }

#if DEBUG_ON
            printf("*************************** %d\n", fh->f_num_of_io_entries);
            for (i=0 ; i<fh->f_num_of_io_entries ; i++) {
                printf(" ADDRESS: %p  OFFSET: %ld   LENGTH: %ld\n",
                       fh->f_io_array[i].memory_address,
                       (ptrdiff_t)fh->f_io_array[i].offset,
                       fh->f_io_array[i].length);
            }

#endif

            if (fh->f_num_of_io_entries) {
                if ( 0 >  fh->f_fbtl->fbtl_pwritev (fh)) {
                    opal_output (1, "WRITE FAILED\n");
                    ret = OMPI_ERROR;
                    goto exit;
                }
            }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
            end_write_time = MPI_Wtime();
            write_time += end_write_time - start_write_time;
#endif


	} /* end if (my_aggregator == fh->f_rank) */
    } /* end  for (index = 0; index < cycles; index++) */

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    end_exch = MPI_Wtime();
    exch_write += end_exch - start_exch;
    nentry.time[0] = write_time;
    nentry.time[1] = comm_time;
    nentry.time[2] = exch_write;
    if (my_aggregator == fh->f_rank)
	nentry.aggregator = 1;
    else
	nentry.aggregator = 0;
    nentry.nprocs_for_coll = dynamic_num_io_procs;
    if (!mca_common_ompio_full_print_queue(fh->f_coll_write_time)){
        mca_common_ompio_register_print_entry(fh->f_coll_write_time,
                                              nentry);
    }
#endif


exit :
    if (my_aggregator == fh->f_rank) {
        if (NULL != sorted_file_offsets){
            free(sorted_file_offsets);
            sorted_file_offsets = NULL;
        }
        if(NULL != file_offsets_for_agg){
            free(file_offsets_for_agg);
            file_offsets_for_agg = NULL;
        }
        if (NULL != memory_displacements){
            free(memory_displacements);
            memory_displacements = NULL;
        }
        if (NULL != recvtype){
            for (i =0; i< fh->f_procs_per_group; i++) {
                if ( MPI_DATATYPE_NULL != recvtype[i] ) {
                    ompi_datatype_destroy(&recvtype[i]);
                }
            }
            free(recvtype);
            recvtype=NULL;
        }

        if (NULL != fh->f_io_array) {
            free (fh->f_io_array);
            fh->f_io_array = NULL;
        }
        if (NULL != disp_index){
            free(disp_index);
            disp_index = NULL;
        }
        if (NULL != recvtype){
            free(recvtype);
            recvtype=NULL;
        }
        if (NULL != recv_req){
            free(recv_req);
            recv_req = NULL;
        }
        if (NULL != global_buf) {
            free (global_buf);
            global_buf = NULL;
        }
        for(l=0;l<fh->f_procs_per_group;l++){
            if (NULL != blocklen_per_process){
                free(blocklen_per_process[l]);
            }
            if (NULL != displs_per_process){
                free(displs_per_process[l]);
            }
        }

        free(blocklen_per_process);
        free(displs_per_process);
    }

    if (NULL != displs){
	free(displs);
	displs=NULL;
    }

    if (! sendbuf_is_contiguous) {
	if (NULL != send_buf) {
	    free (send_buf);
	    send_buf = NULL;
	}
    }
    if (NULL != global_buf) {
        free (global_buf);
        global_buf = NULL;
    }
    if (NULL != sorted) {
        free (sorted);
        sorted = NULL;
    }
    if (NULL != global_iov_array) {
        free (global_iov_array);
	global_iov_array = NULL;
    }
    if (NULL != fview_count) {
        free (fview_count);
        fview_count = NULL;
    }
    if (NULL != decoded_iov) {
        free (decoded_iov);
        decoded_iov = NULL;
    }


    return OMPI_SUCCESS;
}
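The merge of sorted, back-to-back file segments performed in step 7f can be read in isolation. Below is a minimal sketch under assumed names: io_seg_t and merge_sorted_segments are hypothetical stand-ins, not Open MPI structures or functions:

#include <stddef.h>

/* Hypothetical sketch of the step-7f merge: given file segments already
 * sorted by offset, adjacent segments whose offsets line up back-to-back
 * are collapsed into a single I/O entry. */
typedef struct { long long offset; size_t length; } io_seg_t;

/* Returns the number of merged entries written to 'out' (out must hold n). */
static int merge_sorted_segments(const io_seg_t *in, int n, io_seg_t *out)
{
    if (n <= 0) return 0;
    int m = 0;
    out[m] = in[0];
    for (int i = 1; i < n; i++) {
        if (out[m].offset + (long long)out[m].length == in[i].offset) {
            out[m].length += in[i].length;        /* contiguous: extend previous entry */
        } else {
            out[++m] = in[i];                     /* gap: start a new entry */
        }
    }
    return m + 1;
}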
Example #3
/* Arguments not used in this implementation:
 * - send_first
 */
static int ompi_comm_allreduce_intra_bridge (int *inbuf, int *outbuf, 
                                             int count, struct ompi_op_t *op, 
                                             ompi_communicator_t *comm,
                                             ompi_communicator_t *bcomm, 
                                             void* lleader, void* rleader,
                                             int send_first )
{
    int *tmpbuf=NULL;
    int local_rank;
    int i;
    int rc;
    int local_leader, remote_leader;

    local_leader  = (*((int*)lleader));
    remote_leader = (*((int*)rleader));

    if ( &ompi_mpi_op_sum.op != op && &ompi_mpi_op_prod.op != op &&
         &ompi_mpi_op_max.op != op && &ompi_mpi_op_min.op  != op ) {
        return MPI_ERR_OP;
    }
    
    local_rank = ompi_comm_rank ( comm );
    tmpbuf     = (int *) malloc ( count * sizeof(int));
    if ( NULL == tmpbuf ) {
        rc = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }

    /* Intercomm_create */
    rc = comm->c_coll.coll_allreduce ( inbuf, tmpbuf, count, MPI_INT,
                                       op, comm, comm->c_coll.coll_allreduce_module );
    if ( OMPI_SUCCESS != rc ) {
        goto exit;
    }

    if (local_rank == local_leader ) {
        MPI_Request req;
        
        rc = MCA_PML_CALL(irecv ( outbuf, count, MPI_INT, remote_leader,
                                  OMPI_COMM_ALLREDUCE_TAG, 
                                  bcomm, &req));
        if ( OMPI_SUCCESS != rc ) {
            goto exit;       
        }
        rc = MCA_PML_CALL(send (tmpbuf, count, MPI_INT, remote_leader, 
                                OMPI_COMM_ALLREDUCE_TAG,
                                MCA_PML_BASE_SEND_STANDARD,  bcomm));
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }
        rc = ompi_request_wait_all ( 1, &req, MPI_STATUSES_IGNORE);
        if ( OMPI_SUCCESS != rc ) {
            goto exit;
        }

        if ( &ompi_mpi_op_max.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                if (tmpbuf[i] > outbuf[i]) {
                    outbuf[i] = tmpbuf[i];
                }
            }
        }
        else if ( &ompi_mpi_op_min.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                if (tmpbuf[i] < outbuf[i]) {
                    outbuf[i] = tmpbuf[i];
                }
            }
        }
        else if ( &ompi_mpi_op_sum.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                outbuf[i] += tmpbuf[i];
            }
        }
        else if ( &ompi_mpi_op_prod.op == op ) {
            for ( i = 0 ; i < count; i++ ) {
                outbuf[i] *= tmpbuf[i];
            }
        }
    }

    rc = comm->c_coll.coll_bcast ( outbuf, count, MPI_INT, local_leader, 
                                   comm, comm->c_coll.coll_bcast_module );

 exit:
    if (NULL != tmpbuf ) {
        free (tmpbuf);
    }

    return (rc);
}
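The same bridge pattern can be sketched with only the public MPI API: reduce inside each group, let the two local leaders swap and combine partial results over the bridge communicator, then broadcast within each group. This is an illustration, not the Open MPI implementation; the function name and the assumption that the caller supplies the communicators and leader ranks are hypothetical:

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical sketch of a leader-bridged allreduce on int data. */
static int allreduce_bridge(const int *inbuf, int *outbuf, int count, MPI_Op op,
                            MPI_Comm comm, MPI_Comm bridge_comm,
                            int local_leader, int remote_leader)
{
    int rank, rc;
    MPI_Comm_rank(comm, &rank);

    int *tmp = malloc(count * sizeof(int));
    if (NULL == tmp) return MPI_ERR_NO_MEM;

    /* Step 1: group-local reduction into tmp on every process. */
    rc = MPI_Allreduce(inbuf, tmp, count, MPI_INT, op, comm);
    if (MPI_SUCCESS != rc) goto out;

    if (rank == local_leader) {
        /* Step 2: the leaders exchange partial results over the bridge ... */
        rc = MPI_Sendrecv(tmp, count, MPI_INT, remote_leader, 0,
                          outbuf, count, MPI_INT, remote_leader, 0,
                          bridge_comm, MPI_STATUS_IGNORE);
        if (MPI_SUCCESS != rc) goto out;
        /* ... and combine the remote contribution with their own. */
        rc = MPI_Reduce_local(tmp, outbuf, count, MPI_INT, op);
        if (MPI_SUCCESS != rc) goto out;
    }

    /* Step 3: every process obtains the global result from its local leader. */
    rc = MPI_Bcast(outbuf, count, MPI_INT, local_leader, comm);
out:
    free(tmp);
    return rc;
}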
Example #4
/*
 *	allgatherv_inter
 *
 *	Function:	- allgatherv using other MPI collectives
 *	Accepts:	- same as MPI_Allgatherv()
 *	Returns:	- MPI_SUCCESS or error code
 */
int
mca_coll_inter_allgatherv_inter(void *sbuf, int scount,
                                struct ompi_datatype_t *sdtype,
                                void *rbuf, int *rcounts, int *disps,
                                struct ompi_datatype_t *rdtype,
                                struct ompi_communicator_t *comm,
                                mca_coll_base_module_t *module)
{
    int i, rank, size, size_local, total=0, err;
    int *count=NULL,*displace=NULL;
    char *ptmp=NULL;
    MPI_Aint incr;
    MPI_Aint extent;
    MPI_Aint lb;
    ompi_datatype_t *ndtype;
    ompi_request_t *req[2];

    rank = ompi_comm_rank(comm);
    size_local = ompi_comm_size(comm->c_local_comm);
    size = ompi_comm_remote_size(comm);

    if (0 == rank) {
        count = (int *)malloc(sizeof(int) * size_local);
        if (NULL == count) {
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
        displace = (int *)malloc(sizeof(int) * size_local);
        if (NULL == displace) {
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
    }
    /* Local gather to get the scount of each process */
    err = comm->c_local_comm->c_coll.coll_gather(&scount, 1, MPI_INT,
            count, 1, MPI_INT,
            0, comm->c_local_comm,
            comm->c_local_comm->c_coll.coll_gather_module);
    if (OMPI_SUCCESS != err) {
        return err;
    }
    if(0 == rank) {
        displace[0] = 0;
        for (i = 1; i < size_local; i++) {
            displace[i] = displace[i-1] + count[i-1];
        }
        /* Perform the gatherv locally with the first process as root */
        err = ompi_ddt_get_extent(sdtype, &lb, &extent);
        if (OMPI_SUCCESS != err) {
            return OMPI_ERROR;
        }
        incr = 0;
        for (i = 0; i < size_local; i++) {
            incr = incr + extent*count[i];
        }
        ptmp = (char*)malloc(incr);
        if (NULL == ptmp) {
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
    }
    err = comm->c_local_comm->c_coll.coll_gatherv(sbuf, scount, sdtype,
            ptmp, count, displace,
            sdtype,0, comm->c_local_comm,
            comm->c_local_comm->c_coll.coll_gatherv_module);
    if (OMPI_SUCCESS != err) {
        return err;
    }

    ompi_ddt_create_indexed(size,rcounts,disps,rdtype,&ndtype);
    ompi_ddt_commit(&ndtype);

    if (0 == rank) {
        for (i = 0; i < size_local; i++) {
            total = total + count[i];
        }
        /* Exchange data between roots */
        err = MCA_PML_CALL(irecv(rbuf, 1, ndtype, 0,
                                 MCA_COLL_BASE_TAG_ALLGATHERV, comm,
                                 &(req[0])));
        if (OMPI_SUCCESS != err) {
            return err;
        }

        err = MCA_PML_CALL(isend(ptmp, total, sdtype, 0,
                                 MCA_COLL_BASE_TAG_ALLGATHERV,
                                 MCA_PML_BASE_SEND_STANDARD,
                                 comm, &(req[1])));
        if (OMPI_SUCCESS != err) {
            return err;
        }

        err = ompi_request_wait_all(2, req, MPI_STATUSES_IGNORE);
        if (OMPI_SUCCESS != err) {
            return err;
        }
    }

    /* bcast the message to all the local processes */
    err = comm->c_local_comm->c_coll.coll_bcast(rbuf, 1, ndtype,
            0, comm->c_local_comm,
            comm->c_local_comm->c_coll.coll_bcast_module);
    if (OMPI_SUCCESS != err) {
        return err;
    }

    ompi_ddt_destroy(&ndtype);
    if (NULL != ptmp) {
        free(ptmp);
    }
    if (NULL != displace) {
        free(displace);
    }
    if (NULL != count) {
        free(count);
    }

    return err;

}
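A rough MPI-level sketch of the same roots-exchange scheme follows, assuming the caller supplies both the inter-communicator and its local communicator and that data is moved as raw bytes. It is illustrative only and not the mca_coll_inter code path; the function name is made up:

#include <mpi.h>
#include <stdlib.h>

/* Hypothetical sketch: local gatherv to the local root, the two roots swap
 * their packed groups across the inter-communicator, then the local root
 * broadcasts the remote group's data inside its own group. */
static int allgatherv_inter_sketch(const char *sbuf, int scount,
                                   char *rbuf, const int *rcounts, const int *disps,
                                   MPI_Comm intercomm, MPI_Comm local_comm)
{
    int rank, lsize, rsize, err;
    MPI_Comm_rank(local_comm, &rank);
    MPI_Comm_size(local_comm, &lsize);
    MPI_Comm_remote_size(intercomm, &rsize);

    int *counts = NULL, *displ = NULL;
    char *packed = NULL;
    int total = 0;

    /* 1. The local root learns how much each local process contributes. */
    if (0 == rank) {
        counts = malloc(lsize * sizeof(int));
        displ  = malloc(lsize * sizeof(int));
    }
    err = MPI_Gather(&scount, 1, MPI_INT, counts, 1, MPI_INT, 0, local_comm);
    if (MPI_SUCCESS != err) goto out;

    /* 2. Gather the local group's data at the local root. */
    if (0 == rank) {
        displ[0] = 0;
        for (int i = 1; i < lsize; i++) displ[i] = displ[i-1] + counts[i-1];
        total = displ[lsize-1] + counts[lsize-1];
        packed = malloc(total > 0 ? total : 1);
    }
    err = MPI_Gatherv(sbuf, scount, MPI_BYTE, packed, counts, displ, MPI_BYTE,
                      0, local_comm);
    if (MPI_SUCCESS != err) goto out;

    /* 3. The two roots swap packed data; the remote contribution is placed
     *    according to rcounts/disps via an indexed datatype. */
    MPI_Datatype rtype;
    MPI_Type_indexed(rsize, (int *)rcounts, (int *)disps, MPI_BYTE, &rtype);
    MPI_Type_commit(&rtype);
    if (0 == rank) {
        err = MPI_Sendrecv(packed, total, MPI_BYTE, 0, 0,
                           rbuf, 1, rtype, 0, 0,
                           intercomm, MPI_STATUS_IGNORE);
        if (MPI_SUCCESS != err) { MPI_Type_free(&rtype); goto out; }
    }

    /* 4. The local root forwards the remote group's data to its own group. */
    err = MPI_Bcast(rbuf, 1, rtype, 0, local_comm);
    MPI_Type_free(&rtype);

out:
    free(counts); free(displ); free(packed);
    return err;
}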
Example #5
/*
 *	allgather_inter
 *
 *	Function:	- allgather using other MPI collectives
 *	Accepts:	- same as MPI_Allgather()
 *	Returns:	- MPI_SUCCESS or error code
 */
int
mca_coll_basic_allgather_inter(void *sbuf, int scount,
                               struct ompi_datatype_t *sdtype,
                               void *rbuf, int rcount,
                               struct ompi_datatype_t *rdtype,
                               struct ompi_communicator_t *comm,
                               mca_coll_base_module_t *module)
{
    int rank, root = 0, size, rsize, err, i;
    char *tmpbuf = NULL, *ptmp;
    ptrdiff_t rlb, slb, rextent, sextent, incr;
    ompi_request_t *req;
    mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
    ompi_request_t **reqs = basic_module->mccb_reqs;

    rank = ompi_comm_rank(comm);
    size = ompi_comm_size(comm);
    rsize = ompi_comm_remote_size(comm);

    /* Algorithm:
     * - a gather to the root in the remote group (executed simultaneously in
     *   both groups, which is why we cannot use coll_gather).
     * - exchange the temporary results between the two roots
     * - inter-bcast (again simultaneous).
     */

    /* Step one: gather operations: */
    if (rank != root) {
        /* send your data to root */
        err = MCA_PML_CALL(send(sbuf, scount, sdtype, root,
                                MCA_COLL_BASE_TAG_ALLGATHER,
                                MCA_PML_BASE_SEND_STANDARD, comm));
        if (OMPI_SUCCESS != err) {
            return err;
        }
    } else {
        /* receive a msg. from all other procs. */
        err = ompi_datatype_get_extent(rdtype, &rlb, &rextent);
        if (OMPI_SUCCESS != err) {
            return err;
        }
        err = ompi_datatype_get_extent(sdtype, &slb, &sextent);
        if (OMPI_SUCCESS != err) {
            return err;
        }

        /* Do a send-recv between the two root procs. to avoid deadlock */
        err = MCA_PML_CALL(isend(sbuf, scount, sdtype, 0,
                                 MCA_COLL_BASE_TAG_ALLGATHER,
                                 MCA_PML_BASE_SEND_STANDARD,
                                 comm, &reqs[rsize]));
        if (OMPI_SUCCESS != err) {
            return err;
        }

        err = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, 0,
                                 MCA_COLL_BASE_TAG_ALLGATHER, comm,
                                 &reqs[0]));
        if (OMPI_SUCCESS != err) {
            return err;
        }

        incr = rextent * rcount;
        ptmp = (char *) rbuf + incr;
        for (i = 1; i < rsize; ++i, ptmp += incr) {
            err = MCA_PML_CALL(irecv(ptmp, rcount, rdtype, i,
                                     MCA_COLL_BASE_TAG_ALLGATHER,
                                     comm, &reqs[i]));
            if (MPI_SUCCESS != err) {
                return err;
            }
        }

        err = ompi_request_wait_all(rsize + 1, reqs, MPI_STATUSES_IGNORE);
        if (OMPI_SUCCESS != err) {
            return err;
        }

        /* Step 2: exchange the results between the root processes */
        tmpbuf = (char *) malloc(scount * size * sextent);
        if (NULL == tmpbuf) {
            return OMPI_ERR_OUT_OF_RESOURCE;
        }

        err = MCA_PML_CALL(isend(rbuf, rsize * rcount, rdtype, 0,
                                 MCA_COLL_BASE_TAG_ALLGATHER,
                                 MCA_PML_BASE_SEND_STANDARD, comm, &req));
        if (OMPI_SUCCESS != err) {
            goto exit;
        }

        err = MCA_PML_CALL(recv(tmpbuf, size * scount, sdtype, 0,
                                MCA_COLL_BASE_TAG_ALLGATHER, comm,
                                MPI_STATUS_IGNORE));
        if (OMPI_SUCCESS != err) {
            goto exit;
        }

        err = ompi_request_wait( &req, MPI_STATUS_IGNORE);
        if (OMPI_SUCCESS != err) {
            goto exit;
        }
    }


    /* Step 3: bcast the data to the remote group. This
     * happens in both groups simultaneously, thus we
     * cannot use coll_bcast (this would deadlock).
     */
    if (rank != root) {
        /* post the recv */
        err = MCA_PML_CALL(recv(rbuf, rsize * rcount, rdtype, 0,
                                MCA_COLL_BASE_TAG_ALLGATHER, comm,
                                MPI_STATUS_IGNORE));
        if (OMPI_SUCCESS != err) {
            goto exit;
        }

    } else {
        /* Send the data to every other process in the remote group
         * except rank zero, which has it already. */
        for (i = 1; i < rsize; i++) {
            err = MCA_PML_CALL(isend(tmpbuf, size * scount, sdtype, i,
                                     MCA_COLL_BASE_TAG_ALLGATHER,
                                     MCA_PML_BASE_SEND_STANDARD,
                                     comm, &reqs[i - 1]));
            if (OMPI_SUCCESS != err) {
                goto exit;
            }

        }

        err = ompi_request_wait_all(rsize - 1, reqs, MPI_STATUSES_IGNORE);
        if (OMPI_SUCCESS != err) {
            goto exit;
        }
    }

  exit:
    if (NULL != tmpbuf) {
        free(tmpbuf);
    }

    return err;
}
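
For reference, here is a minimal caller-side sketch, not part of the Open MPI sources above, showing the user-level operation that an inter-communicator allgather routine like the one above ultimately services. The helper name run_inter_allgather, the tag passed to MPI_Intercomm_create, and the one-int payload are all hypothetical.

#include <mpi.h>
#include <stdlib.h>

/* Hedged usage sketch: each process contributes one int and receives one
 * int from every process of the *remote* group, which is the semantics of
 * MPI_Allgather over an inter-communicator. */
static int run_inter_allgather(MPI_Comm local_comm, MPI_Comm peer_comm,
                               int local_leader, int remote_leader)
{
    MPI_Comm inter;
    int err, my_val, remote_size, *rbuf;

    err = MPI_Intercomm_create(local_comm, local_leader, peer_comm,
                               remote_leader, 42 /* hypothetical tag */, &inter);
    if (MPI_SUCCESS != err) return err;

    MPI_Comm_rank(local_comm, &my_val);          /* arbitrary payload */
    MPI_Comm_remote_size(inter, &remote_size);
    rbuf = (int *) malloc(remote_size * sizeof(int));
    if (NULL == rbuf) { MPI_Comm_free(&inter); return MPI_ERR_NO_MEM; }

    /* Internally this may resolve to a routine such as the one above. */
    err = MPI_Allgather(&my_val, 1, MPI_INT, rbuf, 1, MPI_INT, inter);

    free(rbuf);
    MPI_Comm_free(&inter);
    return err;
}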
static int mca_coll_hierarch_bcast_intra_seg3 (void *buff, 
					       int count,
					       struct ompi_datatype_t *datatype, 
					       int root,
					       struct ompi_communicator_t *comm, 
					       mca_coll_base_module_t *module, 
                                               int segsize )
{
    struct ompi_communicator_t *llcomm=NULL;
    struct ompi_communicator_t *lcomm=NULL;
    mca_coll_hierarch_module_t *hierarch_module = (mca_coll_hierarch_module_t *) module;
    int lroot=MPI_UNDEFINED, llroot=MPI_UNDEFINED;
    int llrank=MPI_UNDEFINED, llsize=0, rank=0, ret=OMPI_SUCCESS;
    int lsize=0, lrank=MPI_UNDEFINED;
    MPI_Aint ub=0, typeext=0;
    size_t typesize=0;
    int i, realsegsize=0, remaining_count=0;
    int num_segments=0, segcount=0, segindex=0;
    char* tmpbuf = (char *) buff;
    ompi_request_t **sreq=NULL, **sreq1=NULL;
    ompi_request_t *rreq=MPI_REQUEST_NULL, *rreq1=MPI_REQUEST_NULL;

    rank   = ompi_comm_rank ( comm );
    lcomm  = hierarch_module->hier_lcomm;
    
    if ( mca_coll_hierarch_verbose_param ) { 
	printf("%s:%d: executing hierarchical seg3 bcast with cnt=%d root=%d segsize=%d\n",
	       comm->c_name, rank, count, root, segsize );
    } 

    /*
     * This function returns the local leader communicator
     * which *always* contains the root of this operation.
     * This might involve creating a new communicator. This is
     * also the reason that *every* process in comm has to call
     * this function.
     */
    llcomm = mca_coll_hierarch_get_llcomm ( root, hierarch_module, &llroot, &lroot);

    ompi_datatype_type_size ( datatype, &typesize);
    ompi_datatype_get_extent ( datatype, &ub, &typeext);

    /* Determine number of segments and number of elements per segment */
    if ((typesize > 0) && (segsize % typesize != 0)) {
	/* segment size must be a multiple of typesize */
	segsize = typesize * (segsize / typesize);
    }
    if ((segsize == 0) || (count == 0) || (typesize == 0)) {
	segcount = count;
	num_segments = 1;
    } else {
	segcount = segsize/typesize;
	num_segments = count/segcount;
	if ( (count % segcount) != 0 ) num_segments++;
	if (num_segments == 1) segcount = count;
    }
    realsegsize = segcount*typeext;
    remaining_count = segcount;
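    /* Worked example (hypothetical numbers, not in the original source):
     * count = 1000 MPI_DOUBLEs (typesize = typeext = 8) and segsize = 4096
     * gives segcount = 4096/8 = 512, num_segments = 2 (1000/512 rounded up)
     * and realsegsize = 512*8 = 4096 bytes advanced per segment. */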

    if ( MPI_COMM_NULL != lcomm ) {
        lsize = ompi_comm_size ( lcomm );
        lrank = ompi_comm_rank ( lcomm );	
        sreq1 = (ompi_request_t **)malloc ( lsize * sizeof(ompi_request_t *));
        if ( NULL == sreq1 ) {
	    return OMPI_ERR_OUT_OF_RESOURCE;
        }
        for(i=0; i<lsize; i++) {
	    sreq1[i] = MPI_REQUEST_NULL;
        }
    }

    if ( MPI_COMM_NULL != llcomm ) {
        llsize = ompi_comm_size (llcomm);
        llrank = ompi_comm_rank ( llcomm );
    
        sreq  = hierarch_module->hier_reqs;
	for(i=0; i<llsize; i++) {
	    sreq[i] = MPI_REQUEST_NULL;
	}
    }

    
    /* Broadcasting the first segment in the upper level */
    if ( MPI_UNDEFINED != llroot ) {
	ret = llcomm->c_coll.coll_bcast(tmpbuf, remaining_count, datatype, 
					llroot, llcomm, 
					llcomm->c_coll.coll_bcast_module);
	if ( OMPI_SUCCESS != ret ) {
	    goto exit;
	}
    }
    
    for (segindex = 1; segindex < num_segments; segindex++) {
	/* determine how many elements are being sent in this round */
	if( segindex == (num_segments - 1) ) {
	    remaining_count = count - segindex*segcount;
	}
	tmpbuf += realsegsize;
	
	/* Broadcasting the next segment in the upper level */
	if ( MPI_COMM_NULL != llcomm ) {
	    if(llrank == llroot) {
		for(i = 0; i < llsize; i++) {
		    if( i != llroot) {
			ret = MCA_PML_CALL(isend(tmpbuf, remaining_count, datatype, i,
						 MCA_COLL_BASE_TAG_BCAST, 
						 MCA_PML_BASE_SEND_STANDARD,
						 llcomm, (sreq+i) ));
			if ( OMPI_SUCCESS != ret ) {
			    goto exit;
			}
		    }
		}
	    }
            else {
                ret = MCA_PML_CALL(irecv(tmpbuf, remaining_count, datatype, llroot,
                                         MCA_COLL_BASE_TAG_BCAST, 
                                         llcomm, &rreq ));
                if ( OMPI_SUCCESS != ret ) {
                    goto exit;
                }
            }
        }
	
	/* Broadcast the previous segment among the lower-level processes:
	 * once the local leaders have received the data from the root, they can
	 * distribute it to the processes in their local, low-level communicator.
	 */
	if ( MPI_COMM_NULL != lcomm ) {
	    if( lrank == lroot) {
		for( i = 0; i < lsize; i++) {
		    if( i != lroot) {
			ret = MCA_PML_CALL(isend(tmpbuf-realsegsize, segcount, datatype, i,
						 MCA_COLL_BASE_TAG_BCAST,
						 MCA_PML_BASE_SEND_STANDARD,
						 lcomm, (sreq1+i) ));
			if ( OMPI_SUCCESS != ret ) {
			    goto exit;
			}
		    }
		}
	    }
	    else {
		ret = MCA_PML_CALL(irecv(tmpbuf-realsegsize, segcount, datatype, lroot, 
					 MCA_COLL_BASE_TAG_BCAST , lcomm, &rreq1 ));
		if ( OMPI_SUCCESS != ret ) {
		    goto exit;
		}
	    }
	}
	
	/* Wait for the upper level bcast to complete*/
	if ( MPI_COMM_NULL != llcomm ) {
	    if ( llrank == llroot ) {
		ret = ompi_request_wait_all(llsize, sreq, MPI_STATUSES_IGNORE);
		if ( OMPI_SUCCESS != ret ) {
		    goto exit;
		}
	    }
	    else {
		ret = ompi_request_wait_all ( 1, &rreq, MPI_STATUS_IGNORE );
		if ( OMPI_SUCCESS != ret ) {
		    goto exit;
		}
	    }
	}
	
	/*Wait for the lower level bcast to complete */
	if ( MPI_COMM_NULL != lcomm ) {
	    if ( lrank == lroot ) {
		ret = ompi_request_wait_all(lsize, sreq1, MPI_STATUSES_IGNORE);
		if ( OMPI_SUCCESS != ret ) {
		    goto exit;
		}
	    }
	    else {
		ret = ompi_request_wait_all( 1, &rreq1, MPI_STATUS_IGNORE);
		if ( OMPI_SUCCESS != ret ) {
		    goto exit;
		}
	    }
	}	
    }
    
    /* Broadcast the last segment among the lower-level processes:
     * once the local leaders have received the data from the root, they can
     * distribute it to the processes in their local, low-level communicator.
     */
    if ( MPI_COMM_NULL != lcomm ) {
        ret = lcomm->c_coll.coll_bcast(tmpbuf, remaining_count, datatype,  
                                       lroot, lcomm, 
                                       lcomm->c_coll.coll_bcast_module);
    }
    
exit:
    if ( NULL != sreq1 ) {
	free ( sreq1 );
    }

    return ret;
}
Example #7
0
/*
 *	alltoallv_inter
 *
 *	Function:	- MPI_Alltoallv
 *	Accepts:	- same as MPI_Alltoallv()
 *	Returns:	- MPI_SUCCESS or an MPI error code
 */
int
mca_coll_basic_alltoallv_inter(const void *sbuf, const int *scounts, const int *sdisps,
                               struct ompi_datatype_t *sdtype, void *rbuf,
                               const int *rcounts, const int *rdisps,
                               struct ompi_datatype_t *rdtype,
                               struct ompi_communicator_t *comm,
                               mca_coll_base_module_t *module)
{
    int i;
    int rsize;
    int err;
    char *psnd;
    char *prcv;
    size_t nreqs;
    MPI_Aint sndextent;
    MPI_Aint rcvextent;

    mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t*) module;
    ompi_request_t **preq = basic_module->mccb_reqs;

    /* Initialize. */

    rsize = ompi_comm_remote_size(comm);

    ompi_datatype_type_extent(sdtype, &sndextent);
    ompi_datatype_type_extent(rdtype, &rcvextent);

    /* Initiate all send/recv to/from others. */
    nreqs = rsize * 2;

    /* Post all receives first  */
    /* A simple optimization: do not send and recv msgs of length zero */
    for (i = 0; i < rsize; ++i) {
        prcv = ((char *) rbuf) + (rdisps[i] * rcvextent);
        if (rcounts[i] > 0) {
            err = MCA_PML_CALL(irecv(prcv, rcounts[i], rdtype,
                                     i, MCA_COLL_BASE_TAG_ALLTOALLV, comm,
                                     &preq[i]));
            if (MPI_SUCCESS != err) {
                return err;
            }
        } else {
            preq[i] = MPI_REQUEST_NULL;
        }
    }

    /* Now post all sends */
    for (i = 0; i < rsize; ++i) {
        psnd = ((char *) sbuf) + (sdisps[i] * sndextent);
        if (scounts[i] > 0) {
            err = MCA_PML_CALL(isend(psnd, scounts[i], sdtype,
                                     i, MCA_COLL_BASE_TAG_ALLTOALLV,
                                     MCA_PML_BASE_SEND_STANDARD, comm,
                                     &preq[rsize + i]));
            if (MPI_SUCCESS != err) {
                return err;
            }
        } else {
            preq[rsize + i] = MPI_REQUEST_NULL;
        }
    }

    err = ompi_request_wait_all(nreqs, preq, MPI_STATUSES_IGNORE);

    /* All done */
    return err;
}
Example #8
0
/*  
 * Linear functions are copied from the basic coll module.  For
 * some small number of nodes and/or small data sizes they are just as
 * fast as tuned/tree based segmenting operations and as such may be
 * selected by the decision functions.  These are copied into this module
 * due to the way we select modules in V1. i.e. in V2 we will handle this
 * differently and so will not have to duplicate code.  
 * GEF Oct05 after asking Jeff.  
 */
int
ompi_coll_tuned_alltoallv_intra_basic_linear(void *sbuf, int *scounts, int *sdisps,
                                            struct ompi_datatype_t *sdtype,
                                            void *rbuf, int *rcounts, int *rdisps,
                                            struct ompi_datatype_t *rdtype,
                                            struct ompi_communicator_t *comm,
                                            mca_coll_base_module_t *module)
{
    int i, size, rank, err, nreqs;
    char *psnd, *prcv;
    ptrdiff_t sext, rext;
    MPI_Request *preq;
    mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
    mca_coll_tuned_comm_t *data = tuned_module->tuned_data;

    if (MPI_IN_PLACE == sbuf) {
        return  mca_coll_tuned_alltoallv_intra_basic_inplace (rbuf, rcounts, rdisps,
                                                              rdtype, comm, module);
    }

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    OPAL_OUTPUT((ompi_coll_tuned_stream,
                 "coll:tuned:alltoallv_intra_basic_linear rank %d", rank));

    ompi_datatype_type_extent(sdtype, &sext);
    ompi_datatype_type_extent(rdtype, &rext);

    /* Simple optimization - handle send to self first */
    psnd = ((char *) sbuf) + (ptrdiff_t)sdisps[rank] * sext;
    prcv = ((char *) rbuf) + (ptrdiff_t)rdisps[rank] * rext;
    if (0 != scounts[rank]) {
        err = ompi_datatype_sndrcv(psnd, scounts[rank], sdtype,
                              prcv, rcounts[rank], rdtype);
        if (MPI_SUCCESS != err) {
            return err;
        }
    }

    /* If only one process, we're done. */
    if (1 == size) {
        return MPI_SUCCESS;
    }

    /* Now, initiate all send/recv to/from others. */
    nreqs = 0;
    preq = data->mcct_reqs;

    /* Post all receives first */
    for (i = 0; i < size; ++i) {
        if (i == rank || 0 == rcounts[i]) {
            continue;
        }

        prcv = ((char *) rbuf) + (ptrdiff_t)rdisps[i] * rext;
        err = MCA_PML_CALL(irecv_init(prcv, rcounts[i], rdtype,
                                      i, MCA_COLL_BASE_TAG_ALLTOALLV, comm,
                                      preq++));
        ++nreqs;
        if (MPI_SUCCESS != err) {
            ompi_coll_tuned_free_reqs(data->mcct_reqs, nreqs);
            return err;
        }
    }

    /* Now post all sends */
    for (i = 0; i < size; ++i) {
        if (i == rank || 0 == scounts[i]) {
            continue;
        }

        psnd = ((char *) sbuf) + (ptrdiff_t)sdisps[i] * sext;
        err = MCA_PML_CALL(isend_init(psnd, scounts[i], sdtype,
                                      i, MCA_COLL_BASE_TAG_ALLTOALLV,
                                      MCA_PML_BASE_SEND_STANDARD, comm,
                                      preq++));
        ++nreqs;
        if (MPI_SUCCESS != err) {
            ompi_coll_tuned_free_reqs(data->mcct_reqs, nreqs);
            return err;
        }
    }

    /* Start your engines.  This will never return an error. */
    MCA_PML_CALL(start(nreqs, data->mcct_reqs));

    /* Wait for them all.  If there's an error, note that we don't care
     * what the error was -- just that there *was* an error.  The PML
     * will finish all requests, even if one or more of them fail.
     * i.e., by the end of this call, all the requests are free-able.
     * So free them anyway -- even if there was an error, and return the
     * error after we free everything. */
    err = ompi_request_wait_all(nreqs, data->mcct_reqs,
                                MPI_STATUSES_IGNORE);

    /* Free the requests. */
    ompi_coll_tuned_free_reqs(data->mcct_reqs, nreqs);

    return err;
}
Example #9
0
static int
mca_coll_tuned_alltoallv_intra_basic_inplace(void *rbuf, const int *rcounts, const int *rdisps,
                                             struct ompi_datatype_t *rdtype,
                                             struct ompi_communicator_t *comm,
                                             mca_coll_base_module_t *module)
{
    mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
    int i, j, size, rank, err=MPI_SUCCESS;
    MPI_Request *preq;
    char *tmp_buffer;
    size_t max_size, rdtype_size;
    ptrdiff_t ext;

    /* Initialize. */

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);
    ompi_datatype_type_size(rdtype, &rdtype_size);

    /* If only one process, we're done. */
    if (1 == size || 0 == rdtype_size) {
        return MPI_SUCCESS;
    }

    /* Find the largest receive amount */
    ompi_datatype_type_extent (rdtype, &ext);
    for (i = 0, max_size = 0 ; i < size ; ++i) {
        size_t size = ext * rcounts[i];

        max_size = size > max_size ? size : max_size;
    }

    /* Allocate a temporary buffer */
    tmp_buffer = calloc (max_size, 1);
    if (NULL == tmp_buffer) {
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* in-place alltoallv slow algorithm (but works) */
    for (i = 0 ; i < size ; ++i) {
        for (j = i+1 ; j < size ; ++j) {
            /* Initiate all send/recv to/from others. */
            preq = tuned_module->tuned_data->mcct_reqs;

            if (i == rank && rcounts[j]) {
                /* Copy the data into the temporary buffer */
                err = ompi_datatype_copy_content_same_ddt (rdtype, rcounts[j],
                                                           tmp_buffer, (char *) rbuf + rdisps[j] * ext);
                if (MPI_SUCCESS != err) { goto error_hndl; }

                /* Exchange data with the peer */
                err = MCA_PML_CALL(irecv ((char *) rbuf + rdisps[j] * ext, rcounts[j], rdtype,
                                          j, MCA_COLL_BASE_TAG_ALLTOALLV, comm, preq++));
                if (MPI_SUCCESS != err) { goto error_hndl; }

                err = MCA_PML_CALL(isend ((void *) tmp_buffer,  rcounts[j], rdtype,
                                          j, MCA_COLL_BASE_TAG_ALLTOALLV, MCA_PML_BASE_SEND_STANDARD,
                                          comm, preq++));
                if (MPI_SUCCESS != err) { goto error_hndl; }
            } else if (j == rank && rcounts[i]) {
                /* Copy the data into the temporary buffer */
                err = ompi_datatype_copy_content_same_ddt (rdtype, rcounts[i],
                                                           tmp_buffer, (char *) rbuf + rdisps[i] * ext);
                if (MPI_SUCCESS != err) { goto error_hndl; }

                /* Exchange data with the peer */
                err = MCA_PML_CALL(irecv ((char *) rbuf + rdisps[i] * ext, rcounts[i], rdtype,
                                          i, MCA_COLL_BASE_TAG_ALLTOALLV, comm, preq++));
                if (MPI_SUCCESS != err) { goto error_hndl; }

                err = MCA_PML_CALL(isend ((void *) tmp_buffer,  rcounts[i], rdtype,
                                          i, MCA_COLL_BASE_TAG_ALLTOALLV, MCA_PML_BASE_SEND_STANDARD,
                                          comm, preq++));
                if (MPI_SUCCESS != err) { goto error_hndl; }
            } else {
                continue;
            }

            /* Wait for the requests to complete */
            err = ompi_request_wait_all (2, tuned_module->tuned_data->mcct_reqs, MPI_STATUSES_IGNORE);
            if (MPI_SUCCESS != err) { goto error_hndl; }

            /* Free the requests. */
            mca_coll_tuned_free_reqs(tuned_module->tuned_data->mcct_reqs, 2);
        }
    }

 error_hndl:
    /* Free the temporary buffer */
    free (tmp_buffer);

    /* All done */

    return err;
}
Example #10
0
File: bcast.c Project: IanYXXL/A1
/**
 * Bcast - subgroup in communicator
 *  This is a very simple algorithm - binary tree, transmitting the full
 *  message at each step.
 */
OMPI_DECLSPEC int comm_bcast_pml(void *buffer, int root, int count,
        ompi_datatype_t *dtype, int my_rank_in_group,
        int n_peers, int *ranks_in_comm,ompi_communicator_t *comm)
{
    /* local variables */
    int rc=OMPI_SUCCESS,msg_cnt,i;
    ompi_request_t *requests[2];
    int node_rank, peer_rank;
    netpatterns_tree_node_t node_data;

    /*
     * shift rank to root==0 tree
     */
    node_rank=(my_rank_in_group-root+n_peers)%n_peers;
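    /* Worked example (hypothetical values): with n_peers = 4 and root = 2,
     * rank 3 maps to node_rank = (3 - 2 + 4) % 4 = 1; the inverse mapping
     * (peer_rank + root) % n_peers used below converts tree positions back
     * to real communicator ranks. */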

    /*
     * compute my communication pattern - binary tree
     */
    rc=netpatterns_setup_narray_tree(2, node_rank, n_peers,
            &node_data);
    if( OMPI_SUCCESS != rc ) {
        goto Error;
    }

    /* 1 process special case */
    if(1 == n_peers) {
        return OMPI_SUCCESS;
    }

    /* if I have parents - wait on the data to arrive */
    if(node_data.n_parents) {
        /* I will have only 1 parent */
        peer_rank=node_data.parent_rank;
        peer_rank=(peer_rank+root)%n_peers;
        /* translate back to actual rank */
        rc=MCA_PML_CALL(recv(buffer, count,dtype,peer_rank,
                    -OMPI_COMMON_TAG_BCAST, comm, MPI_STATUSES_IGNORE));
        if( 0 > rc ) {
            goto Error;
        }
    }

    /* send the data to my children */
    msg_cnt=0;
    for(i=0 ; i < node_data.n_children ; i++ ) {
        peer_rank=node_data.children_ranks[i];
        peer_rank=(peer_rank+root)%n_peers;
        rc=MCA_PML_CALL(isend(buffer,
                    count,dtype,peer_rank,
                    -OMPI_COMMON_TAG_BCAST,MCA_PML_BASE_SEND_STANDARD,
                    comm,&(requests[msg_cnt])));
        if( 0 > rc ) {
            goto Error;
        }
        msg_cnt++;
    }
    /* wait for send completion */
    if(msg_cnt) {
        /* wait on send and receive completion */
        ompi_request_wait_all(msg_cnt,requests,MPI_STATUSES_IGNORE);
    }

    /* return */
    return OMPI_SUCCESS;

Error:
    return rc;
}
static int two_phase_exchage_data(mca_io_ompio_file_t *fh,
				  void *buf,
				  char *write_buf,
				  struct iovec *offset_length,
				  int *send_size,int *start_pos,
				  int *recv_size,
				  OMPI_MPI_OFFSET_TYPE off,
				  OMPI_MPI_OFFSET_TYPE size, int *count,
				  int *partial_recv, int *sent_to_proc,
				  int contig_access_count,
				  OMPI_MPI_OFFSET_TYPE min_st_offset,
				  OMPI_MPI_OFFSET_TYPE fd_size,
				  OMPI_MPI_OFFSET_TYPE *fd_start,
				  OMPI_MPI_OFFSET_TYPE *fd_end,
				  Flatlist_node *flat_buf,
				  mca_io_ompio_access_array_t *others_req,
				  int *send_buf_idx, int *curr_to_proc,
				  int *done_to_proc, int iter,
				  int *buf_idx,MPI_Aint buftype_extent,
				  int striping_unit, int *aggregator_list,
				  int *hole){
  
    int *tmp_len=NULL, sum, *srt_len=NULL, nprocs_recv, nprocs_send,  k,i,j;
    int ret=OMPI_SUCCESS;
    MPI_Request *requests=NULL, *send_req=NULL;
    MPI_Datatype *recv_types=NULL;
    OMPI_MPI_OFFSET_TYPE *srt_off=NULL;
    char **send_buf = NULL; 
    
    
    ret = fh->f_comm->c_coll.coll_alltoall (recv_size,
					    1,
					    MPI_INT,
					    send_size,
					    1,
					    MPI_INT,
					    fh->f_comm,
					    fh->f_comm->c_coll.coll_alltoall_module);
    
    if ( OMPI_SUCCESS != ret ){
      return ret;
    }

    nprocs_recv = 0;
    for (i=0;i<fh->f_size;i++){
      if (recv_size[i]){
	nprocs_recv++;
      }
    }
    
    
    recv_types = (MPI_Datatype *)
	malloc (( nprocs_recv + 1 ) * sizeof(MPI_Datatype *));
    
    if ( NULL == recv_types ){
      return OMPI_ERR_OUT_OF_RESOURCE;
    }

    tmp_len = (int *) malloc(fh->f_size*sizeof(int));
    
    if ( NULL == tmp_len ) {
      return OMPI_ERR_OUT_OF_RESOURCE;
    }

    j = 0;
    for (i=0;i<fh->f_size;i++){
	if (recv_size[i]) {
	    if (partial_recv[i]) {
		k = start_pos[i] + count[i] - 1;
		tmp_len[i] = others_req[i].lens[k];
		others_req[i].lens[k] = partial_recv[i];
	    }
	    MPI_Type_hindexed(count[i], 
			      &(others_req[i].lens[start_pos[i]]),
			      &(others_req[i].mem_ptrs[start_pos[i]]), 
			      MPI_BYTE, recv_types+j);
	    MPI_Type_commit(recv_types+j);
	    j++;
	}
    }

    sum = 0;
    for (i=0;i<fh->f_size;i++) sum += count[i];
    srt_off = (OMPI_MPI_OFFSET_TYPE *) 
      malloc((sum+1)*sizeof(OMPI_MPI_OFFSET_TYPE));
    
    if ( NULL == srt_off ){
      return OMPI_ERR_OUT_OF_RESOURCE;
    }
    
    srt_len = (int *) malloc((sum+1)*sizeof(int));
    
    if ( NULL == srt_len ) {
      return OMPI_ERR_OUT_OF_RESOURCE;
    }


    two_phase_heap_merge(others_req, count, srt_off, srt_len, start_pos, fh->f_size,fh->f_rank,  nprocs_recv, sum);


    for (i=0; i<fh->f_size; i++) 
        if (partial_recv[i]) {
            k = start_pos[i] + count[i] - 1;
            others_req[i].lens[k] = tmp_len[i];
        }
    
    if ( NULL != tmp_len ){
      free(tmp_len); 
    }

    *hole = 0;
    if (off != srt_off[0]){
	*hole = 1;
    }
    else{
	for (i=1;i<sum;i++){
	    if (srt_off[i] <= srt_off[0] + srt_len[0]){
		int new_len = srt_off[i] + srt_len[i] - srt_off[0];
		if(new_len > srt_len[0]) 
		    srt_len[0] = new_len;
	    }
	    else
		break;
	}
	if (i < sum || size != srt_len[0])
	    *hole = 1;
    }
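    /* Worked example (hypothetical offsets, not from the original source):
     * with off = 0, size = 100 and merged entries srt_off = {0, 40, 60},
     * srt_len = {40, 20, 40}, the loop coalesces them into a single [0,100)
     * range, so *hole stays 0; if bytes [40,50) were missing, the loop would
     * break early and *hole would be set to 1, triggering the
     * read-modify-write path below. */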


    if ( NULL != srt_off ){
      free(srt_off);
    }
    if ( NULL != srt_len ){
      free(srt_len);
    }

    if (nprocs_recv){
	if (*hole){
	    if (off > 0){
		fh->f_io_array = (mca_io_ompio_io_array_t *)malloc 
		    (sizeof(mca_io_ompio_io_array_t));
		if (NULL == fh->f_io_array) {
		    opal_output(1, "OUT OF MEMORY\n");
		    return OMPI_ERR_OUT_OF_RESOURCE;
		}
		fh->f_io_array[0].offset  =(IOVBASE_TYPE *)(intptr_t)off;
		fh->f_num_of_io_entries = 1;
		fh->f_io_array[0].length = size;
		fh->f_io_array[0].memory_address = write_buf;
		if (fh->f_num_of_io_entries){
		    if (OMPI_SUCCESS != fh->f_fbtl->fbtl_preadv (fh, NULL)) {
			opal_output(1, "READ FAILED\n");
			return OMPI_ERROR;
		    }
		}
		
	    }
	    fh->f_num_of_io_entries = 0;
	    if (NULL != fh->f_io_array) {
		free (fh->f_io_array);
		fh->f_io_array = NULL;
	    }
	}
    }
    
    nprocs_send = 0;
    for (i=0; i <fh->f_size; i++) if (send_size[i]) nprocs_send++;

    #if DEBUG_ON
    printf("%d : nprocs_send : %d\n", fh->f_rank,nprocs_send);
    #endif

    requests = (MPI_Request *) 	
	malloc((nprocs_send+nprocs_recv+1)*sizeof(MPI_Request)); 

    if ( NULL == requests ){
      return OMPI_ERR_OUT_OF_RESOURCE;
    }
    
    j = 0;
    for (i=0; i<fh->f_size; i++) {
	if (recv_size[i]) {
	  ret = MCA_PML_CALL(irecv(MPI_BOTTOM,
				   1,
				   recv_types[j],
				   i,
				   fh->f_rank+i+100*iter,
				   fh->f_comm,
				   requests+j));

	  if ( OMPI_SUCCESS != ret ){
	    return ret;
	  }
	  j++;
	}
    }
    send_req = requests + nprocs_recv;
    
    
    if (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY) {
	j = 0;
	for (i=0; i <fh->f_size; i++) 
	  if (send_size[i]) {
	    ret = MCA_PML_CALL(isend(((char *) buf) + buf_idx[i],
				     send_size[i],
				     MPI_BYTE,
				     i,
				     fh->f_rank+i+100*iter,
				     MCA_PML_BASE_SEND_STANDARD, 
				     fh->f_comm,
				     send_req+j));	

	    if ( OMPI_SUCCESS != ret ){
	      return ret;
	    }
	    
	    j++;
	    buf_idx[i] += send_size[i];
	  }
    }
    else if(nprocs_send && (!(fh->f_flags & OMPIO_CONTIGUOUS_MEMORY))){
      send_buf = (char **) malloc(fh->f_size*sizeof(char*));
      if ( NULL == send_buf ){
	return OMPI_ERR_OUT_OF_RESOURCE;
      }
      for (i=0; i < fh->f_size; i++){
	if (send_size[i]) {
	  send_buf[i] = (char *) malloc(send_size[i]);
	  
	  if ( NULL == send_buf[i] ){
	    return OMPI_ERR_OUT_OF_RESOURCE;
	  }
	}
      }
      
      ret = two_phase_fill_send_buffer(fh, buf,flat_buf, send_buf,
				       offset_length, send_size,
				       send_req,sent_to_proc,
				       contig_access_count, 
				       min_st_offset, fd_size,
				       fd_start, fd_end, send_buf_idx,
				       curr_to_proc, done_to_proc,
				       iter, buftype_extent, striping_unit,
				       aggregator_list);
      
      if ( OMPI_SUCCESS != ret ){
	return ret;
      }
    }

    for (i=0; i<nprocs_recv; i++) MPI_Type_free(recv_types+i);
    free(recv_types);
    ret = ompi_request_wait_all (nprocs_send+nprocs_recv,
				 requests,
				 MPI_STATUS_IGNORE);
    

    if ( NULL != requests ){
      free(requests);
    }
    
    return ret;
}
Example #12
0
/*
 *	gather_intra_linear_sync
 *
 *	Function:	- synchronized gather operation with a size-limited first segment
 *	Accepts:	- same arguments as MPI_Gather(), first segment size
 *	Returns:	- MPI_SUCCESS or error code
 */
int
ompi_coll_base_gather_intra_linear_sync(const void *sbuf, int scount,
                                         struct ompi_datatype_t *sdtype,
                                         void *rbuf, int rcount,
                                         struct ompi_datatype_t *rdtype,
                                         int root,
                                         struct ompi_communicator_t *comm,
                                         mca_coll_base_module_t *module,
                                         int first_segment_size)
{
    int i, ret, line, rank, size, first_segment_count;
    ompi_request_t **reqs = NULL;
    MPI_Aint extent, lb;
    size_t typelng;

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
                 "ompi_coll_base_gather_intra_linear_sync rank %d, segment %d", rank, first_segment_size));

    if (rank != root) {
        /* Non-root processes:
           - receive zero byte message from the root,
           - send the first segment of the data synchronously,
           - send the second segment of the data.
        */

        ompi_datatype_type_size(sdtype, &typelng);
        ompi_datatype_get_extent(sdtype, &lb, &extent);
        first_segment_count = scount;
        COLL_BASE_COMPUTED_SEGCOUNT( (size_t) first_segment_size, typelng,
                                      first_segment_count );
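        /* Descriptive note (not in the original source): first_segment_count
         * now holds roughly first_segment_size bytes worth of elements, never
         * more than scount; the remainder of the buffer is sent as a second
         * message below. */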

        ret = MCA_PML_CALL(recv(rbuf, 0, MPI_BYTE, root,
                                MCA_COLL_BASE_TAG_GATHER,
                                comm, MPI_STATUS_IGNORE));
        if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }

        ret = MCA_PML_CALL(send(sbuf, first_segment_count, sdtype, root,
                                MCA_COLL_BASE_TAG_GATHER,
                                MCA_PML_BASE_SEND_STANDARD, comm));
        if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }

        ret = MCA_PML_CALL(send((char*)sbuf + extent * first_segment_count,
                                (scount - first_segment_count), sdtype,
                                root, MCA_COLL_BASE_TAG_GATHER,
                                MCA_PML_BASE_SEND_STANDARD, comm));
        if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }

    } else {

        /* Root process,
           - For every non-root node:
           - post irecv for the first segment of the message
           - send zero byte message to signal node to send the message
           - post irecv for the second segment of the message
           - wait for the first segment to complete
           - Copy local data if necessary
           - Waitall for all the second segments to complete.
        */
        char *ptmp;
        ompi_request_t *first_segment_req;
        reqs = coll_base_comm_get_reqs(module->base_data, size);
        if (NULL == reqs) { ret = -1; line = __LINE__; goto error_hndl; }

        ompi_datatype_type_size(rdtype, &typelng);
        ompi_datatype_get_extent(rdtype, &lb, &extent);
        first_segment_count = rcount;
        COLL_BASE_COMPUTED_SEGCOUNT( (size_t)first_segment_size, typelng,
                                      first_segment_count );

        ptmp = (char *) rbuf;
        for (i = 0; i < size; ++i) {
            if (i == rank) {
                /* skip myself */
                reqs[i] = MPI_REQUEST_NULL;
                continue;
            }

            /* irecv for the first segment from i */
            ptmp = (char*)rbuf + (ptrdiff_t)i * (ptrdiff_t)rcount * extent;
            ret = MCA_PML_CALL(irecv(ptmp, first_segment_count, rdtype, i,
                                     MCA_COLL_BASE_TAG_GATHER, comm,
                                     &first_segment_req));
            if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }

            /* send sync message */
            ret = MCA_PML_CALL(send(rbuf, 0, MPI_BYTE, i,
                                    MCA_COLL_BASE_TAG_GATHER,
                                    MCA_PML_BASE_SEND_STANDARD, comm));
            if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }

            /* irecv for the second segment */
            ptmp = (char*)rbuf + ((ptrdiff_t)i * (ptrdiff_t)rcount + first_segment_count) * extent;
            ret = MCA_PML_CALL(irecv(ptmp, (rcount - first_segment_count),
                                     rdtype, i, MCA_COLL_BASE_TAG_GATHER, comm,
                                     &reqs[i]));
            if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }

            /* wait on the first segment to complete */
            ret = ompi_request_wait(&first_segment_req, MPI_STATUS_IGNORE);
            if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
        }

        /* copy local data if necessary */
        if (MPI_IN_PLACE != sbuf) {
            ret = ompi_datatype_sndrcv((void *)sbuf, scount, sdtype,
                                       (char*)rbuf + (ptrdiff_t)rank * (ptrdiff_t)rcount * extent,
                                       rcount, rdtype);
            if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
        }

        /* wait all second segments to complete */
        ret = ompi_request_wait_all(size, reqs, MPI_STATUSES_IGNORE);
        if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
    }

    /* All done */
    return MPI_SUCCESS;
 error_hndl:
    if (NULL != reqs) {
        ompi_coll_base_free_reqs(reqs, size);
    }
    OPAL_OUTPUT (( ompi_coll_base_framework.framework_output,
                   "ERROR_HNDL: node %d file %s line %d error %d\n",
                   rank, __FILE__, line, ret ));
    (void)line;  // silence compiler warning
    return ret;
}
Example #13
0
static int
mca_coll_basic_neighbor_alltoallv_cart(const void *sbuf, const int scounts[], const int sdisps[],
                                       struct ompi_datatype_t *sdtype, void *rbuf, const int rcounts[],
                                       const int rdisps[], struct ompi_datatype_t *rdtype,
                                       struct ompi_communicator_t *comm, mca_coll_base_module_t *module)
{
    mca_coll_basic_module_t *basic_module = (mca_coll_basic_module_t *) module;
    const mca_topo_base_comm_cart_2_1_0_t *cart = comm->c_topo->mtc.cart;
    const int rank = ompi_comm_rank (comm);
    int rc = MPI_SUCCESS, dim, i, nreqs;
    ptrdiff_t lb, rdextent, sdextent;
    ompi_request_t **reqs;

    ompi_datatype_get_extent(rdtype, &lb, &rdextent);
    ompi_datatype_get_extent(sdtype, &lb, &sdextent);

    /* post receives first */
    for (dim = 0, nreqs = 0, i = 0, reqs = basic_module->mccb_reqs ; dim < cart->ndims ; ++dim, i += 2) {
        int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;

        if (cart->dims[dim] > 1) {
            mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
        } else if (1 == cart->dims[dim] && cart->periods[dim]) {
            srank = drank = rank;
        }
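        /* Worked example (hypothetical 3x3 periodic grid, row-major ranks):
         * for rank 4 at coordinates (1,1), a shift of +1 along dim 0 yields
         * srank = 1 (rank of (0,1)) and drank = 7 (rank of (2,1)); one block
         * is received from each of the two neighbors in every dimension. */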

        if (MPI_PROC_NULL != srank) {
            rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i] * rdextent, rcounts[i], rdtype, srank,
                                    MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
            if (OMPI_SUCCESS != rc) break;
            nreqs++;
        }

        if (MPI_PROC_NULL != drank) {
            rc = MCA_PML_CALL(irecv((char *) rbuf + rdisps[i+1] * rdextent, rcounts[i+1], rdtype, drank,
                                    MCA_COLL_BASE_TAG_ALLTOALL, comm, reqs++));
            if (OMPI_SUCCESS != rc) break;
            nreqs++;
        }
    }

    if (OMPI_SUCCESS != rc) {
        /* should probably try to clean up here */
        return rc;
    }

    for (dim = 0, i = 0 ; dim < cart->ndims ; ++dim, i += 2) {
        int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;

        if (cart->dims[dim] > 1) {
            mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
        } else if (1 == cart->dims[dim] && cart->periods[dim]) {
            srank = drank = rank;
        }

        if (MPI_PROC_NULL != srank) {
            /* remove cast from const when the pml layer is updated to take a const for the send buffer */
            rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i] * sdextent, scounts[i], sdtype, srank,
                                    MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
            if (OMPI_SUCCESS != rc) break;
            nreqs++;
        }

        if (MPI_PROC_NULL != drank) {
            rc = MCA_PML_CALL(isend((char *) sbuf + sdisps[i+1] * sdextent, scounts[i+1], sdtype, drank,
                                    MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD, comm, reqs++));
            if (OMPI_SUCCESS != rc) break;
            nreqs++;
        }
    }

    if (OMPI_SUCCESS != rc) {
        /* should probably try to clean up here */
        return rc;
    }

    return ompi_request_wait_all (nreqs, basic_module->mccb_reqs, MPI_STATUSES_IGNORE);
}
Example #14
0
/**
 * This is a generic implementation of the reduce protocol. It used the tree
 * provided as an argument and execute all operations using a segment of
 * count times a datatype.
 * For the last communication it will update the count in order to limit
 * the number of datatype to the original count (original_count)
 *
 * Note that for non-commutative operations we cannot save memory copy
 * for the first block: thus we must copy sendbuf to accumbuf on intermediate
 * to keep the optimized loop happy.
 */
int ompi_coll_base_reduce_generic( const void* sendbuf, void* recvbuf, int original_count,
                                    ompi_datatype_t* datatype, ompi_op_t* op,
                                    int root, ompi_communicator_t* comm,
                                    mca_coll_base_module_t *module,
                                    ompi_coll_tree_t* tree, int count_by_segment,
                                    int max_outstanding_reqs )
{
    char *inbuf[2] = {NULL, NULL}, *inbuf_free[2] = {NULL, NULL};
    char *accumbuf = NULL, *accumbuf_free = NULL;
    char *local_op_buffer = NULL, *sendtmpbuf = NULL;
    ptrdiff_t extent, size, gap, segment_increment;
    ompi_request_t **sreq = NULL, *reqs[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
    int num_segments, line, ret, segindex, i, rank;
    int recvcount, prevcount, inbi;

    /**
     * Determine number of segments and number of elements
     * sent per operation
     */
    ompi_datatype_type_extent( datatype, &extent );
    num_segments = (int)(((size_t)original_count + (size_t)count_by_segment - (size_t)1) / (size_t)count_by_segment);
    segment_increment = (ptrdiff_t)count_by_segment * extent;
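    /* Worked example (hypothetical values, not in the original source):
     * original_count = 1000, count_by_segment = 256, extent = 4 bytes gives
     * num_segments = 4 and segment_increment = 1024 bytes between segments. */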

    sendtmpbuf = (char*) sendbuf;
    if( sendbuf == MPI_IN_PLACE ) {
        sendtmpbuf = (char *)recvbuf;
    }

    OPAL_OUTPUT((ompi_coll_base_framework.framework_output, "coll:base:reduce_generic count %d, msg size %ld, segsize %ld, max_requests %d",
                 original_count, (unsigned long)((ptrdiff_t)num_segments * (ptrdiff_t)segment_increment),
                 (unsigned long)segment_increment, max_outstanding_reqs));

    rank = ompi_comm_rank(comm);

    /* non-leaf nodes - wait for children to send me data & forward up
       (if needed) */
    if( tree->tree_nextsize > 0 ) {
        ptrdiff_t real_segment_size;

        /* handle a non-existent recv buffer (i.e. it is NULL) and
           protect the recv buffer on non-root nodes */
        accumbuf = (char*)recvbuf;
        if( (NULL == accumbuf) || (root != rank) ) {
            /* Allocate temporary accumulator buffer. */
            size = opal_datatype_span(&datatype->super, original_count, &gap);
            accumbuf_free = (char*)malloc(size);
            if (accumbuf_free == NULL) {
                line = __LINE__; ret = -1; goto error_hndl;
            }
            accumbuf = accumbuf_free - gap;
        }

        /* If this is a non-commutative operation we must copy
           sendbuf to the accumbuf, in order to simplify the loops */
        if (!ompi_op_is_commute(op)) {
            ompi_datatype_copy_content_same_ddt(datatype, original_count,
                                                (char*)accumbuf,
                                                (char*)sendtmpbuf);
        }
        /* Allocate two buffers for incoming segments */
        real_segment_size = opal_datatype_span(&datatype->super, count_by_segment, &gap);
        inbuf_free[0] = (char*) malloc(real_segment_size);
        if( inbuf_free[0] == NULL ) {
            line = __LINE__; ret = -1; goto error_hndl;
        }
        inbuf[0] = inbuf_free[0] - gap;
        /* if there is chance to overlap communication -
           allocate second buffer */
        if( (num_segments > 1) || (tree->tree_nextsize > 1) ) {
            inbuf_free[1] = (char*) malloc(real_segment_size);
            if( inbuf_free[1] == NULL ) {
                line = __LINE__; ret = -1; goto error_hndl;
            }
            inbuf[1] = inbuf_free[1] - gap;
        }

        /* reset input buffer index and receive count */
        inbi = 0;
        recvcount = 0;
        /* for each segment */
        for( segindex = 0; segindex <= num_segments; segindex++ ) {
            prevcount = recvcount;
            /* recvcount - number of elements in current segment */
            recvcount = count_by_segment;
            if( segindex == (num_segments-1) )
                recvcount = original_count - (ptrdiff_t)count_by_segment * (ptrdiff_t)segindex;

            /* for each child */
            for( i = 0; i < tree->tree_nextsize; i++ ) {
                /**
                 * We try to overlap communication:
                 * either with next segment or with the next child
                 */
                /* post irecv for current segindex on current child */
                if( segindex < num_segments ) {
                    void* local_recvbuf = inbuf[inbi];
                    if( 0 == i ) {
                        /* for the first step (1st child per segment) and
                         * commutative operations we might be able to irecv
                         * directly into the accumulate buffer so that we can
                         * reduce(op) this with our sendbuf in one step as
                         * ompi_op_reduce only has two buffer pointers,
                         * this avoids an extra memory copy.
                         *
                         * BUT if the operation is non-commutative or
                         * we are root and are USING MPI_IN_PLACE this is wrong!
                         */
                        if( (ompi_op_is_commute(op)) &&
                            !((MPI_IN_PLACE == sendbuf) && (rank == tree->tree_root)) ) {
                            local_recvbuf = accumbuf + (ptrdiff_t)segindex * (ptrdiff_t)segment_increment;
                        }
                    }

                    ret = MCA_PML_CALL(irecv(local_recvbuf, recvcount, datatype,
                                             tree->tree_next[i],
                                             MCA_COLL_BASE_TAG_REDUCE, comm,
                                             &reqs[inbi]));
                    if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl;}
                }
                /* wait for previous req to complete, if any.
                   if there are no requests reqs[inbi ^1] will be
                   MPI_REQUEST_NULL. */
                /* wait on data from last child for previous segment */
                ret = ompi_request_wait_all( 1, &reqs[inbi ^ 1],
                                             MPI_STATUSES_IGNORE );
                if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl;  }
                local_op_buffer = inbuf[inbi ^ 1];
                if( i > 0 ) {
                    /* our first operation is to combine our own [sendbuf] data
                     * with the data we received from downstream (but only if
                     * the operation is commutative and we are not the root and
                     * not using MPI_IN_PLACE)
                     */
                    if( 1 == i ) {
                        if( (ompi_op_is_commute(op)) &&
                            !((MPI_IN_PLACE == sendbuf) && (rank == tree->tree_root)) ) {
                            local_op_buffer = sendtmpbuf + (ptrdiff_t)segindex * (ptrdiff_t)segment_increment;
                        }
                    }
                    /* apply operation */
                    ompi_op_reduce(op, local_op_buffer,
                                   accumbuf + (ptrdiff_t)segindex * (ptrdiff_t)segment_increment,
                                   recvcount, datatype );
                } else if ( segindex > 0 ) {
                    void* accumulator = accumbuf + (ptrdiff_t)(segindex-1) * (ptrdiff_t)segment_increment;
                    if( tree->tree_nextsize <= 1 ) {
                        if( (ompi_op_is_commute(op)) &&
                            !((MPI_IN_PLACE == sendbuf) && (rank == tree->tree_root)) ) {
                            local_op_buffer = sendtmpbuf + (ptrdiff_t)(segindex-1) * (ptrdiff_t)segment_increment;
                        }
                    }
                    ompi_op_reduce(op, local_op_buffer, accumulator, prevcount,
                                   datatype );

                    /* all reduced on available data this step (i) complete,
                     * pass to the next process unless you are the root.
                     */
                    if (rank != tree->tree_root) {
                        /* send combined/accumulated data to parent */
                        ret = MCA_PML_CALL( send( accumulator, prevcount,
                                                  datatype, tree->tree_prev,
                                                  MCA_COLL_BASE_TAG_REDUCE,
                                                  MCA_PML_BASE_SEND_STANDARD,
                                                  comm) );
                        if (ret != MPI_SUCCESS) {
                            line = __LINE__; goto error_hndl;
                        }
                    }

                    /* we stop when segindex == number of segments
                       (i.e. we do num_segments+1 steps for pipelining) */
                    if (segindex == num_segments) break;
                }

                /* update input buffer index */
                inbi = inbi ^ 1;
            } /* end of for each child */
        } /* end of for each segment */

        /* clean up */
        if( inbuf_free[0] != NULL) free(inbuf_free[0]);
        if( inbuf_free[1] != NULL) free(inbuf_free[1]);
        if( accumbuf_free != NULL ) free(accumbuf_free);
    }

    /* leaf nodes
       Depending on the value of max_outstanding_reqs and
       the number of segments we have two options:
       - send all segments using blocking send to the parent, or
       - avoid flooding the parent node by limiting the number of
       outstanding requests to max_outstanding_reqs.
       TODO/POSSIBLE IMPROVEMENT: If there is a way to determine the eager size
       for the current communication, synchronization should be used only
       when the message/segment size is smaller than the eager size.
    */
    else {

        /* If the number of segments is less than the maximum number of outstanding
           requests or there is no limit on the maximum number of outstanding
           requests, we send data to the parent using blocking send */
        if ((0 == max_outstanding_reqs) ||
            (num_segments <= max_outstanding_reqs)) {

            segindex = 0;
            while ( original_count > 0) {
                if (original_count < count_by_segment) {
                    count_by_segment = original_count;
                }
                ret = MCA_PML_CALL( send((char*)sendbuf +
                                         (ptrdiff_t)segindex * (ptrdiff_t)segment_increment,
                                         count_by_segment, datatype,
                                         tree->tree_prev,
                                         MCA_COLL_BASE_TAG_REDUCE,
                                         MCA_PML_BASE_SEND_STANDARD,
                                         comm) );
                if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
                segindex++;
                original_count -= count_by_segment;
            }
        }

        /* Otherwise, introduce flow control:
           - post max_outstanding_reqs non-blocking synchronous send,
           - for remaining segments
           - wait for a ssend to complete, and post the next one.
           - wait for all outstanding sends to complete.
        */
        else {

            int creq = 0;

            sreq = coll_base_comm_get_reqs(module->base_data, max_outstanding_reqs);
            if (NULL == sreq) { line = __LINE__; ret = -1; goto error_hndl; }

            /* post first group of requests */
            for (segindex = 0; segindex < max_outstanding_reqs; segindex++) {
                ret = MCA_PML_CALL( isend((char*)sendbuf +
                                          (ptrdiff_t)segindex * (ptrdiff_t)segment_increment,
                                          count_by_segment, datatype,
                                          tree->tree_prev,
                                          MCA_COLL_BASE_TAG_REDUCE,
                                          MCA_PML_BASE_SEND_SYNCHRONOUS, comm,
                                          &sreq[segindex]) );
                if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl;  }
                original_count -= count_by_segment;
            }

            creq = 0;
            while ( original_count > 0 ) {
                /* wait on a posted request to complete */
                ret = ompi_request_wait(&sreq[creq], MPI_STATUS_IGNORE);
                if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl;  }

                if( original_count < count_by_segment ) {
                    count_by_segment = original_count;
                }
                ret = MCA_PML_CALL( isend((char*)sendbuf +
                                          (ptrdiff_t)segindex * (ptrdiff_t)segment_increment,
                                          count_by_segment, datatype,
                                          tree->tree_prev,
                                          MCA_COLL_BASE_TAG_REDUCE,
                                          MCA_PML_BASE_SEND_SYNCHRONOUS, comm,
                                          &sreq[creq]) );
                if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl;  }
                creq = (creq + 1) % max_outstanding_reqs;
                segindex++;
                original_count -= count_by_segment;
            }

            /* Wait on the remaining request to complete */
            ret = ompi_request_wait_all( max_outstanding_reqs, sreq,
                                         MPI_STATUSES_IGNORE );
            if (ret != MPI_SUCCESS) { line = __LINE__; goto error_hndl;  }
        }
    }
    return OMPI_SUCCESS;

 error_hndl:  /* error handler */
    OPAL_OUTPUT (( ompi_coll_base_framework.framework_output,
                   "ERROR_HNDL: node %d file %s line %d error %d\n",
                   rank, __FILE__, line, ret ));
    (void)line;  // silence compiler warning
    if( inbuf_free[0] != NULL ) free(inbuf_free[0]);
    if( inbuf_free[1] != NULL ) free(inbuf_free[1]);
    if( accumbuf_free != NULL ) free(accumbuf_free);
    if( NULL != sreq ) {
        ompi_coll_base_free_reqs(sreq, max_outstanding_reqs);
    }
    return ret;
}
Example #15
0
/* MPI_IN_PLACE all to all algorithm. TODO: implement a better one. */
int
mca_coll_base_alltoall_intra_basic_inplace(const void *rbuf, int rcount,
                                           struct ompi_datatype_t *rdtype,
                                           struct ompi_communicator_t *comm,
                                           mca_coll_base_module_t *module)
{
    mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
    int i, j, size, rank, err = MPI_SUCCESS, line;
    MPI_Request *preq;
    char *tmp_buffer;
    size_t max_size;
    ptrdiff_t ext;

    /* Initialize. */

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    /* If only one process, we're done. */
    if (1 == size) {
        return MPI_SUCCESS;
    }

    /* Find the largest receive amount */
    ompi_datatype_type_extent (rdtype, &ext);
    max_size = ext * rcount;

    /* Allocate a temporary buffer */
    tmp_buffer = calloc (max_size, 1);
    if (NULL == tmp_buffer) {
      return OMPI_ERR_OUT_OF_RESOURCE;
    }

    /* in-place alltoall slow algorithm (but works) */
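    /* Pairwise exchange (illustrative note, not in the original source):
     * for every pair (i, j) with i < j, ranks i and j swap their blocks via
     * the temporary buffer; e.g. with size == 3 the pairs visited are
     * (0,1), (0,2), (1,2), so each rank takes part in size - 1 exchanges. */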
    for (i = 0 ; i < size ; ++i) {
        for (j = i+1 ; j < size ; ++j) {
            /* Initiate all send/recv to/from others. */
            preq = coll_base_comm_get_reqs(base_module->base_data, size * 2);

            if (i == rank) {
                /* Copy the data into the temporary buffer */
                err = ompi_datatype_copy_content_same_ddt (rdtype, rcount, tmp_buffer,
                                                       (char *) rbuf + j * max_size);
                if (MPI_SUCCESS != err) { line = __LINE__; goto error_hndl; }

                /* Exchange data with the peer */
                err = MCA_PML_CALL(irecv ((char *) rbuf + max_size * j, rcount, rdtype,
                                          j, MCA_COLL_BASE_TAG_ALLTOALL, comm, preq++));
                if (MPI_SUCCESS != err) { line = __LINE__; goto error_hndl; }

                err = MCA_PML_CALL(isend ((char *) tmp_buffer,  rcount, rdtype,
                                          j, MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
                                          comm, preq++));
                if (MPI_SUCCESS != err) { line = __LINE__; goto error_hndl; }
            } else if (j == rank) {
                /* Copy the data into the temporary buffer */
                err = ompi_datatype_copy_content_same_ddt (rdtype, rcount, tmp_buffer,
                                                       (char *) rbuf + i * max_size);
                if (MPI_SUCCESS != err) { line = __LINE__; goto error_hndl; }

                /* Exchange data with the peer */
                err = MCA_PML_CALL(irecv ((char *) rbuf + max_size * i, rcount, rdtype,
                                          i, MCA_COLL_BASE_TAG_ALLTOALL, comm, preq++));
                if (MPI_SUCCESS != err) { line = __LINE__; goto error_hndl; }

                err = MCA_PML_CALL(isend ((char *) tmp_buffer,  rcount, rdtype,
                                          i, MCA_COLL_BASE_TAG_ALLTOALL, MCA_PML_BASE_SEND_STANDARD,
                                          comm, preq++));
                if (MPI_SUCCESS != err) { line = __LINE__; goto error_hndl; }
            } else {
                continue;
            }

            /* Wait for the requests to complete */
            err = ompi_request_wait_all (2, base_module->base_data->mcct_reqs, MPI_STATUSES_IGNORE);
            if (MPI_SUCCESS != err) { line = __LINE__; goto error_hndl; }
        }
    }

 error_hndl:
    /* Free the temporary buffer */
    free (tmp_buffer);

    if( MPI_SUCCESS != err ) {
        OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
                     "%s:%4d\tError occurred %d, rank %2d", __FILE__, line, err,
                     rank));
        ompi_coll_base_free_reqs(base_module->base_data->mcct_reqs, 2);
    }

    /* All done */
    return err;
}
Example #16
0
 int
 mca_fcoll_dynamic_file_read_all (mca_io_ompio_file_t *fh,
				  void *buf,
				  int count,
				  struct ompi_datatype_t *datatype,
				  ompi_status_public_t *status)
 {
     MPI_Aint position = 0;
     MPI_Aint total_bytes = 0;          /* total bytes to be read */
     MPI_Aint bytes_to_read_in_cycle = 0; /* left to be read in a cycle*/
     MPI_Aint bytes_per_cycle = 0;      /* total read in each cycle by each process*/
     int index = 0, ret=OMPI_SUCCESS;
     int cycles = 0;
     int i=0, j=0, l=0;
     int n=0; /* current position in total_bytes_per_process array */
     MPI_Aint bytes_remaining = 0; /* how many bytes have been read from the current
					 value from total_bytes_per_process */
     int *sorted_file_offsets=NULL, entries_per_aggregator=0;
     int bytes_received = 0;
     int blocks = 0;
     /* iovec structure and count of the buffer passed in */
     uint32_t iov_count = 0;
     struct iovec *decoded_iov = NULL;
     int iov_index = 0;
     size_t current_position = 0;
     struct iovec *local_iov_array=NULL, *global_iov_array=NULL;
     char *receive_buf = NULL;
     MPI_Aint *memory_displacements=NULL;
     /* global iovec at the readers that contain the iovecs created from
	file_set_view */
     uint32_t total_fview_count = 0;
     int local_count = 0;
     int *fview_count = NULL, *disp_index=NULL, *temp_disp_index=NULL;
     int current_index=0, temp_index=0;
     int **blocklen_per_process=NULL;
     MPI_Aint **displs_per_process=NULL;
     char *global_buf = NULL;
     MPI_Aint global_count = 0;
     local_io_array *file_offsets_for_agg=NULL;

     /* array that contains the sorted indices of the global_iov */
     int *sorted = NULL;
     int *displs = NULL;
     int dynamic_num_io_procs;
     size_t max_data = 0;
     int *bytes_per_process = NULL;
     MPI_Aint *total_bytes_per_process = NULL;
     ompi_datatype_t **sendtype = NULL;
     MPI_Request *send_req=NULL, *recv_req=NULL;


 #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
     double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0;
     double rcomm_time = 0.0, start_rcomm_time = 0.0, end_rcomm_time = 0.0;
     double read_exch = 0.0, start_rexch = 0.0, end_rexch = 0.0;
     mca_io_ompio_print_entry nentry;
 #endif


//     if (opal_datatype_is_contiguous_memory_layout(&datatype->super,1)) {
//	 fh->f_flags |= OMPIO_CONTIGUOUS_MEMORY;
//     }
     /***************************************************************************
      ** In case the data is not contiguous in memory, decode it into an iovec **
      ***************************************************************************/
     if (! (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) {
	 ret = fh->f_decode_datatype ((struct mca_io_ompio_file_t *)fh,
				    datatype,
				    count,
				    buf,
				    &max_data,
				    &decoded_iov,
				    &iov_count);
       if (OMPI_SUCCESS != ret){
	 goto exit;
       }
     }
     else {
	 max_data = count * datatype->super.size;
     }

     if ( MPI_STATUS_IGNORE != status ) {
	 status->_ucount = max_data;
     }

     fh->f_get_num_aggregators ( &dynamic_num_io_procs);
     ret = fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh,
				       dynamic_num_io_procs,
				       max_data);
     if (OMPI_SUCCESS != ret){
	 goto exit;
     }

     total_bytes_per_process = (MPI_Aint*)malloc
	 (fh->f_procs_per_group*sizeof(MPI_Aint));
     if (NULL == total_bytes_per_process) {
	 opal_output (1, "OUT OF MEMORY\n");
	 ret = OMPI_ERR_OUT_OF_RESOURCE;
	 goto exit;
     }

     ret = fh->f_allgather_array (&max_data,
				  1,
				  MPI_LONG,
				  total_bytes_per_process,
				  1,
				  MPI_LONG,
				  fh->f_aggregator_index,
				  fh->f_procs_in_group,
				  fh->f_procs_per_group,
				  fh->f_comm);
     if (OMPI_SUCCESS != ret){
       goto exit;
     }

     for (i=0 ; i<fh->f_procs_per_group ; i++) {
	 total_bytes += total_bytes_per_process[i];
     }

     if (NULL != total_bytes_per_process) {
	 free (total_bytes_per_process);
	 total_bytes_per_process = NULL;
     }

     /*********************************************************************
      *** Generate the File offsets/lengths corresponding to this read ***
      ********************************************************************/
     ret = fh->f_generate_current_file_view ((struct mca_io_ompio_file_t *) fh,
					     max_data,
					     &local_iov_array,
					     &local_count);

     if (ret != OMPI_SUCCESS){
	 goto exit;
     }



     /* #########################################################*/

     /*************************************************************
      *** ALLGather the File View information at all processes ***
      *************************************************************/

     fview_count = (int *) malloc (fh->f_procs_per_group * sizeof (int));
     if (NULL == fview_count) {
	 opal_output (1, "OUT OF MEMORY\n");
	 ret = OMPI_ERR_OUT_OF_RESOURCE;
	 goto exit;
     }

     ret = fh->f_allgather_array (&local_count,
				  1,
				  MPI_INT,
				  fview_count,
				  1,
				  MPI_INT,
				  fh->f_aggregator_index,
				  fh->f_procs_in_group,
				  fh->f_procs_per_group,
				  fh->f_comm);

     if (OMPI_SUCCESS != ret){
	 goto exit;
     }

     displs = (int*)malloc (fh->f_procs_per_group*sizeof(int));
     if (NULL == displs) {
	 opal_output (1, "OUT OF MEMORY\n");
	 ret = OMPI_ERR_OUT_OF_RESOURCE;
	 goto exit;
     }

     displs[0] = 0;
     total_fview_count = fview_count[0];
     for (i=1 ; i<fh->f_procs_per_group ; i++) {
	 total_fview_count += fview_count[i];
	 displs[i] = displs[i-1] + fview_count[i-1];
     }

 #if DEBUG_ON
     if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
	 for (i=0 ; i<fh->f_procs_per_group ; i++) {
	     printf ("%d: PROCESS: %d  ELEMENTS: %d  DISPLS: %d\n",
		     fh->f_rank,
		     i,
		     fview_count[i],
		     displs[i]);
	 }
     }
 #endif

     /* allocate the global iovec  */
     if (0 != total_fview_count) {
       global_iov_array = (struct iovec*)malloc (total_fview_count *
						 sizeof(struct iovec));
       if (NULL == global_iov_array) {
	 opal_output (1, "OUT OF MEMORY\n");
	 ret = OMPI_ERR_OUT_OF_RESOURCE;
	 goto exit;
       }
     }

     ret =  fh->f_allgatherv_array (local_iov_array,
				    local_count,
				    fh->f_iov_type,
				    global_iov_array,
				    fview_count,
				    displs,
				    fh->f_iov_type,
				    fh->f_aggregator_index,
				    fh->f_procs_in_group,
				    fh->f_procs_per_group,
				    fh->f_comm);

     if (OMPI_SUCCESS != ret){
       goto exit;
     }

     /* sort it */
     if (0 != total_fview_count) {
	 sorted = (int *)malloc (total_fview_count * sizeof(int));
	 if (NULL == sorted) {
	     opal_output (1, "OUT OF MEMORY\n");
	     ret = OMPI_ERR_OUT_OF_RESOURCE;
	     goto exit;
	 }
	 fh->f_sort_iovec (global_iov_array, total_fview_count, sorted);
     }

     if (NULL != local_iov_array) {
	 free (local_iov_array);
	 local_iov_array = NULL;
     }

 #if DEBUG_ON
     if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
	 for (i=0 ; i<total_fview_count ; i++) {
	     printf("%d: OFFSET: %p   LENGTH: %d\n",
		    fh->f_rank,
		    global_iov_array[sorted[i]].iov_base,
		    global_iov_array[sorted[i]].iov_len);
	 }
     }
 #endif

     if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {

       disp_index = (int *)malloc (fh->f_procs_per_group * sizeof (int));
       if (NULL == disp_index) {
	 opal_output (1, "OUT OF MEMORY\n");
	 ret = OMPI_ERR_OUT_OF_RESOURCE;
	 goto exit;
       }

       blocklen_per_process = (int **)malloc (fh->f_procs_per_group * sizeof (int*));
       if (NULL == blocklen_per_process) {
	 opal_output (1, "OUT OF MEMORY\n");
	 ret = OMPI_ERR_OUT_OF_RESOURCE;
	 goto exit;
       }

       displs_per_process = (MPI_Aint **)malloc (fh->f_procs_per_group * sizeof (MPI_Aint*));
       if (NULL == displs_per_process){
	 opal_output (1, "OUT OF MEMORY\n");
	 ret = OMPI_ERR_OUT_OF_RESOURCE;
	 goto exit;
       }

       for (i=0;i<fh->f_procs_per_group;i++){
	 blocklen_per_process[i] = NULL;
	 displs_per_process[i] = NULL;
       }
     }


     /*
      * Calculate how many bytes are read in each cycle
      */
     fh->f_get_bytes_per_agg ( (int *) &bytes_per_cycle);
     cycles = ceil((double)total_bytes/bytes_per_cycle);
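     /* Illustrative arithmetic (added note, not from the original source):
      * with total_bytes = 100 MB and bytes_per_cycle = 32 MB, this gives
      * cycles = ceil(100/32) = 4; the first three cycles move 32 MB each
      * and the last cycle moves the remaining 4 MB (see the last-cycle
      * branch inside the loop below). */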

     n = 0;
     bytes_remaining = 0;
     current_index = 0;


 #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
     start_rexch = MPI_Wtime();
 #endif
     for (index = 0; index < cycles; index++) {
       /* Getting ready for next cycle
	  Initializing and freeing buffers */
       if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
	 if (NULL == sendtype){
	   sendtype = (ompi_datatype_t **)
	     malloc (fh->f_procs_per_group  * sizeof(ompi_datatype_t *));
	   if (NULL == sendtype) {
	     opal_output (1, "OUT OF MEMORY\n");
	     ret = OMPI_ERR_OUT_OF_RESOURCE;
	     goto exit;
	   }
	 }

	 for(l=0;l<fh->f_procs_per_group;l++){

	   disp_index[l] =  1;

	   if (NULL != blocklen_per_process[l]){
	     free(blocklen_per_process[l]);
	     blocklen_per_process[l] = NULL;
	   }
	   if (NULL != displs_per_process[l]){
	     free(displs_per_process[l]);
	     displs_per_process[l] = NULL;
	   }
	   blocklen_per_process[l] = (int *) calloc (1, sizeof(int));
	   if (NULL == blocklen_per_process[l]) {
	     opal_output (1, "OUT OF MEMORY for blocklen\n");
	     ret = OMPI_ERR_OUT_OF_RESOURCE;
	     goto exit;
	   }
	   displs_per_process[l] = (MPI_Aint *) calloc (1, sizeof(MPI_Aint));
	   if (NULL == displs_per_process[l]){
	     opal_output (1, "OUT OF MEMORY for displs\n");
	     ret = OMPI_ERR_OUT_OF_RESOURCE;
	     goto exit;
	   }
	 }

	 if (NULL != sorted_file_offsets){
	   free(sorted_file_offsets);
	   sorted_file_offsets = NULL;
	 }

	 if(NULL != file_offsets_for_agg){
	   free(file_offsets_for_agg);
	   file_offsets_for_agg = NULL;
	 }
	 if (NULL != memory_displacements){
	   free(memory_displacements);
	   memory_displacements = NULL;
	 }
       }


       if (cycles-1 == index) {
	 bytes_to_read_in_cycle = total_bytes - bytes_per_cycle*index;
       }
       else {
	 bytes_to_read_in_cycle = bytes_per_cycle;
       }

 #if DEBUG_ON
       if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
	 printf ("****%d: CYCLE %d   Bytes %d**********\n",
		 fh->f_rank,
		 index,
		 bytes_to_read_in_cycle);
       }
 #endif

       /* Calculate how much data will be contributed in this cycle
	    by each process*/
       bytes_received = 0;

       while (bytes_to_read_in_cycle) {
	 blocks = fview_count[0];
	 for (j=0 ; j<fh->f_procs_per_group ; j++) {
	   if (sorted[current_index] < blocks) {
	     n = j;
	     break;
	   }
	   else {
	     blocks += fview_count[j+1];
	   }
	 }
	 if (bytes_remaining) {
	   if (bytes_remaining <= bytes_to_read_in_cycle) {

	     if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
	       blocklen_per_process[n][disp_index[n] - 1] = bytes_remaining;
	       displs_per_process[n][disp_index[n] - 1] =
		 (OPAL_PTRDIFF_TYPE)global_iov_array[sorted[current_index]].iov_base +
		 (global_iov_array[sorted[current_index]].iov_len - bytes_remaining);
	     }
	     if (fh->f_procs_in_group[n] == fh->f_rank) {
	       bytes_received += bytes_remaining;
	     }
	     current_index ++;
	     bytes_to_read_in_cycle -= bytes_remaining;
	     bytes_remaining = 0;
	     if (fh->f_procs_in_group[fh->f_aggregator_index] ==
		 fh->f_rank) {
	       blocklen_per_process[n] = (int *) realloc
		 ((void *)blocklen_per_process[n], (disp_index[n]+1)*sizeof(int));
	       displs_per_process[n] = (MPI_Aint *) realloc
		 ((void *)displs_per_process[n], (disp_index[n]+1)*sizeof(MPI_Aint));
	       blocklen_per_process[n][disp_index[n]] = 0;
	       displs_per_process[n][disp_index[n]] = 0;
	       disp_index[n] += 1;
	     }
	     continue;
	   }
	   else {
	     if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
	       blocklen_per_process[n][disp_index[n] - 1] = bytes_to_read_in_cycle;
	       displs_per_process[n][disp_index[n] - 1] =
		 (OPAL_PTRDIFF_TYPE)global_iov_array[sorted[current_index]].iov_base +
		 (global_iov_array[sorted[current_index]].iov_len
		  - bytes_remaining);
	     }
	     if (fh->f_procs_in_group[n] == fh->f_rank) {
	       bytes_received += bytes_to_read_in_cycle;
	     }
	     bytes_remaining -= bytes_to_read_in_cycle;
	     bytes_to_read_in_cycle = 0;
	     break;
	   }
	 }
	 else {
	   if (bytes_to_read_in_cycle <
		   (MPI_Aint) global_iov_array[sorted[current_index]].iov_len) {
	     if (fh->f_procs_in_group[fh->f_aggregator_index] ==
		 fh->f_rank) {

	       blocklen_per_process[n][disp_index[n] - 1] = bytes_to_read_in_cycle;
	       displs_per_process[n][disp_index[n] - 1] =
		 (OPAL_PTRDIFF_TYPE)global_iov_array[sorted[current_index]].iov_base ;
	     }

	     if (fh->f_procs_in_group[n] == fh->f_rank) {
	       bytes_received += bytes_to_read_in_cycle;
	     }
	     bytes_remaining = global_iov_array[sorted[current_index]].iov_len -
	       bytes_to_read_in_cycle;
	     bytes_to_read_in_cycle = 0;
	     break;
	   }
	   else {
	     if (fh->f_procs_in_group[fh->f_aggregator_index] ==
		 fh->f_rank) {
	       blocklen_per_process[n][disp_index[n] - 1] =
		 global_iov_array[sorted[current_index]].iov_len;
	       displs_per_process[n][disp_index[n] - 1] = (OPAL_PTRDIFF_TYPE)
		 global_iov_array[sorted[current_index]].iov_base;
	       blocklen_per_process[n] =
		 (int *) realloc ((void *)blocklen_per_process[n], (disp_index[n]+1)*sizeof(int));
	       displs_per_process[n] = (MPI_Aint *)realloc
		 ((void *)displs_per_process[n], (disp_index[n]+1)*sizeof(MPI_Aint));
	       blocklen_per_process[n][disp_index[n]] = 0;
	       displs_per_process[n][disp_index[n]] = 0;
	       disp_index[n] += 1;
	     }
	     if (fh->f_procs_in_group[n] == fh->f_rank) {
	       bytes_received +=
		 global_iov_array[sorted[current_index]].iov_len;
	     }
	     bytes_to_read_in_cycle -=
	       global_iov_array[sorted[current_index]].iov_len;
	     current_index ++;
	     continue;
	   }
	 }
       }
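       /* Added summary: at this point an aggregator has, for every group
	  member i, this cycle's file extents in blocklen_per_process[i] /
	  displs_per_process[i], and every rank knows in bytes_received how
	  much of its own data the aggregator will send back this cycle. */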
       /* Calculate the displacement on where to put the data and allocate
	  the receive buffer (global_buf) */
       if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
	 entries_per_aggregator=0;
	 for (i=0;i<fh->f_procs_per_group; i++){
	   for (j=0;j<disp_index[i];j++){
	     if (blocklen_per_process[i][j] > 0)
	       entries_per_aggregator++ ;
	   }
	 }
	 if (entries_per_aggregator > 0){
	   file_offsets_for_agg = (local_io_array *)
	     malloc(entries_per_aggregator*sizeof(local_io_array));
	   if (NULL == file_offsets_for_agg) {
	     opal_output (1, "OUT OF MEMORY\n");
	     ret = OMPI_ERR_OUT_OF_RESOURCE;
	     goto exit;
	   }
	   sorted_file_offsets = (int *)
	     malloc (entries_per_aggregator*sizeof(int));
	   if (NULL == sorted_file_offsets){
	     opal_output (1, "OUT OF MEMORY\n");
	     ret =  OMPI_ERR_OUT_OF_RESOURCE;
	     goto exit;
	   }
	   /*Moving file offsets to an IO array!*/
	   temp_index = 0;
	   global_count = 0;
	   for (i=0;i<fh->f_procs_per_group; i++){
	     for(j=0;j<disp_index[i];j++){
	       if (blocklen_per_process[i][j] > 0){
		   file_offsets_for_agg[temp_index].length =
		     blocklen_per_process[i][j];
		   global_count += blocklen_per_process[i][j];
		   file_offsets_for_agg[temp_index].process_id = i;
		   file_offsets_for_agg[temp_index].offset =
		     displs_per_process[i][j];
		   temp_index++;
	       }
	     }
	   }
	 }
	 else{
	   continue;
	 }

	 read_heap_sort (file_offsets_for_agg,
			 entries_per_aggregator,
			 sorted_file_offsets);

	 memory_displacements = (MPI_Aint *) malloc
	   (entries_per_aggregator * sizeof(MPI_Aint));
	 memory_displacements[sorted_file_offsets[0]] = 0;
	 for (i=1; i<entries_per_aggregator; i++){
	   memory_displacements[sorted_file_offsets[i]] =
	     memory_displacements[sorted_file_offsets[i-1]] +
	     file_offsets_for_agg[sorted_file_offsets[i-1]].length;
	 }
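	 /* Clarifying note: memory_displacements is a prefix sum over the
	    sorted extent lengths, i.e. entry k is the offset inside
	    global_buf where the k-th extent (in file-offset order) lands;
	    e.g. sorted lengths {4, 8, 2} yield displacements {0, 4, 12}. */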

	 global_buf = (char *) malloc (global_count * sizeof(char));
	 if (NULL == global_buf){
	   opal_output(1, "OUT OF MEMORY\n");
	   ret = OMPI_ERR_OUT_OF_RESOURCE;
	   goto exit;
	 }

	  fh->f_io_array = (mca_io_ompio_io_array_t *) malloc
	    (entries_per_aggregator * sizeof (mca_io_ompio_io_array_t));
	  if (NULL == fh->f_io_array) {
	    opal_output(1, "OUT OF MEMORY\n");
	    ret = OMPI_ERR_OUT_OF_RESOURCE;
	    goto exit;
	  }

	 fh->f_num_of_io_entries = 0;
	 fh->f_io_array[fh->f_num_of_io_entries].offset =
	     (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[0]].offset;
	 fh->f_io_array[fh->f_num_of_io_entries].length =
	   file_offsets_for_agg[sorted_file_offsets[0]].length;
	 fh->f_io_array[fh->f_num_of_io_entries].memory_address =
	   global_buf+memory_displacements[sorted_file_offsets[0]];
	 fh->f_num_of_io_entries++;
	 for (i=1;i<entries_per_aggregator;i++){
	   if (file_offsets_for_agg[sorted_file_offsets[i-1]].offset +
	       file_offsets_for_agg[sorted_file_offsets[i-1]].length ==
	       file_offsets_for_agg[sorted_file_offsets[i]].offset){
	     fh->f_io_array[fh->f_num_of_io_entries - 1].length +=
	       file_offsets_for_agg[sorted_file_offsets[i]].length;
	   }
	   else{
	     fh->f_io_array[fh->f_num_of_io_entries].offset =
		 (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[i]].offset;
	     fh->f_io_array[fh->f_num_of_io_entries].length =
	       file_offsets_for_agg[sorted_file_offsets[i]].length;
	     fh->f_io_array[fh->f_num_of_io_entries].memory_address =
	       global_buf+memory_displacements[sorted_file_offsets[i]];
	     fh->f_num_of_io_entries++;
	   }
	 }
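	 /* Clarifying note: the loop above merges extents that are adjacent
	    in the file into a single f_io_array entry, so fbtl_preadv below
	    issues fewer, larger contiguous read requests instead of one per
	    (offset, length) pair. */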


 #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
	 start_read_time = MPI_Wtime();
 #endif

	 if (fh->f_num_of_io_entries) {
	   if ( 0 >  fh->f_fbtl->fbtl_preadv (fh)) {
	     opal_output (1, "READ FAILED\n");
	     ret = OMPI_ERROR;
	     goto exit;
	   }
	 }

 #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
	 end_read_time = MPI_Wtime();
	 read_time += end_read_time - start_read_time;
 #endif
	 /**********************************************************
	  ******************** DONE READING ************************
	  *********************************************************/

	 temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int));
	 if (NULL == temp_disp_index) {
	   opal_output (1, "OUT OF MEMORY\n");
	   ret = OMPI_ERR_OUT_OF_RESOURCE;
	   goto exit;
	 }
	 for (i=0; i<entries_per_aggregator; i++){
	   temp_index =
	     file_offsets_for_agg[sorted_file_offsets[i]].process_id;
	   displs_per_process[temp_index][temp_disp_index[temp_index]] =
	     memory_displacements[sorted_file_offsets[i]];
	   if (temp_disp_index[temp_index] < disp_index[temp_index]){
	     temp_disp_index[temp_index] += 1;
	   }
	   else{
	     printf("temp_disp_index[%d]: %d is greater than disp_index[%d]: %d\n",
		    temp_index, temp_disp_index[temp_index],
		    temp_index, disp_index[temp_index]);
	   }
	 }
	 if (NULL != temp_disp_index){
	   free(temp_disp_index);
	   temp_disp_index = NULL;
	 }

	 send_req = (MPI_Request *)
	   malloc (fh->f_procs_per_group * sizeof(MPI_Request));
	 if (NULL == send_req){
	   opal_output ( 1, "OUT OF MEMORY\n");
	   ret = OMPI_ERR_OUT_OF_RESOURCE;
	   goto exit;
	 }
 #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
	 start_rcomm_time = MPI_Wtime();
 #endif
	 for (i=0;i<fh->f_procs_per_group;i++){
	   ompi_datatype_create_hindexed(disp_index[i],
					 blocklen_per_process[i],
					 displs_per_process[i],
					 MPI_BYTE,
					 &sendtype[i]);
	   ompi_datatype_commit(&sendtype[i]);
	   ret = MCA_PML_CALL (isend(global_buf,
				     1,
				     sendtype[i],
				     fh->f_procs_in_group[i],
				     123,
				     MCA_PML_BASE_SEND_STANDARD,
				     fh->f_comm,
				     &send_req[i]));
	   if(OMPI_SUCCESS != ret){
	       goto exit;
	   }
	 }
 #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
	 end_rcomm_time = MPI_Wtime();
	 rcomm_time += end_rcomm_time - start_rcomm_time;
 #endif
       }

       /**********************************************************
	********* Scatter the Data from the readers **************
	*********************************************************/
       if (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY) {
	 receive_buf = &((char*)buf)[position];
       }
       else if (bytes_received) {
	 /* allocate a receive buffer and copy the data that needs
	    to be received into it in case the data is non-contiguous
	    in memory */
	 receive_buf = malloc (bytes_received);
	 if (NULL == receive_buf) {
	   opal_output (1, "OUT OF MEMORY\n");
	   ret = OMPI_ERR_OUT_OF_RESOURCE;
	   goto exit;
	 }
       }

 #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
       start_rcomm_time = MPI_Wtime();
 #endif
       recv_req = (MPI_Request *) malloc (sizeof (MPI_Request));
       if (NULL == recv_req){
	 opal_output (1, "OUT OF MEMORY\n");
	 ret = OMPI_ERR_OUT_OF_RESOURCE;
	 goto exit;
       }

       ret = MCA_PML_CALL(irecv(receive_buf,
				bytes_received,
				MPI_BYTE,
				fh->f_procs_in_group[fh->f_aggregator_index],
				123,
				fh->f_comm,
				recv_req));
       if (OMPI_SUCCESS != ret){
	 goto exit;
       }


       if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank){
	 ret = ompi_request_wait_all (fh->f_procs_per_group,
				      send_req,
				      MPI_STATUSES_IGNORE);
	 if (OMPI_SUCCESS != ret){
	   goto exit;
	 }
       }

       ret = ompi_request_wait (recv_req, MPI_STATUS_IGNORE);
       if (OMPI_SUCCESS != ret){
	 goto exit;
       }
       position += bytes_received;

       /* If data is not contiguous in memory, copy the data from the
	  receive buffer into the buffer passed in */
       if (!(fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) {
	 OPAL_PTRDIFF_TYPE mem_address;
	 size_t remaining = 0;
	 size_t temp_position = 0;

	 remaining = bytes_received;

	 while (remaining) {
	   mem_address = (OPAL_PTRDIFF_TYPE)
	     (decoded_iov[iov_index].iov_base) + current_position;

	   if (remaining >=
	       (decoded_iov[iov_index].iov_len - current_position)) {
	     memcpy ((IOVBASE_TYPE *) mem_address,
		     receive_buf+temp_position,
		     decoded_iov[iov_index].iov_len - current_position);
	     remaining = remaining -
	       (decoded_iov[iov_index].iov_len - current_position);
	     temp_position = temp_position +
	       (decoded_iov[iov_index].iov_len - current_position);
	     iov_index = iov_index + 1;
	     current_position = 0;
	   }
	   else {
	     memcpy ((IOVBASE_TYPE *) mem_address,
		     receive_buf+temp_position,
		     remaining);
	     current_position = current_position + remaining;
	     remaining = 0;
	   }
	 }

	 if (NULL != receive_buf) {
	   free (receive_buf);
	   receive_buf = NULL;
	 }
       }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
       end_rcomm_time = MPI_Wtime();
       rcomm_time += end_rcomm_time - start_rcomm_time;
#endif

       if (NULL != recv_req){
	 free(recv_req);
	 recv_req = NULL;
       }
       if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank){
	 fh->f_num_of_io_entries = 0;
	 if (NULL != fh->f_io_array) {
	   free (fh->f_io_array);
	   fh->f_io_array = NULL;
	 }
	 if (NULL != global_buf) {
	   free (global_buf);
	   global_buf = NULL;
	 }
	 for (i = 0; i < fh->f_procs_per_group; i++)
	   ompi_datatype_destroy(sendtype+i);
	 if (NULL != sendtype){
	   free(sendtype);
	   sendtype=NULL;
	 }
	 if (NULL != send_req){
	   free(send_req);
	   send_req = NULL;
	 }
	 if (NULL != sorted_file_offsets){
	   free(sorted_file_offsets);
	   sorted_file_offsets = NULL;
	 }
	 if (NULL != file_offsets_for_agg){
	   free(file_offsets_for_agg);
	   file_offsets_for_agg = NULL;
	 }
	 if (NULL != bytes_per_process){
	   free(bytes_per_process);
	   bytes_per_process =NULL;
	 }
	 if (NULL != memory_displacements){
	   free(memory_displacements);
	   memory_displacements= NULL;
	 }
       }
     }

 #if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
     end_rexch = MPI_Wtime();
     read_exch += end_rexch - start_rexch;
     nentry.time[0] = read_time;
     nentry.time[1] = rcomm_time;
     nentry.time[2] = read_exch;
     if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank)
       nentry.aggregator = 1;
     else
       nentry.aggregator = 0;
     nentry.nprocs_for_coll = dynamic_num_io_procs;
     if (!fh->f_full_print_queue(READ_PRINT_QUEUE)){
       fh->f_register_print_entry(READ_PRINT_QUEUE,
				  nentry);
     }
 #endif

 exit:
     if (NULL != sorted) {
       free (sorted);
       sorted = NULL;
     }
     if (NULL != global_iov_array) {
       free (global_iov_array);
       global_iov_array = NULL;
     }
     if (NULL != fview_count) {
       free (fview_count);
       fview_count = NULL;
     }
     if (NULL != decoded_iov) {
       free (decoded_iov);
       decoded_iov = NULL;
     }
     if (NULL != local_iov_array){
       free(local_iov_array);
       local_iov_array=NULL;
     }

     if (NULL != displs) {
       free (displs);
       displs = NULL;
     }
     if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {

       if (NULL != disp_index){
	 free(disp_index);
	 disp_index = NULL;
       }

       if ( NULL != blocklen_per_process){
	 for(l=0;l<fh->f_procs_per_group;l++){
	   if (NULL != blocklen_per_process[l]){
	     free(blocklen_per_process[l]);
	     blocklen_per_process[l] = NULL;
	   }
	 }

	 free(blocklen_per_process);
	 blocklen_per_process = NULL;
       }

       if (NULL != displs_per_process){
	 for (l=0; l<fh->f_procs_per_group; l++){
	   if (NULL != displs_per_process[l]){
	     free(displs_per_process[l]);
	     displs_per_process[l] = NULL;
	   }
	 }
	 free(displs_per_process);
	 displs_per_process = NULL;
       }

     }
     return ret;
 }
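Note (added for clarity): the aggregation loop in the function above splits the collective read into fixed-size cycles. The following standalone sketch is illustrative only; the helper name bytes_in_cycle and the sample sizes are hypothetical and not part of the Open MPI code, but the ceil-based cycle count and the last-cycle remainder handling mirror the logic used above.

#include <math.h>
#include <stdio.h>

/* Hypothetical helper mirroring the per-cycle split used above:
 * every cycle moves bytes_per_cycle bytes except the last one,
 * which carries whatever is left over. */
static long bytes_in_cycle(long total_bytes, long bytes_per_cycle,
                           int index, int cycles)
{
    if (index == cycles - 1) {
        return total_bytes - bytes_per_cycle * (long) index;
    }
    return bytes_per_cycle;
}

int main(void)
{
    long total_bytes     = 100L * 1024 * 1024;  /* aggregate data of the group */
    long bytes_per_cycle =  32L * 1024 * 1024;  /* per-aggregator cycle budget */
    int cycles = (int) ceil((double) total_bytes / (double) bytes_per_cycle);

    for (int index = 0; index < cycles; index++) {
        printf("cycle %d: %ld bytes\n", index,
               bytes_in_cycle(total_bytes, bytes_per_cycle, index, cycles));
    }
    return 0;
}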
Example #17
int ompi_coll_base_alltoall_intra_basic_linear(const void *sbuf, int scount,
                                               struct ompi_datatype_t *sdtype,
                                               void* rbuf, int rcount,
                                               struct ompi_datatype_t *rdtype,
                                               struct ompi_communicator_t *comm,
                                               mca_coll_base_module_t *module)
{
    int i, rank, size, err, nreqs, line;
    char *psnd, *prcv;
    MPI_Aint lb, sndinc, rcvinc;
    ompi_request_t **req, **sreq, **rreq;
    mca_coll_base_module_t *base_module = (mca_coll_base_module_t*) module;
    mca_coll_base_comm_t *data = base_module->base_data;

    if (MPI_IN_PLACE == sbuf) {
        return mca_coll_base_alltoall_intra_basic_inplace (rbuf, rcount, rdtype,
                                                            comm, module);
    }

    /* Initialize. */

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    OPAL_OUTPUT((ompi_coll_base_framework.framework_output,
                 "ompi_coll_base_alltoall_intra_basic_linear rank %d", rank));

    err = ompi_datatype_get_extent(sdtype, &lb, &sndinc);
    if (OMPI_SUCCESS != err) {
        return err;
    }
    sndinc *= scount;

    err = ompi_datatype_get_extent(rdtype, &lb, &rcvinc);
    if (OMPI_SUCCESS != err) {
        return err;
    }
    rcvinc *= rcount;

    /* simple optimization */

    psnd = ((char *) sbuf) + (ptrdiff_t)rank * sndinc;
    prcv = ((char *) rbuf) + (ptrdiff_t)rank * rcvinc;

    err = ompi_datatype_sndrcv(psnd, scount, sdtype, prcv, rcount, rdtype);
    if (MPI_SUCCESS != err) {
        return err;
    }

    /* If only one process, we're done. */

    if (1 == size) {
        return MPI_SUCCESS;
    }

    /* Initiate all send/recv to/from others. */

    req = rreq = coll_base_comm_get_reqs(data, (size - 1) * 2);
    /* Defensive check added here, following the pattern used elsewhere in
       this file, so a NULL request array is not dereferenced below. */
    if (NULL == req) { nreqs = 0; err = OMPI_ERR_OUT_OF_RESOURCE; line = __LINE__; goto err_hndl; }

    prcv = (char *) rbuf;
    psnd = (char *) sbuf;

    /* Post all receives first -- a simple optimization */

    for (nreqs = 0, i = (rank + 1) % size; i != rank;
         i = (i + 1) % size, ++rreq, ++nreqs) {
        err = MCA_PML_CALL(irecv_init
                           (prcv + (ptrdiff_t)i * rcvinc, rcount, rdtype, i,
                           MCA_COLL_BASE_TAG_ALLTOALL, comm, rreq));
        if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
    }

    /* Now post all sends in reverse order
       - We would like to minimize the search time through message queue
         when messages actually arrive in the order in which they were posted.
     */
    sreq = rreq;
    for (i = (rank + size - 1) % size; i != rank;
         i = (i + size - 1) % size, ++sreq, ++nreqs) {
        err = MCA_PML_CALL(isend_init
                           (psnd + (ptrdiff_t)i * sndinc, scount, sdtype, i,
                           MCA_COLL_BASE_TAG_ALLTOALL,
                           MCA_PML_BASE_SEND_STANDARD, comm, sreq));
        if (MPI_SUCCESS != err) { line = __LINE__; goto err_hndl; }
    }

    /* Start your engines.  This will never return an error. */

    MCA_PML_CALL(start(nreqs, req));

    /* Wait for them all.  If there's an error, note that we don't
     * care what the error was -- just that there *was* an error.  The
     * PML will finish all requests, even if one or more of them fail.
     * i.e., by the end of this call, all the requests are free-able.
     * So free them anyway -- even if there was an error, and return
     * the error after we free everything. */

    err = ompi_request_wait_all(nreqs, req, MPI_STATUSES_IGNORE);

 err_hndl:
    if( MPI_SUCCESS != err ) {
        OPAL_OUTPUT( (ompi_coll_base_framework.framework_output,"%s:%4d\tError occurred %d, rank %2d",
                      __FILE__, line, err, rank) );
    }
    /* Free the reqs in all cases as they are persistent requests */
    ompi_coll_base_free_reqs(req, nreqs);

    /* All done */
    return err;
}
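Note (added for clarity): the function above implements the linear all-to-all at the PML level with persistent requests. The sketch below is a minimal, illustrative user-level equivalent written against the public MPI API (MPI_Irecv / MPI_Isend / MPI_Waitall); it exchanges one int per rank, keeps the receive-first posting order, and omits the persistent-request and self-copy optimizations used above.

#include <mpi.h>
#include <stdlib.h>

/* Illustrative sketch: linear alltoall of one int per rank, posting all
 * receives first and then all sends, then waiting on everything.  Same
 * communication pattern as the function above, but via the public MPI API. */
int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int *sbuf = malloc(size * sizeof(int));
    int *rbuf = malloc(size * sizeof(int));
    MPI_Request *reqs = malloc(2 * size * sizeof(MPI_Request));

    for (int i = 0; i < size; i++) {
        sbuf[i] = rank * 100 + i;   /* payload destined for rank i */
    }

    /* Post all receives first, then all sends (same ordering idea as above). */
    for (int i = 0; i < size; i++) {
        MPI_Irecv(&rbuf[i], 1, MPI_INT, i, 0, MPI_COMM_WORLD, &reqs[i]);
    }
    for (int i = 0; i < size; i++) {
        MPI_Isend(&sbuf[i], 1, MPI_INT, i, 0, MPI_COMM_WORLD, &reqs[size + i]);
    }
    MPI_Waitall(2 * size, reqs, MPI_STATUSES_IGNORE);

    free(reqs);
    free(rbuf);
    free(sbuf);
    MPI_Finalize();
    return 0;
}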
int
mca_fcoll_static_file_read_all (mca_io_ompio_file_t *fh,
				void *buf,
				int count,
				struct ompi_datatype_t *datatype,
				ompi_status_public_t *status)
{

    int ret = OMPI_SUCCESS, iov_size=0, *bytes_remaining=NULL;
    int i, j, l,cycles=0, local_cycles=0, *current_index=NULL;
    int index, *disp_index=NULL, *bytes_per_process=NULL, current_position=0;
    int **blocklen_per_process=NULL, *iovec_count_per_process=NULL;
    int *displs=NULL, *sorted=NULL ,entries_per_aggregator=0;
    int *sorted_file_offsets=NULL, temp_index=0, position=0, *temp_disp_index=NULL;


    MPI_Aint **displs_per_process=NULL, global_iov_count=0, global_count=0;
    MPI_Aint *memory_displacements=NULL;
    int bytes_to_read_in_cycle=0;
    size_t max_data=0, bytes_per_cycle=0;
    uint32_t iov_count=0, iov_index=0;
    struct iovec *decoded_iov=NULL, *iov=NULL;
    mca_fcoll_static_local_io_array *local_iov_array=NULL, *global_iov_array=NULL;
    mca_fcoll_static_local_io_array *file_offsets_for_agg=NULL;

    char *global_buf=NULL, *receive_buf=NULL;

    int blocklen[3] = {1, 1, 1};
    int static_num_io_procs=1;
    OPAL_PTRDIFF_TYPE d[3], base;
    ompi_datatype_t *types[3];
    ompi_datatype_t *io_array_type=MPI_DATATYPE_NULL;
    ompi_datatype_t **sendtype = NULL;
    MPI_Request *send_req=NULL, recv_req=NULL;
    int my_aggregator=-1;
    bool recvbuf_is_contiguous=false;
    size_t ftype_size;
    OPAL_PTRDIFF_TYPE ftype_extent, lb;

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    double read_time = 0.0, start_read_time = 0.0, end_read_time = 0.0;
    double rcomm_time = 0.0, start_rcomm_time = 0.0, end_rcomm_time = 0.0;
    double read_exch = 0.0, start_rexch = 0.0, end_rexch = 0.0;
    mca_common_ompio_print_entry nentry;
#endif
#if DEBUG_ON
    MPI_Aint gc_in;
#endif
    opal_datatype_type_size ( &datatype->super, &ftype_size );
    opal_datatype_get_extent ( &datatype->super, &lb, &ftype_extent );

    /**************************************************************************
     ** 1.  In case the data is not contiguous in memory, decode it into an iovec
     **************************************************************************/
    if ( ( ftype_extent == (OPAL_PTRDIFF_TYPE) ftype_size)             &&
         opal_datatype_is_contiguous_memory_layout(&datatype->super,1) &&
         0 == lb ) {
        recvbuf_is_contiguous = true;
    }


    /* In case the data is not contiguous in memory, decode it into an iovec */
    if (!recvbuf_is_contiguous  ) {
        fh->f_decode_datatype ( (struct mca_io_ompio_file_t *)fh,
                                datatype,
                                count,
                                buf,
                                &max_data,
                                &decoded_iov,
                                &iov_count);
    }
    else {
        max_data = count * datatype->super.size;
    }

    if ( MPI_STATUS_IGNORE != status ) {
        status->_ucount = max_data;
    }


    fh->f_get_num_aggregators ( &static_num_io_procs );
    fh->f_set_aggregator_props ((struct mca_io_ompio_file_t *) fh,
                                static_num_io_procs,
                                max_data);
    my_aggregator = fh->f_procs_in_group[fh->f_aggregator_index];

    /*  printf("max_data %ld\n", max_data);  */
    ret = fh->f_generate_current_file_view((struct mca_io_ompio_file_t *)fh,
                                           max_data,
                                           &iov,
                                           &iov_size);
    if (ret != OMPI_SUCCESS){
        goto exit;
    }

    if ( iov_size > 0 ) {
        local_iov_array = (mca_fcoll_static_local_io_array *)malloc (iov_size * sizeof(mca_fcoll_static_local_io_array));
        if ( NULL == local_iov_array){
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }


        for (j=0; j < iov_size; j++){
            local_iov_array[j].offset = (OMPI_MPI_OFFSET_TYPE)(intptr_t)
                iov[j].iov_base;
            local_iov_array[j].length = (size_t)iov[j].iov_len;
            local_iov_array[j].process_id = fh->f_rank;

        }
    }
    else {
        /* Allocate at least one element to correctly create the derived
           data type */
        local_iov_array = (mca_fcoll_static_local_io_array *)malloc (sizeof(mca_fcoll_static_local_io_array));
        if ( NULL == local_iov_array){
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }


        local_iov_array[0].offset = (OMPI_MPI_OFFSET_TYPE)(intptr_t) 0;
        local_iov_array[0].length = (size_t) 0;
        local_iov_array[0].process_id = fh->f_rank;
    }

    d[0] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0];
    d[1] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].length;
    d[2] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].process_id;
    base = d[0];
    for (i=0 ; i<3 ; i++) {
        d[i] -= base;
    }

    /* io_array datatype  for using in communication*/
    types[0] = &ompi_mpi_long.dt;
    types[1] = &ompi_mpi_long.dt;
    types[2] = &ompi_mpi_int.dt;

    ompi_datatype_create_struct (3,
                                 blocklen,
                                 d,
                                 types,
                                 &io_array_type);
    ompi_datatype_commit (&io_array_type);

    /* #########################################################*/
    fh->f_get_bytes_per_agg ( (int*) &bytes_per_cycle);
    local_cycles = ceil((double)max_data*fh->f_procs_per_group/bytes_per_cycle);
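    /* Clarifying note: every group member contributes up to max_data bytes
     * while an aggregator drains bytes_per_cycle bytes of group data per
     * cycle, so each member needs about max_data / (bytes_per_cycle /
     * f_procs_per_group) cycles, which is what the ceil() above computes. */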

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    start_rcomm_time = MPI_Wtime();
#endif
    ret = fh->f_comm->c_coll.coll_allreduce (&local_cycles,
                                             &cycles,
                                             1,
                                             MPI_INT,
                                             MPI_MAX,
                                             fh->f_comm,
                                             fh->f_comm->c_coll.coll_allreduce_module);

    if (OMPI_SUCCESS != ret){
        goto exit;
    }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
        end_rcomm_time = MPI_Wtime();
        rcomm_time  += end_rcomm_time - start_rcomm_time;
#endif


    if (my_aggregator == fh->f_rank) {
        disp_index = (int *) malloc (fh->f_procs_per_group * sizeof(int));
        if (NULL == disp_index) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        bytes_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int ));
        if (NULL == bytes_per_process){
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        bytes_remaining = (int *) calloc (fh->f_procs_per_group, sizeof(int));
        if (NULL == bytes_remaining){
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        current_index = (int *) calloc (fh->f_procs_per_group, sizeof(int));
        if (NULL == current_index){
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        blocklen_per_process = (int **)calloc (fh->f_procs_per_group, sizeof (int*));
        if (NULL == blocklen_per_process) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        displs_per_process = (MPI_Aint **)calloc (fh->f_procs_per_group, sizeof (MPI_Aint*));
        if (NULL == displs_per_process) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }
    }


    iovec_count_per_process = (int *) calloc (fh->f_procs_per_group, sizeof(int));
    if (NULL == iovec_count_per_process){
        opal_output (1, "OUT OF MEMORY\n");
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }

    displs = (int *) calloc (fh->f_procs_per_group, sizeof(int));
    if (NULL == displs){
        opal_output (1, "OUT OF MEMORY\n");
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    start_rcomm_time = MPI_Wtime();
#endif
    ret = fcoll_base_coll_allgather_array (&iov_size,
                                           1,
                                           MPI_INT,
                                           iovec_count_per_process,
                                           1,
                                           MPI_INT,
                                           fh->f_aggregator_index,
                                           fh->f_procs_in_group,
                                           fh->f_procs_per_group,
                                           fh->f_comm);

    if( OMPI_SUCCESS != ret){
        goto exit;
    }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
        end_rcomm_time = MPI_Wtime();
        rcomm_time  += end_rcomm_time - start_rcomm_time;
#endif

    if (my_aggregator == fh->f_rank) {
        displs[0] = 0;
        global_iov_count = iovec_count_per_process[0];
        for (i=1 ; i<fh->f_procs_per_group ; i++) {
            global_iov_count += iovec_count_per_process[i];
            displs[i] = displs[i-1] + iovec_count_per_process[i-1];
        }
    }


    if ( (my_aggregator == fh->f_rank) &&
         (global_iov_count >  0 )) {
        global_iov_array = (mca_fcoll_static_local_io_array *) malloc (global_iov_count *
                                                      sizeof(mca_fcoll_static_local_io_array));
        if (NULL == global_iov_array){
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }
    }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    start_rcomm_time = MPI_Wtime();
#endif
    ret = fcoll_base_coll_gatherv_array (local_iov_array,
                                         iov_size,
                                         io_array_type,
                                         global_iov_array,
                                         iovec_count_per_process,
                                         displs,
                                         io_array_type,
                                         fh->f_aggregator_index,
                                         fh->f_procs_in_group,
                                         fh->f_procs_per_group,
                                         fh->f_comm);

    if (OMPI_SUCCESS != ret){
        fprintf(stderr,"global_iov_array gather error!\n");
        goto exit;
    }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
        end_rcomm_time = MPI_Wtime();
        rcomm_time  += end_rcomm_time - start_rcomm_time;
#endif


    if (NULL != local_iov_array){
        free(local_iov_array);
        local_iov_array = NULL;
    }

    if ( ( my_aggregator == fh->f_rank) &&
         ( global_iov_count > 0 )) {
        sorted = (int *)malloc (global_iov_count * sizeof(int));
        if (NULL == sorted) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }
        read_local_heap_sort (global_iov_array, global_iov_count, sorted);

        send_req = (MPI_Request *) malloc (fh->f_procs_per_group * sizeof(MPI_Request));
        if (NULL == send_req){
            opal_output ( 1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        sendtype = (ompi_datatype_t **) malloc (fh->f_procs_per_group * sizeof(ompi_datatype_t *));
        if (NULL == sendtype) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }
        for ( i=0; i<fh->f_procs_per_group; i++ ) {
            sendtype[i] = MPI_DATATYPE_NULL;
        }

        if (NULL == bytes_per_process){
            bytes_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int));
            if (NULL == bytes_per_process){
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }
        }
    }

#if DEBUG_ON

    if (my_aggregator == fh->f_rank) {
        for (gc_in=0; gc_in<global_iov_count; gc_in++){
            printf("%d: Offset[%ld]: %lld, Length[%ld]: %ld\n",
                   global_iov_array[sorted[gc_in]].process_id,
                   gc_in, global_iov_array[sorted[gc_in]].offset,
                   gc_in, global_iov_array[sorted[gc_in]].length);
        }
    }
#endif

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    start_rexch = MPI_Wtime();
#endif

    for (index = 0; index < cycles; index++){

        if (my_aggregator == fh->f_rank) {

            fh->f_num_of_io_entries = 0;
            if (NULL != fh->f_io_array) {
                free (fh->f_io_array);
                fh->f_io_array = NULL;
            }
            if (NULL != global_buf) {
                free (global_buf);
                global_buf = NULL;
            }

            if (NULL != sorted_file_offsets){
                free(sorted_file_offsets);
                sorted_file_offsets = NULL;
            }
            if (NULL != file_offsets_for_agg){
                free(file_offsets_for_agg);
                file_offsets_for_agg = NULL;
            }
            if (NULL != memory_displacements){
                free(memory_displacements);
                memory_displacements= NULL;
            }

            if ( NULL != sendtype ) {
                for ( i=0; i<fh->f_procs_per_group; i++ ) {
                    if ( MPI_DATATYPE_NULL != sendtype[i] ) {
                        ompi_datatype_destroy (&sendtype[i] );
                        sendtype[i] = MPI_DATATYPE_NULL;
                    }
                }
            }

            for(l=0;l<fh->f_procs_per_group;l++){
                disp_index[l] =  1;
                if (NULL != blocklen_per_process[l]){
                    free(blocklen_per_process[l]);
                    blocklen_per_process[l] = NULL;
                }
                if (NULL != displs_per_process[l]){
                    free(displs_per_process[l]);
                    displs_per_process[l] = NULL;
                }
                blocklen_per_process[l] = (int *) calloc (1, sizeof(int));
                if (NULL == blocklen_per_process[l]) {
                    opal_output (1, "OUT OF MEMORY for blocklen\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
                displs_per_process[l] = (MPI_Aint *) calloc (1, sizeof(MPI_Aint));
                if (NULL == displs_per_process[l]){
                    opal_output (1, "OUT OF MEMORY for displs\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
            }
        }

        if (index < local_cycles ) {
            if ((index == local_cycles-1) && (max_data % (bytes_per_cycle/fh->f_procs_per_group))) {
                bytes_to_read_in_cycle = max_data - position;
            }
            else if (max_data <= bytes_per_cycle/fh->f_procs_per_group) {
                bytes_to_read_in_cycle = max_data;
            }
            else {
                bytes_to_read_in_cycle = bytes_per_cycle/fh->f_procs_per_group;
            }
        }
        else {
            bytes_to_read_in_cycle = 0;
        }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    start_rcomm_time = MPI_Wtime();
#endif
        fcoll_base_coll_gather_array (&bytes_to_read_in_cycle,
                                      1,
                                      MPI_INT,
                                      bytes_per_process,
                                      1,
                                      MPI_INT,
                                      fh->f_aggregator_index,
                                      fh->f_procs_in_group,
                                      fh->f_procs_per_group,
                                      fh->f_comm);

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
        end_rcomm_time = MPI_Wtime();
        rcomm_time  += end_rcomm_time - start_rcomm_time;
#endif

        if (recvbuf_is_contiguous ) {
            receive_buf = &((char*)buf)[position];
        }
        else if (bytes_to_read_in_cycle) {
            receive_buf = (char *) malloc (bytes_to_read_in_cycle * sizeof(char));
            if ( NULL == receive_buf){
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }
        }


#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
        start_rcomm_time = MPI_Wtime();
#endif

        ret = MCA_PML_CALL(irecv(receive_buf,
                                 bytes_to_read_in_cycle,
                                 MPI_BYTE,
                                 my_aggregator,
                                 123,
                                 fh->f_comm,
                                 &recv_req));
        if (OMPI_SUCCESS != ret){
            goto exit;
        }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
        end_rcomm_time = MPI_Wtime();
        rcomm_time  += end_rcomm_time - start_rcomm_time;
#endif


        if (my_aggregator == fh->f_rank) {
            for (i=0;i<fh->f_procs_per_group; i++){
                while (bytes_per_process[i] > 0){
                    /*printf("%d: bytes_per_process[%d]: %d, bytes_remaining[%d]: %d\n",
                      index, i, bytes_per_process[i], i, bytes_remaining[i]);*/
                    if (read_get_process_id(global_iov_array[sorted[current_index[i]]].process_id,
                                            fh) == i){ /* current id owns this entry!*/
                        if (bytes_remaining[i]){ /*Remaining bytes in the current entry of
                                                   the global offset array*/
                            if (bytes_remaining[i] <= bytes_per_process[i]){

                                blocklen_per_process[i][disp_index[i] - 1] = bytes_remaining[i];
                                displs_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].offset +
                                    (global_iov_array[sorted[current_index[i]]].length
                                     - bytes_remaining[i]);
                                blocklen_per_process[i] = (int *) realloc
                                    ((void *)blocklen_per_process[i], (disp_index[i]+1)*sizeof(int));
                                displs_per_process[i] = (MPI_Aint *)realloc
                                    ((void *)displs_per_process[i], (disp_index[i]+1)*sizeof(MPI_Aint));
                                bytes_per_process[i] -= bytes_remaining[i];
                                blocklen_per_process[i][disp_index[i]] = 0;
                                displs_per_process[i][disp_index[i]] = 0;
                                disp_index[i] += 1;
                                bytes_remaining[i] = 0;
                                /* This entry has been used up, we need to move to the
                                   next entry of this process and make current_index point there*/
                                current_index[i]  = read_find_next_index(i,
                                                                         current_index[i],
                                                                         fh,
                                                                         global_iov_array,
                                                                         global_iov_count,
                                                                         sorted);
                                if (current_index[i] == -1){
                                    break;
                                }
                                continue;
                            }
                            else{
                                blocklen_per_process[i][disp_index[i] - 1] = bytes_per_process[i];
                                displs_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].offset +
                                    (global_iov_array[sorted[current_index[i]]].length
                                     - bytes_remaining[i]);
                                bytes_remaining[i] -= bytes_per_process[i];
                                bytes_per_process[i] = 0;
                                break;
                            }
                        }
                        else{
                            if (bytes_per_process[i] <
                                global_iov_array[sorted[current_index[i]]].length){
                                blocklen_per_process[i][disp_index[i] - 1] =
                                    bytes_per_process[i];
                                displs_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].offset;
                                bytes_remaining[i] =
                                    global_iov_array[sorted[current_index[i]]].length -
                                    bytes_per_process[i];
                                bytes_per_process[i] = 0;
                                break;
                            }
                            else {
                                blocklen_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].length;
                                displs_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].offset;
                                blocklen_per_process[i] =
                                    (int *) realloc ((void *)blocklen_per_process[i], (disp_index[i]+1)*sizeof(int));
                                displs_per_process[i] = (MPI_Aint *)realloc
                                    ((void *)displs_per_process[i], (disp_index[i]+1)*sizeof(MPI_Aint));
                                blocklen_per_process[i][disp_index[i]] = 0;
                                displs_per_process[i][disp_index[i]] = 0;
                                disp_index[i] += 1;
                                bytes_per_process[i] -=
                                    global_iov_array[sorted[current_index[i]]].length;
                                current_index[i] = read_find_next_index(i,
                                                                        current_index[i],
                                                                        fh,
                                                                        global_iov_array,
                                                                        global_iov_count,
                                                                        sorted);
                                if (current_index[i] == -1){
                                    break;
                                }
                            }
                        }
                    }
                    else{
                        current_index[i] = read_find_next_index(i,
                                                                current_index[i],
                                                                fh,
                                                                global_iov_array,
                                                                global_iov_count,
                                                                sorted);
                        if (current_index[i] == -1){
                            bytes_per_process[i] = 0; /* no more entries left
                                                         to service this request*/
                            continue;
                        }
                    }
                }
            }

            entries_per_aggregator=0;
            for (i=0;i<fh->f_procs_per_group;i++){
                for (j=0;j<disp_index[i];j++){
                    if (blocklen_per_process[i][j] > 0){
                        entries_per_aggregator++;
#if DEBUG_ON
                        printf("%d sends blocklen[%d]: %d, disp[%d]: %ld to %d\n",
                               fh->f_procs_in_group[i],j,
                               blocklen_per_process[i][j],j,
                               displs_per_process[i][j],
                               fh->f_rank);

#endif
                    }
                }
            }

            if (entries_per_aggregator > 0){
                file_offsets_for_agg = (mca_fcoll_static_local_io_array *)
                    malloc(entries_per_aggregator*sizeof(mca_fcoll_static_local_io_array));
                if (NULL == file_offsets_for_agg) {
                    opal_output (1, "OUT OF MEMORY\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
                sorted_file_offsets = (int *) malloc (entries_per_aggregator * sizeof(int));
                if (NULL == sorted_file_offsets){
                    opal_output (1, "OUT OF MEMORY\n");
                    ret =  OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
                temp_index=0;
                global_count = 0;
                for (i=0;i<fh->f_procs_per_group; i++){
                    for(j=0;j<disp_index[i]; j++){
                        if (blocklen_per_process[i][j] > 0){
                            file_offsets_for_agg[temp_index].length =
                                blocklen_per_process[i][j];
                            global_count += blocklen_per_process[i][j];
                            file_offsets_for_agg[temp_index].process_id = i;
                            file_offsets_for_agg[temp_index].offset =
                                displs_per_process[i][j];
                            temp_index++;
                        }
                    }
                }
            }
            else{
                continue;
            }
            read_local_heap_sort (file_offsets_for_agg,
                                  entries_per_aggregator,
                                  sorted_file_offsets);
            memory_displacements = (MPI_Aint *) malloc
                (entries_per_aggregator * sizeof(MPI_Aint));
            memory_displacements[sorted_file_offsets[0]] = 0;
            for (i=1; i<entries_per_aggregator; i++){
                memory_displacements[sorted_file_offsets[i]] =
                    memory_displacements[sorted_file_offsets[i-1]] +
                    file_offsets_for_agg[sorted_file_offsets[i-1]].length;
            }

            global_buf = (char *) malloc (global_count * sizeof(char));
            if (NULL == global_buf){
                opal_output(1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }
#if DEBUG_ON
            printf("************Cycle: %d,  Aggregator: %d ***************\n",
                   index+1,fh->f_rank);
            for (i=0; i<entries_per_aggregator;i++){
                printf("%d: OFFSET: %lld   LENGTH: %ld, Mem-offset: %ld, disp_index :%d\n",
                       file_offsets_for_agg[sorted_file_offsets[i]].process_id,
                       file_offsets_for_agg[sorted_file_offsets[i]].offset,
                       file_offsets_for_agg[sorted_file_offsets[i]].length,
                       memory_displacements[sorted_file_offsets[i]],
                       disp_index[i]);
            }
#endif

            fh->f_io_array = (mca_io_ompio_io_array_t *) malloc
                (entries_per_aggregator * sizeof (mca_io_ompio_io_array_t));
            if (NULL == fh->f_io_array) {
                opal_output(1, "OUT OF MEMORY\n");
                ret =  OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }



            fh->f_num_of_io_entries = 0;
            fh->f_io_array[0].offset =
                (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[0]].offset;
            fh->f_io_array[0].length = file_offsets_for_agg[sorted_file_offsets[0]].length;
            fh->f_io_array[0].memory_address = global_buf+memory_displacements[sorted_file_offsets[0]];
            fh->f_num_of_io_entries++;
            for (i=1;i<entries_per_aggregator;i++){
                if (file_offsets_for_agg[sorted_file_offsets[i-1]].offset +
                    file_offsets_for_agg[sorted_file_offsets[i-1]].length ==
                    file_offsets_for_agg[sorted_file_offsets[i]].offset){
                    fh->f_io_array[fh->f_num_of_io_entries - 1].length +=
                        file_offsets_for_agg[sorted_file_offsets[i]].length;
                }
                else{
                    fh->f_io_array[fh->f_num_of_io_entries].offset =
                        (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[i]].offset;
                    fh->f_io_array[fh->f_num_of_io_entries].length =
                        file_offsets_for_agg[sorted_file_offsets[i]].length;
                    fh->f_io_array[fh->f_num_of_io_entries].memory_address =
                        global_buf+memory_displacements[sorted_file_offsets[i]];
                    fh->f_num_of_io_entries++;
                }
            }

#if DEBUG_ON
            printf("*************************** %d\n", fh->f_num_of_io_entries);
            for (i=0 ; i<fh->f_num_of_io_entries ; i++) {
                printf(" ADDRESS: %p  OFFSET: %ld   LENGTH: %ld\n",
                       fh->f_io_array[i].memory_address,
                       (OPAL_PTRDIFF_TYPE)fh->f_io_array[i].offset,
                       fh->f_io_array[i].length);
            }
#endif
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
            start_read_time = MPI_Wtime();
#endif

            if (fh->f_num_of_io_entries) {
                if ( 0 > fh->f_fbtl->fbtl_preadv (fh)) {
                    opal_output (1, "READ FAILED\n");
                    ret = OMPI_ERROR;
                    goto exit;
                }
            }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
            end_read_time = MPI_Wtime();
            read_time += end_read_time - start_read_time;
#endif


#if DEBUG_ON
            printf("************Cycle: %d,  Aggregator: %d ***************\n",
                   index+1,fh->f_rank);
            if (my_aggregator == fh->f_rank){
                for (i=0 ; i<global_count/4 ; i++)
                    printf (" READ %d \n",((int *)global_buf)[i]);
            }
#endif

            temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int));
            if (NULL == temp_disp_index) {
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }

            for (i=0; i<entries_per_aggregator; i++){
                temp_index =
                    file_offsets_for_agg[sorted_file_offsets[i]].process_id;
                displs_per_process[temp_index][temp_disp_index[temp_index]] =
                    memory_displacements[sorted_file_offsets[i]];
                if (temp_disp_index[temp_index] < disp_index[temp_index]){
                    temp_disp_index[temp_index] += 1;
                }
                else{
                    printf("temp_disp_index[%d]: %d is greater than disp_index[%d]: %d\n",
                           temp_index, temp_disp_index[temp_index],
                           temp_index, disp_index[temp_index]);
                }
            }
            if (NULL != temp_disp_index){
                free(temp_disp_index);
                temp_disp_index = NULL;
            }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
            start_rcomm_time = MPI_Wtime();
#endif
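            /* Scatter the freshly read data: for every process in the group,
             * describe its pieces of global_buf with an hindexed datatype
             * and ship them back in a single isend. */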

            for (i=0;i<fh->f_procs_per_group; i++){
                send_req[i] = MPI_REQUEST_NULL;
                ompi_datatype_create_hindexed(disp_index[i],
                                              blocklen_per_process[i],
                                              displs_per_process[i],
                                              MPI_BYTE,
                                              &sendtype[i]);
                ompi_datatype_commit(&sendtype[i]);
                ret = MCA_PML_CALL (isend(global_buf,
                                          1,
                                          sendtype[i],
                                          fh->f_procs_in_group[i],
                                          123,
                                          MCA_PML_BASE_SEND_STANDARD,
                                          fh->f_comm,
                                          &send_req[i]));
                if(OMPI_SUCCESS != ret){
                    goto exit;
                }
            }

            ret = ompi_request_wait_all (fh->f_procs_per_group,
                                         send_req,
                                         MPI_STATUSES_IGNORE);
            if (OMPI_SUCCESS != ret){
                goto exit;
            }
        } /* if ( my_aggregator == fh->f_rank ) */

        ret = ompi_request_wait (&recv_req, MPI_STATUS_IGNORE);
        if (OMPI_SUCCESS != ret){
            goto exit;
        }

#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
        end_rcomm_time = MPI_Wtime();
        rcomm_time += end_rcomm_time - start_rcomm_time;
#endif

        position += bytes_to_read_in_cycle;

        if (!recvbuf_is_contiguous) {
            OPAL_PTRDIFF_TYPE mem_address;
            size_t remaining = 0;
            size_t temp_position = 0;

            remaining = bytes_to_read_in_cycle;

            while (remaining && (iov_count > iov_index)){
                mem_address = (OPAL_PTRDIFF_TYPE)
                    (decoded_iov[iov_index].iov_base) + current_position;

                if (remaining >=
                    (decoded_iov[iov_index].iov_len - current_position)) {
                    memcpy ((IOVBASE_TYPE *) mem_address,
                            receive_buf+temp_position,
                            decoded_iov[iov_index].iov_len - current_position);
                    remaining = remaining -
                        (decoded_iov[iov_index].iov_len - current_position);
                    temp_position = temp_position +
                        (decoded_iov[iov_index].iov_len - current_position);
                    iov_index = iov_index + 1;
                    current_position = 0;
                }
                else{
                    memcpy ((IOVBASE_TYPE *) mem_address,
                            receive_buf+temp_position,
                            remaining);
                    current_position = current_position + remaining;
                    remaining = 0;
                }
            }
            if (NULL != receive_buf) {
                free (receive_buf);
                receive_buf = NULL;
            }
        }

    }
#if OMPIO_FCOLL_WANT_TIME_BREAKDOWN
    end_rexch = MPI_Wtime();
    read_exch += end_rexch - start_rexch;
    nentry.time[0] = read_time;
    nentry.time[1] = rcomm_time;
    nentry.time[2] = read_exch;
    if (my_aggregator == fh->f_rank)
        nentry.aggregator = 1;
    else
        nentry.aggregator = 0;
    nentry.nprocs_for_coll = static_num_io_procs;
    if (!mca_common_ompio_full_print_queue(fh->f_coll_read_time)){
        mca_common_ompio_register_print_entry(fh->f_coll_read_time,
                                              nentry);
    }
#endif

exit:
    if (NULL != decoded_iov){
        free(decoded_iov);
        decoded_iov = NULL;
    }

    if (NULL != displs){
        free(displs);
        displs = NULL;
    }

    if (NULL != iovec_count_per_process){
        free(iovec_count_per_process);
        iovec_count_per_process=NULL;
    }

    if (NULL != local_iov_array){
        free(local_iov_array);
        local_iov_array=NULL;
    }

    if (NULL != global_iov_array){
        free(global_iov_array);
        global_iov_array=NULL;
    }

    if (my_aggregator == fh->f_rank) {

        for (l=0; l<fh->f_procs_per_group; l++){
            if (NULL != blocklen_per_process && NULL != blocklen_per_process[l]) {
                free(blocklen_per_process[l]);
                blocklen_per_process[l] = NULL;
            }
            if (NULL != displs_per_process && NULL != displs_per_process[l]){
                free(displs_per_process[l]);
                displs_per_process[l] = NULL;
            }
        }
    }

    if (NULL != bytes_per_process){
        free(bytes_per_process);
        bytes_per_process =NULL;
    }

    if (NULL != disp_index){
        free(disp_index);
        disp_index =NULL;
    }

    if (NULL != displs_per_process){
        free(displs_per_process);
        displs_per_process = NULL;
    }

    if(NULL != bytes_remaining){
        free(bytes_remaining);
        bytes_remaining = NULL;
    }

    if(NULL != current_index){
        free(current_index);
        current_index = NULL;
    }

    if (NULL != blocklen_per_process){
        free(blocklen_per_process);
        blocklen_per_process =NULL;
    }

    if (NULL != bytes_remaining){
        free(bytes_remaining);
        bytes_remaining =NULL;
    }

    if (NULL != memory_displacements){
        free(memory_displacements);
        memory_displacements= NULL;
    }

    if (NULL != file_offsets_for_agg){
        free(file_offsets_for_agg);
        file_offsets_for_agg = NULL;
    }

    if (NULL != sorted_file_offsets){
        free(sorted_file_offsets);
        sorted_file_offsets = NULL;
    }

    if (NULL != sendtype){
        free(sendtype);
        sendtype=NULL;
    }

    if ( !recvbuf_is_contiguous ) {
        if (NULL != receive_buf){
            free(receive_buf);
            receive_buf=NULL;
        }
    }

    if (NULL != global_buf) {
        free(global_buf);
        global_buf = NULL;
    }

    if (NULL != sorted) {
        free(sorted);
        sorted = NULL;
    }

    if (NULL != send_req){
        free(send_req);
        send_req = NULL;
    }


    return ret;

}
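
A minimal standalone sketch (not part of the OMPIO sources; io_entry_t and
merge_entries are illustrative names) of the coalescing pattern used above to
fill fh->f_io_array: adjacent fragments that are contiguous in the file
collapse into a single read entry.

#include <stddef.h>
#include <stdint.h>

typedef struct {
    int64_t offset;   /* file offset of the fragment */
    size_t  length;   /* fragment length in bytes */
} io_entry_t;

/* 'in' must be sorted by offset; the merged entries are written to 'out'
 * and their count is returned. */
static size_t merge_entries(const io_entry_t *in, size_t n, io_entry_t *out)
{
    size_t m = 0;
    for (size_t i = 0; i < n; i++) {
        if (m > 0 &&
            out[m - 1].offset + (int64_t) out[m - 1].length == in[i].offset) {
            out[m - 1].length += in[i].length;   /* extend the previous run */
        } else {
            out[m++] = in[i];                    /* start a new run */
        }
    }
    return m;
}
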
int
mca_fcoll_static_file_write_all (mca_io_ompio_file_t *fh,
                                 void *buf,
                                 int count,
                                 struct ompi_datatype_t *datatype,
                                 ompi_status_public_t *status)
{



    size_t max_data = 0, bytes_per_cycle=0;
    struct iovec *iov=NULL, *decoded_iov=NULL;
    uint32_t iov_count=0, iov_index=0;
    int i=0,j=0,l=0, temp_index;
    int ret=OMPI_SUCCESS, cycles, local_cycles, *bytes_per_process=NULL;
    int index, *disp_index=NULL, **blocklen_per_process=NULL;
    int *iovec_count_per_process=NULL, *displs=NULL;
    size_t total_bytes_written=0;
    MPI_Aint **displs_per_process=NULL, *memory_displacements=NULL;
    MPI_Aint bytes_to_write_in_cycle=0, global_iov_count=0, global_count=0;

    local_io_array *local_iov_array =NULL, *global_iov_array=NULL;
    local_io_array *file_offsets_for_agg=NULL;
    int *sorted=NULL, *sorted_file_offsets=NULL, temp_pindex, *temp_disp_index=NULL;
    char *send_buf=NULL, *global_buf=NULL;
    int iov_size=0, current_position=0, *current_index=NULL;
    int *bytes_remaining=NULL, entries_per_aggregator=0;
    ompi_datatype_t **recvtype = NULL;
    MPI_Request *send_req=NULL, *recv_req=NULL;
    /* For creating datatype of type io_array */
    int blocklen[3] = {1, 1, 1};
    int static_num_io_procs=1;
    OPAL_PTRDIFF_TYPE d[3], base;
    ompi_datatype_t *types[3];
    ompi_datatype_t *io_array_type=MPI_DATATYPE_NULL;
    /*----------------------------------------------*/
#if TIME_BREAKDOWN
    double write_time = 0.0, start_write_time = 0.0, end_write_time = 0.0;
    double comm_time = 0.0, start_comm_time = 0.0, end_comm_time = 0.0;
    double exch_write = 0.0, start_exch = 0.0, end_exch = 0.0;
    print_entry nentry;
#endif


#if DEBUG_ON
    MPI_Aint gc_in;
#endif

//  if (opal_datatype_is_contiguous_memory_layout(&datatype->super,1)) {
//    fh->f_flags |= OMPIO_CONTIGUOUS_MEMORY;
//  }

    /* In case the data is not contiguous in memory, decode it into an iovec */
    if (! (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) {
        ompi_io_ompio_decode_datatype (fh,
                                       datatype,
                                       count,
                                       buf,
                                       &max_data,
                                       &decoded_iov,
                                       &iov_count);
    }
    else {
        max_data = count * datatype->super.size;
    }

    if ( MPI_STATUS_IGNORE != status ) {
        status->_ucount = max_data;
    }

    mca_io_ompio_get_num_aggregators ( & static_num_io_procs );
    ompi_io_ompio_set_aggregator_props (fh,
                                        static_num_io_procs,
                                        max_data);


    /* io_array datatype for use in communication */
    types[0] = &ompi_mpi_long.dt;
    types[1] = &ompi_mpi_long.dt;
    types[2] = &ompi_mpi_int.dt;

    d[0] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0];
    d[1] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].length;
    d[2] = (OPAL_PTRDIFF_TYPE)&local_iov_array[0].process_id;
    base = d[0];
    for (i=0 ; i<3 ; i++) {
        d[i] -= base;
    }
    ompi_datatype_create_struct (3,
                                 blocklen,
                                 d,
                                 types,
                                 &io_array_type);
    ompi_datatype_commit (&io_array_type);
    /* #########################################################*/



    ret = ompi_io_ompio_generate_current_file_view(fh,
            max_data,
            &iov,
            &iov_size);
    if (ret != OMPI_SUCCESS) {
        fprintf(stderr,"Current File View Generation Error\n");
        goto exit;
    }

    if (0 == iov_size) {
        iov_size  = 1;
    }

    local_iov_array = (local_io_array *)malloc (iov_size * sizeof(local_io_array));
    if ( NULL == local_iov_array) {
        fprintf(stderr,"local_iov_array allocation error\n");
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }


    for (j=0; j < iov_size; j++) {
        local_iov_array[j].offset = (OMPI_MPI_OFFSET_TYPE)(intptr_t)
                                    iov[j].iov_base;
        local_iov_array[j].length = (size_t)iov[j].iov_len;
        local_iov_array[j].process_id = fh->f_rank;

    }

    mca_io_ompio_get_bytes_per_agg ( (int *) &bytes_per_cycle);


    local_cycles = ceil((double)max_data/bytes_per_cycle);
    ret = fh->f_comm->c_coll.coll_allreduce (&local_cycles,
            &cycles,
            1,
            MPI_INT,
            MPI_MAX,
            fh->f_comm,
            fh->f_comm->c_coll.coll_allreduce_module);

    if (OMPI_SUCCESS != ret) {
        fprintf(stderr,"local cycles allreduce!\n");
        goto exit;
    }

    if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {

        disp_index = (int *)malloc (fh->f_procs_per_group * sizeof (int));
        if (NULL == disp_index) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        bytes_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int ));
        if (NULL == bytes_per_process) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        bytes_remaining = (int *) malloc (fh->f_procs_per_group * sizeof(int));
        if (NULL == bytes_remaining) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        current_index = (int *) malloc (fh->f_procs_per_group * sizeof(int));
        if (NULL == current_index) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        blocklen_per_process = (int **)malloc (fh->f_procs_per_group * sizeof (int*));
        if (NULL == blocklen_per_process) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        displs_per_process = (MPI_Aint **)
                             malloc (fh->f_procs_per_group * sizeof (MPI_Aint*));

        if (NULL == displs_per_process) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        for(i=0; i<fh->f_procs_per_group; i++) {
            current_index[i] = 0;
            bytes_remaining[i] =0;
            blocklen_per_process[i] = NULL;
            displs_per_process[i] = NULL;
        }
    }

    iovec_count_per_process = (int *) malloc (fh->f_procs_per_group * sizeof(int));
    if (NULL == iovec_count_per_process) {
        opal_output (1, "OUT OF MEMORY\n");
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }

    displs = (int *) malloc (fh->f_procs_per_group * sizeof(int));
    if (NULL == displs) {
        opal_output (1, "OUT OF MEMORY\n");
        ret = OMPI_ERR_OUT_OF_RESOURCE;
        goto exit;
    }

    ret = ompi_io_ompio_allgather_array (&iov_size,
                                         1,
                                         MPI_INT,
                                         iovec_count_per_process,
                                         1,
                                         MPI_INT,
                                         fh->f_aggregator_index,
                                         fh->f_procs_in_group,
                                         fh->f_procs_per_group,
                                         fh->f_comm);

    if( OMPI_SUCCESS != ret) {
        fprintf(stderr,"iov size allgatherv array!\n");
        goto exit;
    }


    if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
        displs[0] = 0;
        global_iov_count = iovec_count_per_process[0];
        for (i=1 ; i<fh->f_procs_per_group ; i++) {
            global_iov_count += iovec_count_per_process[i];
            displs[i] = displs[i-1] + iovec_count_per_process[i-1];
        }
    }


    if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
        global_iov_array = (local_io_array *) malloc (global_iov_count *
                           sizeof(local_io_array));
        if (NULL == global_iov_array) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }
    }

    ret = ompi_io_ompio_gatherv_array (local_iov_array,
                                       iov_size,
                                       io_array_type,
                                       global_iov_array,
                                       iovec_count_per_process,
                                       displs,
                                       io_array_type,
                                       fh->f_aggregator_index,
                                       fh->f_procs_in_group,
                                       fh->f_procs_per_group,
                                       fh->f_comm);
    if (OMPI_SUCCESS != ret) {
        fprintf(stderr,"global_iov_array gather error!\n");
        goto exit;
    }

    if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {

        if ( 0 == global_iov_count) {
            global_iov_count =  1;
        }

        sorted = (int *)malloc (global_iov_count * sizeof(int));
        if (NULL == sorted) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }
        local_heap_sort (global_iov_array, global_iov_count, sorted);
    }

#if DEBUG_ON

    if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
        for (gc_in=0; gc_in<global_iov_count; gc_in++) {
            printf("%d: Offset[%ld]: %lld, Length[%ld]: %ld\n",
                   global_iov_array[gc_in].process_id,
                   gc_in, global_iov_array[gc_in].offset,
                   gc_in, global_iov_array[gc_in].length);
        }
    }
#endif

#if TIME_BREAKDOWN
    start_exch = MPI_Wtime();
#endif
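
    /* Main write loop: each cycle the aggregator collects up to
     * bytes_per_cycle of data from the processes in its group, builds a
     * sorted and coalesced view of the file offsets, gathers the payload
     * into global_buf with per-sender hindexed datatypes, and hands the
     * resulting io_array to the fbtl layer for writing. */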


    for (index = 0; index < cycles; index++) {
        if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
            if (NULL == recvtype) {
                recvtype = (ompi_datatype_t **)
                           malloc (fh->f_procs_per_group  * sizeof(ompi_datatype_t *));
                if (NULL == recvtype) {
                    opal_output (1, "OUT OF MEMORY\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
            }
            for(l=0; l<fh->f_procs_per_group; l++) {
                disp_index[l] =  1;
                if (NULL != blocklen_per_process[l]) {
                    free(blocklen_per_process[l]);
                    blocklen_per_process[l] = NULL;
                }
                if (NULL != displs_per_process[l]) {
                    free(displs_per_process[l]);
                    displs_per_process[l] = NULL;
                }
                blocklen_per_process[l] = (int *) calloc (1, sizeof(int));
                if (NULL == blocklen_per_process[l]) {
                    opal_output (1, "OUT OF MEMORY for blocklen\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
                displs_per_process[l] = (MPI_Aint *) calloc (1, sizeof(MPI_Aint));
                if (NULL == displs_per_process[l]) {
                    opal_output (1, "OUT OF MEMORY for displs\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
            }
            if (NULL != sorted_file_offsets) {
                free(sorted_file_offsets);
                sorted_file_offsets = NULL;
            }

            if(NULL != file_offsets_for_agg) {
                free(file_offsets_for_agg);
                file_offsets_for_agg = NULL;
            }

            if (NULL != memory_displacements) {
                free(memory_displacements);
                memory_displacements = NULL;
            }

        }
        if (local_cycles > index) {
            if ((index == local_cycles-1) && (max_data % bytes_per_cycle)) {
                bytes_to_write_in_cycle = max_data % bytes_per_cycle;
            }
            else if (max_data <= bytes_per_cycle) {
                bytes_to_write_in_cycle = max_data;
            }
            else {
                bytes_to_write_in_cycle = bytes_per_cycle;
            }
        }
        else {
            bytes_to_write_in_cycle = 0;
        }
#if DEBUG_ON
        /*    if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {*/
        printf ("***%d: CYCLE %d   Bytes %ld**********\n",
                fh->f_rank,
                index,
                bytes_to_write_in_cycle);
        /* }*/
#endif
        /**********************************************************
         **Gather the Data from all the processes at the writers **
         *********************************************************/

        /* gather from each process how many bytes each will be sending */
        ompi_io_ompio_gather_array (&bytes_to_write_in_cycle,
                                    1,
                                    MPI_INT,
                                    bytes_per_process,
                                    1,
                                    MPI_INT,
                                    fh->f_aggregator_index,
                                    fh->f_procs_in_group,
                                    fh->f_procs_per_group,
                                    fh->f_comm);

        /*
         * Each aggregator needs to collect bytes_to_write_in_cycle from
         * every process in its group; these contributions add up to at
         * most bytes_per_cycle per cycle.
         */
        if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
            for (i=0; i<fh->f_procs_per_group; i++) {
                /*	    printf("bytes_per_process[%d]: %d\n", i, bytes_per_process[i]);
                 */

#if DEBUG_ON
                printf ("%d : bytes_per_process : %d\n",
                        fh->f_procs_in_group[i],
                        bytes_per_process[i]);
#endif

                while (bytes_per_process[i] > 0) {
                    if (get_process_id(global_iov_array[sorted[current_index[i]]].process_id,
                                       fh) == i) { /* current id owns this entry!*/

                        /*Add and subtract length and create
                          blocklength and displs array*/
                        if (bytes_remaining[i]) {
                            /* Remaining bytes in the current entry of
                               the global offset array */
                            if (bytes_remaining[i] <= bytes_per_process[i]) {
                                blocklen_per_process[i][disp_index[i] - 1] = bytes_remaining[i];
                                displs_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].offset +
                                    (global_iov_array[sorted[current_index[i]]].length
                                     - bytes_remaining[i]);

                                blocklen_per_process[i] = (int *) realloc
                                                          ((void *)blocklen_per_process[i], (disp_index[i]+1)*sizeof(int));
                                displs_per_process[i] = (MPI_Aint *)realloc
                                                        ((void *)displs_per_process[i], (disp_index[i]+1)*sizeof(MPI_Aint));
                                bytes_per_process[i] -= bytes_remaining[i];
                                blocklen_per_process[i][disp_index[i]] = 0;
                                displs_per_process[i][disp_index[i]] = 0;
                                bytes_remaining[i] = 0;
                                disp_index[i] += 1;
                                /* This entry has been used up, we need to move to the
                                next entry of this process and make current_index point there*/
                                current_index[i]  = find_next_index(i,
                                                                    current_index[i],
                                                                    fh,
                                                                    global_iov_array,
                                                                    global_iov_count,
                                                                    sorted);
                                if (current_index[i] == -1) {
                                    /* No more entries left, so it's all done; exit. */
                                    break;
                                }
                                continue;
                            }
                            else {
                                blocklen_per_process[i][disp_index[i] - 1] = bytes_per_process[i];
                                displs_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].offset +
                                    (global_iov_array[sorted[current_index[i]]].length
                                     - bytes_remaining[i]);
                                bytes_remaining[i] -= bytes_per_process[i];
                                bytes_per_process[i] = 0;
                                break;
                            }
                        }
                        else {
                            if (bytes_per_process[i] <
                                    global_iov_array[sorted[current_index[i]]].length) {
                                blocklen_per_process[i][disp_index[i] - 1] =
                                    bytes_per_process[i];
                                displs_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].offset;

                                bytes_remaining[i] =
                                    global_iov_array[sorted[current_index[i]]].length -
                                    bytes_per_process[i];
                                bytes_per_process[i] = 0;
                                break;
                            }
                            else {
                                blocklen_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].length;
                                displs_per_process[i][disp_index[i] - 1] =
                                    global_iov_array[sorted[current_index[i]]].offset;
                                blocklen_per_process[i] =
                                    (int *) realloc ((void *)blocklen_per_process[i], (disp_index[i]+1)*sizeof(int));
                                displs_per_process[i] = (MPI_Aint *)realloc
                                                        ((void *)displs_per_process[i], (disp_index[i]+1)*sizeof(MPI_Aint));
                                blocklen_per_process[i][disp_index[i]] = 0;
                                displs_per_process[i][disp_index[i]] = 0;
                                disp_index[i] += 1;
                                bytes_per_process[i] -=
                                    global_iov_array[sorted[current_index[i]]].length;
                                current_index[i] = find_next_index(i,
                                                                   current_index[i],
                                                                   fh,
                                                                   global_iov_array,
                                                                   global_iov_count,
                                                                   sorted);
                                if (current_index[i] == -1) {
                                    break;
                                }
                            }
                        }
                    }
                    else {
                        current_index[i] = find_next_index(i,
                                                           current_index[i],
                                                           fh,
                                                           global_iov_array,
                                                           global_iov_count,
                                                           sorted);
                        if (current_index[i] == -1) {
                            bytes_per_process[i] = 0; /* no more entries left
                                                         to service this request */
                            continue;
                        }
                    }
                }
            }
            entries_per_aggregator=0;
            for (i=0; i<fh->f_procs_per_group; i++) {
                for (j=0; j<disp_index[i]; j++) {
                    if (blocklen_per_process[i][j] > 0) {
                        entries_per_aggregator++;
#if DEBUG_ON
                        printf("%d sends blocklen[%d]: %d, disp[%d]: %ld to %d\n",
                               fh->f_procs_in_group[i],j,
                               blocklen_per_process[i][j],j,
                               displs_per_process[i][j],
                               fh->f_rank);

#endif
                    }

                }
            }

            if (entries_per_aggregator > 0) {
                file_offsets_for_agg = (local_io_array *)
                                       malloc(entries_per_aggregator*sizeof(local_io_array));
                if (NULL == file_offsets_for_agg) {
                    opal_output (1, "OUT OF MEMORY\n");
                    ret = OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
                sorted_file_offsets = (int *)
                                      malloc (entries_per_aggregator*sizeof(int));
                if (NULL == sorted_file_offsets) {
                    opal_output (1, "OUT OF MEMORY\n");
                    ret =  OMPI_ERR_OUT_OF_RESOURCE;
                    goto exit;
                }
                temp_index = 0;
                for (i=0; i<fh->f_procs_per_group; i++) {
                    for(j=0; j<disp_index[i]; j++) {
                        if (blocklen_per_process[i][j] > 0) {
                            file_offsets_for_agg[temp_index].length =
                                blocklen_per_process[i][j];
                            file_offsets_for_agg[temp_index].process_id = i;
                            file_offsets_for_agg[temp_index].offset =
                                displs_per_process[i][j];
                            temp_index++;
                        }
                    }
                }
            }
            else {
                continue;
            }
            local_heap_sort (file_offsets_for_agg,
                             entries_per_aggregator,
                             sorted_file_offsets);

            memory_displacements = (MPI_Aint *) malloc
                                   (entries_per_aggregator * sizeof(MPI_Aint));
            memory_displacements[sorted_file_offsets[0]] = 0;
            for (i=1; i<entries_per_aggregator; i++) {
                memory_displacements[sorted_file_offsets[i]] =
                    memory_displacements[sorted_file_offsets[i-1]] +
                    file_offsets_for_agg[sorted_file_offsets[i-1]].length;
            }

            temp_disp_index = (int *)calloc (1, fh->f_procs_per_group * sizeof (int));
            if (NULL == temp_disp_index) {
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }
            global_count = 0;
            for (i=0; i<entries_per_aggregator; i++) {
                temp_pindex =
                    file_offsets_for_agg[sorted_file_offsets[i]].process_id;
                displs_per_process[temp_pindex][temp_disp_index[temp_pindex]] =
                    memory_displacements[sorted_file_offsets[i]];
                if (temp_disp_index[temp_pindex] < disp_index[temp_pindex])
                    temp_disp_index[temp_pindex] += 1;
                else {
                    printf("temp_disp_index[%d]: %d is greater than disp_index[%d]: %d\n",
                           temp_pindex, temp_disp_index[temp_pindex],
                           temp_pindex, disp_index[temp_pindex]);
                }
                global_count +=
                    file_offsets_for_agg[sorted_file_offsets[i]].length;
            }
            if (NULL != temp_disp_index) {
                free(temp_disp_index);
                temp_disp_index = NULL;
            }

#if DEBUG_ON
            printf("************Cycle: %d,  Aggregator: %d ***************\n",
                   index+1,fh->f_rank);
            for (i=0; i<entries_per_aggregator; i++) {
                printf("%d: OFFSET: %lld   LENGTH: %ld, Mem-offset: %ld, disp : %d\n",
                       file_offsets_for_agg[sorted_file_offsets[i]].process_id,
                       file_offsets_for_agg[sorted_file_offsets[i]].offset,
                       file_offsets_for_agg[sorted_file_offsets[i]].length,
                       memory_displacements[sorted_file_offsets[i]],
                       disp_index[ file_offsets_for_agg[sorted_file_offsets[i]].process_id]);
            }
#endif

#if DEBUG_ON
            printf("%d: global_count : %ld, bytes_to_write_in_cycle : %ld, procs_per_group: %d\n",
                   fh->f_rank,
                   global_count,
                   bytes_to_write_in_cycle,
                   fh->f_procs_per_group);
#endif
#if TIME_BREAKDOWN
            start_comm_time = MPI_Wtime();
#endif
            global_buf  = (char *) malloc (global_count);
            if (NULL == global_buf) {
                opal_output(1, "OUT OF MEMORY");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }

            recv_req = (MPI_Request *)
                       malloc (fh->f_procs_per_group * sizeof(MPI_Request));
            if (NULL == recv_req) {
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }
            for (i=0; i<fh->f_procs_per_group; i++) {
                ompi_datatype_create_hindexed(disp_index[i],
                                              blocklen_per_process[i],
                                              displs_per_process[i],
                                              MPI_BYTE,
                                              &recvtype[i]);
                ompi_datatype_commit(&recvtype[i]);
                ret = MCA_PML_CALL(irecv(global_buf,
                                         1,
                                         recvtype[i],
                                         fh->f_procs_in_group[i],
                                         123,
                                         fh->f_comm,
                                         &recv_req[i]));
                if (OMPI_SUCCESS != ret) {
                    fprintf(stderr,"irecv Error!\n");
                    goto exit;
                }
            }
        }

        if (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY) {
            send_buf = &((char*)buf)[total_bytes_written];
        }
        else if (bytes_to_write_in_cycle) {
            /* allocate a send buffer and copy the data that needs
               to be sent into it in case the data is non-contiguous
               in memory */
            OPAL_PTRDIFF_TYPE mem_address;
            size_t remaining = 0;
            size_t temp_position = 0;

            send_buf = malloc (bytes_to_write_in_cycle);
            if (NULL == send_buf) {
                opal_output (1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }
            remaining = bytes_to_write_in_cycle;

            while (remaining) {
                mem_address = (OPAL_PTRDIFF_TYPE)
                              (decoded_iov[iov_index].iov_base) + current_position;

                if (remaining >=
                        (decoded_iov[iov_index].iov_len - current_position)) {
                    memcpy (send_buf+temp_position,
                            (IOVBASE_TYPE *)mem_address,
                            decoded_iov[iov_index].iov_len - current_position);
                    remaining = remaining -
                                (decoded_iov[iov_index].iov_len - current_position);
                    temp_position = temp_position +
                                    (decoded_iov[iov_index].iov_len - current_position);
                    iov_index = iov_index + 1;
                    current_position = 0;
                }
                else {
                    memcpy (send_buf+temp_position,
                            (IOVBASE_TYPE *)mem_address,
                            remaining);
                    current_position = current_position + remaining;
                    remaining = 0;
                }
            }
        }
        total_bytes_written += bytes_to_write_in_cycle;

        send_req = (MPI_Request *) malloc (sizeof(MPI_Request));
        if (NULL == send_req) {
            opal_output (1, "OUT OF MEMORY\n");
            ret = OMPI_ERR_OUT_OF_RESOURCE;
            goto exit;
        }

        ret = MCA_PML_CALL(isend(send_buf,
                                 bytes_to_write_in_cycle,
                                 MPI_BYTE,
                                 fh->f_procs_in_group[fh->f_aggregator_index],
                                 123,
                                 MCA_PML_BASE_SEND_STANDARD,
                                 fh->f_comm,
                                 send_req));

        if ( OMPI_SUCCESS != ret ) {
            fprintf(stderr,"isend error!\n");
            goto exit;
        }

        ret = ompi_request_wait (send_req, MPI_STATUS_IGNORE);
        if (OMPI_SUCCESS != ret) {
            goto exit;
        }

        if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
            ret = ompi_request_wait_all (fh->f_procs_per_group,
                                         recv_req,
                                         MPI_STATUSES_IGNORE);
            if (OMPI_SUCCESS != ret) {
                goto exit;
            }

#if DEBUG_ON
            printf("************Cycle: %d,  Aggregator: %d ***************\n",
                   index+1,fh->f_rank);
            if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
                for (i=0 ; i<global_count/4 ; i++)
                    printf (" RECV %d \n",((int *)global_buf)[i]);
            }
#endif
        }
#if TIME_BREAKDOWN
        end_comm_time = MPI_Wtime();
        comm_time += end_comm_time - start_comm_time;
#endif



        if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
            fh->f_io_array = (mca_io_ompio_io_array_t *) malloc
                             (entries_per_aggregator * sizeof (mca_io_ompio_io_array_t));
            if (NULL == fh->f_io_array) {
                opal_output(1, "OUT OF MEMORY\n");
                ret = OMPI_ERR_OUT_OF_RESOURCE;
                goto exit;
            }
            fh->f_num_of_io_entries = 0;
            /*First entry for every aggregator*/
            fh->f_io_array[fh->f_num_of_io_entries].offset =
                (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[0]].offset;
            fh->f_io_array[fh->f_num_of_io_entries].length =
                file_offsets_for_agg[sorted_file_offsets[0]].length;
            fh->f_io_array[fh->f_num_of_io_entries].memory_address =
                global_buf+memory_displacements[sorted_file_offsets[0]];
            fh->f_num_of_io_entries++;
            for (i=1; i<entries_per_aggregator; i++) {
                if (file_offsets_for_agg[sorted_file_offsets[i-1]].offset +
                        file_offsets_for_agg[sorted_file_offsets[i-1]].length ==
                        file_offsets_for_agg[sorted_file_offsets[i]].offset) {
                    fh->f_io_array[fh->f_num_of_io_entries - 1].length +=
                        file_offsets_for_agg[sorted_file_offsets[i]].length;
                }
                else {
                    fh->f_io_array[fh->f_num_of_io_entries].offset =
                        (IOVBASE_TYPE *)(intptr_t)file_offsets_for_agg[sorted_file_offsets[i]].offset;
                    fh->f_io_array[fh->f_num_of_io_entries].length =
                        file_offsets_for_agg[sorted_file_offsets[i]].length;
                    fh->f_io_array[fh->f_num_of_io_entries].memory_address =
                        global_buf+memory_displacements[sorted_file_offsets[i]];
                    fh->f_num_of_io_entries++;
                }
            }
#if DEBUG_ON
            printf("*************************** %d\n", fh->f_num_of_io_entries);
            for (i=0 ; i<fh->f_num_of_io_entries ; i++) {
                printf(" ADDRESS: %p  OFFSET: %ld   LENGTH: %ld\n",
                       fh->f_io_array[i].memory_address,
                       (OPAL_PTRDIFF_TYPE)fh->f_io_array[i].offset,
                       fh->f_io_array[i].length);
            }
#endif

#if TIME_BREAKDOWN
            start_write_time = MPI_Wtime();
#endif

            if (fh->f_num_of_io_entries) {
                if ( 0 >  fh->f_fbtl->fbtl_pwritev (fh)) {
                    opal_output (1, "WRITE FAILED\n");
                    ret = OMPI_ERROR;
                    goto exit;
                }
            }

#if TIME_BREAKDOWN
            end_write_time = MPI_Wtime();
            write_time += end_write_time - start_write_time;
#endif

        }
        if (NULL != send_req) {
            free(send_req);
            send_req = NULL;
        }

        if (! (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY) && NULL != send_buf) {
            free (send_buf);       /* per-cycle staging buffer */
            send_buf = NULL;
        }

        if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {
            fh->f_num_of_io_entries = 0;
            if (NULL != fh->f_io_array) {
                free (fh->f_io_array);
                fh->f_io_array = NULL;
            }
            for (i = 0; i < fh->f_procs_per_group; i++)
                ompi_datatype_destroy(recvtype+i);
            if (NULL != recvtype) {
                free(recvtype);
                recvtype=NULL;
            }
            if (NULL != recv_req) {
                free(recv_req);
                recv_req = NULL;
            }
            if (NULL != global_buf) {
                free (global_buf);
                global_buf = NULL;
            }
        }
    }

#if TIME_BREAKDOWN
    end_exch = MPI_Wtime();
    exch_write += end_exch - start_exch;
    nentry.time[0] = write_time;
    nentry.time[1] = comm_time;
    nentry.time[2] = exch_write;
    if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank)
        nentry.aggregator = 1;
    else
        nentry.aggregator = 0;
    nentry.nprocs_for_coll = static_num_io_procs;
    if (!ompi_io_ompio_full_print_queue(WRITE_PRINT_QUEUE)) {
        ompi_io_ompio_register_print_entry(WRITE_PRINT_QUEUE,
                                           nentry);
    }
#endif



exit:
    if (NULL != decoded_iov) {
        free(decoded_iov);
        decoded_iov = NULL;
    }

    if (fh->f_procs_in_group[fh->f_aggregator_index] == fh->f_rank) {

        if (NULL != disp_index) {
            free(disp_index);
            disp_index = NULL;
        }

        if (NULL != local_iov_array) {
            free(local_iov_array);
            local_iov_array = NULL;
        }
        for (l=0; l<fh->f_procs_per_group; l++) {
            if (NULL != blocklen_per_process && NULL != blocklen_per_process[l]) {
                free(blocklen_per_process[l]);
                blocklen_per_process[l] = NULL;
            }
            if (NULL != displs_per_process && NULL != displs_per_process[l]) {
                free(displs_per_process[l]);
                displs_per_process[l] = NULL;
            }
        }
        if (NULL != blocklen_per_process) {
            free(blocklen_per_process);
            blocklen_per_process = NULL;
        }
        if (NULL != displs_per_process) {
            free(displs_per_process);
            displs_per_process = NULL;
        }
        if(NULL != bytes_remaining) {
            free(bytes_remaining);
            bytes_remaining = NULL;
        }
        if(NULL != current_index) {
            free(current_index);
            current_index = NULL;
        }
    }
    return ret;
}
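
Hedged sketch of the exchange pattern both collectives above rely on, written
against the public MPI interface rather than the internal PML calls (function
and parameter names are illustrative): the aggregator describes where each
sender's bytes land inside its staging buffer with an hindexed datatype and
posts one nonblocking receive per sender.

#include <mpi.h>
#include <stdlib.h>

/* blocklens[s] / displs[s] hold, for sender s, the byte ranges it owns inside
 * global_buf (nblocks[s] entries each). */
static int gather_with_hindexed(char *global_buf, int nsenders,
                                const int *nblocks, int **blocklens,
                                MPI_Aint **displs, const int *sender_ranks,
                                int tag, MPI_Comm comm)
{
    MPI_Request *reqs = (MPI_Request *) malloc(nsenders * sizeof(MPI_Request));
    if (NULL == reqs) return MPI_ERR_NO_MEM;

    for (int s = 0; s < nsenders; s++) {
        MPI_Datatype recvtype;
        MPI_Type_create_hindexed(nblocks[s], blocklens[s], displs[s],
                                 MPI_BYTE, &recvtype);
        MPI_Type_commit(&recvtype);
        /* One receive pulls all of sender s's fragments into place. */
        MPI_Irecv(global_buf, 1, recvtype, sender_ranks[s], tag, comm,
                  &reqs[s]);
        MPI_Type_free(&recvtype);  /* pending request keeps the type alive */
    }

    int rc = MPI_Waitall(nsenders, reqs, MPI_STATUSES_IGNORE);
    free(reqs);
    return rc;
}
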
static int
mca_coll_basic_neighbor_allgather_cart(const void *sbuf, int scount,
                                       struct ompi_datatype_t *sdtype, void *rbuf,
                                       int rcount, struct ompi_datatype_t *rdtype,
                                       struct ompi_communicator_t *comm,
                                       mca_coll_base_module_t *module)
{
    const mca_topo_base_comm_cart_2_2_0_t *cart = comm->c_topo->mtc.cart;
    const int rank = ompi_comm_rank (comm);
    ompi_request_t **reqs, **preqs;
    ptrdiff_t lb, extent;
    int rc = MPI_SUCCESS, dim, nreqs;

    ompi_datatype_get_extent(rdtype, &lb, &extent);

    reqs = preqs = coll_base_comm_get_reqs( module->base_data, 4 * cart->ndims );
    if( NULL == reqs ) { return OMPI_ERR_OUT_OF_RESOURCE; }

    /* The ordering is defined as -1 then +1 in each dimension in
     * order of dimension. */
    for (dim = 0, nreqs = 0 ; dim < cart->ndims ; ++dim) {
        int srank = MPI_PROC_NULL, drank = MPI_PROC_NULL;

        if (cart->dims[dim] > 1) {
            mca_topo_base_cart_shift (comm, dim, 1, &srank, &drank);
        } else if (1 == cart->dims[dim] && cart->periods[dim]) {
            srank = drank = rank;
        }

        if (MPI_PROC_NULL != srank) {
            nreqs++;
            rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, srank,
                                    MCA_COLL_BASE_TAG_ALLGATHER,
                                    comm, preqs++));
            if (OMPI_SUCCESS != rc) break;

            nreqs++;
            /* remove cast from const when the pml layer is updated to take
             * a const for the send buffer. */
            rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, srank,
                                    MCA_COLL_BASE_TAG_ALLGATHER,
                                    MCA_PML_BASE_SEND_STANDARD,
                                    comm, preqs++));
            if (OMPI_SUCCESS != rc) break;
        }

        rbuf = (char *) rbuf + extent * rcount;

        if (MPI_PROC_NULL != drank) {
            nreqs++;
            rc = MCA_PML_CALL(irecv(rbuf, rcount, rdtype, drank,
                                    MCA_COLL_BASE_TAG_ALLGATHER,
                                    comm, preqs++));
            if (OMPI_SUCCESS != rc) break;

            nreqs++;
            rc = MCA_PML_CALL(isend((void *) sbuf, scount, sdtype, drank,
                                    MCA_COLL_BASE_TAG_ALLGATHER,
                                    MCA_PML_BASE_SEND_STANDARD,
                                    comm, preqs++));
            if (OMPI_SUCCESS != rc) break;
        }

        rbuf = (char *) rbuf + extent * rcount;
    }

    if (OMPI_SUCCESS != rc) {
        ompi_coll_base_free_reqs(reqs, nreqs);
        return rc;
    }

    rc = ompi_request_wait_all (nreqs, reqs, MPI_STATUSES_IGNORE);
    if (OMPI_SUCCESS != rc) {
        ompi_coll_base_free_reqs(reqs, nreqs);
    }
    return rc;
}
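
A small illustrative program (assumes a 2-D periodic grid; not taken from the
sources) showing how MPI_Cart_shift produces the two peers per dimension in
the -1-then-+1 order that the cartesian collective above uses to fill its
receive buffer.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int dims[2] = {0, 0}, periods[2] = {1, 1}, rank, size;
    MPI_Comm cart;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Dims_create(size, 2, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart);
    MPI_Comm_rank(cart, &rank);

    for (int dim = 0; dim < 2; dim++) {
        int srank, drank;
        /* srank is the -1 neighbour (its block is received first),
         * drank is the +1 neighbour (its block follows). */
        MPI_Cart_shift(cart, dim, 1, &srank, &drank);
        printf("rank %d, dim %d: -1 neighbour %d, +1 neighbour %d\n",
               rank, dim, srank, drank);
    }

    MPI_Comm_free(&cart);
    MPI_Finalize();
    return 0;
}
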
Example #21
static int two_phase_exchange_data(mca_io_ompio_file_t *fh,
				   void *buf, struct iovec *offset_len,
				   int *send_size, int *start_pos, int *recv_size,
				   int *count, int *partial_send, 
				   int *recd_from_proc, int contig_access_count,
				   OMPI_MPI_OFFSET_TYPE min_st_offset,
				   OMPI_MPI_OFFSET_TYPE fd_size,
				   OMPI_MPI_OFFSET_TYPE *fd_start,
				   OMPI_MPI_OFFSET_TYPE *fd_end, 
				   Flatlist_node *flat_buf,
				   mca_io_ompio_access_array_t *others_req, 
				   int iter, int *buf_idx, 
				   MPI_Aint buftype_extent, int striping_unit,
				   int *aggregator_list)
{
  
  int i=0, j=0, k=0, tmp=0, nprocs_recv=0, nprocs_send=0;
  int ret = OMPI_SUCCESS;
  char **recv_buf = NULL;
  MPI_Request *requests=NULL;
  MPI_Datatype send_type;


#if TIME_BREAKDOWN
    start_rcomm_time = MPI_Wtime();
#endif

  ret = fh->f_comm->c_coll.coll_alltoall (send_size,
					  1,
					  MPI_INT,
					  recv_size,
					  1,
					  MPI_INT,
					  fh->f_comm,
					  fh->f_comm->c_coll.coll_alltoall_module);

  if ( OMPI_SUCCESS != ret ){
    return ret;
  }
  
  
#if DEBUG
  for (i=0; i<fh->f_size; i++){
    printf("%d: RS[%d]: %d\n", fh->f_rank,
	   i,
	   recv_size[i]);
  }
#endif

  
  nprocs_recv = 0;
  for (i=0; i < fh->f_size; i++) 
    if (recv_size[i]) nprocs_recv++;

  nprocs_send = 0;
  for (i=0; i< fh->f_size; i++) 
    if (send_size[i]) nprocs_send++;
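
  /* One request slot per peer we receive from plus one per peer we send to;
   * receives occupy requests[0 .. nprocs_recv-1] and the sends follow them. */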

  requests = (MPI_Request *)
    malloc((nprocs_send+nprocs_recv+1) *  sizeof(MPI_Request));
  if (NULL == requests){
    return OMPI_ERR_OUT_OF_RESOURCE;
  }

  if (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY) {
    j = 0;
    for (i=0; i < fh->f_size; i++){
      if (recv_size[i]){
	ret = MCA_PML_CALL(irecv(((char *) buf)+ buf_idx[i],
				 recv_size[i],
				 MPI_BYTE,
				 i,
				 fh->f_rank+i+100*iter,
				 fh->f_comm,
				 requests+j));
	
	if ( OMPI_SUCCESS != ret ){
	  return ret;
	}
	j++;
	buf_idx[i] += recv_size[i];
      }
    }
  }
  else{

    recv_buf = (char **)malloc(fh->f_size * sizeof(char *));
    if (NULL == recv_buf){
      return OMPI_ERR_OUT_OF_RESOURCE;
    }

    for (i=0; i < fh->f_size; i++)
      if(recv_size[i]) recv_buf[i] = 
			 (char *) malloc (recv_size[i] *  sizeof(char));
    j = 0;
    for(i=0; i<fh->f_size; i++)
      if (recv_size[i]) {
	ret = MCA_PML_CALL(irecv(recv_buf[i],
				 recv_size[i],
				 MPI_BYTE,
				 i,
				 fh->f_rank+i+100*iter,
				 fh->f_comm,
				 requests+j));
	j++;
	
      }
  }
    
  

  j = 0;
  for (i = 0; i< fh->f_size; i++){
    if (send_size[i]){
      if (partial_send[i]){
	k = start_pos[i] + count[i] - 1;
	tmp = others_req[i].lens[k];
	others_req[i].lens[k] = partial_send[i];
      }

      MPI_Type_hindexed(count[i],
			&(others_req[i].lens[start_pos[i]]),
			&(others_req[i].mem_ptrs[start_pos[i]]),
			MPI_BYTE,
			&send_type);
      MPI_Type_commit(&send_type);

      ret = MCA_PML_CALL(isend(MPI_BOTTOM,
			       1,
			       send_type,
			       i,
			       fh->f_rank+i+100*iter,
			       MCA_PML_BASE_SEND_STANDARD,
			       fh->f_comm,
			       requests+nprocs_recv+j));
      MPI_Type_free(&send_type);
      if (partial_send[i]) others_req[i].lens[k] = tmp;
      j++;
    }
  }


  if (nprocs_recv) {

    ret = ompi_request_wait_all(nprocs_recv,
				requests, 
				MPI_STATUSES_IGNORE);
    
    
    if (! (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)) {

      two_phase_fill_user_buffer(fh, buf, flat_buf,
				 recv_buf, offset_len,
				 (unsigned *)recv_size, requests,
				 recd_from_proc, contig_access_count,
				 min_st_offset, fd_size, fd_start, fd_end,
				 buftype_extent, striping_unit, aggregator_list);
    }
  }

  ret = ompi_request_wait_all(nprocs_send,
			      requests+nprocs_recv, 
			      MPI_STATUSES_IGNORE);

  if (NULL != requests){
    free(requests);
    requests = NULL;
  }
  
  if (! (fh->f_flags & OMPIO_CONTIGUOUS_MEMORY)){
    for (i=0; i< fh->f_size; i++){
      if (recv_size[i]){
	free(recv_buf[i]);
      }
    }
    free(recv_buf);
  }

#if TIME_BREAKDOWN
    end_rcomm_time = MPI_Wtime();
    rcomm_time += (end_rcomm_time - start_rcomm_time);
#endif

  return ret;

}
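
Reduced sketch of the handshake two_phase_exchange_data performs, using the
public MPI API (buffer layout, tag and function name are illustrative): an
alltoall of per-peer byte counts first, then nonblocking point-to-point
transfers only where the counts are non-zero.

#include <mpi.h>
#include <stdlib.h>

/* send_size[p] = bytes this rank sends to peer p; sendbufs[p] / recvbufs[p]
 * are per-peer staging buffers (recvbufs[p] must hold recv_size[p] bytes
 * once the counts are known). */
static int exchange_counts_then_data(char **sendbufs, const int *send_size,
                                     char **recvbufs, int *recv_size,
                                     int tag, MPI_Comm comm)
{
    int nprocs, nreq = 0;
    MPI_Comm_size(comm, &nprocs);

    /* Phase 1: learn how much will arrive from every peer. */
    MPI_Alltoall(send_size, 1, MPI_INT, recv_size, 1, MPI_INT, comm);

    MPI_Request *reqs = (MPI_Request *) malloc(2 * nprocs * sizeof(MPI_Request));
    if (NULL == reqs) return MPI_ERR_NO_MEM;

    /* Phase 2: nonblocking receives first, then the matching sends. */
    for (int p = 0; p < nprocs; p++) {
        if (recv_size[p] > 0) {
            MPI_Irecv(recvbufs[p], recv_size[p], MPI_BYTE, p, tag, comm,
                      &reqs[nreq++]);
        }
    }
    for (int p = 0; p < nprocs; p++) {
        if (send_size[p] > 0) {
            MPI_Isend(sendbufs[p], send_size[p], MPI_BYTE, p, tag, comm,
                      &reqs[nreq++]);
        }
    }

    int rc = MPI_Waitall(nreq, reqs, MPI_STATUSES_IGNORE);
    free(reqs);
    return rc;
}
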
Example #22
/**
 * Allgather over a subgroup of the communicator, implemented on top of the PML
 */
OMPI_DECLSPEC int comm_allgather_pml(void *src_buf, void *dest_buf, int count,
        ompi_datatype_t *dtype, int my_rank_in_group,
        int n_peers, int *ranks_in_comm,ompi_communicator_t *comm)
{
    /* local variables */
    int rc=OMPI_SUCCESS,msg_cnt;
    int pair_rank,exchange,extra_rank, n_extra_nodes,n_extra;
    int proc_block,extra_start,extra_end,iovec_len;
    int remote_data_start_rank,remote_data_end_rank;
    int local_data_start_rank;
    netpatterns_pair_exchange_node_t my_exchange_node;
    size_t message_extent,current_data_extent,current_data_count;
    size_t dt_size;
    OPAL_PTRDIFF_TYPE dt_extent;
    char *src_buf_current;
    char *dest_buf_current;
    struct iovec send_iov[2] = {{0,0},{0,0}}, 
                 recv_iov[2] = {{0,0},{0,0}};
    ompi_request_t *requests[4];

    /* get size of data needed - same layout as user data, so that
     *   we can apply the reduction routines directly on these buffers
     */
    rc = ompi_datatype_type_size(dtype, &dt_size);
    if( OMPI_SUCCESS != rc ) {
        goto Error;
    }

    rc = ompi_datatype_type_extent(dtype, &dt_extent);
    if( OMPI_SUCCESS != rc ) {
        goto Error;
    }
    message_extent = dt_extent*count;

    /* place my data in the correct destination buffer */
    rc=ompi_datatype_copy_content_same_ddt(dtype,count,
            (char *)dest_buf+my_rank_in_group*message_extent,
            (char *)src_buf);
    if( OMPI_SUCCESS != rc ) {
        goto Error;
    }

    /* 1 process special case */
    if(1 == n_peers) {
        return OMPI_SUCCESS;
    }

    /* get my reduction communication pattern */
    memset(&my_exchange_node, 0, sizeof(netpatterns_pair_exchange_node_t));
    rc = netpatterns_setup_recursive_doubling_tree_node(n_peers, 
            my_rank_in_group, &my_exchange_node);
    if(OMPI_SUCCESS != rc){
        return rc;
    }

    n_extra_nodes=n_peers-my_exchange_node.n_largest_pow_2;
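
    /* Ranks beyond the largest power of two ("extra" ranks) first hand their
     * contribution to a partner inside the power-of-two set and receive the
     * fully gathered buffer only at the very end; the recursive-doubling
     * exchanges below involve the power-of-two set only. */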

    /* get the data from the extra sources */
    if(0 < my_exchange_node.n_extra_sources)  {

        if ( EXCHANGE_NODE == my_exchange_node.node_type ) {

            /*
             ** Receive data from extra node
             */

            extra_rank=my_exchange_node.rank_extra_source;
            /* receive the data into the correct location - will use 2
             * messages in the recursive doubling phase */
            dest_buf_current=(char *)dest_buf+message_extent*extra_rank;
            rc=MCA_PML_CALL(recv(dest_buf_current,
                    count,dtype,ranks_in_comm[extra_rank],
                    -OMPI_COMMON_TAG_ALLREDUCE,
                    comm, MPI_STATUS_IGNORE));
            if( 0 > rc ) {
                goto  Error;
            }

        } else {

            /*
             ** Send data to "partner" node
             */
            extra_rank=my_exchange_node.rank_extra_source;
            src_buf_current=(char *)src_buf;
            rc=MCA_PML_CALL(send(src_buf_current,
                    count,dtype,ranks_in_comm[extra_rank],
                    -OMPI_COMMON_TAG_ALLREDUCE,
                    MCA_PML_BASE_SEND_STANDARD,
                    comm));
            if( 0 > rc ) {
                goto  Error;
            }
        }
    }

    current_data_extent=message_extent;
    current_data_count=count;
    src_buf_current=(char *)dest_buf+my_rank_in_group*message_extent;
    proc_block=1;
    local_data_start_rank=my_rank_in_group;
    /* loop over data exchanges */
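    /* Classic recursive doubling: after exchange k each rank holds the
     * contributions of a contiguous block of 2^(k+1) power-of-two ranks
     * (plus any "extra" ranks folded into that block), so the block base,
     * extent and count all double every iteration. */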
    for(exchange=0 ; exchange < my_exchange_node.n_exchanges ; exchange++) {

        /* partner rank whose data block is exchanged in this step */
        pair_rank=my_exchange_node.rank_exchanges[exchange];
        msg_cnt=0;

        /*
         * Power of 2 data segment 
         */
        /* post non-blocking receive */
        if(pair_rank > my_rank_in_group ){
            recv_iov[0].iov_base=src_buf_current+current_data_extent;
            recv_iov[0].iov_len=current_data_extent;
            iovec_len=1;
            remote_data_start_rank=local_data_start_rank+proc_block;
            remote_data_end_rank=remote_data_start_rank+proc_block-1;
        } else {
            recv_iov[0].iov_base=src_buf_current-current_data_extent;
            recv_iov[0].iov_len=current_data_extent;
            iovec_len=1;
            remote_data_start_rank=local_data_start_rank-proc_block;
            remote_data_end_rank=remote_data_start_rank+proc_block-1;
        }
        /* the data from the non power of 2 ranks */
        if(remote_data_start_rank<n_extra_nodes) {
            /* figure out how much data is at the remote rank */
            /* last rank with data */
            extra_start=remote_data_start_rank;
            extra_end=remote_data_end_rank;
            if(extra_end >= n_extra_nodes ) {
                /* if last rank exceeds the ranks with extra data,
                 * adjust this.
                 */
                extra_end=n_extra_nodes-1;
            }
            /* get the number of ranks whose data is to be grabbed */
            n_extra=extra_end-extra_start+1;

            recv_iov[1].iov_base=(char *)dest_buf+
                (extra_start+my_exchange_node.n_largest_pow_2)*message_extent;
            recv_iov[1].iov_len=n_extra*count;
            iovec_len=2;
        }

        rc=MCA_PML_CALL(irecv(recv_iov[0].iov_base,
                    current_data_count,dtype,ranks_in_comm[pair_rank],
                    -OMPI_COMMON_TAG_ALLREDUCE,
                    comm,&(requests[msg_cnt])));
        if( 0 > rc ) {
            goto Error;
        }
        msg_cnt++;

        if(iovec_len > 1 ) {
            rc=MCA_PML_CALL(irecv(recv_iov[1].iov_base,
                        recv_iov[1].iov_len,dtype,ranks_in_comm[pair_rank],
                        -OMPI_COMMON_TAG_ALLREDUCE,
                        comm,&(requests[msg_cnt])));
            if( 0 > rc ) {
                goto Error;
            }
            msg_cnt++;
        }

        /* post non-blocking send: the mirror image of the receive above -
         * ship our current block plus any extra-rank data attached to it */
        send_iov[0].iov_base=src_buf_current;
        send_iov[0].iov_len=current_data_extent;
        iovec_len=1;
        /* the data from the non power of 2 ranks */
        if(local_data_start_rank<n_extra_nodes) {
            /* figure out how much data is at the remote rank */
            /* last rank with data */
            extra_start=local_data_start_rank;
            extra_end=extra_start+proc_block-1;
            if(extra_end >= n_extra_nodes ) {
                /* if last rank exceeds the ranks with extra data,
                 * adjust this.
                 */
                extra_end=n_extra_nodes-1;
            }
            /* get the number of ranks whose data is to be grabbed */
            n_extra=extra_end-extra_start+1;

            send_iov[1].iov_base=(char *)dest_buf+
                (extra_start+my_exchange_node.n_largest_pow_2)*message_extent;
            send_iov[1].iov_len=n_extra*count;  /* an element count, not bytes */
            iovec_len=2;
        }

        rc=MCA_PML_CALL(isend(send_iov[0].iov_base,
                    current_data_count,dtype,ranks_in_comm[pair_rank],
                    -OMPI_COMMON_TAG_ALLREDUCE,MCA_PML_BASE_SEND_STANDARD,
                    comm,&(requests[msg_cnt])));
        if( 0 > rc ) {
            goto Error;
        }
        msg_cnt++;
        if( iovec_len > 1 ) { 
            rc=MCA_PML_CALL(isend(send_iov[1].iov_base,
                        send_iov[1].iov_len,dtype,ranks_in_comm[pair_rank],
                        -OMPI_COMMON_TAG_ALLREDUCE,MCA_PML_BASE_SEND_STANDARD,
                        comm,&(requests[msg_cnt])));
            if( 0 > rc ) {
                goto Error;
            }
            msg_cnt++;
        }

        /* prepare the source buffer for the next iteration */
        if(pair_rank < my_rank_in_group ){
            src_buf_current-=current_data_extent;
            local_data_start_rank-=proc_block;
        } 
        proc_block*=2;
        current_data_extent*=2;
        current_data_count*=2;

        /* wait on send and receive completion */
        rc=ompi_request_wait_all(msg_cnt,requests,MPI_STATUSES_IGNORE);
        if( 0 > rc ) {
            goto Error;
        }
    }

    /* hand the fully gathered result to the "extra" (non power of 2) ranks, if any */
    if(0 < my_exchange_node.n_extra_sources)  {

        if ( EXTRA_NODE == my_exchange_node.node_type ) {
            /*
             ** receive the data
             */
            extra_rank=my_exchange_node.rank_extra_source;

            rc=MCA_PML_CALL(recv(dest_buf,
                    count*n_peers,dtype,ranks_in_comm[extra_rank],
                    -OMPI_COMMON_TAG_ALLREDUCE,
                    comm,MPI_STATUS_IGNORE));
            if(0 > rc ) {
                goto  Error;
            }
        } else {
            /* send the data to the pair-rank outside of the power of 2 set
             ** of ranks
             */

            extra_rank=my_exchange_node.rank_extra_source;
            rc=MCA_PML_CALL(send(dest_buf,
                    count*n_peers,dtype,ranks_in_comm[extra_rank],
                    -OMPI_COMMON_TAG_ALLREDUCE,
                    MCA_PML_BASE_SEND_STANDARD,
                    comm));
            if( 0 > rc ) {
                goto  Error;
            }
        }
    }
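    /* every rank, including the "extra" ranks, now holds the complete
     * count*n_peers result in dest_buf */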

    netpatterns_cleanup_recursive_doubling_tree_node(&my_exchange_node);

    /* return */
    return OMPI_SUCCESS;

Error:
    return rc;
}
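
/*
 * Illustrative sketch only, not part of the routine above: assuming the
 * exchange partners follow the usual XOR-based recursive doubling pattern,
 * this small standalone program shows how the block bookkeeping in the
 * exchange loop evolves.  All names here (sketch_print_schedule, n_pow2,
 * block_start) are invented for the example.
 */
#include <stdio.h>

static void sketch_print_schedule(int my_rank, int n_pow2)
{
    int proc_block  = 1;        /* ranks covered by the local block      */
    int block_start = my_rank;  /* first rank whose data the block holds */

    for (int mask = 1; mask < n_pow2; mask <<= 1) {
        int pair_rank = my_rank ^ mask;   /* partner for this exchange   */

        /* the block grows toward the partner's side, just like
         * src_buf_current / local_data_start_rank in the loop above */
        if (pair_rank < my_rank) {
            block_start -= proc_block;
        }
        proc_block *= 2;

        printf("rank %d: exchange with rank %d -> now holds ranks [%d, %d]\n",
               my_rank, pair_rank, block_start, block_start + proc_block - 1);
    }
}

int main(void)
{
    /* print the schedule every rank of an 8-rank group would follow */
    for (int rank = 0; rank < 8; ++rank) {
        sketch_print_schedule(rank, 8);
    }
    return 0;
}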