Example #1
/**
 * Conversion functions. They deal with datatypes in three ways, always making
 * local copies. To allow performance testing, there are three functions:
 *  - one copying directly from one memory location to another using the
 *    datatype copy function,
 *  - one using two convertors created from the same datatype,
 *  - and one using two convertors created from different datatypes.
 *
 */
static int local_copy_ddt_count( ompi_datatype_t* pdt, int count )
{
    MPI_Aint extent;
    void *pdst, *psrc;
    TIMER_DATA_TYPE start, end;
    long total_time;

    ompi_ddt_type_extent( pdt, &extent );

    pdst = malloc( extent * count );
    psrc = malloc( extent * count );

    {
        int i;
        for( i = 0; i < (count * extent); i++ )
            ((char*)psrc)[i] = i % 128 + 32;
    }
    memset( pdst, 0, count * extent );

    cache_trash();  /* make sure the cache is useless */

    GET_TIME( start );
    if( OMPI_SUCCESS != ompi_ddt_copy_content_same_ddt( pdt, count, pdst, psrc ) ) {
        printf( "Unable to copy the datatype in the function local_copy_ddt_count."
                " Is the datatype committed ?\n" );
    }
    GET_TIME( end );
    total_time = ELAPSED_TIME( start, end );
    printf( "direct local copy in %ld microsec\n", total_time );
    free( pdst );
    free( psrc );

    return OMPI_SUCCESS;
}
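
/*
 * A hypothetical driver sketch (not part of the original benchmark): it
 * assumes the ompi_ddt_* helpers used above are available and simply builds
 * a committed vector datatype before handing it to the direct-copy test.
 * The vector parameters and the element count are illustrative only, and
 * error checking is omitted for brevity.
 */
static void run_local_copy_benchmark( void )
{
    ompi_datatype_t *vector = NULL;

    /* 128 blocks of 64 ints, stride of 128 ints (assumed parameters) */
    ompi_ddt_create_vector( 128, 64, 128, &ompi_mpi_int, &vector );
    ompi_ddt_commit( &vector );

    /* time a direct datatype copy of 1000 elements */
    local_copy_ddt_count( vector, 1000 );

    OBJ_RELEASE( vector );
}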
Example #2
int MPI_Scatterv(void *sendbuf, int *sendcounts, int *displs,
                 MPI_Datatype sendtype, void *recvbuf, int recvcount,
                 MPI_Datatype recvtype, int root, MPI_Comm comm) 
{
    int i, size, err;

    MEMCHECKER(
        ptrdiff_t ext;

        size = ompi_comm_size(comm);
        ompi_ddt_type_extent(recvtype, &ext);

        memchecker_comm(comm);
        if(OMPI_COMM_IS_INTRA(comm)) {
            if(ompi_comm_rank(comm) == root) {
                memchecker_datatype(sendtype);
                /* check whether root's send buffer is defined. */
                for (i = 0; i < size; i++) {
                    memchecker_call(&opal_memchecker_base_isdefined,
                                    (char *)(sendbuf)+displs[i]*ext,
                                    sendcounts[i], sendtype);
                }
                if(MPI_IN_PLACE != recvbuf) {
                    memchecker_datatype(recvtype);
                    /* check whether receive buffer is addressable. */
                    memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
                }
            } else {
                memchecker_datatype(recvtype);
                /* check whether receive buffer is addressable. */
                memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
            }
        } else {
            if(MPI_ROOT == root) {
                memchecker_datatype(sendtype);
                /* check whether root's send buffer is defined. */
                for (i = 0; i < size; i++) {
                    memchecker_call(&opal_memchecker_base_isdefined,
                                    (char *)(sendbuf)+displs[i]*ext,
                                    sendcounts[i], sendtype);
                }
            } else if (MPI_PROC_NULL != root) {
                /* check whether receive buffer is addressable. */
                memchecker_call(&opal_memchecker_base_isaddressable, recvbuf, recvcount, recvtype);
            }
        }
    );
int
ompi_coll_tuned_alltoallv_intra_pairwise(void *sbuf, int *scounts, int *sdisps,
                                         struct ompi_datatype_t *sdtype,
                                         void* rbuf, int *rcounts, int *rdisps,
                                         struct ompi_datatype_t *rdtype,
                                         struct ompi_communicator_t *comm,
                                         mca_coll_base_module_t *module)
{
    int line = -1, err = 0;
    int rank, size, step;
    int sendto, recvfrom;
    void *psnd, *prcv;
    ptrdiff_t sext, rext;

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    OPAL_OUTPUT((ompi_coll_tuned_stream,
                 "coll:tuned:alltoallv_intra_pairwise rank %d", rank));

    ompi_ddt_type_extent(sdtype, &sext);
    ompi_ddt_type_extent(rdtype, &rext);

    psnd = ((char *) sbuf) + (sdisps[rank] * sext);
    prcv = ((char *) rbuf) + (rdisps[rank] * rext);

    if (0 != scounts[rank]) {
        err = ompi_ddt_sndrcv(psnd, scounts[rank], sdtype,
                              prcv, rcounts[rank], rdtype);
        if (MPI_SUCCESS != err) {
            return err;
        }
    }

    /* If only one process, we're done. */
    if (1 == size) {
        return MPI_SUCCESS;
    }

    /* Perform pairwise exchange starting from 1 since local exchange is done */
    for (step = 1; step < size; step++) {

        /* Determine sender and receiver for this step. */
        sendto  = (rank + step) % size;
        recvfrom = (rank + size - step) % size;

        /* Determine sending and receiving locations */
        psnd = (char*)sbuf + sdisps[sendto] * sext;
        prcv = (char*)rbuf + rdisps[recvfrom] * rext;

        /* send and receive */
        err = ompi_coll_tuned_sendrecv( psnd, scounts[sendto], sdtype, sendto, 
                                        MCA_COLL_BASE_TAG_ALLTOALLV,
                                        prcv, rcounts[recvfrom], rdtype, recvfrom, 
                                        MCA_COLL_BASE_TAG_ALLTOALLV,
                                        comm, MPI_STATUS_IGNORE, rank);
        if (err != MPI_SUCCESS) { line = __LINE__; goto err_hndl;  }
    }

    return MPI_SUCCESS;
 
 err_hndl:
    OPAL_OUTPUT((ompi_coll_tuned_stream,
                 "%s:%4d\tError occurred %d, rank %2d", __FILE__, line, 
                 err, rank));
    return err;
}
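
/*
 * Illustration of the pairwise schedule used above (added sketch, not part
 * of the original module; assumes <stdio.h>).  Over size-1 steps every rank
 * is paired with exactly one peer per step, and each ordered pair of distinct
 * ranks occurs exactly once -- which is what the sendrecv loop relies on.
 */
static void print_pairwise_schedule( int rank, int size )
{
    int step;

    for( step = 1; step < size; step++ ) {
        int sendto   = (rank + step) % size;
        int recvfrom = (rank + size - step) % size;
        printf( "step %d: rank %d sends to %d, receives from %d\n",
                step, rank, sendto, recvfrom );
    }
}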
/*
 * Linear functions are copied from the basic coll module.  For some small
 * numbers of nodes and/or small data sizes they are just as fast as the
 * tuned/tree-based segmenting operations, and as such may be selected by
 * the decision functions.  They are copied into this module because of the
 * way we select modules in V1; in V2 we will handle this differently and
 * so will not have to duplicate code.
 * GEF Oct05 after asking Jeff.
 */
int
ompi_coll_tuned_alltoallv_intra_basic_linear(void *sbuf, int *scounts, int *sdisps,
                                            struct ompi_datatype_t *sdtype,
                                            void *rbuf, int *rcounts, int *rdisps,
                                            struct ompi_datatype_t *rdtype,
                                            struct ompi_communicator_t *comm,
                                            mca_coll_base_module_t *module)
{
    int i, size, rank, err;
    char *psnd, *prcv;
    int nreqs;
    ptrdiff_t sext, rext;
    MPI_Request *preq;
    mca_coll_tuned_module_t *tuned_module = (mca_coll_tuned_module_t*) module;
    mca_coll_tuned_comm_t *data = tuned_module->tuned_data;

    size = ompi_comm_size(comm);
    rank = ompi_comm_rank(comm);

    OPAL_OUTPUT((ompi_coll_tuned_stream,
                 "coll:tuned:alltoallv_intra_basic_linear rank %d", rank));

    ompi_ddt_type_extent(sdtype, &sext);
    ompi_ddt_type_extent(rdtype, &rext);

    /* Simple optimization - handle send to self first */
    psnd = ((char *) sbuf) + (sdisps[rank] * sext);
    prcv = ((char *) rbuf) + (rdisps[rank] * rext);
    if (0 != scounts[rank]) {
        err = ompi_ddt_sndrcv(psnd, scounts[rank], sdtype,
                              prcv, rcounts[rank], rdtype);
        if (MPI_SUCCESS != err) {
            return err;
        }
    }

    /* If only one process, we're done. */
    if (1 == size) {
        return MPI_SUCCESS;
    }

    /* Now, initiate all send/recv to/from others. */
    nreqs = 0;
    preq = data->mcct_reqs;

    /* Post all receives first */
    for (i = 0; i < size; ++i) {
        if (i == rank || 0 == rcounts[i]) {
            continue;
        }

        prcv = ((char *) rbuf) + (rdisps[i] * rext);
        err = MCA_PML_CALL(irecv_init(prcv, rcounts[i], rdtype,
                                      i, MCA_COLL_BASE_TAG_ALLTOALLV, comm,
                                      preq++));
        ++nreqs;
        if (MPI_SUCCESS != err) {
            ompi_coll_tuned_free_reqs(data->mcct_reqs, nreqs);
            return err;
        }
    }

    /* Now post all sends */
    for (i = 0; i < size; ++i) {
        if (i == rank || 0 == scounts[i]) {
            continue;
        }

        psnd = ((char *) sbuf) + (sdisps[i] * sext);
        err = MCA_PML_CALL(isend_init(psnd, scounts[i], sdtype,
                                      i, MCA_COLL_BASE_TAG_ALLTOALLV,
                                      MCA_PML_BASE_SEND_STANDARD, comm,
                                      preq++));
        ++nreqs;
        if (MPI_SUCCESS != err) {
            ompi_coll_tuned_free_reqs(data->mcct_reqs, nreqs);
            return err;
        }
    }

    /* Start your engines.  This will never return an error. */
    MCA_PML_CALL(start(nreqs, data->mcct_reqs));

    /* Wait for them all.  If there's an error, note that we don't care
     * what the error was -- just that there *was* an error.  The PML
     * will finish all requests, even if one or more of them fail.
     * i.e., by the end of this call, all the requests are free-able.
     * So free them anyway -- even if there was an error, and return the
     * error after we free everything. */
    err = ompi_request_wait_all(nreqs, data->mcct_reqs,
                                MPI_STATUSES_IGNORE);

    /* Free the requests. */
    ompi_coll_tuned_free_reqs(data->mcct_reqs, nreqs);

    return err;
}
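
/*
 * The same post-all-receives / post-all-sends / wait-all pattern expressed
 * with public MPI point-to-point calls -- a hedged, user-level sketch for
 * illustration, not the PML-based implementation above.  The buffers and
 * count/displacement arrays follow the usual MPI_Alltoallv conventions;
 * mpi.h and <stdlib.h> are assumed.
 */
static int alltoallv_linear_sketch(void *sbuf, int *scounts, int *sdisps, MPI_Datatype sdtype,
                                   void *rbuf, int *rcounts, int *rdisps, MPI_Datatype rdtype,
                                   MPI_Comm comm)
{
    int i, rank, size, nreqs = 0, err;
    MPI_Aint slb, sext, rlb, rext;
    MPI_Request *reqs;

    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    MPI_Type_get_extent(sdtype, &slb, &sext);
    MPI_Type_get_extent(rdtype, &rlb, &rext);

    /* handle the local (self) exchange first, as in the code above */
    if (0 != scounts[rank]) {
        MPI_Sendrecv((char *) sbuf + sdisps[rank] * sext, scounts[rank], sdtype, rank, 0,
                     (char *) rbuf + rdisps[rank] * rext, rcounts[rank], rdtype, rank, 0,
                     comm, MPI_STATUS_IGNORE);
    }

    reqs = (MPI_Request *) malloc(2 * size * sizeof(MPI_Request));
    if (NULL == reqs) {
        return MPI_ERR_NO_MEM;
    }

    /* post every receive first, then every send, skipping self and zero counts */
    for (i = 0; i < size; ++i) {
        if (i == rank || 0 == rcounts[i]) continue;
        MPI_Irecv((char *) rbuf + rdisps[i] * rext, rcounts[i], rdtype,
                  i, 0, comm, &reqs[nreqs++]);
    }
    for (i = 0; i < size; ++i) {
        if (i == rank || 0 == scounts[i]) continue;
        MPI_Isend((char *) sbuf + sdisps[i] * sext, scounts[i], sdtype,
                  i, 0, comm, &reqs[nreqs++]);
    }

    /* the PML version waits on persistent requests; here a plain wait-all suffices */
    err = MPI_Waitall(nreqs, reqs, MPI_STATUSES_IGNORE);
    free(reqs);
    return err;
}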
Example #5
int MPI_Type_create_darray(int size,
                           int rank,
                           int ndims,
                           int gsize_array[],
                           int distrib_array[],
                           int darg_array[],
                           int psize_array[],
                           int order,
                           MPI_Datatype oldtype,
                           MPI_Datatype *newtype)

{
    ompi_datatype_t *lastType;
    ptrdiff_t orig_extent, *st_offsets = NULL;
    int i, start_loop, end_loop, step;
    int *coords = NULL, rc = OMPI_SUCCESS;

    if (MPI_PARAM_CHECK) {
        int prod_psize = 1;
        OMPI_ERR_INIT_FINALIZE(FUNC_NAME);
        if( (rank < 0) || (size < 0) || (rank >= size) ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
        } else if( ndims < 0 ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_COUNT, FUNC_NAME);
        } else if( (NULL == gsize_array) || (NULL == distrib_array) || (NULL == darg_array) || (NULL == psize_array)) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
        } else if (NULL == newtype) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_TYPE, FUNC_NAME);
        } else if( !(DT_FLAG_DATA & oldtype->flags) ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_TYPE, FUNC_NAME);
        } else if( (MPI_ORDER_C != order) && (MPI_ORDER_FORTRAN != order) ) {
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
        }
        for( i = 0; i < ndims; i++ ) {
            if( (MPI_DISTRIBUTE_BLOCK != distrib_array[i]) && 
                (MPI_DISTRIBUTE_CYCLIC != distrib_array[i]) &&
                (MPI_DISTRIBUTE_NONE != distrib_array[i]) ) {
                return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
            } else if( (gsize_array[i] < 1) || (psize_array[i] < 0) ||
                       ((darg_array[i] < 0) && (MPI_DISTRIBUTE_DFLT_DARG != darg_array[i]) ) ) {
                return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
            } else if( (MPI_DISTRIBUTE_DFLT_DARG != darg_array[i]) &&
                       (MPI_DISTRIBUTE_BLOCK == distrib_array[i]) &&
                       ((darg_array[i] * psize_array[i]) < gsize_array[i]) ) {
                return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
            } else if( 1 > psize_array[i] )
                return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
            prod_psize *= psize_array[i];
        }
        if( prod_psize != size )
            return OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD, MPI_ERR_ARG, FUNC_NAME);
    }

    /* speedy corner case */
    if (ndims < 1) {
        /* Don't just return MPI_DATATYPE_NULL as that can't be
           MPI_TYPE_FREE()ed, and that seems bad */
        *newtype = ompi_ddt_create(0);
        ompi_ddt_add(*newtype, &ompi_mpi_datatype_null, 0, 0, 0);
        return MPI_SUCCESS;
    }

    rc = ompi_ddt_type_extent(oldtype, &orig_extent);
    if (MPI_SUCCESS != rc) goto cleanup;

    /* calculate position in grid using row-major ordering */
    {
        int tmp_rank = rank, procs = size;

        coords = (int *) malloc(ndims * sizeof(int));
        for (i = 0 ; i < ndims ; i++) {
            procs = procs / psize_array[i];
            coords[i] = tmp_rank / procs;
            tmp_rank = tmp_rank % procs;
        }
    }

    st_offsets = (ptrdiff_t *) malloc(ndims * sizeof(ptrdiff_t));

    /* Duplicate the type here to 1) deal with constness without casting
       and 2) eliminate the need for a conditional destroy below.
       Lame, yes.  But cleaner code all around. */
    rc = ompi_ddt_duplicate(oldtype, &lastType);
    if (OMPI_SUCCESS != rc) goto cleanup;

    /* figure out ordering issues */
    if (MPI_ORDER_C == order) {
        start_loop = ndims - 1 ; step = -1; end_loop = -1;
    } else {
        start_loop = 0 ; step = 1; end_loop = ndims;
    }

    /* Build up array */
    for (i = start_loop ; i != end_loop; i += step) {
        int nprocs, rank;

        switch(distrib_array[i]) {
        case MPI_DISTRIBUTE_BLOCK:
            rc = block(gsize_array, i, ndims, psize_array[i], coords[i], 
                       darg_array[i], order, orig_extent, 
                       lastType, newtype, st_offsets+i); 
            break;
        case MPI_DISTRIBUTE_CYCLIC:
            rc = cyclic(gsize_array, i, ndims, psize_array[i], coords[i], 
                        darg_array[i], order, orig_extent, 
                        lastType, newtype, st_offsets+i);
            break;
        case MPI_DISTRIBUTE_NONE:
            /* treat it as a block distribution on 1 process */
            if (order == MPI_ORDER_C) {
                nprocs = psize_array[i]; rank = coords[i];
            } else {
                nprocs = 1; rank = 0;
            }

            rc = block(gsize_array, i, ndims, nprocs, rank,
                       MPI_DISTRIBUTE_DFLT_DARG, order, orig_extent, 
                       lastType, newtype, st_offsets+i); 
            break;
        default:
            rc = MPI_ERR_ARG;
        }
        ompi_ddt_destroy(&lastType);
        /* need to destroy the old type even in error condition, so
           don't check return code from above until after cleanup. */
        if (MPI_SUCCESS != rc) goto cleanup;
        lastType = *newtype;
    }


    /* set displacement and UB correctly.  Use struct instead of
       resized for same reason as subarray */
    { 
        ptrdiff_t displs[3];
        ompi_datatype_t *types[3];
        int tmp_size, blength[3] = { 1, 1, 1};

        displs[1] = st_offsets[start_loop];
        tmp_size = 1;
        for (i = start_loop + step ; i != end_loop ; i += step) {
            tmp_size *= gsize_array[i - step];
            displs[1] += tmp_size * st_offsets[i];
        }

        displs[0] = 0; 
        displs[1] *= orig_extent;
        displs[2] = orig_extent;
        for (i = 0 ; i < ndims ; i++) {
            displs[2] *= gsize_array[i];
        }
        types[0] = MPI_LB; types[1] = lastType; types[2] = MPI_UB;

        rc = ompi_ddt_create_struct(3, blength, displs, types, newtype);
        ompi_ddt_destroy(&lastType);
        /* need to destroy the old type even in error condition, so
           don't check return code from above until after cleanup. */
        if (MPI_SUCCESS != rc) goto cleanup;
    }

    {
        int* a_i[8];

        a_i[0] = &size;
        a_i[1] = &rank;
        a_i[2] = &ndims;
        a_i[3] = gsize_array;
        a_i[4] = distrib_array;
        a_i[5] = darg_array;
        a_i[6] = psize_array;
        a_i[7] = &order;

        ompi_ddt_set_args( *newtype, 4 * ndims + 4, a_i, 0, NULL, 1, &oldtype,
                           MPI_COMBINER_DARRAY );
    }

 cleanup:
    if (NULL != st_offsets) free(st_offsets);
    if (NULL != coords) free(coords);

    OMPI_ERRHANDLER_RETURN(rc, MPI_COMM_WORLD, rc, FUNC_NAME);
}
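
/*
 * Worked illustration of the row-major coordinate computation used in
 * MPI_Type_create_darray above (hypothetical helper, added for clarity).
 * For size = 6 and psize_array = {2, 3}, rank 4 maps to coords = {1, 1}:
 *   procs = 6/2 = 3, coords[0] = 4/3 = 1, tmp_rank = 4%3 = 1;
 *   procs = 3/3 = 1, coords[1] = 1/1 = 1, tmp_rank = 1%1 = 0.
 */
static void rank_to_coords( int rank, int ndims, const int psize_array[], int coords[] )
{
    int i, procs = 1, tmp_rank = rank;

    for( i = 0; i < ndims; i++ ) {
        procs *= psize_array[i];
    }
    for( i = 0; i < ndims; i++ ) {
        procs     = procs / psize_array[i];
        coords[i] = tmp_rank / procs;
        tmp_rank  = tmp_rank % procs;
    }
}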
Example #6
static int local_copy_with_convertor( ompi_datatype_t* pdt, int count, int chunk )
{
    MPI_Aint extent;
    void *pdst = NULL, *psrc = NULL, *ptemp = NULL;
    ompi_convertor_t *send_convertor = NULL, *recv_convertor = NULL;
    struct iovec iov;
    uint32_t iov_count;
    size_t max_data;
    int32_t length = 0, done1 = 0, done2 = 0;
    TIMER_DATA_TYPE start, end, unpack_start, unpack_end;
    long total_time, unpack_time = 0;

    ompi_ddt_type_extent( pdt, &extent );

    pdst  = malloc( extent * count );
    psrc  = malloc( extent * count );
    ptemp = malloc( chunk );

    {
        int i;
        for( i = 0; i < (count * extent); i++ )
            ((char*)psrc)[i] = i % 128 + 32;
    }
    memset( pdst, 0, count * extent );

    send_convertor = ompi_convertor_create( remote_arch, 0 );
    if( OMPI_SUCCESS != ompi_convertor_prepare_for_send( send_convertor, pdt, count, psrc ) ) {
        printf( "Unable to create the send convertor. Is the datatype committed ?\n" );
        goto clean_and_return;
    }

    recv_convertor = ompi_convertor_create( remote_arch, 0 );
    if( OMPI_SUCCESS != ompi_convertor_prepare_for_recv( recv_convertor, pdt, count, pdst ) ) {
        printf( "Unable to create the recv convertor. Is the datatype committed ?\n" );
        goto clean_and_return;
    }

    cache_trash();  /* make sure the cache is useless */

    GET_TIME( start );
    while( (done1 & done2) != 1 ) {
        /* They are supposed to finish at exactly the same time. */
        if( done1 | done2 ) {
            printf( "WRONG !!! the send is %s but the receive is %s in local_copy_with_convertor\n",
                    (done1 ? "finished" : "not finished"),
                    (done2 ? "finished" : "not finished") );
        }

        max_data = chunk;
        iov_count = 1;
        iov.iov_base = ptemp;
        iov.iov_len = chunk;

        if( done1 == 0 ) {
            done1 = ompi_convertor_pack( send_convertor, &iov, &iov_count, &max_data );
        }

        if( done2 == 0 ) {
            GET_TIME( unpack_start );
            done2 = ompi_convertor_unpack( recv_convertor, &iov, &iov_count, &max_data );
            GET_TIME( unpack_end );
            unpack_time += ELAPSED_TIME( unpack_start, unpack_end );
        }

        length += max_data;
    }
    GET_TIME( end );
    total_time = ELAPSED_TIME( start, end );
    printf( "copying same data-type using convertors in %ld microsec\n", total_time );
    printf( "\t unpack in %ld microsec [pack in %ld microsec]\n", unpack_time,
            total_time - unpack_time );
 clean_and_return:
    if( NULL != send_convertor ) OBJ_RELEASE( send_convertor );
    if( NULL != recv_convertor ) OBJ_RELEASE( recv_convertor );

    if( NULL != pdst ) free( pdst );
    if( NULL != psrc ) free( psrc );
    if( NULL != ptemp ) free( ptemp );
    return OMPI_SUCCESS;
}
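
/*
 * Hypothetical driver sketch: the chunked pack/unpack loop above is normally
 * exercised with several pipeline depths so that convertor overhead can be
 * compared against the direct copy.  The chunk sizes below are assumptions
 * for illustration only.
 */
static void run_convertor_benchmark( ompi_datatype_t* pdt, int count )
{
    const int chunks[] = { 1024, 64 * 1024, 1024 * 1024 };
    unsigned int i;

    for( i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++ ) {
        printf( "pipeline chunk of %d bytes\n", chunks[i] );
        local_copy_with_convertor( pdt, count, chunks[i] );
    }
}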
Example #7
static int mpich_typeub3( void )
{
   int blocklen[3], err = 0, idisp[3];
   size_t sz;
   MPI_Aint disp[3], lb, ub, ex;
   ompi_datatype_t *types[3], *dt1, *dt2, *dt3, *dt4, *dt5;

   /* Create a datatype with explicit LB and UB */
   blocklen[0] = 1;
   blocklen[1] = 1;
   blocklen[2] = 1;
   disp[0] = -3;
   disp[1] = 0; 
   disp[2] = 6;
   types[0] = &ompi_mpi_lb;  /* ompi_ddt_basicDatatypes[DT_LB]; */
   types[1] = &ompi_mpi_int;  /* ompi_ddt_basicDatatypes[DT_INT]; */
   types[2] = &ompi_mpi_ub;  /* ompi_ddt_basicDatatypes[DT_UB]; */
   
   /* Generate samples for contiguous, hindexed, hvector, indexed, and vector
      (struct and contiguous are tested in typeub2) */
   ompi_ddt_create_struct(3,blocklen,disp, types,&dt1);
   ompi_ddt_commit(&dt1);

   /* This type is the same as in typeub2, and is tested there */
   types[0]=dt1;               types[1]=dt1;
   blocklen[0]=1;              blocklen[1]=1;
   disp[0]=-4;                 disp[1]=7;
   idisp[0]=-4;                idisp[1]=7;

   ompi_ddt_create_hindexed( 2, blocklen, disp, dt1, &dt2 );
   ompi_ddt_commit( &dt2 );

   ompi_ddt_type_lb( dt2, &lb );       ompi_ddt_type_ub( dt2, &ub );
   ompi_ddt_type_extent( dt2, &ex );   ompi_ddt_type_size( dt2, &sz );

   if (lb != -7 || ub != 13 || ex != 20) {
      printf("hindexed lb %d ub %d extent %d size %d\n", (int)-7, (int)13, (int)20, (int)sz);
      printf("hindexed lb %d ub %d extent %d size %d\n", (int)lb, (int)ub, (int)ex, (int)sz);
      err++;
   }
   else
      printf( "hindexed ok\n" );

   ompi_ddt_create_indexed( 2, blocklen, idisp, dt1, &dt3 );
   ompi_ddt_commit( &dt3 );

   ompi_ddt_type_lb( dt3, &lb );       ompi_ddt_type_ub( dt3, &ub );
   ompi_ddt_type_extent( dt3, &ex );   ompi_ddt_type_size( dt3, &sz );

   if (lb != -39 || ub != 69 || ex != 108) {
      printf("indexed lb %d ub %d extent %d size %d\n", (int)-39, (int)69, (int)108, (int)sz);
      printf("indexed lb %d ub %d extent %d size %d\n", (int)lb, (int)ub, (int)ex, (int)sz);
      err++;
   }
   else
      printf( "indexed ok\n" );

   ompi_ddt_create_hvector( 2, 1, 14, dt1, &dt4 );
   ompi_ddt_commit( &dt4 );

   ompi_ddt_type_lb( dt4, &lb );       ompi_ddt_type_ub( dt4, &ub );
   ompi_ddt_type_extent( dt4, &ex );   ompi_ddt_type_size( dt4, &sz );

   if (lb != -3 || ub != 20 || ex != 23) {
      printf("hvector lb %d ub %d extent %d size %d\n", (int)-3, (int)20, (int)23, (int)sz);
      printf("hvector lb %d ub %d extent %d size %d\n", (int)lb, (int)ub, (int)ex, (int)sz);
      err++;
   }
   else
      printf( "hvector ok\n" );

   ompi_ddt_create_vector( 2, 1, 14, dt1, &dt5 );
   ompi_ddt_commit( &dt5 );

   ompi_ddt_type_lb( dt5, &lb );       ompi_ddt_type_ub( dt5, &ub );
   ompi_ddt_type_extent( dt5, &ex );   ompi_ddt_type_size( dt5, &sz );

   if (lb != -3 || ub != 132 || ex != 135) {
      printf("vector lb %d ub %d extent %d size %d\n", (int)-3, (int)132, (int)135, (int)sz);
      printf("vector lb %d ub %d extent %d size %d\n", (int)lb, (int)ub, (int)ex, (int)sz);
      err++;
   }
   else
      printf( "vector ok\n" );

   OBJ_RELEASE( dt1 ); /*assert( dt1 == NULL );*/
   OBJ_RELEASE( dt2 ); /*assert( dt2 == NULL );*/
   OBJ_RELEASE( dt3 ); /*assert( dt3 == NULL );*/
   OBJ_RELEASE( dt4 ); /*assert( dt4 == NULL );*/
   OBJ_RELEASE( dt5 ); assert( dt5 == NULL );
   return err;
}
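
/*
 * Where the expected constants above come from (worked arithmetic, added for
 * clarity; the small check below only restates the lb/ub algebra and is not
 * part of the original test).  dt1 has lb = -3, ub = 6, extent = 9, so:
 *   hindexed, byte displacements {-4, 7}:  spans [-4-3, -4+6] and [7-3, 7+6]
 *                                          -> lb = -7,  ub = 13,  extent = 20
 *   indexed, displacements in extents:     spans [-36-3, -36+6] and [63-3, 63+6]
 *                                          -> lb = -39, ub = 69,  extent = 108
 *   hvector, byte stride 14:               copies at 0 and 14
 *                                          -> lb = -3,  ub = 20,  extent = 23
 *   vector, stride 14 extents = 126 bytes: copies at 0 and 126
 *                                          -> lb = -3,  ub = 132, extent = 135
 */
static void check_expected_hindexed_bounds( void )
{
   const int dt1_lb = -3, dt1_ub = 6;
   const int disp[2] = { -4, 7 };
   int i, lb = disp[0] + dt1_lb, ub = disp[0] + dt1_ub;

   for( i = 1; i < 2; i++ ) {
      if( disp[i] + dt1_lb < lb ) lb = disp[i] + dt1_lb;
      if( disp[i] + dt1_ub > ub ) ub = disp[i] + dt1_ub;
   }
   assert( -7 == lb && 13 == ub && 20 == (ub - lb) );
}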
Example #8
static int mpich_typeub2( void )
{
   int blocklen[3], err = 0;
   size_t sz1, sz2, sz3;
   MPI_Aint disp[3], lb, ub, ex1, ex2, ex3;
   ompi_datatype_t *types[3], *dt1, *dt2, *dt3;

   blocklen[0] = 1;
   blocklen[1] = 1;
   blocklen[2] = 1;
   disp[0] = -3;
   disp[1] = 0;
   disp[2] = 6;
   types[0] = &ompi_mpi_lb;  /* ompi_ddt_basicDatatypes[DT_LB]; */
   types[1] = &ompi_mpi_int;  /* ompi_ddt_basicDatatypes[DT_INT]; */
   types[2] = &ompi_mpi_ub;  /* ompi_ddt_basicDatatypes[DT_UB]; */

   ompi_ddt_create_struct(3,blocklen,disp, types,&dt1);
   ompi_ddt_commit(&dt1);

   ompi_ddt_type_lb(dt1, &lb);          ompi_ddt_type_ub(dt1, &ub);
   ompi_ddt_type_extent(dt1,&ex1);      ompi_ddt_type_size(dt1,&sz1);

   /* Values should be lb = -3, ub = 6, extent = 9; size depends on implementation */
   if (lb != -3 || ub != 6 || ex1 != 9) {
      printf("Example 3.26 type1 lb %d ub %d extent %d size %d\n", (int)lb, (int)ub, (int)ex1, (int)sz1);
      err++;
   }
   else
      printf("Example 3.26 type1 correct\n" );

   ompi_ddt_create_contiguous(2,dt1,&dt2);
   ompi_ddt_type_lb(dt2, &lb);          ompi_ddt_type_ub(dt2, &ub);
   ompi_ddt_type_extent(dt2,&ex2);      ompi_ddt_type_size(dt2,&sz2);
   /* Values should be lb = -3, ub = 15, extent = 18, size depends on implementation */
   if (lb != -3 || ub != 15 || ex2 != 18) {
      printf("Example 3.26 type2 lb %d ub %d extent %d size %d\n", (int)-3, (int)15, (int)18, 8);
      printf("Example 3.26 type2 lb %d ub %d extent %d size %d\n", (int)lb, (int)ub, (int)ex2, (int)sz2);
      err++;
   }
   else
      printf("Example 3.26 type1 correct\n" );
   OBJ_RELEASE( dt2 ); assert( dt2 == NULL );
   ompi_ddt_create_contiguous(2,dt1,&dt2);
   ompi_ddt_type_lb(dt2, &lb);          ompi_ddt_type_ub(dt2, &ub);
   ompi_ddt_type_extent(dt2,&ex2);      ompi_ddt_type_size(dt2,&sz2);
   /* Values should be lb = -3, ub = 15, extent = 18, size depends on implementation */
   if (lb != -3 || ub != 15 || ex2 != 18) {
      printf("Example 3.26 type2 lb %d ub %d extent %d size %d\n", (int)-3, (int)15, (int)18, 8);
      printf("Example 3.26 type2 lb %d ub %d extent %d size %d\n", (int)lb, (int)ub, (int)ex2, (int)sz2);
      err++;
   }
   else
      printf( "Example 3.26 type2 correct\n" );

   types[0]=dt1;               types[1]=dt1;
   blocklen[0]=1;              blocklen[1]=1;
   disp[0]=0;                  disp[1]=ex1;

   ompi_ddt_create_struct(2, blocklen, disp, types, &dt3);
   ompi_ddt_commit(&dt3);

   ompi_ddt_type_lb(dt3, &lb);          ompi_ddt_type_ub(dt3, &ub);
   ompi_ddt_type_extent(dt3,&ex3);      ompi_ddt_type_size(dt3,&sz3);
   /* Another way to express type2 */
   if (lb != -3 || ub != 15 || ex3 != 18) {
      printf("type3 lb %d ub %d extent %d size %d\n", (int)-3, (int)15, (int)18, 8);
      printf("type3 lb %d ub %d extent %d size %d\n", (int)lb, (int)ub, (int)ex3, (int)sz2);
      err++;
   }
   else
      printf( "type3 correct\n" );

   OBJ_RELEASE( dt1 ); /*assert( dt1 == NULL );*/
   OBJ_RELEASE( dt2 ); /*assert( dt2 == NULL );*/
   OBJ_RELEASE( dt3 ); assert( dt3 == NULL );
   return err;
}
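
/*
 * Worked arithmetic behind MPI standard Example 3.26 as tested above (added
 * note): dt1 has lb = -3, ub = 6, extent = 9.  A contiguous type made of two
 * copies of dt1 places the second copy one extent further along, so lb = -3,
 * ub = 6 + 9 = 15 and extent = 2 * 9 = 18.  dt3 builds the same layout
 * explicitly as a struct with displacements {0, ex1}, which is why it must
 * report identical lb, ub, and extent.
 */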