Example #1
/*
 * Add a new handle to the end of an array of handles
 * Formerly, NC_incr_array(array, tail)
 */
static int
incr_NC_vararray(NC_vararray *ncap,
                 NC_var      *newvarp)
{
    NC_var **vp;

    assert(ncap != NULL);

    if (ncap->nalloc == 0) { /* no variable has been allocated yet */
        assert(ncap->ndefined == 0);
        vp = (NC_var **) NCI_Malloc(NC_ARRAY_GROWBY * sizeof(NC_var *));
        if (vp == NULL) return NC_ENOMEM;

        ncap->value = vp;
        ncap->nalloc = NC_ARRAY_GROWBY;
    }
    else if (ncap->ndefined + 1 > ncap->nalloc) {
        vp = (NC_var **) NCI_Realloc(ncap->value, (ncap->nalloc + NC_ARRAY_GROWBY) * sizeof(NC_var *));
        if (vp == NULL) return NC_ENOMEM;

        ncap->value = vp;
        ncap->nalloc += NC_ARRAY_GROWBY;
    }

    if (newvarp != NULL) {
        ncap->value[ncap->ndefined] = newvarp;
        ncap->ndefined++;
    }

    return NC_NOERR;
}
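
The helper grows the handle array in fixed-size chunks and treats a NULL newvarp as "reserve capacity without appending". A minimal standalone sketch of the same idiom, with illustrative names (handle_array, incr_handle_array, GROWBY) that are not PnetCDF's:

#include <assert.h>
#include <stdlib.h>

#define GROWBY 4   /* grow in fixed-size chunks, like NC_ARRAY_GROWBY */

typedef struct {             /* mirrors the shape of NC_vararray */
    int    ndefined;         /* number of slots in use */
    int    nalloc;           /* number of slots allocated */
    void **value;            /* the handle array itself */
} handle_array;

static int incr_handle_array(handle_array *ap, void *newp)
{
    assert(ap != NULL);

    if (ap->nalloc == 0) {               /* first use: allocate one chunk */
        assert(ap->ndefined == 0);
        ap->value = (void**) malloc(GROWBY * sizeof(void*));
        if (ap->value == NULL) return -1;
        ap->nalloc = GROWBY;
    }
    else if (ap->ndefined + 1 > ap->nalloc) {   /* full: extend by a chunk */
        /* realloc into a temporary so the old block survives a failure */
        void **vp = (void**) realloc(ap->value,
                                     (ap->nalloc + GROWBY) * sizeof(void*));
        if (vp == NULL) return -1;
        ap->value = vp;
        ap->nalloc += GROWBY;
    }

    if (newp != NULL)                    /* NULL means "grow only" */
        ap->value[ap->ndefined++] = newp;

    return 0;
}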
Example #2
/*
 * Add a new handle to the end of an array of handles
 * Formerly, NC_incr_array(array, tail)
 */
static int
incr_NC_dimarray(NC_dimarray *ncap,
                 NC_dim      *newdimp)
{
    NC_dim **vp;

    assert(ncap != NULL);

    if (ncap->nalloc == 0) {
        assert(ncap->ndefined == 0);
        vp = (NC_dim **) NCI_Malloc(NC_ARRAY_GROWBY * sizeof(NC_dim *));
        if (vp == NULL) return NC_ENOMEM;

        ncap->value = vp;
        ncap->nalloc = NC_ARRAY_GROWBY;
    }
    else if (ncap->ndefined + 1 > ncap->nalloc) {
        vp = (NC_dim **) NCI_Realloc(ncap->value,
             (ncap->nalloc + NC_ARRAY_GROWBY) * sizeof(NC_dim *));
        if (vp == NULL) return NC_ENOMEM;

        ncap->value = vp;
        ncap->nalloc += NC_ARRAY_GROWBY;
    }
    /* else: some space is still available */

    if (newdimp != NULL) {
        ncap->value[ncap->ndefined] = newdimp;
        ncap->ndefined++;
    }

    return NC_NOERR;
}
Example #3
/* allocate and return a new NC_dim object */
NC_dim *
ncmpii_new_x_NC_dim(NC_string *name)
{
    NC_dim *dimp;

    dimp = (NC_dim *) NCI_Malloc(sizeof(NC_dim));
    if (dimp == NULL) return NULL;

    dimp->name = name;
    dimp->size = 0;

    return(dimp);
}
Example #4
/*
 * Initialize the put list
 *
 */
int ncdwio_put_list_init(NC_dw *ncdwp){
    int i;
    NC_dw_put_list *lp = &(ncdwp->putlist);

    /* Initialize parameter and allocate the array  */
    lp->nused = 0;
    lp->nalloc = PUT_ARRAY_SIZE;
    lp->reqs = (NC_dw_put_req*)NCI_Malloc(lp->nalloc * sizeof(NC_dw_put_req));
    lp->ids = (int*)NCI_Malloc(lp->nalloc * SIZEOF_INT);
    if (lp->reqs == NULL || lp->ids == NULL){
        /* free whichever of the two allocations succeeded */
        if (lp->reqs != NULL) NCI_Free(lp->reqs);
        if (lp->ids != NULL) NCI_Free(lp->ids);
        DEBUG_RETURN_ERROR(NC_ENOMEM);
    }

    /* Initialize values of ids and reqs
     * Assign increasing unique id
     */
    for(i = 0; i < lp->nalloc; i++){
        lp->ids[i] = i; // Unique ids
        lp->reqs[i].valid = 0;  // Not in use
    }

    return NC_NOERR;
}
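
Pre-filling ids[] with 0..nalloc-1 turns it into a pool of unique request ids: the first nused entries are handed out, the rest are free. A hypothetical sketch of how such a pool could be consumed (put_list_add is an illustrative name, not the driver's actual API):

/* Take the next free id from the pool and mark its request slot in use.
 * A sketch only: the real driver would presumably grow the pool instead
 * of failing when it is exhausted. */
static int put_list_add(NC_dw_put_list *lp, int *id)
{
    if (lp->nused == lp->nalloc)
        DEBUG_RETURN_ERROR(NC_ENOMEM);  /* pool exhausted */

    *id = lp->ids[lp->nused];   /* ids[0..nused-1] are in use, rest free */
    lp->reqs[*id].valid = 1;    /* mark the slot as occupied */
    lp->nused++;

    return NC_NOERR;
}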
Example #5
int
ncmpii_dup_NC_vararrayV(NC_vararray *ncap, const NC_vararray *ref)
{
        int status = NC_NOERR;

        assert(ref != NULL);
        assert(ncap != NULL);

        if(ref->ndefined != 0)
        {
                const MPI_Offset sz = ref->ndefined * sizeof(NC_var *);
                ncap->value = (NC_var **) NCI_Malloc(sz);
                if(ncap->value == NULL)
                        return NC_ENOMEM;
                (void) memset(ncap->value, 0, sz);
                ncap->nalloc = ref->ndefined;
        }

        ncap->ndefined = 0;
        {
                NC_var **vpp = ncap->value;
                const NC_var **drpp = (const NC_var **)ref->value;
                NC_var *const *const end = &vpp[ref->ndefined];
                for( /*NADA*/; vpp < end; drpp++, vpp++, ncap->ndefined++)
                {
                        *vpp = dup_NC_var(*drpp);
                        if(*vpp == NULL)
                        {
                                status = NC_ENOMEM;
                                break;
                        }
                }
        }

        if(status != NC_NOERR)
        {
                ncmpii_free_NC_vararrayV(ncap);
                return status;
        }

        assert(ncap->ndefined == ref->ndefined);

        return NC_NOERR;
}
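
The copy loop above rolls back on the first failed duplication, so the caller never sees a half-built array. The same pattern in a self-contained sketch using plain C strings (dup_string_array is an illustrative name):

#include <stdlib.h>
#include <string.h>

/* Duplicate n strings into dst[]; on the first allocation failure, free
 * everything copied so far and report the error, leaking nothing. */
static int dup_string_array(char **dst, char *const *src, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        dst[i] = strdup(src[i]);             /* POSIX strdup */
        if (dst[i] == NULL) {
            while (--i >= 0) free(dst[i]);   /* roll back the partial copy */
            return -1;
        }
    }
    return 0;
}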
Example #6
/* 
 * Common code for ncmpii_new_NC_var() 
 * and ncx_get_NC_var()
 */
NC_var *
ncmpii_new_x_NC_var(NC_string *strp,
                    size_t     ndims)
{
    NC_var *varp;
    const MPI_Offset o1 = M_RNDUP(ndims * sizeof(MPI_Offset));
    const MPI_Offset o2 = M_RNDUP(ndims * sizeof(MPI_Offset));
    const MPI_Offset sz = M_RNDUP(sizeof(NC_var)) + o1 + o2 + ndims * sizeof(MPI_Offset);

    /* wkliao: this function allocates a single contiguous memory space to
     * keep all members of the NC_var structure together: o1 is the space for
     * dimids[], o2 is for shape[], and the remaining ndims*sizeof(MPI_Offset)
     * is for dsizes[]. M_RNDUP rounds each section up so that every array
     * stays properly aligned within the one allocation, and keeping them
     * contiguous lets a single NCI_Free release the whole object.
     */
    varp = (NC_var *) NCI_Malloc(sz);
    if (varp == NULL ) return NULL;

    (void) memset(varp, 0, sz);

    varp->name = strp;
    varp->ndims = ndims;

    if (ndims != 0) {
        /*
         * NOTE: lint may complain about the next 3 lines:
         * "pointer cast may result in improper alignment".
         * We use the M_RNDUP() macro to get the proper alignment.
         * roundup to a double
         */
        varp->dimids = (int *)((char *)varp + M_RNDUP(sizeof(NC_var)));
        varp->shape  = (MPI_Offset *)((char *)varp->dimids + o1);
        varp->dsizes = (MPI_Offset *)((char *)varp->shape + o2);
    }

    varp->xsz = 0;
    varp->len = 0;
    varp->begin = 0;

    return varp;
}
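
The layout trick is worth seeing in isolation: one allocation means one malloc and one free for the whole object, and the round-up keeps each trailing array aligned. A standalone sketch under those assumptions (RNDUP, blob_t, and new_blob are illustrative names):

#include <stdlib.h>
#include <string.h>

/* round x up to a multiple of sizeof(double), in the spirit of M_RNDUP */
#define RNDUP(x) ((((x) + sizeof(double) - 1) / sizeof(double)) * sizeof(double))

typedef struct {
    int     n;
    int    *ids;    /* points into the same allocation as the struct */
    double *vals;   /* ditto */
} blob_t;

static blob_t *new_blob(int n)
{
    /* one block: the struct, then the int array, then the double array,
     * each section rounded up so the next one starts aligned */
    size_t o1 = RNDUP(n * sizeof(int));
    size_t sz = RNDUP(sizeof(blob_t)) + o1 + n * sizeof(double);

    blob_t *bp = (blob_t*) malloc(sz);
    if (bp == NULL) return NULL;
    memset(bp, 0, sz);

    bp->n    = n;
    bp->ids  = (int*)    ((char*)bp + RNDUP(sizeof(blob_t)));
    bp->vals = (double*) ((char*)bp->ids + o1);
    return bp;          /* a single free(bp) releases everything */
}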
Example #7
/*
 * Commit the log file into the CDF file
 * Metadata is kept in memory; the metadata log is used only for restoration
 * after an abnormal shutdown
 * IN    ncdwp:    log structure
 */
int log_flush(NC_dw *ncdwp) {
    int i, j, lb, ub, err, status = NC_NOERR;
    int *reqids, *stats;
    int ready = 1, ready_all = 0; /* initialized for the case of an empty metadata log */
    size_t databufferused, databuffersize, dataread;
    NC_dw_metadataentry *entryp;
    MPI_Offset *start, *count, *stride;
    MPI_Datatype buftype;
    char *databuffer, *databufferoff;
    NC_dw_metadataheader *headerp;
    NC_dw_metadataptr *ip;
#ifdef PNETCDF_PROFILING
    double t1, t2, t3, t4;

    t1 = MPI_Wtime();
#endif

    /* Read the data log into memory */
    /*
     * Prepare the data buffer
     * The buffer size is determined by the flush buffer size hint, the size
     * of the data log, and the largest single record; a hint of 0 means no
     * limit:
     * (buffer size) = max((largest single record),
     *                     min((data log size), (size given in the hint)))
     */
    databuffersize = ncdwp->datalogsize;
    if (ncdwp->flushbuffersize > 0 && databuffersize > ncdwp->flushbuffersize){
        databuffersize = ncdwp->flushbuffersize;
    }
    if (databuffersize < ncdwp->maxentrysize){
        databuffersize = ncdwp->maxentrysize;
    }

#ifdef PNETCDF_PROFILING
    if (ncdwp->max_buffer < databuffersize){
        ncdwp->max_buffer = databuffersize;
    }
#endif

    /* Allocate buffer */
    databuffer = (char*)NCI_Malloc(databuffersize);
    if(databuffer == NULL){
        DEBUG_RETURN_ERROR(NC_ENOMEM);
    }

    /* Seek to the start position of first data record */
    err = ncdwio_bufferedfile_seek(ncdwp->datalog_fd, 8, SEEK_SET);
    if (err != NC_NOERR){
        return err;
    }

    /* Initialize buffer status */
    databufferused = 0;
    dataread = 0;

    reqids = (int*)NCI_Malloc(ncdwp->entrydatasize.nused * SIZEOF_INT);
    stats = (int*)NCI_Malloc(ncdwp->entrydatasize.nused * SIZEOF_INT);
    if (ncdwp->entrydatasize.nused > 0 && (reqids == NULL || stats == NULL)){
        if (reqids != NULL) NCI_Free(reqids);
        if (stats != NULL) NCI_Free(stats);
        NCI_Free(databuffer);
        DEBUG_RETURN_ERROR(NC_ENOMEM);
    }

    /*
     * Iterate through meta log entries
     */
    headerp = (NC_dw_metadataheader*)ncdwp->metadata.buffer;
    entryp = (NC_dw_metadataentry*)(((char*)ncdwp->metadata.buffer) + headerp->entry_begin);
    for (lb = 0; lb < ncdwp->metaidx.nused;){
        for (ub = lb; ub < ncdwp->metaidx.nused; ub++) {
            if (ncdwp->metaidx.entries[ub].valid){
                if(ncdwp->entrydatasize.values[ub] + databufferused > databuffersize) {
                    break;  // Buffer full
                }
                else{
                    databufferused += ncdwp->entrydatasize.values[ub]; // Record size of entry
                }
            }
            else{
                // We encountered a canceled record:
                // read any unread data into the buffer, then skip over the gap
                /*
                 * Read data into the buffer
                 * We read only what is needed by pending requests
                 */
                if (dataread < databufferused){
#ifdef PNETCDF_PROFILING
                    t2 = MPI_Wtime();
#endif
                    err = ncdwio_bufferedfile_read(ncdwp->datalog_fd, databuffer + dataread, databufferused - dataread);
                    if (err != NC_NOERR){
                        return err;
                    }
#ifdef PNETCDF_PROFILING
                    t3 = MPI_Wtime();
                    ncdwp->flush_data_rd_time += t3 - t2;
#endif
                    dataread = databufferused;
                }

                // Skip canceled entry
                err = ncdwio_bufferedfile_seek(ncdwp->datalog_fd, ncdwp->entrydatasize.values[ub], SEEK_CUR);
                if (err != NC_NOERR){
                    return err;
                }
            }
        }

        /*
         * Read data into the buffer
         * We read only what is needed by pending requests
         */
        if (dataread < databufferused){
#ifdef PNETCDF_PROFILING
            t2 = MPI_Wtime();
#endif
            err = ncdwio_bufferedfile_read(ncdwp->datalog_fd, databuffer + dataread, databufferused - dataread);
            if (err != NC_NOERR){
                return err;
            }
#ifdef PNETCDF_PROFILING
            t3 = MPI_Wtime();
            ncdwp->flush_data_rd_time += t3 - t2;
#endif
            dataread = databufferused;
        }

        // databufferoff walks through the data of each entry in the buffer
        databufferoff = databuffer;

        j = 0;
        for(i = lb; i < ub; i++){
            ip = ncdwp->metaidx.entries + i;

            if (ip->valid) {
                /* start, count, stride */
                start = (MPI_Offset*)(entryp + 1);
                count = start + entryp->ndims;
                stride = count + entryp->ndims;

                // Convert from log type to MPI type
                err = logtype2mpitype(entryp->itype, &buftype);
                if (err != NC_NOERR){
                    return err;
                }

                /* Determine API_Kind */
                if (entryp->api_kind == NC_LOG_API_KIND_VARA){
                    stride = NULL;
                }

#ifdef PNETCDF_PROFILING
                t2 = MPI_Wtime();
#endif

                /* Replay event with non-blocking call */
                err = ncdwp->ncmpio_driver->iput_var(ncdwp->ncp, entryp->varid, start, count, stride, NULL, (void*)(databufferoff), -1, buftype, reqids + j, NC_REQ_WR | NC_REQ_NBI | NC_REQ_HL);
                if (status == NC_NOERR) {
                    status = err;
                }

#ifdef PNETCDF_PROFILING
                t3 = MPI_Wtime();
                ncdwp->flush_put_time += t3 - t2;
#endif

                // Move to next data location
                databufferoff += entryp->data_len;
                j++;
            }

            /* Move to next position */
            entryp = (NC_dw_metadataentry*)(((char*)entryp) + entryp->esize);
        }

#ifdef PNETCDF_PROFILING
        t2 = MPI_Wtime();
#endif
        /*
         * Wait must be called first or previous data will be corrupted
         */
        if (ncdwp->isindep) {
            err = ncdwp->ncmpio_driver->wait(ncdwp->ncp, j, reqids, stats, NC_REQ_INDEP);
        }
        else{
            err = ncdwp->ncmpio_driver->wait(ncdwp->ncp, j, reqids, stats, NC_REQ_COLL);
        }
        if (status == NC_NOERR) {
            status = err;
        }

#ifdef PNETCDF_PROFILING
        t3 = MPI_Wtime();
        ncdwp->flush_wait_time += t3 - t2;
#endif

        // Fill in the status of each nonblocking request
        // (j indexes stats[] across the whole batch; resetting it per entry
        //  would make every request read stats[0])
        j = 0;
        for(i = lb; i < ub; i++){
            ip = ncdwp->metaidx.entries + i;
            if (ip->valid) {
                if (ip->reqid >= 0){
                    ncdwp->putlist.reqs[ip->reqid].status = stats[j];
                    ncdwp->putlist.reqs[ip->reqid].ready = 1;
                }
                j++;
            }
        }

        /* Update batch status */
        databufferused = 0;

        // Mark as complete
        lb = ub;

        /*
         * In case of collective flush, we sync our status with other processes
         */
        if (!ncdwp->isindep){
            if (lb >= ncdwp->metaidx.nused){
                ready = 1;
            }
            else{
                ready = 0;
            }

            // Sync status
            err = MPI_Allreduce(&ready, &ready_all, 1, MPI_INT, MPI_LAND, ncdwp->comm);
            if (err != MPI_SUCCESS){
                DEBUG_RETURN_ERROR(ncmpii_error_mpi2nc(err, "MPI_Allreduce"));
            }
        }
    }

    /*
     * In case of collective flush, we must continue to call wait until every process is ready
     */
    if (!ncdwp->isindep){
        while(!ready_all){
            // Participate in the collective wait
            err = ncdwp->ncmpio_driver->wait(ncdwp->ncp, 0, NULL, NULL, NC_REQ_COLL);
            if (status == NC_NOERR) {
                status = err;
            }

            // Sync status
            err = MPI_Allreduce(&ready, &ready_all, 1, MPI_INT, MPI_LAND, ncdwp->comm);
            if (err != MPI_SUCCESS){
                DEBUG_RETURN_ERROR(ncmpii_error_mpi2nc(err, "MPI_Allreduce"));
            }
        }
    }

    /* Free the data buffer */
    NCI_Free(databuffer);
    NCI_Free(reqids);
    NCI_Free(stats);

#ifdef PNETCDF_PROFILING
    t4 = MPI_Wtime();
    ncdwp->flush_replay_time += t4 - t1;
#endif

    return status;
}
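
The buffer-size rule quoted at the top of log_flush can be restated in isolation. A sketch (flush_buffer_size is an illustrative name) of exactly the computation performed by the first three if statements:

/* (buffer size) = max((largest single record),
 *                     min((data log size), (hint, where 0 means no limit))) */
static size_t flush_buffer_size(size_t logsize, size_t hint, size_t maxentry)
{
    size_t sz = logsize;
    if (hint > 0 && sz > hint) sz = hint;  /* cap by the hint, if any */
    if (sz < maxentry) sz = maxentry;      /* must still fit the largest record */
    return sz;
}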
Example #8
/*----< ncmpii_vars_create_filetype() >--------------------------------------*/
int
ncmpii_vars_create_filetype(NC               *ncp,
                            NC_var           *varp,
                            const MPI_Offset  start[],
                            const MPI_Offset  count[],
                            const MPI_Offset  stride[],
                            int               rw_flag,
                            MPI_Offset       *offset_ptr,
                            MPI_Datatype     *filetype_ptr)
{
    int          dim, status;
    MPI_Offset   offset, nelems=1;
    MPI_Datatype filetype;

    if (stride == NULL)
        return ncmpii_vara_create_filetype(ncp, varp, start, count, rw_flag,
                                           offset_ptr, filetype_ptr);
    offset   = varp->begin;
    filetype = MPI_BYTE;

    for (dim=0; dim<varp->ndims && stride[dim]==1; dim++) ;

    if (dim == varp->ndims)
        return ncmpii_vara_create_filetype(ncp, varp, start, count, rw_flag,
                                           offset_ptr, filetype_ptr);

    /* New coordinate/edge check to fix NC_EINVALCOORDS bug */
    status = NCedgeck(ncp, varp, start, count);
    if ((status != NC_NOERR) ||
        (rw_flag == READ_REQ && IS_RECVAR(varp) && *start + *count > NC_get_numrecs(ncp)))
    {
        status = NCcoordck(ncp, varp, start);
        if (status != NC_NOERR)
            return status;
        else
            return NC_EEDGE;
    }

    status = NCstrideedgeck(ncp, varp, start, count, stride);
    if (status != NC_NOERR)
        return status;

    if ( rw_flag == READ_REQ && IS_RECVAR(varp) &&
        ( (*count > 0 && *start+1 + (*count-1) * *stride > NC_get_numrecs(ncp)) ||
          (*count == 0 && *start > NC_get_numrecs(ncp)) ) )
        return NC_EEDGE;

    for (dim=0; dim<varp->ndims; dim++) nelems *= count[dim];

    /* filetype is defined only when varp is not a scalar and the number of
       requested elements is > 0
       (varp->ndims == 0 means this is a scalar variable)
       Otherwise, keep filetype MPI_BYTE
     */
    if (varp->ndims > 0 && nelems > 0) {
        int ndims;
        MPI_Datatype tmptype;
        MPI_Offset *blocklens, *blockstride, *blockcount;

        ndims       = varp->ndims;
        blocklens   = (MPI_Offset*) NCI_Malloc(3 * ndims * sizeof(MPI_Offset));
        blockstride = blocklens   + ndims;
        blockcount  = blockstride + ndims;

        tmptype = MPI_BYTE;

        blocklens[ndims-1]  = varp->xsz;
        blockcount[ndims-1] = count[ndims-1];
        if (ndims == 1 && IS_RECVAR(varp)) {
            check_recsize_too_big(ncp);
            blockstride[ndims-1] = stride[ndims-1] * ncp->recsize;
            offset += start[ndims - 1] * ncp->recsize;
        } else {
            blockstride[ndims-1] = stride[ndims-1] * varp->xsz;
            offset += start[ndims-1] * varp->xsz;
        }

        for (dim=ndims-1; dim>=0; dim--) {
#if (MPI_VERSION < 2)
            MPI_Type_hvector(blockcount[dim], blocklens[dim], blockstride[dim],
                             tmptype, &filetype);
#else
            MPI_Type_create_hvector(blockcount[dim], blocklens[dim],
                                    blockstride[dim], tmptype, &filetype);
#endif
            MPI_Type_commit(&filetype);
            if (tmptype != MPI_BYTE)
                MPI_Type_free(&tmptype);
            tmptype = filetype;

            if (dim - 1 >= 0) {
                blocklens[dim-1]  = 1;
                blockcount[dim-1] = count[dim - 1];
                if (dim-1 == 0 && IS_RECVAR(varp)) {
                    blockstride[dim-1] = stride[dim-1] * ncp->recsize;
                    offset += start[dim-1] * ncp->recsize;
                } else {
                    blockstride[dim-1] = stride[dim-1] * varp->dsizes[dim]
                                       * varp->xsz;
                    offset += start[dim-1] * varp->dsizes[dim] * varp->xsz;
                }
            }
        }
        NCI_Free(blocklens);
    }

    *offset_ptr   = offset;
    *filetype_ptr = filetype;

    return NC_NOERR;
}
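
The loop above builds the filetype inside out: each dimension wraps the previous type in an hvector whose stride is in bytes, which is why MPI_Type_create_hvector is used rather than MPI_Type_vector. A minimal sketch of the same construction for a 2-D strided access (build_2d_strided_type is an illustrative name; strides are byte displacements between successive blocks):

#include <mpi.h>

static int build_2d_strided_type(int count0, int count1,
                                 MPI_Aint stride0_bytes,  /* between rows */
                                 MPI_Aint stride1_bytes,  /* between elements */
                                 int xsz,                 /* element size */
                                 MPI_Datatype *filetype)
{
    MPI_Datatype rowtype;

    /* inner dimension: count1 blocks of xsz bytes, stride1_bytes apart */
    MPI_Type_create_hvector(count1, xsz, stride1_bytes, MPI_BYTE, &rowtype);
    MPI_Type_commit(&rowtype);

    /* outer dimension: count0 copies of the row type, stride0_bytes apart */
    MPI_Type_create_hvector(count0, 1, stride0_bytes, rowtype, filetype);
    MPI_Type_commit(filetype);

    MPI_Type_free(&rowtype);   /* filetype keeps its own reference */
    return MPI_SUCCESS;
}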
Example #9
/*----< ncmpii_vara_create_filetype() >--------------------------------------*/
static int
ncmpii_vara_create_filetype(NC               *ncp,
                            NC_var           *varp,
                            const MPI_Offset *start,
                            const MPI_Offset *count,
                            int               rw_flag,
                            MPI_Offset       *offset_ptr,
                            MPI_Datatype     *filetype_ptr)
{
    int          dim, status;
    MPI_Offset   offset, nelems=1;
    MPI_Datatype filetype;

    offset   = varp->begin;
    filetype = MPI_BYTE;

    /* New coordinate/edge check to fix NC_EINVALCOORDS bug */
    status = NCedgeck(ncp, varp, start, count);
    if (status != NC_NOERR ||
        (rw_flag == READ_REQ && IS_RECVAR(varp) && *start + *count > NC_get_numrecs(ncp)))
    {
        status = NCcoordck(ncp, varp, start);
        if (status != NC_NOERR)
            return status;
        else
            return NC_EEDGE;
    }

    /* check if the request is contiguous in file
       if yes, there is no need to create a filetype */
    if (ncmpii_is_request_contiguous(varp, start, count)) {
        status = ncmpii_get_offset(ncp, varp, start, NULL, NULL, &offset);
        *offset_ptr   = offset;
        *filetype_ptr = filetype;
        return status;
    }

    for (dim=0; dim<varp->ndims; dim++) nelems *= count[dim];

    /* filetype is defined only when varp is not a scalar and the number of
       requested elements is > 0
       (varp->ndims == 0 means this is a scalar variable)
       Otherwise, keep filetype MPI_BYTE
     */
    if (varp->ndims > 0 && nelems > 0) {
        int i, ndims, blklens[3], tag=0;
        int *shape=NULL, *subcount=NULL, *substart=NULL; /* all in bytes */
        MPI_Offset *shape64=NULL, *subcount64=NULL, *substart64=NULL;
        MPI_Offset size, disps[3];
        MPI_Datatype rectype, types[3], type1;

        ndims    = varp->ndims;
        shape    = (int*) NCI_Malloc(3 * ndims * sizeof(int));
        subcount = shape    + ndims;
        substart = subcount + ndims;

        /* here, the request size has been checked and it must be > 0 */
        if (IS_RECVAR(varp)) {
            subcount[0] = count[0];
            substart[0] = 0;
            shape[0]    = subcount[0];

            if (ncp->recsize <= varp->len) {
                /* the only record variable */
                if (varp->ndims == 1) {
                    shape[0] *= varp->xsz;
                    subcount[0] *= varp->xsz;
                }
                else {
                    for (dim = 1; dim < ndims-1; dim++) {
                        shape[dim]    = varp->shape[dim];
                        subcount[dim] = count[dim];
                        substart[dim] = start[dim];
                    }
                    shape[dim]    = varp->xsz * varp->shape[dim];
                    subcount[dim] = varp->xsz * count[dim];
                    substart[dim] = varp->xsz * start[dim];
                }
                offset += start[0] * ncp->recsize;

                MPI_Type_create_subarray(ndims, shape, subcount, substart,
                                         MPI_ORDER_C, MPI_BYTE, &filetype);
                MPI_Type_commit(&filetype);
            }
            else {
                check_recsize_too_big(ncp);
                /* more than one record variable */

                offset += start[0] * ncp->recsize;
                if (varp->ndims == 1) {
#if (MPI_VERSION < 2)
                    MPI_Type_hvector(subcount[0], varp->xsz, ncp->recsize,
                                     MPI_BYTE, &filetype);
#else
                    MPI_Type_create_hvector(subcount[0], varp->xsz, ncp->recsize,
                                            MPI_BYTE, &filetype);
#endif
                    MPI_Type_commit(&filetype);
                }
                else {
                    for (dim = 1; dim < ndims-1; dim++) {
                        shape[dim]    = varp->shape[dim];
                        subcount[dim] = count[dim];
                        substart[dim] = start[dim];
                    }
                    shape[dim]    = varp->xsz * varp->shape[dim];
                    subcount[dim] = varp->xsz * count[dim];
                    substart[dim] = varp->xsz * start[dim];

                    MPI_Type_create_subarray(ndims-1, shape+1, subcount+1, substart+1,
                                             MPI_ORDER_C, MPI_BYTE, &rectype);
                    MPI_Type_commit(&rectype);
#if (MPI_VERSION < 2)
                    MPI_Type_hvector(subcount[0], 1, ncp->recsize, rectype,
                                     &filetype);
#else
                    MPI_Type_create_hvector(subcount[0], 1, ncp->recsize, rectype,
                                            &filetype);
#endif
                    MPI_Type_commit(&filetype);
                    MPI_Type_free(&rectype);
                }
            }
        }
        else { /* non-record variable */
            tag = 0;
            for (dim=0; dim< ndims-1; dim++) {
                if (varp->shape[dim] > 2147483647) { /* if shape > 2^31-1 */
                    tag = 1;
                    break;
                }
            }
            if ((varp->shape[dim]*varp->xsz)  > 2147483647)
                tag = 1;

            if (tag == 0) {
                for (dim = 0; dim < ndims-1; dim++ ) {
                    shape[dim]    = varp->shape[dim];
                    subcount[dim] = count[dim];
                    substart[dim] = start[dim];
                }
                shape[dim]    = varp->xsz * varp->shape[dim];
                subcount[dim] = varp->xsz * count[dim];
                substart[dim] = varp->xsz * start[dim];

                MPI_Type_create_subarray(ndims, shape, subcount, substart,
                                         MPI_ORDER_C, MPI_BYTE, &filetype);
                MPI_Type_commit(&filetype);
            }
            else {
                shape64 = (MPI_Offset*) NCI_Malloc(3 * ndims * sizeof(MPI_Offset));
                subcount64 = shape64    + ndims;
                substart64 = subcount64 + ndims;

                if (ndims == 1) {  // for 64-bit support,  added July 23, 2008
                    shape64[0]    = varp->shape[0];
                    subcount64[0] = count[0];
                    substart64[0] = start[0];

                    offset += start[0]*varp->xsz;

                    MPI_Type_contiguous(subcount64[0]*varp->xsz, MPI_BYTE, &type1);
                    MPI_Type_commit(&type1);
#if (MPI_VERSION < 2)
                    MPI_Type_hvector(subcount64[0], varp->xsz, shape64[0]*varp->xsz,
                                     MPI_BYTE, &filetype);
#else
                    MPI_Type_create_hvector(1, 1, shape64[0]*varp->xsz,
                                            type1, &filetype);
#endif
                    MPI_Type_commit(&filetype);
                    MPI_Type_free(&type1);
                }
                else {
                    for (dim = 0; dim < ndims-1; dim++ ) {
                        shape64[dim]    = varp->shape[dim];
                        subcount64[dim] = count[dim];
                        substart64[dim] = start[dim];
                    }
                    shape64[dim]    = varp->xsz * varp->shape[dim];
                    subcount64[dim] = varp->xsz * count[dim];
                    substart64[dim] = varp->xsz * start[dim];

                    MPI_Type_hvector(subcount64[dim-1],
                                     subcount64[dim],
                                     varp->xsz * varp->shape[dim],
                                     MPI_BYTE,
                                     &type1);
                    MPI_Type_commit(&type1);

                    size = shape64[dim];  /* shape[] (int) is never set in this branch */
                    for (i=dim-2; i>=0; i--) {
                        size *= shape64[i+1];
                        MPI_Type_hvector(subcount64[i],
                                         1,
                                         size,
                                         type1,
                                         &filetype);
                        MPI_Type_commit(&filetype);

                        MPI_Type_free(&type1);
                        type1 = filetype;
                    }
                    disps[1] = substart64[dim];
                    size = 1;
                    for (i=dim-1; i>=0; i--) {
                        size *= shape64[i+1];
                        disps[1] += size*substart64[i];
                    }
                    disps[2] = 1;
                    for (i=0; i<ndims; i++) disps[2] *= shape64[i];

                    disps[0] = 0;
                    blklens[0] = blklens[1] = blklens[2] = 1;
                    types[0] = MPI_LB;
                    types[1] = type1;
                    types[2] = MPI_UB;

                    MPI_Type_struct(3,
                                    blklens,
                                    (MPI_Aint*) disps,
                                    types,
                                    &filetype);

                    MPI_Type_free(&type1);
                }
                NCI_Free(shape64);
            }
        }
        NCI_Free(shape);
    }

    *offset_ptr   = offset;
    *filetype_ptr = filetype;

    return NC_NOERR;
}
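
For the fixed-size, small-dimension case the function reduces to one MPI_Type_create_subarray call over a byte view of the variable, with the last dimension pre-scaled by the element size. A sketch of that core step (build_subarray_type is an illustrative name):

#include <mpi.h>

/* e.g. a 4 x 6 variable of 8-byte elements, reading count={2,3} at
 * start={1,2}, would pass shape={4,48}, subcount={2,24}, substart={1,16}:
 * only the last dimension is scaled to bytes, as in the code above. */
static int build_subarray_type(int ndims, int shape[], int subcount[],
                               int substart[], MPI_Datatype *filetype)
{
    MPI_Type_create_subarray(ndims, shape, subcount, substart,
                             MPI_ORDER_C, MPI_BYTE, filetype);
    return MPI_Type_commit(filetype);
}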
Example #10
/*----< ncmpii_get_offset() >------------------------------------------------*/
int
ncmpii_get_offset(NC               *ncp,
                  NC_var           *varp,
                  const MPI_Offset  starts[],   /* [varp->ndims] */
                  const MPI_Offset  counts[],   /* [varp->ndims] */
                  const MPI_Offset  strides[],  /* [varp->ndims] */
                  MPI_Offset       *offset_ptr) /* return file offset */
{
    /* returns the file offset for accessing this variable at starts[]; when
       counts[] (and optionally strides[]) are given, the offset computed is
       that of the end of the request */
    MPI_Offset offset, *end_off=NULL;
    int status, i, ndims;

    offset = varp->begin; /* beginning file offset of this variable */
    ndims  = varp->ndims; /* number of dimensions of this variable */

    if (counts != NULL)
        end_off = (MPI_Offset*) NCI_Malloc(ndims * sizeof(MPI_Offset));

    if (counts != NULL && strides != NULL) {
        for (i=0; i<ndims; i++)
            end_off[i] = starts[i] + counts[i] * strides[i] - 1;
    }
    else if (counts != NULL) { /* strides == NULL */
        for (i=0; i<ndims; i++)
            end_off[i] = starts[i] + counts[i] - 1;
    }
    else { /* when counts == NULL strides is of no use */
        end_off = (MPI_Offset*) starts;
    }

    status = NCcoordck(ncp, varp, end_off);  /* validate end_off[] */
    if (status != NC_NOERR) {
#ifdef CDEBUG
        printf("ncmpii_get_offset(): NCcoordck() fails\n");
#endif
        return status;
    }

    if (ndims > 0) {
        if (IS_RECVAR(varp))
            /* no need to check recsize here: if MPI_Offset is only 32 bits we
               will have had problems long before here */
            offset += end_off[0] * ncp->recsize;
        else
            offset += end_off[ndims-1] * varp->xsz;

        if (ndims > 1) {
            if (IS_RECVAR(varp))
                offset += end_off[ndims - 1] * varp->xsz;
            else
                offset += end_off[0] * varp->dsizes[1] * varp->xsz;

            for (i=1; i<ndims-1; i++)
                offset += end_off[i] * varp->dsizes[i+1] * varp->xsz;
        }
    }
    if (counts != NULL && end_off != NULL)
        NCI_Free(end_off);

    *offset_ptr = offset;
    return NC_NOERR;
}
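
For a fixed-size variable this is the usual row-major linearization: dsizes[i] is the product of the dimension lengths from i to ndims-1, so one step in dimension i spans dsizes[i+1] elements. A sketch with a worked example (element_offset is an illustrative name):

/* file offset of the element at coord[] in a fixed-size variable */
static long long element_offset(long long begin, int ndims, int xsz,
                                const long long coord[],
                                const long long dsizes[])
{
    long long off = coord[ndims-1];    /* innermost dimension */
    int i;
    for (i = 0; i < ndims-1; i++)
        off += coord[i] * dsizes[i+1]; /* elements spanned per step in dim i */
    return begin + off * xsz;
}
/* e.g. a 4 x 5 x 6 variable has dsizes = {120, 30, 6}; the element at
 * (1,2,3) lies at begin + (1*30 + 2*6 + 3)*xsz = begin + 45*xsz */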
Example #11
/*----< ncmpii_mgetput_varm() >-----------------------------------------------*/
static int
ncmpii_mgetput_varm(int                ncid, 
                    int                num, 
                    int                varids[],    /* [num] */
                    MPI_Offset* const  starts[],    /* [num] */
                    MPI_Offset* const  counts[],    /* [num] */
                    MPI_Offset* const  strides[],   /* [num] */
                    MPI_Offset* const  imaps[],     /* [num] */
                    void              *bufs[],      /* [num] */
                    MPI_Offset         bufcounts[], /* [num] */
                    MPI_Datatype       datatypes[], /* [num] */
                    int                rw_flag,     /* WRITE_REQ or READ_REQ */
                    int                io_method)   /* COLL_IO or INDEP_IO */
{
    int i, status=NC_NOERR, *req_ids=NULL, *statuses=NULL;
    NC *ncp=NULL;

    CHECK_NCID
    if (rw_flag == WRITE_REQ)
        CHECK_WRITE_PERMISSION

    if (NC_indef(ncp)) return NC_EINDEFINE;

    /* check to see that the desired MPI file handle is opened */
    if (io_method == COLL_IO)
        CHECK_COLLECTIVE_FH
    else
        CHECK_INDEP_FH
  
    if (num > 0) {
        req_ids  = (int*) NCI_Malloc(2 * num * sizeof(int));
        statuses = req_ids + num;
    }

    /* for each request call ncmpii_igetput_varm() */
    for (i=0; i<num; i++) {
        NC_var *varp;
        MPI_Offset *start, *count;

        CHECK_VARID(varids[i], varp)

        if (starts == NULL) {         /* var */
            GET_FULL_DIMENSIONS
            status = ncmpii_igetput_varm(ncp, varp, start, count, NULL,
                                         NULL, bufs[i], bufcounts[i],
                                         datatypes[i], &req_ids[i], rw_flag, 0);
            if (varp->ndims > 0) NCI_Free(start);
        } else if (counts == NULL) {  /* var1 */
            GET_FULL_DIMENSIONS
            GET_ONE_COUNT
            status = ncmpii_igetput_varm(ncp, varp, starts[i], count, NULL,
                                         NULL, bufs[i], bufcounts[i],
                                         datatypes[i], &req_ids[i], rw_flag, 0);
            if (varp->ndims > 0) NCI_Free(count);
        } else if (strides == NULL) { /* vara */
            status = ncmpii_igetput_varm(ncp, varp, starts[i], counts[i], NULL,
                                         NULL, bufs[i], bufcounts[i],
                                         datatypes[i], &req_ids[i], rw_flag, 0);
        } else if (imaps == NULL) {   /* vars */
            status = ncmpii_igetput_varm(ncp, varp, starts[i], counts[i],
                                         strides[i], NULL, bufs[i], bufcounts[i],
                                         datatypes[i], &req_ids[i], rw_flag, 0);
        } else {                      /* varm */
            status = ncmpii_igetput_varm(ncp, varp, starts[i], counts[i],
                                         strides[i], imaps[i], bufs[i],
                                         bufcounts[i], datatypes[i],
                                         &req_ids[i], rw_flag, 0);
        }
    }

    if (status != NC_NOERR) {
        if (num > 0) NCI_Free(req_ids);
        return status;
    }

    if (io_method == COLL_IO)
        status = ncmpi_wait_all(ncid, num, req_ids, statuses);
    else
        status = ncmpi_wait(ncid, num, req_ids, statuses);

    /* statuses points into the req_ids allocation, so check the individual
       request statuses before freeing it */
    if (status == NC_NOERR) {
        for (i=0; i<num; i++) {
            if (statuses[i] != NC_NOERR) {
                status = statuses[i];
                break;
            }
        }
    }

    if (num > 0)
        NCI_Free(req_ids);

    return status;
}
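
From user code, the same post-then-wait structure is visible through PnetCDF's public nonblocking API. A minimal sketch assuming two already-defined double variables and caller-prepared start/count/buffers (write_two_vars is an illustrative name; error handling is abbreviated):

#include <pnetcdf.h>

static int write_two_vars(int ncid, int varid0, int varid1,
                          const MPI_Offset start[], const MPI_Offset count[],
                          const double *buf0, const double *buf1)
{
    int err, reqs[2], stats[2];

    /* post one nonblocking put per variable */
    err = ncmpi_iput_vara_double(ncid, varid0, start, count, buf0, &reqs[0]);
    if (err != NC_NOERR) return err;
    err = ncmpi_iput_vara_double(ncid, varid1, start, count, buf1, &reqs[1]);
    if (err != NC_NOERR) return err;

    /* complete both with a single collective wait, as in the COLL_IO branch */
    err = ncmpi_wait_all(ncid, 2, reqs, stats);
    if (err != NC_NOERR) return err;

    /* per-request statuses are checked separately, as above */
    return (stats[0] != NC_NOERR) ? stats[0] : stats[1];
}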