Example #1
PetscErrorCode MPIPetsc_Type_compare(MPI_Datatype a,MPI_Datatype b,PetscBool *match)
{
  PetscErrorCode ierr;
  MPI_Datatype   atype,btype;
  PetscMPIInt    aintcount,aaddrcount,atypecount,acombiner;
  PetscMPIInt    bintcount,baddrcount,btypecount,bcombiner;
  PetscBool      freeatype, freebtype;

  PetscFunctionBegin;
  ierr   = MPIPetsc_Type_unwrap(a,&atype,&freeatype);CHKERRQ(ierr);
  ierr   = MPIPetsc_Type_unwrap(b,&btype,&freebtype);CHKERRQ(ierr);
  *match = PETSC_FALSE;
  if (atype == btype) {
    *match = PETSC_TRUE;
    goto free_types;
  }
  ierr = MPI_Type_get_envelope(atype,&aintcount,&aaddrcount,&atypecount,&acombiner);CHKERRQ(ierr);
  ierr = MPI_Type_get_envelope(btype,&bintcount,&baddrcount,&btypecount,&bcombiner);CHKERRQ(ierr);
  if (acombiner == bcombiner && aintcount == bintcount && aaddrcount == baddrcount && atypecount == btypecount && (aintcount > 0 || aaddrcount > 0 || atypecount > 0)) {
    PetscMPIInt  *aints,*bints;
    MPI_Aint     *aaddrs,*baddrs;
    MPI_Datatype *atypes,*btypes;
    PetscInt     i;
    PetscBool    same;
    ierr = PetscMalloc6(aintcount,&aints,bintcount,&bints,aaddrcount,&aaddrs,baddrcount,&baddrs,atypecount,&atypes,btypecount,&btypes);CHKERRQ(ierr);
    ierr = MPI_Type_get_contents(atype,aintcount,aaddrcount,atypecount,aints,aaddrs,atypes);CHKERRQ(ierr);
    ierr = MPI_Type_get_contents(btype,bintcount,baddrcount,btypecount,bints,baddrs,btypes);CHKERRQ(ierr);
    ierr = PetscMemcmp(aints,bints,aintcount*sizeof(aints[0]),&same);CHKERRQ(ierr);
    if (same) {
      ierr = PetscMemcmp(aaddrs,baddrs,aaddrcount*sizeof(aaddrs[0]),&same);CHKERRQ(ierr);
      if (same) {
        /* Check for identity first */
        ierr = PetscMemcmp(atypes,btypes,atypecount*sizeof(atypes[0]),&same);CHKERRQ(ierr);
        if (!same) {
          /* If the atype or btype were not predefined data types, then the types returned from MPI_Type_get_contents
           * will merely be equivalent to the types used in the construction, so we must recursively compare. */
          for (i=0; i<atypecount; i++) {
            ierr = MPIPetsc_Type_compare(atypes[i],btypes[i],&same);CHKERRQ(ierr);
            if (!same) break;
          }
        }
      }
    }
    for (i=0; i<atypecount; i++) {
      ierr = MPIPetsc_Type_free(&(atypes[i]));CHKERRQ(ierr);
      ierr = MPIPetsc_Type_free(&(btypes[i]));CHKERRQ(ierr);
    }
    ierr = PetscFree6(aints,bints,aaddrs,baddrs,atypes,btypes);CHKERRQ(ierr);
    if (same) *match = PETSC_TRUE;
  }
free_types:
  if (freeatype) {
    ierr = MPIPetsc_Type_free(&atype);CHKERRQ(ierr);
  }
  if (freebtype) {
    ierr = MPIPetsc_Type_free(&btype);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
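
A minimal usage sketch (not part of the PETSc sources) for the comparison above, assuming the MPIPetsc_Type_compare prototype is in scope: it duplicates a named type and expects a match, since both handles unwrap to MPI_INT.

#include <petscsys.h>

static PetscErrorCode check_dup_matches(void)
{
  PetscErrorCode ierr;
  MPI_Datatype   dup;
  PetscBool      match;

  PetscFunctionBegin;
  ierr = MPI_Type_dup(MPI_INT,&dup);CHKERRQ(ierr);
  ierr = MPIPetsc_Type_compare(MPI_INT,dup,&match);CHKERRQ(ierr);   /* both unwrap to MPI_INT */
  if (!match) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_INT should match a dup of itself");
  ierr = MPI_Type_free(&dup);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}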
Example #2
/*@
    MPI_File_get_view - Returns the file view

Input Parameters:
. fh - file handle (handle)

Output Parameters:
. disp - displacement (nonnegative integer)
. etype - elementary datatype (handle)
. filetype - filetype (handle)
. datarep - data representation (string)

.N fortran
@*/
int MPI_File_get_view(MPI_File fh, MPI_Offset * disp, MPI_Datatype * etype,
                      MPI_Datatype * filetype, char *datarep)
{
    int error_code;
    ADIO_File adio_fh;
    static char myname[] = "MPI_FILE_GET_VIEW";
    int i, j, k, combiner;
    MPI_Datatype copy_etype, copy_filetype;

    ROMIO_THREAD_CS_ENTER();

    adio_fh = MPIO_File_resolve(fh);

    /* --BEGIN ERROR HANDLING-- */
    MPIO_CHECK_FILE_HANDLE(adio_fh, myname, error_code);

    if (datarep == NULL) {
        error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
                                          myname, __LINE__, MPI_ERR_ARG, "**iodatarepnomem", 0);
        error_code = MPIO_Err_return_file(adio_fh, error_code);
        goto fn_exit;
    }
    /* --END ERROR HANDLING-- */

    *disp = adio_fh->disp;
    ADIOI_Strncpy(datarep,
                  (adio_fh->is_external32 ? "external32" : "native"), MPI_MAX_DATAREP_STRING);

    MPI_Type_get_envelope(adio_fh->etype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED)
        *etype = adio_fh->etype;
    else {
        /* FIXME: It is wrong to use MPI_Type_contiguous; the user could choose to
         * re-implement MPI_Type_contiguous in an unexpected way.  Either use
         * MPID_Barrier as in MPICH or PMPI_Type_contiguous */
        MPI_Type_contiguous(1, adio_fh->etype, &copy_etype);

        /* FIXME: Ditto for MPI_Type_commit - use NMPI or PMPI */
        MPI_Type_commit(&copy_etype);
        *etype = copy_etype;
    }
    /* FIXME: Ditto for MPI_Type_xxx - use NMPI or PMPI */
    MPI_Type_get_envelope(adio_fh->filetype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED)
        *filetype = adio_fh->filetype;
    else {
        MPI_Type_contiguous(1, adio_fh->filetype, &copy_filetype);

        MPI_Type_commit(&copy_filetype);
        *filetype = copy_filetype;
    }

  fn_exit:
    ROMIO_THREAD_CS_EXIT();

    return MPI_SUCCESS;
}
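
A hedged standalone sketch (the file name is made up) of how a caller typically pairs this with MPI_File_set_view. Note that when the view was set with a derived filetype, the handle returned here is a committed copy (see the MPI_Type_contiguous branch above) and the caller is responsible for freeing it.

#include <mpi.h>

static void show_view(MPI_Comm comm)
{
    MPI_File     fh;
    MPI_Offset   disp;
    MPI_Datatype etype, filetype;
    char         datarep[MPI_MAX_DATAREP_STRING];

    MPI_File_open(comm, "view_demo.dat", MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
    MPI_File_get_view(fh, &disp, &etype, &filetype, datarep);
    /* disp == 0, datarep == "native"; etype and filetype are the named type
     * MPI_INT here, so no MPI_Type_free is needed. */
    MPI_File_close(&fh);
}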
Example #3
PetscErrorCode MPIPetsc_Type_compare(MPI_Datatype a,MPI_Datatype b,PetscBool *match)
{
  PetscErrorCode ierr;
  MPI_Datatype   atype,btype;
  PetscMPIInt    aintcount,aaddrcount,atypecount,acombiner;
  PetscMPIInt    bintcount,baddrcount,btypecount,bcombiner;
  PetscBool      freeatype, freebtype;

  PetscFunctionBegin;
  ierr   = MPIPetsc_Type_unwrap(a,&atype,&freeatype);CHKERRQ(ierr);
  ierr   = MPIPetsc_Type_unwrap(b,&btype,&freebtype);CHKERRQ(ierr);
  *match = PETSC_FALSE;
  if (atype == btype) {
    *match = PETSC_TRUE;
    goto free_types;
  }
  ierr = MPI_Type_get_envelope(atype,&aintcount,&aaddrcount,&atypecount,&acombiner);CHKERRQ(ierr);
  ierr = MPI_Type_get_envelope(btype,&bintcount,&baddrcount,&btypecount,&bcombiner);CHKERRQ(ierr);
  if (acombiner == bcombiner && aintcount == bintcount && aaddrcount == baddrcount && atypecount == btypecount && (aintcount > 0 || aaddrcount > 0 || atypecount > 0)) {
    PetscMPIInt  *aints,*bints;
    MPI_Aint     *aaddrs,*baddrs;
    MPI_Datatype *atypes,*btypes;
    PetscInt     i;
    PetscBool    same;
    ierr = PetscMalloc6(aintcount,&aints,bintcount,&bints,aaddrcount,&aaddrs,baddrcount,&baddrs,atypecount,&atypes,btypecount,&btypes);CHKERRQ(ierr);
    ierr = MPI_Type_get_contents(atype,aintcount,aaddrcount,atypecount,aints,aaddrs,atypes);CHKERRQ(ierr);
    ierr = MPI_Type_get_contents(btype,bintcount,baddrcount,btypecount,bints,baddrs,btypes);CHKERRQ(ierr);
    ierr = PetscMemcmp(aints,bints,aintcount*sizeof(aints[0]),&same);CHKERRQ(ierr);
    if (same) {
      ierr = PetscMemcmp(aaddrs,baddrs,aaddrcount*sizeof(aaddrs[0]),&same);CHKERRQ(ierr);
      if (same) {
        /* This comparison should be recursive */
        ierr = PetscMemcmp(atypes,btypes,atypecount*sizeof(atypes[0]),&same);CHKERRQ(ierr);
      }
    }
    for (i=0; i<atypecount; i++) {
      ierr = MPIPetsc_Type_free(&(atypes[i]));CHKERRQ(ierr);
      ierr = MPIPetsc_Type_free(&(btypes[i]));CHKERRQ(ierr);
    }
    ierr = PetscFree6(aints,bints,aaddrs,baddrs,atypes,btypes);CHKERRQ(ierr);
    if (same) *match = PETSC_TRUE;
  }
free_types:
  if (freeatype) {
    ierr = MPIPetsc_Type_free(&atype);CHKERRQ(ierr);
  }
  if (freebtype) {
    ierr = MPIPetsc_Type_free(&btype);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
Example #4
/* Check whether a was created via MPI_Type_contiguous from b
 *
 */
PetscErrorCode MPIPetsc_Type_compare_contig(MPI_Datatype a,MPI_Datatype b,PetscInt *n)
{
  PetscErrorCode ierr;
  MPI_Datatype   atype,btype;
  PetscMPIInt    aintcount,aaddrcount,atypecount,acombiner;

  PetscFunctionBegin;
  ierr = MPIPetsc_Type_unwrap(a,&atype);CHKERRQ(ierr);
  ierr = MPIPetsc_Type_unwrap(b,&btype);CHKERRQ(ierr);
  *n = 0;
  if (atype == btype) {
    *n = 1;
    PetscFunctionReturn(0);
  }
  ierr = MPI_Type_get_envelope(atype,&aintcount,&aaddrcount,&atypecount,&acombiner);CHKERRQ(ierr);
  if (acombiner == MPI_COMBINER_CONTIGUOUS && aintcount >= 1) {
    PetscMPIInt  *aints;
    MPI_Aint     *aaddrs;
    MPI_Datatype *atypes;
    ierr = PetscMalloc3(aintcount,&aints,aaddrcount,&aaddrs,atypecount,&atypes);CHKERRQ(ierr);
    ierr = MPI_Type_get_contents(atype,aintcount,aaddrcount,atypecount,aints,aaddrs,atypes);CHKERRQ(ierr);
    if (atypes[0] == btype) *n = aints[0];
    ierr = PetscFree3(aints,aaddrs,atypes);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  PetscFunctionReturn(0);
}
Example #5
PetscErrorCode MPIPetsc_Type_unwrap(MPI_Datatype a,MPI_Datatype *atype,PetscBool *flg)
{
  PetscMPIInt    nints,naddrs,ntypes,combiner;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  *flg = PETSC_FALSE;
  ierr = MPI_Type_get_envelope(a,&nints,&naddrs,&ntypes,&combiner);CHKERRQ(ierr);
  if (combiner == MPI_COMBINER_DUP) {
    PetscMPIInt  ints[1];
    MPI_Aint     addrs[1];
    MPI_Datatype types[1];
    if (nints != 0 || naddrs != 0 || ntypes != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Unexpected returns from MPI_Type_get_envelope()");
    ierr   = MPI_Type_get_contents(a,0,0,1,ints,addrs,types);CHKERRQ(ierr);
    /* Recursively unwrap dupped types. */
    ierr   = MPIPetsc_Type_unwrap(types[0],atype,flg);CHKERRQ(ierr);
    if (*flg) {
      /* If the recursive call returns a new type, then that means that atype[0] != types[0] and we're on the hook to
       * free types[0].  Note that this case occurs if combiner(types[0]) is MPI_COMBINER_DUP, so we're safe to
       * directly call MPI_Type_free rather than MPIPetsc_Type_free here. */
      ierr = MPI_Type_free(&(types[0]));CHKERRQ(ierr);
    }
    /* In any case, it's up to the caller to free the returned type in this case. */
    *flg = PETSC_TRUE;
  } else *atype = a;
  PetscFunctionReturn(0);
}
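
A minimal usage sketch (hypothetical, assumed to live in the same source file so that MPIPetsc_Type_free from a later example is visible): unwrap a dupped handle and release the caller-owned result. MPIPetsc_Type_free is used instead of MPI_Type_free because the unwrapped type may be a predefined (named) type, which must not be freed directly.

static PetscErrorCode unwrap_demo(void)
{
  PetscErrorCode ierr;
  MPI_Datatype   dup,inner;
  PetscBool      flg;

  PetscFunctionBegin;
  ierr = MPI_Type_dup(MPI_DOUBLE,&dup);CHKERRQ(ierr);
  ierr = MPIPetsc_Type_unwrap(dup,&inner,&flg);CHKERRQ(ierr);
  /* inner is MPI_DOUBLE and flg is PETSC_TRUE, so the caller owns inner */
  if (flg) {ierr = MPIPetsc_Type_free(&inner);CHKERRQ(ierr);}
  ierr = MPI_Type_free(&dup);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}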
Example #6
    static void unpack_envelope(MPI_Datatype type, flat_repr& f) {
        int num_ints, num_addr, num_dt, comb;
        MPI_Type_get_envelope(type, &num_ints, &num_addr, &num_dt, &comb);

        if (comb == MPI_COMBINER_NAMED) {
            //std::cout << "Type: " << builtin_typename_map::get_typeid_name(type) << std::endl;
            f.m.emplace(f.cur_offset, type);
            return;
        }

        // allocate the output for get_contents
        std::vector<int> ints; ints.resize(num_ints);
        std::vector<MPI_Aint> addrs; addrs.resize(num_addr);
        std::vector<MPI_Datatype> types; types.resize(num_dt);

        MPI_Type_get_contents(type, num_ints, num_addr, num_dt,
                              &ints[0], &addrs[0], &types[0]);

        switch(comb) {
          case MPI_COMBINER_DUP:
            MXX_ASSERT(num_ints == 0 && num_addr == 0 && num_dt == 1);
            unpack_envelope(types[0], f);
            break;
          case MPI_COMBINER_CONTIGUOUS:
            std::cout << "Contiguous: " << ints[0] << " x ";
            unpack_envelope(types[0], f);
            break;
          case MPI_COMBINER_VECTOR:
          case MPI_COMBINER_HVECTOR:
          case MPI_COMBINER_INDEXED:
          case MPI_COMBINER_HINDEXED:
          case MPI_COMBINER_INDEXED_BLOCK:
          case MPI_COMBINER_HINDEXED_BLOCK:
            std::cout << "NOT YET SUPPORTED vector/indexed/indexed_block" << std::endl;
            break;
          case MPI_COMBINER_STRUCT:
            {
                int count = ints[0];
                std::vector<int> blen(&ints[1], &ints[1] + count); // block lengths: count entries starting at ints[1]
                std::vector<MPI_Aint> displ = addrs;
                std::cout << "Struct: " << std::endl;
                MPI_Aint offset = f.cur_offset;
                for (int i = 0; i < count; ++i) {
                    f.cur_offset = offset + displ[i];
                    unpack_envelope(types[i], f);
                }
                f.cur_offset = offset;
            }
            break;
          case MPI_COMBINER_RESIZED:
            // TODO
            std::cout << "resized to [" << addrs[0] << "," << addrs[1] << "): " << std::endl;
            unpack_envelope(types[0], f);
            break;
          case MPI_COMBINER_SUBARRAY:
          case MPI_COMBINER_DARRAY:
            std::cout << "NOT YET SUPPORTED subarray/darray" << std::endl;
            break;
        }
    }
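
To make the struct branch above concrete, here is a hedged, self-contained C sketch of what the envelope and contents of a two-field struct type look like; the counts follow the MPI standard for MPI_COMBINER_STRUCT (num_integers = count + 1, num_addresses = count, num_datatypes = count).

#include <mpi.h>
#include <stddef.h>

struct pair { int a; double b; };

static void struct_envelope_demo(void)
{
    int          blens[2]  = {1, 1};
    MPI_Aint     displs[2] = { offsetof(struct pair, a), offsetof(struct pair, b) };
    MPI_Datatype fields[2] = {MPI_INT, MPI_DOUBLE}, stype;
    int          ni, na, nd, comb;

    MPI_Type_create_struct(2, blens, displs, fields, &stype);
    MPI_Type_commit(&stype);

    MPI_Type_get_envelope(stype, &ni, &na, &nd, &comb);
    /* comb == MPI_COMBINER_STRUCT, ni == 3, na == 2, nd == 2;
     * MPI_Type_get_contents would then return ints = {2, 1, 1} (count plus
     * blocklengths), addrs = displs, and the two field datatypes. */
    MPI_Type_free(&stype);
}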
Example #7
void ADIOI_Datatype_iscontig(MPI_Datatype datatype, int *flag)
{
    int nints, nadds, ntypes, combiner;
    int *ints, ni, na, nt, cb;
    MPI_Aint *adds;
    MPI_Datatype *types;

    MPI_Type_get_envelope(datatype, &nints, &nadds, &ntypes, &combiner);

    switch (combiner) {
    case MPI_COMBINER_NAMED:
	*flag = 1;
	break;
    case MPI_COMBINER_CONTIGUOUS:
	ints = (int *) ADIOI_Malloc((nints+1)*sizeof(int));
	adds = (MPI_Aint *) ADIOI_Malloc((nadds+1)*sizeof(MPI_Aint));
	types = (MPI_Datatype *) ADIOI_Malloc((ntypes+1)*sizeof(MPI_Datatype));
	MPI_Type_get_contents(datatype, nints, nadds, ntypes, ints,
			      adds, types); 
	ADIOI_Datatype_iscontig(types[0], flag);

#ifndef MPISGI
/* There is a bug in SGI's impl. of MPI_Type_get_contents. It doesn't
   return new datatypes. Therefore no need to free. */
	MPI_Type_get_envelope(types[0], &ni, &na, &nt, &cb);
	if (cb != MPI_COMBINER_NAMED) MPI_Type_free(types);
#endif

	ADIOI_Free(ints);
	ADIOI_Free(adds);
	ADIOI_Free(types);
	break;
    default:
	*flag = 0;
	break;
    }

    /* This function needs more work. It should check for contiguity 
       in other cases as well.*/
}
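
The same test can be written against the public MPI API only. Below is a hedged sketch (not ROMIO code) that reports a type as contiguous when it is named or a chain of MPI_Type_contiguous wrappers around a named type, and frees any derived inner handle returned by MPI_Type_get_contents.

#include <mpi.h>
#include <stdlib.h>

static void datatype_is_named_or_contig(MPI_Datatype datatype, int *flag)
{
    int ni, na, nd, combiner;

    MPI_Type_get_envelope(datatype, &ni, &na, &nd, &combiner);
    if (combiner == MPI_COMBINER_NAMED) {
        *flag = 1;
    } else if (combiner == MPI_COMBINER_CONTIGUOUS) {
        int          *ints  = (int *) malloc((ni + 1) * sizeof(int));
        MPI_Aint     *adds  = (MPI_Aint *) malloc((na + 1) * sizeof(MPI_Aint));
        MPI_Datatype *types = (MPI_Datatype *) malloc((nd + 1) * sizeof(MPI_Datatype));
        int           ci, ca, cd, cc;

        MPI_Type_get_contents(datatype, ni, na, nd, ints, adds, types);
        datatype_is_named_or_contig(types[0], flag);

        /* free the inner handle only if get_contents returned a derived type */
        MPI_Type_get_envelope(types[0], &ci, &ca, &cd, &cc);
        if (cc != MPI_COMBINER_NAMED) MPI_Type_free(&types[0]);

        free(ints); free(adds); free(types);
    } else {
        *flag = 0;   /* like the ROMIO helper, treat every other combiner as noncontiguous */
    }
}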
Example #8
void initialize_data_GL_Gather_as_Reduce(const basic_collective_params_t info, const long count, collective_params_t* params) {
    int num_ints, num_adds, num_dtypes, combiner;
    int i;

    initialize_common_data(info, params);

    params->count = count;

    params->scount = count;
    params->rcount = count * params->nprocs;

    assert (params->scount < INT_MAX);
    assert (params->rcount < INT_MAX);

    params->sbuf = (char*)reprompi_calloc(params->scount, params->datatype_extent);
    params->rbuf = (char*)reprompi_calloc(params->rcount, params->datatype_extent);

    params->tmp_buf = (char*)reprompi_calloc(params->rcount, params->datatype_extent);

    // set identity operand for different operations
    if (params->op == MPI_BAND || params->op == MPI_PROD) {
        MPI_Type_get_envelope(params->datatype,
                &num_ints, &num_adds, &num_dtypes, &combiner);

        if (combiner == MPI_COMBINER_NAMED) {
            if (params->datatype == MPI_INT) {
                for (i=0; i<params->rcount; i++) {
                    ((int*)params->tmp_buf)[i] = 1;
                }
            }
            else {
                if (params->datatype == MPI_DOUBLE) {
                    for (i=0; i<params->rcount; i++) {
                        ((double*)params->tmp_buf)[i] = 1;
                    }
                }
                else {
                    memset( params->tmp_buf, 0xFF, params->rcount * params->datatype_extent);
                }
            }
        }
        else {
            memset( params->tmp_buf, 0xFF, params->rcount * params->datatype_extent);
        }
    }
    else {
        memset(params->tmp_buf, 0, params->rcount * params->datatype_extent);
    }

}
Example #9
static PetscErrorCode MPIPetsc_Type_free(MPI_Datatype *a)
{
  PetscMPIInt    nints,naddrs,ntypes,combiner;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Type_get_envelope(*a,&nints,&naddrs,&ntypes,&combiner);CHKERRQ(ierr);

  if (combiner != MPI_COMBINER_NAMED) {
    ierr = MPI_Type_free(a);CHKERRQ(ierr);
  }

  *a = MPI_DATATYPE_NULL;
  PetscFunctionReturn(0);
}
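
A minimal sketch (hypothetical; it must live in the same translation unit as the static helper above): the wrapper can be applied uniformly because it skips the actual MPI_Type_free for named types and always resets the handle to MPI_DATATYPE_NULL.

static PetscErrorCode free_both_kinds(void)
{
  PetscErrorCode ierr;
  MPI_Datatype   t = MPI_DOUBLE;

  PetscFunctionBegin;
  ierr = MPIPetsc_Type_free(&t);CHKERRQ(ierr);              /* named: handle reset only */
  ierr = MPI_Type_contiguous(3,MPI_INT,&t);CHKERRQ(ierr);
  ierr = MPIPetsc_Type_free(&t);CHKERRQ(ierr);              /* derived: actually freed */
  PetscFunctionReturn(0);
}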
Example #10
PetscErrorCode MPIPetsc_Type_unwrap(MPI_Datatype a,MPI_Datatype *atype)
{
  PetscMPIInt    nints,naddrs,ntypes,combiner;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MPI_Type_get_envelope(a,&nints,&naddrs,&ntypes,&combiner);CHKERRQ(ierr);
  if (combiner == MPI_COMBINER_DUP) {
    PetscMPIInt  ints[1];
    MPI_Aint     addrs[1];
    MPI_Datatype types[1];
    if (nints != 0 || naddrs != 0 || ntypes != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Unexpected returns from MPI_Type_get_envelope()");
    ierr   = MPI_Type_get_contents(a,0,0,1,ints,addrs,types);CHKERRQ(ierr);
    *atype = types[0];
  } else *atype = a;
  PetscFunctionReturn(0);
}
Example #11
/* Check whether a was created via MPI_Type_contiguous from b
 *
 */
PetscErrorCode MPIPetsc_Type_compare_contig(MPI_Datatype a,MPI_Datatype b,PetscInt *n)
{
  PetscErrorCode ierr;
  MPI_Datatype   atype,btype;
  PetscMPIInt    aintcount,aaddrcount,atypecount,acombiner;
  PetscBool      freeatype,freebtype;
  PetscFunctionBegin;
  ierr = MPIPetsc_Type_unwrap(a,&atype,&freeatype);CHKERRQ(ierr);
  ierr = MPIPetsc_Type_unwrap(b,&btype,&freebtype);CHKERRQ(ierr);
  *n = 0;
  if (atype == btype) {
    *n = 1;
    goto free_types;
  }
  ierr = MPI_Type_get_envelope(atype,&aintcount,&aaddrcount,&atypecount,&acombiner);CHKERRQ(ierr);
  if (acombiner == MPI_COMBINER_CONTIGUOUS && aintcount >= 1) {
    PetscMPIInt  *aints;
    MPI_Aint     *aaddrs;
    MPI_Datatype *atypes;
    PetscInt      i;
    PetscBool     same;
    ierr = PetscMalloc3(aintcount,&aints,aaddrcount,&aaddrs,atypecount,&atypes);CHKERRQ(ierr);
    ierr = MPI_Type_get_contents(atype,aintcount,aaddrcount,atypecount,aints,aaddrs,atypes);CHKERRQ(ierr);
    /* Check for identity first. */
    if (atypes[0] == btype) {
      *n = aints[0];
    } else {
      /* atypes[0] merely has to be equivalent to the type used to create atype. */
      ierr = MPIPetsc_Type_compare(atypes[0],btype,&same);CHKERRQ(ierr);
      if (same) *n = aints[0];
    }
    for (i=0; i<atypecount; i++) {
      ierr = MPIPetsc_Type_free(&(atypes[i]));CHKERRQ(ierr);
    }
    ierr = PetscFree3(aints,aaddrs,atypes);CHKERRQ(ierr);
  }
free_types:
  if (freeatype) {
    ierr = MPIPetsc_Type_free(&atype);CHKERRQ(ierr);
  }
  if (freebtype) {
    ierr = MPIPetsc_Type_free(&btype);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
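
A hedged usage sketch (not part of the PETSc sources, prototype assumed in scope): a type built by MPI_Type_contiguous(4, MPI_INT) should report a replication count of 4 against MPI_INT.

static PetscErrorCode contig_count_demo(void)
{
  PetscErrorCode ierr;
  MPI_Datatype   four;
  PetscInt       n;

  PetscFunctionBegin;
  ierr = MPI_Type_contiguous(4,MPI_INT,&four);CHKERRQ(ierr);
  ierr = MPIPetsc_Type_compare_contig(four,MPI_INT,&n);CHKERRQ(ierr);
  if (n != 4) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Expected a replication count of 4");
  ierr = MPI_Type_free(&four);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}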
Example #12
/* builtin_float_test()
 *
 * Tests functionality of get_envelope() and get_contents() on a MPI_FLOAT.
 *
 * Returns the number of errors encountered.
 */
int builtin_float_test(void)
{
    int nints, nadds, ntypes, combiner;

    int err, errs = 0;

    err = MPI_Type_get_envelope(MPI_FLOAT,
				&nints,
				&nadds,
				&ntypes,
				&combiner);
    
    if (combiner != MPI_COMBINER_NAMED) errs++;
    if (verbose && combiner != MPI_COMBINER_NAMED)
	fprintf(stderr, "combiner = %s; should be named\n", 
		combiner_to_string(combiner));

    /* Note: it is erroneous to call MPI_Type_get_contents() on a basic. */
    return errs;
}
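
The test refers to a combiner_to_string() helper; a minimal sketch of what such a helper might look like (the real test harness likely covers every combiner):

static const char *combiner_to_string(int combiner)
{
    switch (combiner) {
    case MPI_COMBINER_NAMED:      return "named";
    case MPI_COMBINER_DUP:        return "dup";
    case MPI_COMBINER_CONTIGUOUS: return "contiguous";
    case MPI_COMBINER_VECTOR:     return "vector";
    case MPI_COMBINER_INDEXED:    return "indexed";
    case MPI_COMBINER_STRUCT:     return "struct";
    default:                      return "unknown";
    }
}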
Example #13
void mpi_type_get_envelope_f(MPI_Fint *type, MPI_Fint *num_integers,
			     MPI_Fint *num_addresses, 
			     MPI_Fint *num_datatypes, MPI_Fint *combiner,
			     MPI_Fint *ierr)
{
    MPI_Datatype c_type = MPI_Type_f2c(*type);
    OMPI_SINGLE_NAME_DECL(num_integers);
    OMPI_SINGLE_NAME_DECL(num_addresses);
    OMPI_SINGLE_NAME_DECL(num_datatypes);
    OMPI_SINGLE_NAME_DECL(combiner);

    *ierr = OMPI_INT_2_FINT(MPI_Type_get_envelope(c_type,
				 OMPI_SINGLE_NAME_CONVERT(num_integers), 
				 OMPI_SINGLE_NAME_CONVERT(num_addresses), 
				 OMPI_SINGLE_NAME_CONVERT(num_datatypes), 
				 OMPI_SINGLE_NAME_CONVERT(combiner)));

    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        OMPI_SINGLE_INT_2_FINT(num_integers);
        OMPI_SINGLE_INT_2_FINT(num_addresses);
        OMPI_SINGLE_INT_2_FINT(num_datatypes);
        OMPI_SINGLE_INT_2_FINT(combiner);
    }
}
Example #14
void ADIOI_PVFS_Fcntl(ADIO_File fd, int flag, ADIO_Fcntl_t *fcntl_struct, int *error_code)
{
    MPI_Datatype copy_etype, copy_filetype;
    int combiner, i, j, k, filetype_is_contig, ntimes, err;
    ADIOI_Flatlist_node *flat_file;
    ADIO_Offset curr_fsize, alloc_size, size, len, done;
    ADIO_Status status;
    char *buf;
#ifndef PRINT_ERR_MSG
    static char myname[] = "ADIOI_PVFS_FCNTL";
#endif

    switch(flag) {
    case ADIO_FCNTL_SET_VIEW:
        /* free copies of old etypes and filetypes and delete flattened 
           version of filetype if necessary */

	MPI_Type_get_envelope(fd->etype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->etype));

	ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (!filetype_is_contig) ADIOI_Delete_flattened(fd->filetype);

	MPI_Type_get_envelope(fd->filetype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->filetype));

	/* set new info */
	ADIO_SetInfo(fd, fcntl_struct->info, &err);

        /* set new etypes and filetypes */

	MPI_Type_get_envelope(fcntl_struct->etype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) fd->etype = fcntl_struct->etype;
	else {
	    MPI_Type_contiguous(1, fcntl_struct->etype, &copy_etype);
	    MPI_Type_commit(&copy_etype);
	    fd->etype = copy_etype;
	}
	MPI_Type_get_envelope(fcntl_struct->filetype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) 
	    fd->filetype = fcntl_struct->filetype;
	else {
	    MPI_Type_contiguous(1, fcntl_struct->filetype, &copy_filetype);
	    MPI_Type_commit(&copy_filetype);
	    fd->filetype = copy_filetype;
	    ADIOI_Flatten_datatype(fd->filetype);
            /* this function will not flatten the filetype if it turns out
               to be all contiguous. */
	}

	MPI_Type_size(fd->etype, &(fd->etype_size));
	fd->disp = fcntl_struct->disp;

        /* reset MPI-IO file pointer to point to the first byte that can
           be accessed in this view. */

        ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (filetype_is_contig) fd->fp_ind = fcntl_struct->disp;
	else {
	    flat_file = ADIOI_Flatlist;
	    while (flat_file->type != fd->filetype) 
		flat_file = flat_file->next;
	    for (i=0; i<flat_file->count; i++) {
		if (flat_file->blocklens[i]) {
		    fd->fp_ind = fcntl_struct->disp + flat_file->indices[i];
		    break;
		}
	    }
	}
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_GET_FSIZE:
	fcntl_struct->fsize = pvfs_lseek(fd->fd_sys, 0, SEEK_END);
	if (fd->fp_sys_posn != -1) 
	     pvfs_lseek(fd->fd_sys, fd->fp_sys_posn, SEEK_SET);
#ifdef PRINT_ERR_MSG
	*error_code = (fcntl_struct->fsize == -1) ? MPI_ERR_UNKNOWN : MPI_SUCCESS;
#else
	if (fcntl_struct->fsize == -1) {
	    *error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	    ADIOI_Error(fd, *error_code, myname);	    
	}
	else *error_code = MPI_SUCCESS;
#endif
	break;

    case ADIO_FCNTL_SET_DISKSPACE:
	/* will be called by one process only */
	/* On file systems with no preallocation function, I have to 
           explicitly write 
           to allocate space. Since there could be holes in the file, 
           I need to read up to the current file size, write it back, 
           and then write beyond that depending on how much 
           preallocation is needed.
           read/write in sizes of no more than ADIOI_PREALLOC_BUFSZ */

	curr_fsize = pvfs_lseek(fd->fd_sys, 0, SEEK_END);
	alloc_size = fcntl_struct->diskspace;

	size = ADIOI_MIN(curr_fsize, alloc_size);
	
	ntimes = (size + ADIOI_PREALLOC_BUFSZ - 1)/ADIOI_PREALLOC_BUFSZ;
	buf = (char *) ADIOI_Malloc(ADIOI_PREALLOC_BUFSZ);
	done = 0;

	for (i=0; i<ntimes; i++) {
	    len = ADIOI_MIN(size-done, ADIOI_PREALLOC_BUFSZ);
	    ADIO_ReadContig(fd, buf, len, MPI_BYTE, ADIO_EXPLICIT_OFFSET, done,
			    &status, error_code);
	    if (*error_code != MPI_SUCCESS) {
#ifdef PRINT_ERR_MSG
		FPRINTF(stderr, "ADIOI_PVFS_Fcntl: To preallocate disk space, ROMIO needs to read the file and write it back, but is unable to read the file. Please give the file read permission and open it with MPI_MODE_RDWR.\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
#else
		*error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_PREALLOC_PERM,
			      myname, (char *) 0, (char *) 0);
		ADIOI_Error(fd, *error_code, myname);
                return;  
#endif
	    }
	    ADIO_WriteContig(fd, buf, len, MPI_BYTE, ADIO_EXPLICIT_OFFSET, done,
			     &status, error_code);
	    if (*error_code != MPI_SUCCESS) return;
	    done += len;
	}

	if (alloc_size > curr_fsize) {
	    memset(buf, 0, ADIOI_PREALLOC_BUFSZ); 
	    size = alloc_size - curr_fsize;
	    ntimes = (size + ADIOI_PREALLOC_BUFSZ - 1)/ADIOI_PREALLOC_BUFSZ;
	    for (i=0; i<ntimes; i++) {
		len = ADIOI_MIN(alloc_size-done, ADIOI_PREALLOC_BUFSZ);
		ADIO_WriteContig(fd, buf, len, MPI_BYTE, ADIO_EXPLICIT_OFFSET, 
				 done, &status, error_code);
		if (*error_code != MPI_SUCCESS) return;
		done += len;  
	    }
	}
	ADIOI_Free(buf);
	if (fd->fp_sys_posn != -1) 
	    pvfs_lseek(fd->fd_sys, fd->fp_sys_posn, SEEK_SET);
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_IOMODE:
        /* for implementing PFS I/O modes. will not occur in MPI-IO
           implementation.*/
	if (fd->iomode != fcntl_struct->iomode) {
	    fd->iomode = fcntl_struct->iomode;
	    MPI_Barrier(MPI_COMM_WORLD);
	}
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_ATOMICITY:
	*error_code = MPI_ERR_UNKNOWN;
	break;

    default:
	FPRINTF(stderr, "Unknown flag passed to ADIOI_PVFS_Fcntl\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
}
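
The ADIO_FCNTL_SET_VIEW branch relies on a recurring pattern: keep predefined (named) handles as-is, but make a private committed copy of any derived datatype the user passed in, so the library's reference stays valid even if the user later frees theirs. A hedged, library-agnostic sketch of that pattern:

#include <mpi.h>

static void keep_datatype_copy(MPI_Datatype user_type, MPI_Datatype *kept)
{
    int ni, na, nd, combiner;

    MPI_Type_get_envelope(user_type, &ni, &na, &nd, &combiner);
    if (combiner == MPI_COMBINER_NAMED) {
        *kept = user_type;                        /* predefined: never freed */
    } else {
        MPI_Type_contiguous(1, user_type, kept);  /* private copy of the user's type */
        MPI_Type_commit(kept);                    /* caller frees *kept when done */
    }
}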
Example #15
File: flatten.c Project: ORNL/ompi
/* ADIOI_Flatten()
 *
 * Assumption: input datatype is not a basic!!!!
 */
void ADIOI_Flatten(MPI_Datatype datatype, ADIOI_Flatlist_node *flat, 
		  ADIO_Offset st_offset, MPI_Count *curr_index)
{
    int i, k, m, n, basic_num, nonzeroth, is_hindexed_block=0;
    int combiner, old_combiner, old_is_contig;
    int nints, nadds, ntypes, old_nints, old_nadds, old_ntypes;
    /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
    ADIO_Offset top_count;
    MPI_Count j, old_size, prev_index, num;
    MPI_Aint old_extent;/* Assume extents are non-negative */
    int *ints;
    MPI_Aint *adds; /* Make no assumptions about +/- sign on these */
    MPI_Datatype *types;
    MPI_Type_get_envelope(datatype, &nints, &nadds, &ntypes, &combiner);
    ints = (int *) ADIOI_Malloc((nints+1)*sizeof(int));
    adds = (MPI_Aint *) ADIOI_Malloc((nadds+1)*sizeof(MPI_Aint));
    types = (MPI_Datatype *) ADIOI_Malloc((ntypes+1)*sizeof(MPI_Datatype));
    MPI_Type_get_contents(datatype, nints, nadds, ntypes, ints, adds, types);

  #ifdef FLATTEN_DEBUG 
  DBG_FPRINTF(stderr,"ADIOI_Flatten:: st_offset %#llX, curr_index %#llX\n",st_offset,*curr_index);
  DBG_FPRINTF(stderr,"ADIOI_Flatten:: nints %#X, nadds %#X, ntypes %#X\n",nints, nadds, ntypes);
  for(i=0; i< nints; ++i)
  {
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: ints[%d]=%#X\n",i,ints[i]);
  }
  for(i=0; i< nadds; ++i)
  {
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: adds[%d]="MPI_AINT_FMT_HEX_SPEC"\n",i,adds[i]);
  }
  for(i=0; i< ntypes; ++i)
  {
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: types[%d]=%#llX\n",i,(unsigned long long)(unsigned long)types[i]);
  }
  #endif
  /* Chapter 4, page 83: when processing datatypes, note this item from the
   * standard:
	 Most datatype constructors have replication count or block length
	 arguments.  Allowed values are non-negative integers. If the value is
	 zero, no elements are generated in the type map and there is no effect
	 on datatype bounds or extent.  */

    switch (combiner) {
#ifdef MPIIMPL_HAVE_MPI_COMBINER_DUP
    case MPI_COMBINER_DUP:
    #ifdef FLATTEN_DEBUG 
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_DUP\n");
    #endif
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
			      &old_ntypes, &old_combiner); 
        ADIOI_Datatype_iscontig(types[0], &old_is_contig);
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
            ADIOI_Flatten(types[0], flat, st_offset, curr_index);
        break;
#endif
#ifdef MPIIMPL_HAVE_MPI_COMBINER_SUBARRAY
    case MPI_COMBINER_SUBARRAY:
        {
	    int dims = ints[0];
	    MPI_Datatype stype;
      #ifdef FLATTEN_DEBUG 
      DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_SUBARRAY\n");
      #endif

	    ADIO_Type_create_subarray(dims,
				      &ints[1],        /* sizes */
				      &ints[dims+1],   /* subsizes */
				      &ints[2*dims+1], /* starts */
				      ints[3*dims+1],  /* order */
				      types[0],        /* type */
				      &stype);
	    ADIOI_Flatten(stype, flat, st_offset, curr_index);
	    MPI_Type_free(&stype);
	}
	break;
#endif
#ifdef MPIIMPL_HAVE_MPI_COMBINER_DARRAY
    case MPI_COMBINER_DARRAY:
	{
	    int dims = ints[2];
	    MPI_Datatype dtype;
      #ifdef FLATTEN_DEBUG 
      DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_DARRAY\n");
      #endif

	    ADIO_Type_create_darray(ints[0],         /* size */
				    ints[1],         /* rank */
				    dims,
				    &ints[3],        /* gsizes */
				    &ints[dims+3],   /* distribs */
				    &ints[2*dims+3], /* dargs */
				    &ints[3*dims+3], /* psizes */
				    ints[4*dims+3],  /* order */
				    types[0],
				    &dtype);
      #ifdef FLATTEN_DEBUG 
      DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_DARRAY <ADIOI_Flatten(dtype, flat->indices[%#X] %#llX, flat->blocklens[%#X] %#llX, st_offset %#llX, curr_index %#llX);\n",
              0, flat->indices[0], 0, flat->blocklens[0], st_offset, *curr_index);
      #endif
	    ADIOI_Flatten(dtype, flat, st_offset, curr_index);
      #ifdef FLATTEN_DEBUG 
      DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_DARRAY >ADIOI_Flatten(dtype, flat->indices[%#X] %#llX, flat->blocklens[%#X] %#llX, st_offset %#llX, curr_index %#llX);\n",
              0, flat->indices[0], 0, flat->blocklens[0], st_offset, *curr_index);
      #endif
	    MPI_Type_free(&dtype);
	}
	break;
#endif
    case MPI_COMBINER_CONTIGUOUS:
    #ifdef FLATTEN_DEBUG 
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_CONTIGUOUS\n");
    #endif
	top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
			      &old_ntypes, &old_combiner); 
        ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
	    ADIOI_Flatten(types[0], flat, st_offset, curr_index);

	if (prev_index == *curr_index) {
/* simplest case, made up of basic or contiguous types */
	    j = *curr_index;
	    flat->indices[j] = st_offset;
	    MPI_Type_size_x(types[0], &old_size);
	    flat->blocklens[j] = top_count * old_size;
      #ifdef FLATTEN_DEBUG 
      DBG_FPRINTF(stderr,"ADIOI_Flatten:: simple flat->indices[%#llX] %#llX, flat->blocklens[%#llX] %#llX\n",j, flat->indices[j], j, flat->blocklens[j]);
      #endif
	    (*curr_index)++;
	}
	else {
/* made up of noncontiguous derived types */
	    j = *curr_index;
	    num = *curr_index - prev_index;

/* The noncontiguous types have to be replicated count times */
	    MPI_Type_extent(types[0], &old_extent);
	    for (m=1; m<top_count; m++) {
		for (i=0; i<num; i++) {
		    flat->indices[j] = flat->indices[j-num] + ADIOI_AINT_CAST_TO_OFFSET old_extent;
		    flat->blocklens[j] = flat->blocklens[j-num];
          #ifdef FLATTEN_DEBUG 
          DBG_FPRINTF(stderr,"ADIOI_Flatten:: derived flat->indices[%#llX] %#llX, flat->blocklens[%#llX] %#llX\n",j, flat->indices[j], j, flat->blocklens[j]);
          #endif
		    j++;
		}
	    }
	    *curr_index = j;
	}
	break;

    case MPI_COMBINER_VECTOR: 
    #ifdef FLATTEN_DEBUG 
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_VECTOR\n");
    #endif
	top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
			      &old_ntypes, &old_combiner); 
        ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
	    ADIOI_Flatten(types[0], flat, st_offset, curr_index);

	if (prev_index == *curr_index) {
/* simplest case, vector of basic or contiguous types */
    /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
    ADIO_Offset blocklength = ints[1], stride = ints[2];
	    j = *curr_index;
	    flat->indices[j] = st_offset;
	    MPI_Type_size_x(types[0], &old_size);
	    flat->blocklens[j] = blocklength * old_size;
	    for (i=j+1; i<j+top_count; i++) {
		flat->indices[i] = flat->indices[i-1] + stride * old_size;
		flat->blocklens[i] = flat->blocklens[j];
	    }
	    *curr_index = i;
	}
	else {
/* vector of noncontiguous derived types */
    /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
    ADIO_Offset blocklength = ints[1], stride = ints[2];

	    j = *curr_index;
	    num = *curr_index - prev_index;

/* The noncontiguous types have to be replicated blocklen times
   and then strided. Replicate the first one. */
	    MPI_Type_extent(types[0], &old_extent);
	    for (m=1; m<blocklength; m++) {
		for (i=0; i<num; i++) {
		    flat->indices[j] = flat->indices[j-num] + ADIOI_AINT_CAST_TO_OFFSET old_extent;
		    flat->blocklens[j] = flat->blocklens[j-num];
		    j++;
		}
	    }
	    *curr_index = j;

/* Now repeat with strides. */
	    num = *curr_index - prev_index;
	    for (i=1; i<top_count; i++) {
 		for (m=0; m<num; m++) {
		   flat->indices[j] =  flat->indices[j-num] + stride * ADIOI_AINT_CAST_TO_OFFSET old_extent;
		   flat->blocklens[j] = flat->blocklens[j-num];
		   j++;
		}
	    }
	    *curr_index = j;
	}
	break;

    case MPI_COMBINER_HVECTOR: 
    case MPI_COMBINER_HVECTOR_INTEGER: 
    #ifdef FLATTEN_DEBUG 
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_HVECTOR_INTEGER\n");
    #endif
	top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
			      &old_ntypes, &old_combiner); 
        ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
	    ADIOI_Flatten(types[0], flat, st_offset, curr_index);

	if (prev_index == *curr_index) {
/* simplest case, vector of basic or contiguous types */
    /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
    ADIO_Offset blocklength = ints[1];
	    j = *curr_index;
	    flat->indices[j] = st_offset;
	    MPI_Type_size_x(types[0], &old_size);
	    flat->blocklens[j] = blocklength * old_size;
	    for (i=j+1; i<j+top_count; i++) {
		flat->indices[i] = flat->indices[i-1] + adds[0];
		flat->blocklens[i] = flat->blocklens[j];
	    }
	    *curr_index = i;
	}
	else {
/* vector of noncontiguous derived types */
    /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
    ADIO_Offset blocklength = ints[1];

	    j = *curr_index;
	    num = *curr_index - prev_index;

/* The noncontiguous types have to be replicated blocklen times
   and then strided. Replicate the first one. */
	    MPI_Type_extent(types[0], &old_extent);
	    for (m=1; m<blocklength; m++) {
		for (i=0; i<num; i++) {
		    flat->indices[j] = flat->indices[j-num] + ADIOI_AINT_CAST_TO_OFFSET old_extent;
		    flat->blocklens[j] = flat->blocklens[j-num];
		    j++;
		}
	    }
	    *curr_index = j;

/* Now repeat with strides. */
	    num = *curr_index - prev_index;
	    for (i=1; i<top_count; i++) {
 		for (m=0; m<num; m++) {
		   flat->indices[j] =  flat->indices[j-num] + adds[0];
		   flat->blocklens[j] = flat->blocklens[j-num];
		   j++;
		}
	    }
	    *curr_index = j;
	}
	break;

    case MPI_COMBINER_INDEXED: 
    #ifdef FLATTEN_DEBUG 
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_INDEXED\n");
    #endif
	top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
			      &old_ntypes, &old_combiner); 
        ADIOI_Datatype_iscontig(types[0], &old_is_contig);
	MPI_Type_extent(types[0], &old_extent);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
  {
    /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
    ADIO_Offset stride = ints[top_count+1];
        ADIOI_Flatten(types[0], flat,
         st_offset+stride* ADIOI_AINT_CAST_TO_OFFSET old_extent, curr_index);
  }

	if (prev_index == *curr_index) {
/* simplest case, indexed type made up of basic or contiguous types */
	    j = *curr_index;
	    for (i=j, nonzeroth=i; i<j+top_count; i++) {
    /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
    ADIO_Offset blocklength = ints[1+i-j], stride = ints[top_count+1+i-j];
		if (blocklength > 0) {
		    flat->indices[nonzeroth] =
			st_offset + stride* ADIOI_AINT_CAST_TO_OFFSET old_extent;
		    flat->blocklens[nonzeroth] =
			blocklength* ADIOI_AINT_CAST_TO_OFFSET old_extent;
		    nonzeroth++;
		} else {
		    flat->count--; /* don't count/consider any zero-length blocklens */
		}
	    }
	    *curr_index = i;
	}
	else {
/* indexed type made up of noncontiguous derived types */

	    j = *curr_index;
	    num = *curr_index - prev_index;
	    basic_num = num;

/* The noncontiguous types have to be replicated blocklens[i] times
   and then strided. Replicate the first one. */
	    for (m=1; m<ints[1]; m++) {
		for (i=0, nonzeroth = j; i<num; i++) {
		    if (flat->blocklens[j-num] > 0) {
			flat->indices[nonzeroth] =
			    flat->indices[nonzeroth-num] + ADIOI_AINT_CAST_TO_OFFSET old_extent;
			flat->blocklens[nonzeroth] =
			    flat->blocklens[nonzeroth-num];
			j++;
			nonzeroth++;
		    } else {
			flat->count --;
		    }
		}
	    }
	    *curr_index = j;

/* Now repeat with strides. */
	    for (i=1; i<top_count; i++) {
		num = *curr_index - prev_index;
		prev_index = *curr_index;
		for (m=0, nonzeroth=j; m<basic_num; m++) {
      /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
      ADIO_Offset stride = ints[top_count+1+i]-ints[top_count+i];
		    if (flat->blocklens[j-num] > 0 ) {
			flat->indices[nonzeroth] =
			    flat->indices[j-num] + stride* ADIOI_AINT_CAST_TO_OFFSET old_extent;
			flat->blocklens[nonzeroth] = flat->blocklens[j-num];
			j++;
			nonzeroth++;
		    } else {
			flat->count--;
		    }
		}
		*curr_index = j;
		for (m=1; m<ints[1+i]; m++) {
                    for (k=0, nonzeroth=j; k<basic_num; k++) {
			if (flat->blocklens[j-basic_num] > 0) {
			    flat->indices[nonzeroth] =
				flat->indices[j-basic_num] + ADIOI_AINT_CAST_TO_OFFSET old_extent;
			    flat->blocklens[nonzeroth] = flat->blocklens[j-basic_num];
			    j++;
			    nonzeroth++;
			} else {
			    flat->count --;
			}
                    }
                }
		*curr_index = j;
	    }
	}
	break;

#if defined HAVE_DECL_MPI_COMBINER_HINDEXED_BLOCK && HAVE_DECL_MPI_COMBINER_HINDEXED_BLOCK
    case MPI_COMBINER_HINDEXED_BLOCK:
	is_hindexed_block=1;
	/* deliberate fall-through */
#endif
    case MPI_COMBINER_INDEXED_BLOCK:
    #ifdef FLATTEN_DEBUG 
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_INDEXED_BLOCK\n");
    #endif
	top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
			      &old_ntypes, &old_combiner); 
        ADIOI_Datatype_iscontig(types[0], &old_is_contig);
	MPI_Type_extent(types[0], &old_extent);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
  {
      /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
      ADIO_Offset stride = ints[1+1];
	if (is_hindexed_block) {
	    ADIOI_Flatten(types[0], flat,
		    st_offset+adds[0], curr_index);
	} else {
	    ADIOI_Flatten(types[0], flat,
		    st_offset+stride* ADIOI_AINT_CAST_TO_OFFSET old_extent, curr_index);
	}
  }

	if (prev_index == *curr_index) {
/* simplest case, indexed type made up of basic or contiguous types */
	    j = *curr_index;
	    for (i=j; i<j+top_count; i++) {
      /* By using ADIO_Offset we preserve +/- sign and 
         avoid >2G integer arithmetic problems */
		ADIO_Offset blocklength = ints[1];
		if (is_hindexed_block) {
		    flat->indices[i] = st_offset + adds[i-j];
		} else {
		    ADIO_Offset stride = ints[1+1+i-j];
		    flat->indices[i] = st_offset +
			stride* ADIOI_AINT_CAST_TO_OFFSET old_extent;
		}
		flat->blocklens[i] = blocklength* ADIOI_AINT_CAST_TO_OFFSET old_extent;
	    }
	    *curr_index = i;
	}
	else {
/* vector of noncontiguous derived types */

	    j = *curr_index;
	    num = *curr_index - prev_index;

/* The noncontiguous types have to be replicated blocklens[i] times
   and then strided. Replicate the first one. */
	    for (m=1; m<ints[1]; m++) {
		for (i=0; i<num; i++) {
		    if (is_hindexed_block) {
			/* this is the one place the hindexed case uses the
			 * extent of a type */
			MPI_Type_extent(types[0], &old_extent);
		    }
		    flat->indices[j] = flat->indices[j-num] +
			ADIOI_AINT_CAST_TO_OFFSET old_extent;
		    flat->blocklens[j] = flat->blocklens[j-num];
		    j++;
		}
	    }
	    *curr_index = j;

/* Now repeat with strides. */
	    num = *curr_index - prev_index;
	    for (i=1; i<top_count; i++) {
		for (m=0; m<num; m++) {
		    if (is_hindexed_block) {
			flat->indices[j] = flat->indices[j-num] +
			    adds[i] - adds[i-1];
		    } else {
			/* By using ADIO_Offset we preserve +/- sign and
			   avoid >2G integer arithmetic problems */
			ADIO_Offset stride = ints[2+i]-ints[1+i];
			flat->indices[j] = flat->indices[j-num] +
			    stride* ADIOI_AINT_CAST_TO_OFFSET old_extent;
		    }
		    flat->blocklens[j] = flat->blocklens[j-num];
		    j++;
		}
	    }
	    *curr_index = j;
	}
	break;

    case MPI_COMBINER_HINDEXED: 
    case MPI_COMBINER_HINDEXED_INTEGER:
    #ifdef FLATTEN_DEBUG 
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_HINDEXED_INTEGER\n");
    #endif
	top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
			      &old_ntypes, &old_combiner); 
        ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
  {
        ADIOI_Flatten(types[0], flat, st_offset+adds[0], curr_index); 
  }

	if (prev_index == *curr_index) {
/* simplest case, indexed type made up of basic or contiguous types */
	    j = *curr_index;
	    MPI_Type_size_x(types[0], &old_size);
	    for (i=j, nonzeroth=j; i<j+top_count; i++) {
		if (ints[1+i-j] > 0) {
		    /* By using ADIO_Offset we preserve +/- sign and
		       avoid >2G integer arithmetic problems */
		    ADIO_Offset blocklength = ints[1+i-j];
		    flat->indices[nonzeroth] = st_offset + adds[i-j];
		    flat->blocklens[nonzeroth] = blocklength*old_size;
		    nonzeroth++;
		} else {
		    flat->count--;
		}
	    }
	    *curr_index = i;
	}
	else {
/* indexed type made up of noncontiguous derived types */

	    j = *curr_index;
	    num = *curr_index - prev_index;
	    basic_num = num;

/* The noncontiguous types have to be replicated blocklens[i] times
   and then strided. Replicate the first one. */
	    MPI_Type_extent(types[0], &old_extent);
	    for (m=1; m<ints[1]; m++) {
		for (i=0, nonzeroth=j; i<num; i++) {
		    if (flat->blocklens[j-num] > 0) {
			flat->indices[nonzeroth] =
			    flat->indices[j-num] + ADIOI_AINT_CAST_TO_OFFSET old_extent;
			flat->blocklens[nonzeroth] = flat->blocklens[j-num];
			j++;
			nonzeroth++;
		    } else {
			flat->count--;
		    }
		}
	    }
	    *curr_index = j;

/* Now repeat with strides. */
	    for (i=1; i<top_count; i++) {
		num = *curr_index - prev_index;
		prev_index = *curr_index;
		for (m=0, nonzeroth=j; m<basic_num; m++) {
		    if (flat->blocklens[j-num] > 0) {
			flat->indices[nonzeroth] =
			    flat->indices[j-num] + adds[i] - adds[i-1];
			flat->blocklens[nonzeroth] = flat->blocklens[j-num];
			j++;
			nonzeroth++;
		    } else {
			flat->count--;
		    }
		}
		*curr_index = j;
		for (m=1; m<ints[1+i]; m++) {
		    for (k=0,nonzeroth=j; k<basic_num; k++) {
			if (flat->blocklens[j-basic_num] >0) {
			    flat->indices[nonzeroth] =
				flat->indices[j-basic_num] + ADIOI_AINT_CAST_TO_OFFSET old_extent;
			    flat->blocklens[nonzeroth] = flat->blocklens[j-basic_num];
			    j++;
			    nonzeroth++;
			}
		    }
		}
		*curr_index = j;
	    }
	}
	break;

    case MPI_COMBINER_STRUCT: 
    case MPI_COMBINER_STRUCT_INTEGER: 
    #ifdef FLATTEN_DEBUG 
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_STRUCT_INTEGER\n");
    #endif
	top_count = ints[0];
	for (n=0; n<top_count; n++) {
	    MPI_Type_get_envelope(types[n], &old_nints, &old_nadds,
				  &old_ntypes, &old_combiner); 
            ADIOI_Datatype_iscontig(types[n], &old_is_contig);

	    prev_index = *curr_index;
            if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
		ADIOI_Flatten(types[n], flat, st_offset+adds[n], curr_index);

	    if (prev_index == *curr_index) {
/* simplest case, current type is basic or contiguous types */
        /* By using ADIO_Offset we preserve +/- sign and 
           avoid >2G integer arithmetic problems */
		if (ints[1+n] > 0 || types[n] == MPI_LB || types[n] == MPI_UB) {
		    ADIO_Offset blocklength = ints[1+n];
		    j = *curr_index;
		    flat->indices[j] = st_offset + adds[n];
		    MPI_Type_size_x(types[n], &old_size);
		    flat->blocklens[j] = blocklength * old_size;
#ifdef FLATTEN_DEBUG
		    DBG_FPRINTF(stderr,"ADIOI_Flatten:: simple adds[%#X] "MPI_AINT_FMT_HEX_SPEC", flat->indices[%#llX] %#llX, flat->blocklens[%#llX] %#llX\n",n,adds[n],j, flat->indices[j], j, flat->blocklens[j]);
#endif
		    (*curr_index)++;
		}
	    }
	    else {
/* current type made up of noncontiguous derived types */

		j = *curr_index;
		num = *curr_index - prev_index;

/* The current type has to be replicated blocklens[n] times */
		MPI_Type_extent(types[n], &old_extent);
		for (m=1; m<ints[1+n]; m++) {
		    for (i=0; i<num; i++) {
			flat->indices[j] =
			    flat->indices[j-num] + ADIOI_AINT_CAST_TO_OFFSET old_extent;
			flat->blocklens[j] = flat->blocklens[j-num];
#ifdef FLATTEN_DEBUG
			DBG_FPRINTF(stderr,"ADIOI_Flatten:: simple old_extent "MPI_AINT_FMT_HEX_SPEC", flat->indices[%#llX] %#llX, flat->blocklens[%#llX] %#llX\n",old_extent,j, flat->indices[j], j, flat->blocklens[j]);
#endif
			j++;
		    }
		}
		*curr_index = j;
	    }
	}
 	break;

    case MPI_COMBINER_RESIZED: 
    #ifdef FLATTEN_DEBUG 
    DBG_FPRINTF(stderr,"ADIOI_Flatten:: MPI_COMBINER_RESIZED\n");
    #endif

    /* This is done similar to a type_struct with an lb, datatype, ub */

    /* handle the Lb */
	j = *curr_index;
	flat->indices[j] = st_offset + adds[0];
	/* this zero-length blocklens[] element, unlike eleswhere in the
	 * flattening code, is correct and is used to indicate a lower bound
	 * marker */
	flat->blocklens[j] = 0;

        #ifdef FLATTEN_DEBUG 
        DBG_FPRINTF(stderr,"ADIOI_Flatten:: simple adds[%#X] "MPI_AINT_FMT_HEX_SPEC", flat->indices[%#llX] %#llX, flat->blocklens[%#llX] %#llX\n",0,adds[0],j, flat->indices[j], j, flat->blocklens[j]);
        #endif

	(*curr_index)++;

	/* handle the datatype */

	MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
			      &old_ntypes, &old_combiner); 
	ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig)) {
	    ADIOI_Flatten(types[0], flat, st_offset+adds[0], curr_index);
	}
	else {
            /* current type is basic or contiguous */
	    j = *curr_index;
	    flat->indices[j] = st_offset;
	    MPI_Type_size_x(types[0], &old_size);
	    flat->blocklens[j] = old_size;

            #ifdef FLATTEN_DEBUG 
	    DBG_FPRINTF(stderr,"ADIOI_Flatten:: simple adds[%#X] "MPI_AINT_FMT_HEX_SPEC", flat->indices[%#llX] %#llX, flat->blocklens[%#llX] %#llX\n",0,adds[0],j, flat->indices[j], j, flat->blocklens[j]);
            #endif

	    (*curr_index)++;
	}

	/* take care of the extent as a UB */
	j = *curr_index;
	flat->indices[j] = st_offset + adds[0] + adds[1];
	/* again, zero-element ok: an upper-bound marker explicitly set by the
	 * constructor of this resized type */
	flat->blocklens[j] = 0;

        #ifdef FLATTEN_DEBUG 
        DBG_FPRINTF(stderr,"ADIOI_Flatten:: simple adds[%#X] "MPI_AINT_FMT_HEX_SPEC", flat->indices[%#llX] %#llX, flat->blocklens[%#llX] %#llX\n",1,adds[1],j, flat->indices[j], j, flat->blocklens[j]);
        #endif

	(*curr_index)++;

 	break;

    default:
	/* TODO: FIXME (requires changing prototypes to return errors...) */
	DBG_FPRINTF(stderr, "Error: Unsupported datatype passed to ADIOI_Flatten\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }

#ifndef MPISGI
/* There is a bug in SGI's impl. of MPI_Type_get_contents. It doesn't
   return new datatypes. Therefore no need to free. */
    for (i=0; i<ntypes; i++) {
 	MPI_Type_get_envelope(types[i], &old_nints, &old_nadds, &old_ntypes,
 			      &old_combiner);
 	if (old_combiner != MPI_COMBINER_NAMED) MPI_Type_free(types+i);
    }
#endif

    ADIOI_Free(ints);
    ADIOI_Free(adds);
    ADIOI_Free(types);

  #ifdef FLATTEN_DEBUG 
  DBG_FPRINTF(stderr,"ADIOI_Flatten:: return st_offset %#llX, curr_index %#llX\n",st_offset,*curr_index);
  #endif

}
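
A hedged illustration (not ROMIO code) of what the MPI_COMBINER_VECTOR branch records in the simplest case, a vector of a named type, assuming a 4-byte MPI_INT:

#include <mpi.h>

static void vector_flatten_demo(void)
{
    MPI_Datatype vec;

    MPI_Type_vector(4, 2, 5, MPI_INT, &vec);   /* 4 blocks of 2 ints, stride 5 ints */
    MPI_Type_commit(&vec);
    /* The flattened map of vec (byte offsets relative to the type start):
     *   flat->indices   = { 0, 20, 40, 60 }   -- i * stride * old_size
     *   flat->blocklens = { 8,  8,  8,  8 }   -- blocklength * old_size
     * which is exactly what the "vector of basic or contiguous types" case
     * above computes. */
    MPI_Type_free(&vec);
}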
Example #16
File: flatten.c Project: ORNL/ompi
/* ADIOI_Count_contiguous_blocks
 *
 * Returns number of contiguous blocks in type, and also updates
 * curr_index to reflect the space for the additional blocks.
 *
 * ASSUMES THAT TYPE IS NOT A BASIC!!!
 */
MPI_Count ADIOI_Count_contiguous_blocks(MPI_Datatype datatype, MPI_Count *curr_index)
{
    int i, n;
    MPI_Count count=0, prev_index, num, basic_num;
    int top_count, combiner, old_combiner, old_is_contig;
    int nints, nadds, ntypes, old_nints, old_nadds, old_ntypes;
    int *ints;
    MPI_Aint *adds; /* Make no assumptions about +/- sign on these */
    MPI_Datatype *types;

    MPI_Type_get_envelope(datatype, &nints, &nadds, &ntypes, &combiner);
    ints = (int *) ADIOI_Malloc((nints+1)*sizeof(int));
    adds = (MPI_Aint *) ADIOI_Malloc((nadds+1)*sizeof(MPI_Aint));
    types = (MPI_Datatype *) ADIOI_Malloc((ntypes+1)*sizeof(MPI_Datatype));
    MPI_Type_get_contents(datatype, nints, nadds, ntypes, ints, adds, types);

    switch (combiner) {
#ifdef MPIIMPL_HAVE_MPI_COMBINER_DUP
    case MPI_COMBINER_DUP:
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
                              &old_ntypes, &old_combiner); 
	ADIOI_Datatype_iscontig(types[0], &old_is_contig);
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
	    count = ADIOI_Count_contiguous_blocks(types[0], curr_index);
	else {
		count = 1;
		(*curr_index)++;
	}
        break;
#endif
#ifdef MPIIMPL_HAVE_MPI_COMBINER_SUBARRAY
    case MPI_COMBINER_SUBARRAY:
        {
	    int dims = ints[0];
	    MPI_Datatype stype;

	    ADIO_Type_create_subarray(dims,
				      &ints[1],        /* sizes */
				      &ints[dims+1],   /* subsizes */
				      &ints[2*dims+1], /* starts */
				      ints[3*dims+1],  /* order */
				      types[0],        /* type */
				      &stype);
	    count = ADIOI_Count_contiguous_blocks(stype, curr_index);
	    /* curr_index will have already been updated; just pass
	     * count back up.
	     */
	    MPI_Type_free(&stype);

	}
	break;
#endif
#ifdef MPIIMPL_HAVE_MPI_COMBINER_DARRAY
    case MPI_COMBINER_DARRAY:
	{
	    int dims = ints[2];
	    MPI_Datatype dtype;

	    ADIO_Type_create_darray(ints[0],         /* size */
				    ints[1],         /* rank */
				    dims,
				    &ints[3],        /* gsizes */
				    &ints[dims+3],   /* distribs */
				    &ints[2*dims+3], /* dargs */
				    &ints[3*dims+3], /* psizes */
				    ints[4*dims+3],  /* order */
				    types[0],
				    &dtype);
	    count = ADIOI_Count_contiguous_blocks(dtype, curr_index);
	    /* curr_index will have already been updated; just pass
	     * count back up.
	     */
	    MPI_Type_free(&dtype);
	}
	break;
#endif
    case MPI_COMBINER_CONTIGUOUS:
        top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
                              &old_ntypes, &old_combiner); 
	ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
	    count = ADIOI_Count_contiguous_blocks(types[0], curr_index);
	else count = 1;

	if (prev_index == *curr_index) 
/* simplest case, made up of basic or contiguous types */
	    (*curr_index)++;
	else {
/* made up of noncontiguous derived types */
	    num = *curr_index - prev_index;
	    count *= top_count;
	    *curr_index += (top_count - 1)*num;
	}
	break;

    case MPI_COMBINER_VECTOR:
    case MPI_COMBINER_HVECTOR:
    case MPI_COMBINER_HVECTOR_INTEGER: 
        top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
                              &old_ntypes, &old_combiner); 
	ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
	    count = ADIOI_Count_contiguous_blocks(types[0], curr_index);
	else count = 1;

	if (prev_index == *curr_index) {
/* simplest case, vector of basic or contiguous types */
	    count = top_count;
	    *curr_index += count;
	}
	else {
/* vector of noncontiguous derived types */
	    num = *curr_index - prev_index;

/* The noncontiguous types have to be replicated blocklen times
   and then strided. */
	    count *= ints[1] * top_count;

/* First one */
	    *curr_index += (ints[1] - 1)*num;

/* Now repeat with strides. */
	    num = *curr_index - prev_index;
	    *curr_index += (top_count - 1)*num;
	}
	break;

    case MPI_COMBINER_INDEXED: 
    case MPI_COMBINER_HINDEXED:
    case MPI_COMBINER_HINDEXED_INTEGER:
        top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
                              &old_ntypes, &old_combiner); 
	ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
	    count = ADIOI_Count_contiguous_blocks(types[0], curr_index);
	else count = 1;

	if (prev_index == *curr_index) {
/* simplest case, indexed type made up of basic or contiguous types */
	    count = top_count;
	    *curr_index += count;
	}
	else {
/* indexed type made up of noncontiguous derived types */
	    basic_num = *curr_index - prev_index;

/* The noncontiguous types have to be replicated blocklens[i] times
   and then strided. */
	    *curr_index += (ints[1]-1) * basic_num;
	    count *= ints[1];

/* Now repeat with strides. */
	    for (i=1; i<top_count; i++) {
		count += ints[1+i] * basic_num;
		*curr_index += ints[1+i] * basic_num;
	    }
	}
	break;

#if defined HAVE_DECL_MPI_COMBINER_HINDEXED_BLOCK && HAVE_DECL_MPI_COMBINER_HINDEXED_BLOCK
    case MPI_COMBINER_HINDEXED_BLOCK:
#endif
    case MPI_COMBINER_INDEXED_BLOCK:
        top_count = ints[0];
        MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
                              &old_ntypes, &old_combiner); 
	ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	prev_index = *curr_index;
	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
	    count = ADIOI_Count_contiguous_blocks(types[0], curr_index);
	else count = 1;

	if (prev_index == *curr_index) {
/* simplest case, indexed type made up of basic or contiguous types */
	    count = top_count;
	    *curr_index += count;
	}
	else {
/* indexed type made up of noncontiguous derived types */
	    basic_num = *curr_index - prev_index;

/* The noncontiguous types have to be replicated blocklens[i] times
   and then strided. */
	    *curr_index += (ints[1]-1) * basic_num;
	    count *= ints[1];

/* Now repeat with strides. */
	    *curr_index += (top_count-1) * count;
	    count *= top_count;
	}
	break;

    case MPI_COMBINER_STRUCT: 
    case MPI_COMBINER_STRUCT_INTEGER: 
        top_count = ints[0];
	count = 0;
	for (n=0; n<top_count; n++) {
            MPI_Type_get_envelope(types[n], &old_nints, &old_nadds,
                                  &old_ntypes, &old_combiner); 
	    ADIOI_Datatype_iscontig(types[n], &old_is_contig);

	    prev_index = *curr_index;
	    if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig))
	    count += ADIOI_Count_contiguous_blocks(types[n], curr_index);

	    if (prev_index == *curr_index) {
/* simplest case, current type is basic or contiguous types */
		count++;
		(*curr_index)++;
	    }
	    else {
/* current type made up of noncontiguous derived types */
/* The current type has to be replicated blocklens[n] times */

		num = *curr_index - prev_index;
		count += (ints[1+n]-1)*num;
		(*curr_index) += (ints[1+n]-1)*num;
	    }
	}
	break;

    case MPI_COMBINER_RESIZED: 
	/* treat it as a struct with lb, type, ub */

	/* add 2 for lb and ub */
	(*curr_index) += 2;
	count += 2;

	/* add for datatype */ 
	MPI_Type_get_envelope(types[0], &old_nints, &old_nadds,
                                  &old_ntypes, &old_combiner); 
	ADIOI_Datatype_iscontig(types[0], &old_is_contig);

	if ((old_combiner != MPI_COMBINER_NAMED) && (!old_is_contig)) {
	    count += ADIOI_Count_contiguous_blocks(types[0], curr_index);
	}
	else {
        /* basic or contiguous type */
	    count++;
	    (*curr_index)++;
	}
	break;

    default:
	/* TODO: FIXME */
	DBG_FPRINTF(stderr, "Error: Unsupported datatype passed to ADIOI_Count_contiguous_blocks, combiner = %d\n", combiner);
	MPI_Abort(MPI_COMM_WORLD, 1);
    }

#ifndef MPISGI
/* There is a bug in SGI's impl. of MPI_Type_get_contents. It doesn't
   return new datatypes. Therefore no need to free. */
    for (i=0; i<ntypes; i++) {
 	MPI_Type_get_envelope(types[i], &old_nints, &old_nadds, &old_ntypes,
 			      &old_combiner);
 	if (old_combiner != MPI_COMBINER_NAMED) MPI_Type_free(types+i);
    }
#endif

    ADIOI_Free(ints);
    ADIOI_Free(adds);
    ADIOI_Free(types);
    return count;
}
Example #17
0
void ADIOI_SCI_Fcntl(ADIO_File fd, int flag, ADIO_Fcntl_t *fcntl_struct, int *error_code)
{
    MPI_Datatype copy_etype, copy_filetype;
    int combiner, i, j, k, filetype_is_contig, ntimes, err;
    ADIOI_Flatlist_node *flat_file;
    ADIO_Offset curr_fsize, alloc_size, size, len, done;
    ADIO_Status status;
    char *buf;
#ifndef PRINT_ERR_MSG
    static char myname[] = "ADIOI_SCI_FCNTL";
#endif

    switch (flag) {
    case ADIO_FCNTL_SET_VIEW:
        /* free copies of old etypes and filetypes and delete flattened 
           version of filetype if necessary */

	MPI_Type_get_envelope(fd->etype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) 
	    MPI_Type_free(&(fd->etype));

	ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (!filetype_is_contig) 
	    ADIOI_Delete_flattened(fd->filetype);

	MPI_Type_get_envelope(fd->filetype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) 
	    MPI_Type_free(&(fd->filetype));

	/* set new info */
	ADIO_SetInfo(fd, fcntl_struct->info, &err);

        /* set new etypes and filetypes */

	MPI_Type_get_envelope(fcntl_struct->etype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) {
	    fd->etype = fcntl_struct->etype;
	} else {
	    MPI_Type_contiguous(1, fcntl_struct->etype, &copy_etype);
	    MPI_Type_commit(&copy_etype);
	    fd->etype = copy_etype;
	}
	MPI_Type_get_envelope(fcntl_struct->filetype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) {
	    fd->filetype = fcntl_struct->filetype;
	} else {
	    MPI_Type_contiguous(1, fcntl_struct->filetype, &copy_filetype);
	    MPI_Type_commit(&copy_filetype);
	    fd->filetype = copy_filetype;
	    ADIOI_Flatten_datatype(fd->filetype);
            /* this function will not flatten the filetype if it turns out
               to be all contiguous. */
	}

	MPI_Type_size(fd->etype, &(fd->etype_size));
	fd->disp = fcntl_struct->disp;

        /* reset MPI-IO file pointer to point to the first byte that can
           be accessed in this view. */

        ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (filetype_is_contig) {
	    fd->fp_ind = fcntl_struct->disp;
	} else {
	    flat_file = ADIOI_Flatlist;
	    while (flat_file->type != fd->filetype) 
		flat_file = flat_file->next;
	    for (i=0; i<flat_file->count; i++) {
		if (flat_file->blocklens[i]) {
		    fd->fp_ind = fcntl_struct->disp + flat_file->indices[i];
		    break;
		}
	    }
	}
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_GET_FSIZE:
	break;

    case ADIO_FCNTL_SET_DISKSPACE:
	/* will be called by one process only */
	/* On file systems with no preallocation function, I have to 
           explicitly write 
           to allocate space. Since there could be holes in the file, 
           I need to read up to the current file size, write it back, 
           and then write beyond that depending on how much 
           preallocation is needed.
           read/write in sizes of no more than ADIOI_PREALLOC_BUFSZ */

	break;

    case ADIO_FCNTL_SET_IOMODE:
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_ATOMICITY:
	fd->atomicity = (fcntl_struct->atomicity == 0) ? 0 : 1;
	*error_code = MPI_SUCCESS;
	break;

    default:
	FPRINTF(stderr, "Unknown flag passed to ADIOI_SCI_Fcntl\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
}
Example #18
0
void ADIOI_SVM_Fcntl(ADIO_File fd, int flag, ADIO_Fcntl_t *fcntl_struct, int *error_code)
{
    MPI_Datatype copy_etype, copy_filetype;
    int combiner, i, j, k, filetype_is_contig, ntimes, err;
    ADIOI_Flatlist_node *flat_file;
    ADIO_Offset curr_fsize, alloc_size, size, len, done;
    ADIO_Status status;
    char *buf;
    /* added by RAY */
    FileTable  files_local;
    /* end RAY */
    

    /*printf("Entering ADIOI_SVM_Fcntl().\n");*/
    
    switch(flag) {
    case ADIO_FCNTL_SET_VIEW:
        /* free copies of old etypes and filetypes and delete flattened 
           version of filetype if necessary */

	MPI_Type_get_envelope(fd->etype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->etype));

	ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (!filetype_is_contig) ADIOI_Delete_flattened(fd->filetype);

	MPI_Type_get_envelope(fd->filetype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->filetype));

	/* set new info */
	ADIO_SetInfo(fd, fcntl_struct->info, &err);

        /* set new etypes and filetypes */

	MPI_Type_get_envelope(fcntl_struct->etype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) fd->etype = fcntl_struct->etype;
	else {
	    MPI_Type_contiguous(1, fcntl_struct->etype, &copy_etype);
	    MPI_Type_commit(&copy_etype);
	    fd->etype = copy_etype;
	}
	MPI_Type_get_envelope(fcntl_struct->filetype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) 
	    fd->filetype = fcntl_struct->filetype;
	else {
	    MPI_Type_contiguous(1, fcntl_struct->filetype, &copy_filetype);
	    MPI_Type_commit(&copy_filetype);
	    fd->filetype = copy_filetype;
	    ADIOI_Flatten_datatype(fd->filetype);
            /* this function will not flatten the filetype if it turns out
               to be all contiguous. */
	}

	MPI_Type_size(fd->etype, &(fd->etype_size));
	fd->disp = fcntl_struct->disp;

        /* reset MPI-IO file pointer to point to the first byte that can
           be accessed in this view. */

        ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (filetype_is_contig) fd->fp_ind = fcntl_struct->disp;
	else {
	    flat_file = ADIOI_Flatlist;
	    while (flat_file->type != fd->filetype) 
		flat_file = flat_file->next;
	    for (i=0; i<flat_file->count; i++) {
		if (flat_file->blocklens[i]) {
		    fd->fp_ind = fcntl_struct->disp + flat_file->indices[i];
		    break;
		}
	    }
	}
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_GET_FSIZE:
    	/* changed by RAY */
	/*fcntl_struct->fsize = lseek(fd->fd_sys, 0, SEEK_END);*/
	files_local = (FileTable)NULL;
	if (ADIOI_SVM_Lookup_fd(fd->fd_sys,&files_local)==MPI_SUCCESS)
	   fcntl_struct->fsize = files_local->size;
	else
	   fcntl_struct->fsize = -1;
	/*if (fd->fp_sys_posn != -1) */
	     /*lseek(fd->fd_sys, fd->fp_sys_posn, SEEK_SET);*/
	free(files_local);
	/* end RAY */
	*error_code = (fcntl_struct->fsize == -1) ? MPI_ERR_UNKNOWN : MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_DISKSPACE:
	/* will be called by one process only */
	/* added by RAY */
	/* !!! Not with AD_SVM - here every process calls it !!! */
	/* end RAY */
	/* On file systems with no preallocation function, I have to 
           explicitly write 
           to allocate space. Since there could be holes in the file, 
           I need to read up to the current file size, write it back, 
           and then write beyond that depending on how much 
           preallocation is needed.
           read/write in sizes of no more than ADIOI_PREALLOC_BUFSZ */

	/* changed by RAY */
	/*curr_fsize = lseek(fd->fd_sys, 0, SEEK_END);*/
	files_local = (FileTable)NULL;
	if (ADIOI_SVM_Lookup_fd(fd->fd_sys,&files_local)==MPI_SUCCESS)
	   curr_fsize = files_local->size;
	else
	   curr_fsize = -1;

	free(files_local);

	if (curr_fsize < fcntl_struct->diskspace)
	   ADIOI_SVM_Resize(fd,fcntl_struct->diskspace,error_code);
		
	if (fd->fp_sys_posn != -1)
	   /*lseek(fd->fd_sys, fd->fp_sys_posn, SEEK_SET);*/
	   ADIOI_SVM_Lseek(fd,fd->fp_sys_posn);
	/* end RAY */ 
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_IOMODE:
        /* for implementing PFS I/O modes. will not occur in MPI-IO
           implementation.*/
	if (fd->iomode != fcntl_struct->iomode) {
	    fd->iomode = fcntl_struct->iomode;
	    MPI_Barrier(MPI_COMM_WORLD);
	}
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_ATOMICITY:
	fd->atomicity = (fcntl_struct->atomicity == 0) ? 0 : 1;
	*error_code = MPI_SUCCESS;
	break;

    default:
	printf("Unknown flag passed to ADIOI_SVM_Fcntl\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
}
Example #19
0
/* indexed_of_vectors_test()
 *
 * Builds an indexed type of vectors of ints.
 *
 * MPICH1 fails this test because it converts the vectors into hvectors.
 *
 * Returns the number of errors encountered.
 */
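/* For an MPI_COMBINER_INDEXED type the envelope reports
 * nints = 2*count + 1 (the count, then count blocklengths, then count
 * displacements), nadds = 0, and ntypes = 1, so with i_count = 3 the
 * checks below expect nints == 7. */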
int indexed_of_vectors_test(void)
{
    MPI_Datatype inner_vector, inner_vector_copy;
    MPI_Datatype outer_indexed;
    
    int i_count = 3, i_blocklengths[3] = { 3, 2, 1 };
    int i_displacements[3] = { 10, 20, 30 };

    int nints, nadds, ntypes, combiner, *ints;
    MPI_Aint *adds = NULL;
    MPI_Datatype *types;

    int err, errs = 0;

    /* set up type */
    err = MPI_Type_vector(2,
			  1,
			  2,
			  MPI_INT,
			  &inner_vector);
    if (err != MPI_SUCCESS) {
	if (verbose) fprintf(stderr, 
			     "error in MPI call; aborting after %d errors\n",
			     errs+1);
	return errs+1;
    }

    err = MPI_Type_indexed(i_count,
			   i_blocklengths,
			   i_displacements,
			   inner_vector,
			   &outer_indexed);
    if (err != MPI_SUCCESS) {
	if (verbose) fprintf(stderr, 
			     "error in MPI call; aborting after %d errors\n",
			     errs+1);
	return errs+1;
    }

    /* decode outer vector (get envelope, then contents) */
    err = MPI_Type_get_envelope(outer_indexed,
				&nints,
				&nadds,
				&ntypes,
				&combiner);
    if (err != MPI_SUCCESS) {
	if (verbose) fprintf(stderr, 
			     "error in MPI call; aborting after %d errors\n",
			     errs+1);
	return errs+1;
    }

    if (nints != 7) errs++;
    if (nadds != 0) errs++;
    if (ntypes != 1) errs++;
    if (combiner != MPI_COMBINER_INDEXED) errs++;

    if (verbose) {
        if (nints != 7) fprintf(stderr, "nints = %d; should be 7\n", nints);
	if (nadds != 0) fprintf(stderr, "nadds = %d; should be 0\n", nadds);
	if (ntypes != 1) fprintf(stderr, "ntypes = %d; should be 1\n", ntypes);
	if (combiner != MPI_COMBINER_INDEXED)
	    fprintf(stderr, "combiner = %s; should be indexed\n",
		    combiner_to_string(combiner));
    }

    if (errs) {
	if (verbose) fprintf(stderr, "aborting after %d errors\n", errs);
	return errs;
    }

    ints = malloc(nints * sizeof(*ints));
    if (nadds) adds = malloc(nadds * sizeof(*adds));
    types = malloc(ntypes * sizeof(*types));

    /* get contents of outer vector */
    err = MPI_Type_get_contents(outer_indexed,
				nints,
				nadds,
				ntypes,
				ints,
				adds,
				types);

    if (ints[0] != i_count) errs++;
    if (ints[1] != i_blocklengths[0]) errs++;
    if (ints[2] != i_blocklengths[1]) errs++;
    if (ints[3] != i_blocklengths[2]) errs++;
    if (ints[4] != i_displacements[0]) errs++;
    if (ints[5] != i_displacements[1]) errs++;
    if (ints[6] != i_displacements[2]) errs++;

    if (verbose) {
	if (ints[0] != i_count) 
	    fprintf(stderr, "count = %d; should be %d\n", ints[0], i_count);
	if (ints[1] != i_blocklengths[0]) 
	    fprintf(stderr, "blocklength[0] = %d; should be %d\n", ints[1], i_blocklengths[0]);
	if (ints[2] != i_blocklengths[1]) 
	    fprintf(stderr, "blocklength[1] = %d; should be %d\n", ints[2], i_blocklengths[1]);
	if (ints[3] != i_blocklengths[2]) 
	    fprintf(stderr, "blocklength[2] = %d; should be %d\n", ints[3], i_blocklengths[2]);
	if (ints[4] != i_displacements[0]) 
	    fprintf(stderr, "displacement[0] = %d; should be %d\n", ints[4], i_displacements[0]);
	if (ints[5] != i_displacements[1]) 
	    fprintf(stderr, "displacement[1] = %d; should be %d\n", ints[5], i_displacements[1]);
	if (ints[6] != i_displacements[2]) 
	    fprintf(stderr, "displacement[2] = %d; should be %d\n", ints[6], i_displacements[2]);
    }

    if (errs) {
	if (verbose) fprintf(stderr, "aborting after %d errors\n", errs);
	return errs;
    }

    inner_vector_copy = types[0];
    free(ints);
    if (nadds) free(adds);
    free(types);

    /* decode inner vector */
    err = MPI_Type_get_envelope(inner_vector_copy,
				&nints,
				&nadds,
				&ntypes,
				&combiner);
    if (err != MPI_SUCCESS) {
	if (verbose) fprintf(stderr, 
			     "error in MPI call; aborting after %d errors\n",
			     errs+1);
	return errs+1;
    }

    if (nints != 3) errs++;
    if (nadds != 0) errs++;
    if (ntypes != 1) errs++;
    if (combiner != MPI_COMBINER_VECTOR) errs++;

    if (verbose) {
	if (nints != 3) fprintf(stderr, 
				"inner vector nints = %d; should be 3\n",
				nints);
	if (nadds != 0) fprintf(stderr, 
				"inner vector nadds = %d; should be 0\n",
				nadds);
	if (ntypes != 1) fprintf(stderr, 
				 "inner vector ntypes = %d; should be 1\n",
				 ntypes);
	if (combiner != MPI_COMBINER_VECTOR)
	    fprintf(stderr, "inner vector combiner = %s; should be vector\n",
		    combiner_to_string(combiner));
    }
    if (errs) {
	if (verbose) fprintf(stderr, "aborting after %d errors\n", errs);
	return errs;
    }

    ints = malloc(nints * sizeof(*ints));
    if (nadds) adds = malloc(nadds * sizeof(*adds));
    types = malloc(ntypes * sizeof(*types));

    err = MPI_Type_get_contents(inner_vector_copy,
				nints,
				nadds,
				ntypes,
				ints,
				adds,
				types);

    if (ints[0] != 2) errs++;
    if (ints[1] != 1) errs++;
    if (ints[2] != 2) errs++;

    if (verbose) {
	if (ints[0] != 2) fprintf(stderr, 
				  "inner vector count = %d; should be 2\n",
				  ints[0]);
	if (ints[1] != 1) fprintf(stderr,
				  "inner vector blocklength = %d; should be 1\n",
				  ints[1]);
	if (ints[2] != 2) fprintf(stderr, 
				  "inner vector stride = %d; should be 2\n",
				  ints[2]);
    }
    if (errs) {
	if (verbose) fprintf(stderr, "aborting after %d errors\n", errs);
	return errs;
    }

    free(ints);
    if (nadds) free(adds);
    free(types);

    MPI_Type_free( &inner_vector_copy );
    MPI_Type_free( &inner_vector );
    MPI_Type_free( &outer_indexed );

    return 0;
}
Example #20
0
/*@
    MPI_File_get_view - Returns the file view

Input Parameters:
. fh - file handle (handle)

Output Parameters:
. disp - displacement (nonnegative integer)
. etype - elementary datatype (handle)
. filetype - filetype (handle)
. datarep - data representation (string)

.N fortran
@*/
int MPI_File_get_view(MPI_File mpi_fh,
		      MPI_Offset *disp,
		      MPI_Datatype *etype,
		      MPI_Datatype *filetype,
		      char *datarep)
{
    int error_code;
    ADIO_File fh;
    static char myname[] = "MPI_FILE_GET_VIEW";
    int i, j, k, combiner;
    MPI_Datatype copy_etype, copy_filetype;


    MPID_CS_ENTER();
    MPIR_Nest_incr();

    fh = MPIO_File_resolve(mpi_fh);

    /* --BEGIN ERROR HANDLING-- */
    MPIO_CHECK_FILE_HANDLE(fh, myname, error_code);

    if (datarep == NULL)
    {
	error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
					  myname, __LINE__, MPI_ERR_ARG, 
					  "**iodatarepnomem", 0);
	error_code = MPIO_Err_return_file(fh, error_code);
	goto fn_exit;
    }
    /* --END ERROR HANDLING-- */

    *disp = fh->disp;
    strcpy(datarep, "native");

    MPI_Type_get_envelope(fh->etype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED) *etype = fh->etype;
    else {
        MPIR_Nest_incr();
        MPI_Type_contiguous(1, fh->etype, &copy_etype);
        MPIR_Nest_decr();

        MPIR_Nest_incr();
        MPI_Type_commit(&copy_etype);
        MPIR_Nest_decr();
        *etype = copy_etype;
    }
    MPI_Type_get_envelope(fh->filetype, &i, &j, &k, &combiner);
    if (combiner == MPI_COMBINER_NAMED) *filetype = fh->filetype;
    else {
        MPI_Type_contiguous(1, fh->filetype, &copy_filetype);

        MPI_Type_commit(&copy_filetype);
        *filetype = copy_filetype;
    }

fn_exit:
    MPIR_Nest_decr();
    MPID_CS_EXIT();

    return MPI_SUCCESS;
}
Example #21
0
int convert_mpi_pvfs2_dtype(MPI_Datatype *mpi_dtype,
			    PVFS_Request *pvfs_dtype)
{
    int num_int = -1, num_addr = -1, num_dtype = -1,
	combiner = -1, i = -1, ret = -1, leaf = -1;
    int *arr_int = NULL;
    MPI_Aint *arr_addr = NULL;
    MPI_Datatype *arr_dtype = NULL;
    PVFS_Request *old_pvfs_dtype = NULL;
    PVFS_Request *old_pvfs_dtype_arr = NULL;
    int arr_count = -1;
    PVFS_size *pvfs_arr_disp = NULL;
    int *pvfs_arr_len = NULL;

    MPI_Type_get_envelope(*mpi_dtype,
			  &num_int,
			  &num_addr,
			  &num_dtype,
			  &combiner);

    /* Depending on type of datatype do the following
     * operations */

    if (combiner == MPI_COMBINER_NAMED)
    {
	convert_named(mpi_dtype, pvfs_dtype, combiner);
	return 1;
    }

    /* Allocate space for the arrays necessary for
     * MPI_Type_get_contents */

    if ((arr_int = ADIOI_Malloc(sizeof(int)*num_int)) == NULL)
    {
	fprintf(stderr, "Failed to allocate array_int\n");
	return -1;
    }
    if ((arr_addr = ADIOI_Malloc(sizeof(MPI_Aint)*num_addr)) == NULL)
    {
	ADIOI_Free(arr_int);
	fprintf(stderr, "Failed to allocate array_addr\n");
	return -1;
    }
    if ((arr_dtype = ADIOI_Malloc(sizeof(MPI_Datatype)*num_dtype)) == NULL)
    {
	ADIOI_Free(arr_int);
	ADIOI_Free(arr_addr);
	fprintf(stderr, "Failed to allocate array_dtypes\n");
	return -1;
    }

    MPI_Type_get_contents(*mpi_dtype,
			  num_int,
			  num_addr,
			  num_dtype,
			  arr_int,
			  arr_addr,
			  arr_dtype);

    /* If it is not a predefined datatype, it is either a non-struct
     * derived datatype or a struct, which is handled separately below */

    if (combiner != MPI_COMBINER_STRUCT)
    {
	if ((old_pvfs_dtype = ADIOI_Malloc(sizeof(PVFS_Request))) == NULL)
	    fprintf(stderr, "convert_mpi_pvfs2_dtype: "
		    "Failed to allocate PVFS_Request\n");
	switch (combiner)
	{
	    case MPI_COMBINER_CONTIGUOUS:
		leaf = convert_mpi_pvfs2_dtype(&arr_dtype[0], old_pvfs_dtype);
		ret = PVFS_Request_contiguous(arr_int[0],
					      *old_pvfs_dtype, pvfs_dtype);
		break;
	    case MPI_COMBINER_VECTOR:
		leaf = convert_mpi_pvfs2_dtype(&arr_dtype[0], old_pvfs_dtype);
		ret = PVFS_Request_vector(arr_int[0], arr_int[1],
					  arr_int[2], *old_pvfs_dtype,
					  pvfs_dtype);
		break;
	    case MPI_COMBINER_HVECTOR:
		leaf = convert_mpi_pvfs2_dtype(&arr_dtype[0], old_pvfs_dtype);
		ret = PVFS_Request_hvector(arr_int[0], arr_int[1],
					   arr_addr[0], *old_pvfs_dtype,
					   pvfs_dtype);
		break;
		/* Both INDEXED and HINDEXED types require PVFS_size
		 * address arrays.  Therefore, we need to copy and
		 * convert the data from MPI_Type_get_contents() into
		 * a PVFS_size buffer */
	    case MPI_COMBINER_INDEXED:
		leaf = convert_mpi_pvfs2_dtype(&arr_dtype[0], old_pvfs_dtype);
		if ((pvfs_arr_disp =
			    ADIOI_Malloc(arr_int[0]*sizeof(PVFS_size))) == 0)
		{
		    fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			    "Failed to allocate pvfs_arr_disp\n");
		}
		for (i = 0; i < arr_int[0]; i++)
		{
		    pvfs_arr_disp[i] =
			(PVFS_size) arr_int[arr_int[0]+1+i];
		}
		ret = PVFS_Request_indexed(arr_int[0], &arr_int[1],
				     pvfs_arr_disp,
				     *old_pvfs_dtype, pvfs_dtype);
		ADIOI_Free(pvfs_arr_disp);
		break;
	    case MPI_COMBINER_HINDEXED:
		leaf = convert_mpi_pvfs2_dtype(&arr_dtype[0], old_pvfs_dtype);
		if ((pvfs_arr_disp =
			    ADIOI_Malloc(arr_int[0]*sizeof(PVFS_size))) == 0)
		{
		    fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			    "Failed to allocate pvfs_arr_disp\n");
		}
		for (i = 0; i < arr_int[0]; i++)
		{
		    pvfs_arr_disp[i] =
			(PVFS_size) arr_addr[i];
		}
		ret = PVFS_Request_hindexed(arr_int[0], &arr_int[1],
				      pvfs_arr_disp,
				      *old_pvfs_dtype, pvfs_dtype);
		ADIOI_Free(pvfs_arr_disp);
		break;
	    case MPI_COMBINER_DUP:
                leaf = convert_mpi_pvfs2_dtype(&arr_dtype[0], old_pvfs_dtype);
		ret = PVFS_Request_contiguous(1,
					      *old_pvfs_dtype, pvfs_dtype);

                break;
	    case MPI_COMBINER_INDEXED_BLOCK:
		/* No native PVFS2 support for this operation currently */
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"INDEXED_BLOCK is unsupported\n");
		break;
	    case MPI_COMBINER_HINDEXED_BLOCK:
		/* No native PVFS2 support for this operation currently */
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"HINDEXED_BLOCK is unsupported\n");
		break;
	    case MPI_COMBINER_HINDEXED_INTEGER:
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"HINDEXED_INTEGER is unsupported\n");
		break;
	    case MPI_COMBINER_STRUCT_INTEGER:
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"STRUCT_INTEGER is unsupported\n");
		break;
	    case MPI_COMBINER_SUBARRAY:
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"SUBARRAY is unsupported\n");
		break;
	    case MPI_COMBINER_DARRAY:
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"DARRAY is unsupported\n");
		break;
	    case MPI_COMBINER_F90_REAL:
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"F90_REAL is unsupported\n");
		break;
	    case MPI_COMBINER_F90_COMPLEX:
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"F90_COMPLEX is unsupported\n");
		break;
	    case MPI_COMBINER_F90_INTEGER:
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"F90_INTEGER is unsupported\n");
		break;
	    case MPI_COMBINER_RESIZED:
		ADIOI_Free(old_pvfs_dtype);
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"RESIZED is unsupported\n");
		break;
	    default:
		break;
	}

	if (ret != 0)
	    fprintf(stderr, "Error in PVFS_Request_* "
		    "for a derived datatype\n");

#ifdef DEBUG_DTYPE
	print_dtype_info(combiner,
			 num_int,
			 num_addr,
			 num_dtype,
			 arr_int,
			 arr_addr,
			 arr_dtype);
#endif

	if (leaf != 1 && combiner != MPI_COMBINER_DUP)
	    MPI_Type_free(&arr_dtype[0]);

	ADIOI_Free(arr_int);
	ADIOI_Free(arr_addr);
	ADIOI_Free(arr_dtype);

	PVFS_Request_free(old_pvfs_dtype);
	ADIOI_Free(old_pvfs_dtype);

	return ret;
    }
    else /* MPI_COMBINER_STRUCT */
    {
	MPI_Aint mpi_lb = -1, mpi_extent = -1;
	PVFS_offset pvfs_lb = -1;
	PVFS_size pvfs_extent = -1;
	int has_lb_ub = 0;

	/* When converting into a PVFS_Request_struct, we can no longer
	 * use MPI_LB and MPI_UB.  Therefore, we have to do the
	 * following.
	 * We simply ignore all the MPI_LB and MPI_UB types and
	 * get the lb and extent and pass it on through a
	 * PVFS resized_req */

	arr_count = 0;
	for (i = 0; i < arr_int[0]; i++)
	{
	    if (arr_dtype[i] != MPI_LB &&
		arr_dtype[i] != MPI_UB)
	    {
		arr_count++;
	    }
	}

	if (arr_int[0] != arr_count)
	{
	    MPI_Type_get_extent(*mpi_dtype, &mpi_lb, &mpi_extent);
	    pvfs_lb = mpi_lb;
	    pvfs_extent = mpi_extent;
	    if ((pvfs_arr_len = ADIOI_Malloc(arr_count*sizeof(int)))
		== NULL)
	    {
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"Failed to allocate pvfs_arr_len\n");
	    }
	    has_lb_ub = 1;
	}

	if ((old_pvfs_dtype_arr
	     = ADIOI_Malloc(arr_count*sizeof(PVFS_Request))) == NULL)
	    fprintf(stderr, "convert_mpi_pvfs2_dtype: "
		    "Failed to allocate PVFS_Requests\n");

	if ((pvfs_arr_disp = ADIOI_Malloc(arr_count*sizeof(PVFS_size)))
	    == NULL)
	{
	    fprintf(stderr, "convert_mpi_pvfs2_dtype: "
		    "Failed to allocate pvfs_arr_disp\n");
	}

	arr_count = 0;
	for (i = 0; i < arr_int[0]; i++)
	{
	    if (arr_dtype[i] != MPI_LB &&
		arr_dtype[i] != MPI_UB)
	    {
		leaf = convert_mpi_pvfs2_dtype(
		    &arr_dtype[i], &old_pvfs_dtype_arr[arr_count]);
		if (leaf != 1)
		    MPI_Type_free(&arr_dtype[i]);
		pvfs_arr_disp[arr_count] =
		    (PVFS_size) arr_addr[i];
		if (has_lb_ub)
		{
		    pvfs_arr_len[arr_count] =
			arr_int[i+1];
		}
		arr_count++;
	    }
	}

	/* If a MPI_UB or MPI_LB did exist, we have to
	 * resize the datatype */
	if (has_lb_ub)
	{
	    PVFS_Request *tmp_pvfs_dtype = NULL;
	    if ((tmp_pvfs_dtype = ADIOI_Malloc(sizeof(PVFS_Request))) == NULL)
		fprintf(stderr, "convert_mpi_pvfs2_dtype: "
			"Failed to allocate PVFS_Request\n");

	    ret = PVFS_Request_struct(arr_count, pvfs_arr_len,
				      pvfs_arr_disp,
				      old_pvfs_dtype_arr, tmp_pvfs_dtype);
	    if (ret != 0)
		fprintf(stderr, "Error in PVFS_Request_struct\n");

	    arr_count = 0;
	    for (i = 0; i < arr_int[0]; i++)
	    {
		if (arr_dtype[i] != MPI_LB &&
		    arr_dtype[i] != MPI_UB)
		{
		    PVFS_Request_free(&old_pvfs_dtype_arr[arr_count]);
		    arr_count++;
		}
	    }

#ifdef DEBUG_DTYPE
	    fprintf(stderr, "STRUCT(WITHOUT %d LB or UB)(%d,[",
		    arr_int[0] - arr_count, arr_count);
	    for (i = 0; i < arr_count; i++)
		fprintf(stderr, "(%d,%Ld) ",
			pvfs_arr_len[i],
			pvfs_arr_disp[i]);
	    fprintf(stderr, "]\n");
	    fprintf(stderr, "RESIZED(LB = %Ld, EXTENT = %Ld)\n",
		    pvfs_lb, pvfs_extent);
#endif
	    ret = PVFS_Request_resized(*tmp_pvfs_dtype,
				       pvfs_lb, pvfs_extent, pvfs_dtype);
	    if (ret != 0)
		fprintf(stderr, "Error in PVFS_Request_resize\n");

	    PVFS_Request_free(tmp_pvfs_dtype);
	    ADIOI_Free(tmp_pvfs_dtype);
	}
	else /* No MPI_LB or MPI_UB datatypes */
	{
	    ret = PVFS_Request_struct(arr_int[0], &arr_int[1],
				      pvfs_arr_disp,
				      old_pvfs_dtype_arr, pvfs_dtype);
	    if (ret != 0)
		fprintf(stderr, "Error in PVFS_Request_struct\n");

	    for (i = 0; i < arr_int[0]; i++)
	    {
		if (arr_dtype[i] != MPI_LB &&
		    arr_dtype[i] != MPI_UB)
		    PVFS_Request_free(&old_pvfs_dtype_arr[i]);
	    }

#ifdef DEBUG_DTYPE
	    print_dtype_info(combiner,
			     num_int,
			     num_addr,
			     num_dtype,
			     arr_int,
			     arr_addr,
			     arr_dtype);
#endif
	}

	ADIOI_Free(arr_int);
	ADIOI_Free(arr_addr);
	ADIOI_Free(arr_dtype);

	ADIOI_Free(old_pvfs_dtype_arr);
	ADIOI_Free(pvfs_arr_disp);
	ADIOI_Free(pvfs_arr_len);

	return ret;
    }

    /* Shouldn't have gotten here */
    fprintf(stderr, "convert_mpi_pvfs2_dtype:  SERIOUS ERROR\n");
    return -1;
}
Example #22
0
/* this used to be implemented in every file system as an fcntl.  It makes
 * deferred open easier if we know ADIO_Fcntl will always need a file to really
 * be open. set_view doesn't modify anything related to the open files.
 */
void ADIO_Set_view(ADIO_File fd, ADIO_Offset disp, MPI_Datatype etype, 
		MPI_Datatype filetype, MPI_Info info,  int *error_code) 
{
	int combiner, i, j, k, err, filetype_is_contig;
	MPI_Datatype copy_etype, copy_filetype;
	ADIOI_Flatlist_node *flat_file;
	/* free copies of old etypes and filetypes and delete flattened 
       version of filetype if necessary */

	MPI_Type_get_envelope(fd->etype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->etype));

	ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (!filetype_is_contig) ADIOI_Delete_flattened(fd->filetype);

	MPI_Type_get_envelope(fd->filetype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->filetype));

	/* set new info */
	ADIO_SetInfo(fd, info, &err);

        /* set new etypes and filetypes */

	MPI_Type_get_envelope(etype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) fd->etype = etype;
	else {
	    MPI_Type_contiguous(1, etype, &copy_etype);
	    MPI_Type_commit(&copy_etype);
	    fd->etype = copy_etype;
	}
	MPI_Type_get_envelope(filetype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) 
	    fd->filetype = filetype;
	else {
	    MPI_Type_contiguous(1, filetype, &copy_filetype);
	    MPI_Type_commit(&copy_filetype);
	    fd->filetype = copy_filetype;
	    ADIOI_Flatten_datatype(fd->filetype);
            /* this function will not flatten the filetype if it turns out
               to be all contiguous. */
	}

	MPI_Type_size(fd->etype, &(fd->etype_size));
	fd->disp = disp;

        /* reset MPI-IO file pointer to point to the first byte that can
           be accessed in this view. */

        ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (filetype_is_contig) fd->fp_ind = disp;
	else {
	    flat_file = ADIOI_Flatlist;
	    while (flat_file->type != fd->filetype) 
		flat_file = flat_file->next;
	    for (i=0; i<flat_file->count; i++) {
		if (flat_file->blocklens[i]) {
		    fd->fp_ind = disp + flat_file->indices[i];
		    break;
		}
	    }
	}
	*error_code = MPI_SUCCESS;
}
Example #23
0
/* vector_of_vectors_test()
 *
 * Builds a vector of a vector of ints.  Assuming an int array of size 9 
 * integers, and treating the array as a 3x3 2D array, this will grab the 
 * corners.
 *
 * MPICH1 fails this test because it converts the vectors into hvectors.
 *
 * Returns the number of errors encountered.
 */
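/* Layout sketch: the inner vector (count=2, blocklength=1, stride=2)
 * selects ints {0,2} and has an extent of 3 ints; the outer vector
 * strides in units of that extent, so its two copies start at ints 0
 * and 6, selecting {0,2,6,8} -- the corners of a row-major 3x3 array. */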
int vector_of_vectors_test(void)
{
    MPI_Datatype inner_vector, inner_vector_copy;
    MPI_Datatype outer_vector;

    int nints, nadds, ntypes, combiner, *ints;
    MPI_Aint *adds = NULL;
    MPI_Datatype *types;

    int err, errs = 0;

    /* set up type */
    err = MPI_Type_vector(2,
			  1,
			  2,
			  MPI_INT,
			  &inner_vector);
    if (err != MPI_SUCCESS) {
	if (verbose) fprintf(stderr, 
			     "error in MPI call; aborting after %d errors\n",
			     errs+1);
	return errs+1;
    }

    err = MPI_Type_vector(2,
			  1,
			  2,
			  inner_vector,
			  &outer_vector);
    if (err != MPI_SUCCESS) {
	if (verbose) fprintf(stderr, 
			     "error in MPI call; aborting after %d errors\n",
			     errs+1);
	return errs+1;
    }

    /* decode outer vector (get envelope, then contents) */
    err = MPI_Type_get_envelope(outer_vector,
				&nints,
				&nadds,
				&ntypes,
				&combiner);
    if (err != MPI_SUCCESS) {
	if (verbose) fprintf(stderr, 
			     "error in MPI call; aborting after %d errors\n",
			     errs+1);
	return errs+1;
    }

    if (nints != 3) errs++;
    if (nadds != 0) errs++;
    if (ntypes != 1) errs++;
    if (combiner != MPI_COMBINER_VECTOR) errs++;

    if (verbose) {
	if (nints != 3) fprintf(stderr, 
				"outer vector nints = %d; should be 3\n",
				nints);
	if (nadds != 0) fprintf(stderr, 
				"outer vector nadds = %d; should be 0\n",
				nadds);
	if (ntypes != 1) fprintf(stderr, 
				 "outer vector ntypes = %d; should be 1\n",
				 ntypes);
	if (combiner != MPI_COMBINER_VECTOR)
	    fprintf(stderr, "outer vector combiner = %s; should be vector\n",
		    combiner_to_string(combiner));
    }
    if (errs) {
	if (verbose) fprintf(stderr, "aborting after %d errors\n", errs);
	return errs;
    }

    ints = malloc(nints * sizeof(*ints));
    if (nadds) adds = malloc(nadds * sizeof(*adds));
    types = malloc(ntypes * sizeof(*types));

    /* get contents of outer vector */
    err = MPI_Type_get_contents(outer_vector,
				nints,
				nadds,
				ntypes,
				ints,
				adds,
				types);

    if (ints[0] != 2) errs++;
    if (ints[1] != 1) errs++;
    if (ints[2] != 2) errs++;

    if (verbose) {
	if (ints[0] != 2) fprintf(stderr, 
				  "outer vector count = %d; should be 2\n",
				  ints[0]);
	if (ints[1] != 1) fprintf(stderr,
				  "outer vector blocklength = %d; should be 1\n",
				  ints[1]);
	if (ints[2] != 2) fprintf(stderr, "outer vector stride = %d; should be 2\n",
				  ints[2]);
    }
    if (errs) {
	if (verbose) fprintf(stderr, "aborting after %d errors\n", errs);
	return errs;
    }

    inner_vector_copy = types[0];
    free(ints);
    if (nadds) free(adds);
    free(types);

    /* decode inner vector */
    err = MPI_Type_get_envelope(inner_vector_copy,
				&nints,
				&nadds,
				&ntypes,
				&combiner);
    if (err != MPI_SUCCESS) {
	if (verbose) fprintf(stderr, 
			     "error in MPI call; aborting after %d errors\n",
			     errs+1);
	return errs+1;
    }

    if (nints != 3) errs++;
    if (nadds != 0) errs++;
    if (ntypes != 1) errs++;
    if (combiner != MPI_COMBINER_VECTOR) errs++;

    if (verbose) {
	if (nints != 3) fprintf(stderr, 
				"inner vector nints = %d; should be 3\n",
				nints);
	if (nadds != 0) fprintf(stderr, 
				"inner vector nadds = %d; should be 0\n",
				nadds);
	if (ntypes != 1) fprintf(stderr, 
				 "inner vector ntypes = %d; should be 1\n",
				 ntypes);
	if (combiner != MPI_COMBINER_VECTOR)
	    fprintf(stderr, "inner vector combiner = %s; should be vector\n",
		    combiner_to_string(combiner));
    }
    if (errs) {
	if (verbose) fprintf(stderr, "aborting after %d errors\n", errs);
	return errs;
    }

    ints = malloc(nints * sizeof(*ints));
    if (nadds) adds = malloc(nadds * sizeof(*adds));
    types = malloc(ntypes * sizeof(*types));

    err = MPI_Type_get_contents(inner_vector_copy,
				nints,
				nadds,
				ntypes,
				ints,
				adds,
				types);

    if (ints[0] != 2) errs++;
    if (ints[1] != 1) errs++;
    if (ints[2] != 2) errs++;

    if (verbose) {
	if (ints[0] != 2) fprintf(stderr, 
				  "inner vector count = %d; should be 2\n",
				  ints[0]);
	if (ints[1] != 1) fprintf(stderr,
				  "inner vector blocklength = %d; should be 1\n",
				  ints[1]);
	if (ints[2] != 2) fprintf(stderr, 
				  "inner vector stride = %d; should be 2\n",
				  ints[2]);
    }
    if (errs) {
	if (verbose) fprintf(stderr, "aborting after %d errors\n", errs);
	return errs;
    }

    free(ints);
    if (nadds) free(adds);
    free(types);

    MPI_Type_free( &inner_vector_copy );
    MPI_Type_free( &inner_vector );
    MPI_Type_free( &outer_vector );

    return 0;
}
Example #24
0
/* optimizable_vector_of_basics_test()
 *
 * Builds a vector of ints.  Count is 10, blocksize is 2, stride is 2, so this
 * is equivalent to a contig of 20.  But remember...we should get back our
 * suboptimal values under MPI-2.
 *
 * Returns the number of errors encountered.
 */
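/* With blocklength equal to stride there are no gaps between blocks, so
 * the layout equals a contiguous run of 10*2 = 20 ints; the point of the
 * test is that MPI_Type_get_contents must still report the original
 * (10,2,2) vector arguments rather than an optimized contig. */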
int optimizable_vector_of_basics_test(void)
{
    MPI_Datatype parent_type;

    int nints, nadds, ntypes, combiner, *ints;
    MPI_Aint *adds = NULL;
    MPI_Datatype *types;

    int err, errs = 0;

    /* set up type */
    err = MPI_Type_vector(10,
			  2,
			  2,
			  MPI_INT,
			  &parent_type);

    /* decode */
    err = MPI_Type_get_envelope(parent_type,
				&nints,
				&nadds,
				&ntypes,
				&combiner);

    if (nints != 3) errs++;
    if (nadds != 0) errs++;
    if (ntypes != 1) errs++;
    if (combiner != MPI_COMBINER_VECTOR) errs++;

    if (verbose) {
        if (nints != 3) fprintf(stderr, "nints = %d; should be 3\n", nints);
	if (nadds != 0) fprintf(stderr, "nadds = %d; should be 0\n", nadds);
	if (ntypes != 1) fprintf(stderr, "ntypes = %d; should be 1\n", ntypes);
	if (combiner != MPI_COMBINER_VECTOR)
	    fprintf(stderr, "combiner = %s; should be vector\n",
		    combiner_to_string(combiner));
    }

    ints = malloc(nints * sizeof(*ints));
    if (nadds) adds = malloc(nadds * sizeof(*adds));
    types = malloc(ntypes *sizeof(*types));

    err = MPI_Type_get_contents(parent_type,
				nints,
				nadds,
				ntypes,
				ints,
				adds,
				types);

    if (ints[0] != 10) errs++;
    if (ints[1] != 2) errs++;
    if (ints[2] != 2) errs++;
    if (types[0] != MPI_INT) errs++;

    if (verbose) {
	if (ints[0] != 10) fprintf(stderr, "count = %d; should be 10\n",
				   ints[0]);
	if (ints[1] != 2) fprintf(stderr, "blocklength = %d; should be 2\n",
				  ints[1]);
	if (ints[2] != 2) fprintf(stderr, "stride = %d; should be 2\n",
				  ints[2]);
	if (types[0] != MPI_INT) fprintf(stderr, "type is not MPI_INT\n");
    }

    free(ints);
    if (nadds) free(adds);
    free(types);

    MPI_Type_free( &parent_type );

    return errs;
}
Example #25
0
void ADIO_Close(ADIO_File fd, int *error_code)
{
    int i, j, k, combiner, myrank, err, is_contig;
    static char myname[] = "ADIO_CLOSE";

    if (fd->async_count) {
	*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
					   myname, __LINE__, MPI_ERR_IO, "**io",
					   "**io %s", strerror(errno));
	return;
    }

    /* because of deferred open, this warrants a bit of explaining.  First, if
     * we've done aggregation,
     * then close the file.  Then, if any process left has done independent
     * i/o, close the file.  Otherwise, we'll skip the fs-specific close and
     * just say everything is a-ok.
     *
     * XXX: is it ok for those processes with a "real" communicator and those
     * with "MPI_COMM_SELF" to both call ADIOI_xxx_Close at the same time ?
     * everyone who ever opened the file will close it. Is order important? Is
     * timing important?
     */
    if (fd->hints->deferred_open && fd->is_agg) {
	    (*(fd->fns->ADIOI_xxx_Close))(fd, error_code);
    } else {
	    if(fd->is_open)  {
		    (*(fd->fns->ADIOI_xxx_Close))(fd, error_code);
	    } else {
		    *error_code = MPI_SUCCESS;
	    }

    }

    if (fd->access_mode & ADIO_DELETE_ON_CLOSE) {
	/* if we are doing aggregation and deferred open, then it's possible
	 * that rank 0 does not have access to the file. make sure only an
	 * aggregator deletes the file.*/
	MPI_Comm_rank(fd->comm, &myrank);
	if (myrank == fd->hints->ranklist[0]) {
		ADIO_Delete(fd->filename, &err);
	}
	MPI_Barrier(fd->comm);
    }

    if (fd->fortran_handle != -1) {
	ADIOI_Ftable[fd->fortran_handle] = MPI_FILE_NULL;
    }

    if (fd->hints && fd->hints->ranklist) ADIOI_Free(fd->hints->ranklist);
    if (fd->hints && fd->hints->cb_config_list) ADIOI_Free(fd->hints->cb_config_list);

    /* This BlueGene platform-specific free must be done in the common code
     * because the malloc's for these hint data structures are done at the
     * scope of ADIO_Open within the SetInfo call (ADIOI_GPFS_SetInfo which
     * calls ADIOI_BG_gen_agg_ranklist).  They cannot be done in the
     * ADIOI_GPFS_Close because of the file creation case where the
     * ADIOI_GPFS_Close and re-open via ADIOI_GPFS_Open are done which results
     * in a double-free - ADIOI_GPFS_Open does not redo the SetInfo...  */
#ifdef BGQPLATFORM
    if (fd->hints && fd->hints->fs_hints.bg.bridgelist)
      ADIOI_Free(fd->hints->fs_hints.bg.bridgelist);
    if (fd->hints && fd->hints->fs_hints.bg.bridgelistnum)
      ADIOI_Free(fd->hints->fs_hints.bg.bridgelistnum);
#endif

    /* Persistent File Realms */
    if (fd->hints->cb_pfr == ADIOI_HINT_ENABLE) {
	/* AAR, FSIZE, and User provided uniform File realms */
	if (1) {
	    ADIOI_Delete_flattened (fd->file_realm_types[0]);
	    MPI_Type_free (&fd->file_realm_types[0]);
	}
	else {
	    for (i=0; i<fd->hints->cb_nodes; i++) {
		ADIOI_Datatype_iscontig(fd->file_realm_types[i], &is_contig);
		if (!is_contig)
		    ADIOI_Delete_flattened(fd->file_realm_types[i]);
		MPI_Type_free (&fd->file_realm_types[i]);
	    }
	}
	ADIOI_Free(fd->file_realm_st_offs);
	ADIOI_Free(fd->file_realm_types);
    }
    if (fd->hints) ADIOI_Free(fd->hints);



    MPI_Comm_free(&(fd->comm));
    ADIOI_Free(fd->filename);

    MPI_Type_get_envelope(fd->etype, &i, &j, &k, &combiner);
    if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->etype));

    ADIOI_Datatype_iscontig(fd->filetype, &is_contig);
    if (!is_contig) ADIOI_Delete_flattened(fd->filetype);

    MPI_Type_get_envelope(fd->filetype, &i, &j, &k, &combiner);
    if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->filetype));

    MPI_Info_free(&(fd->info));

    if (fd->io_buf != NULL) ADIOI_Free(fd->io_buf);

    /* memory for fd is freed in MPI_File_close */
}
Example #26
0
/* struct_of_basics_test(void)
 *
 * There's nothing simple about structs :).  Although this is an easy one.
 *
 * Returns number of errors encountered.
 *
 * NOT TESTED.
 */
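/* For an MPI_COMBINER_STRUCT type the envelope reports
 * nints = count + 1 (the count plus count blocklengths),
 * nadds = count (the displacements), and ntypes = count, so with
 * s_count = 3 the checks below expect 4, 3, and 3 respectively. */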
int struct_of_basics_test(void)
{
    MPI_Datatype parent_type;
    int s_count = 3, s_blocklengths[3] = { 3, 2, 1 };
    MPI_Aint s_displacements[3] = { 10, 20, 30 };
    MPI_Datatype s_types[3] = { MPI_CHAR, MPI_INT, MPI_FLOAT };

    int nints, nadds, ntypes, combiner, *ints;
    MPI_Aint *adds = NULL;
    MPI_Datatype *types;

    int err, errs = 0;

    /* set up type */
    err = MPI_Type_create_struct(s_count,
				 s_blocklengths,
				 s_displacements,
				 s_types,
				 &parent_type);

    /* decode */
    err = MPI_Type_get_envelope(parent_type,
				&nints,
				&nadds,
				&ntypes,
				&combiner);

    if (nints != 4) errs++;
    if (nadds != 3) errs++;
    if (ntypes != 3) errs++;
    if (combiner != MPI_COMBINER_STRUCT) errs++;

    if (verbose) {
        if (nints != 4) fprintf(stderr, "nints = %d; should be 4\n", nints);
	if (nadds != 3) fprintf(stderr, "nadds = %d; should be 3\n", nadds);
	if (ntypes != 3) fprintf(stderr, "ntypes = %d; should be 3\n", ntypes);
	if (combiner != MPI_COMBINER_STRUCT)
	    fprintf(stderr, "combiner = %s; should be struct\n",
		    combiner_to_string(combiner));
    }

    ints = malloc(nints * sizeof(*ints));
    adds = malloc(nadds * sizeof(*adds));
    types = malloc(ntypes *sizeof(*types));

    err = MPI_Type_get_contents(parent_type,
				nints,
				nadds,
				ntypes,
				ints,
				adds,
				types);

    if (ints[0] != s_count) errs++;
    if (ints[1] != s_blocklengths[0]) errs++;
    if (ints[2] != s_blocklengths[1]) errs++;
    if (ints[3] != s_blocklengths[2]) errs++;
    if (adds[0] != s_displacements[0]) errs++;
    if (adds[1] != s_displacements[1]) errs++;
    if (adds[2] != s_displacements[2]) errs++;
    if (types[0] != s_types[0]) errs++;
    if (types[1] != s_types[1]) errs++;
    if (types[2] != s_types[2]) errs++;

    if (verbose) {
	if (ints[0] != s_count) 
	    fprintf(stderr, "count = %d; should be %d\n", ints[0], s_count);
	if (ints[1] != s_blocklengths[0])
	    fprintf(stderr, "blocklength[0] = %d; should be %d\n", ints[1], s_blocklengths[0]);
	if (ints[2] != s_blocklengths[1]) 
	    fprintf(stderr, "blocklength[1] = %d; should be %d\n", ints[2], s_blocklengths[1]);
	if (ints[3] != s_blocklengths[2]) 
	    fprintf(stderr, "blocklength[2] = %d; should be %d\n", ints[3], s_blocklengths[2]);
	if (adds[0] != s_displacements[0]) 
	    fprintf(stderr, "displacement[0] = %ld; should be %ld\n",
		    (long) adds[0], (long) s_displacements[0]);
	if (adds[1] != s_displacements[1]) 
	    fprintf(stderr, "displacement[1] = %ld; should be %ld\n",
		    (long) adds[1], (long) s_displacements[1]);
	if (adds[2] != s_displacements[2]) 
	    fprintf(stderr, "displacement[2] = %ld; should be %ld\n",
		    (long) adds[2], (long) s_displacements[2]);
	if (types[0] != s_types[0]) 
	    fprintf(stderr, "type[0] does not match\n");
	if (types[1] != s_types[1]) 
	    fprintf(stderr, "type[1] does not match\n");
	if (types[2] != s_types[2]) 
	    fprintf(stderr, "type[2] does not match\n");
    }

    free(ints);
    free(adds);
    free(types);

    MPI_Type_free( &parent_type );

    return errs;
}
Example #27
0
void ADIO_Close(ADIO_File fd, int *error_code)
{
    int i, j, k, combiner, myrank, err, is_contig;
    static char myname[] = "ADIO_CLOSE";

    if (fd->async_count) {
	*error_code = MPIO_Err_create_code(MPI_SUCCESS, MPIR_ERR_RECOVERABLE,
					   myname, __LINE__, MPI_ERR_IO, "**io",
					   "**io %s", strerror(errno));
	return;
    }

    /* because of deferred open, this warrants a bit of explaining.  First, if
     * we've done aggregation (fd->agg_comm has a non-null communicator),
     * then close the file.  Then, if any process left has done independent
     * i/o, close the file.  Otherwise, we'll skip the fs-specific close and
     * just say everything is a-ok.
     *
     * XXX: is it ok for those processes with a "real" communicator and those
     * with "MPI_COMM_SELF" to both call ADIOI_xxx_Close at the same time ?
     * everyone who ever opened the file will close it. Is order important? Is
     * timing important?
     */
    if (fd->agg_comm != MPI_COMM_NULL) {
	    (*(fd->fns->ADIOI_xxx_Close))(fd, error_code);
    } else {
	    if(fd->is_open)  {
		    (*(fd->fns->ADIOI_xxx_Close))(fd, error_code);
	    } else {
		    *error_code = MPI_SUCCESS;
	    }
	    
    }

    if (fd->access_mode & ADIO_DELETE_ON_CLOSE) {
	/* if we are doing aggregation and deferred open, then it's possible
	 * that rank 0 does not have access to the file. make sure only an
	 * aggregator deletes the file.*/
	MPI_Comm_rank(fd->comm, &myrank);
	if (myrank == fd->hints->ranklist[0]) {
		ADIO_Delete(fd->filename, &err);
	}
	MPI_Barrier(fd->comm);
    }

    if (fd->fortran_handle != -1) {
	ADIOI_Ftable[fd->fortran_handle] = MPI_FILE_NULL;
    }

    ADIOI_Free(fd->hints->ranklist);
    ADIOI_Free(fd->hints->cb_config_list);
    ADIOI_Free(fd->hints);
    MPI_Comm_free(&(fd->comm));
    /* deferred open: if we created an aggregator communicator, free it */
    if (fd->agg_comm != MPI_COMM_NULL) {
	    MPI_Comm_free(&(fd->agg_comm));
    }
    ADIOI_Free(fd->filename); 

    MPI_Type_get_envelope(fd->etype, &i, &j, &k, &combiner);
    if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->etype));

    ADIOI_Datatype_iscontig(fd->filetype, &is_contig);
    if (!is_contig) ADIOI_Delete_flattened(fd->filetype);

    MPI_Type_get_envelope(fd->filetype, &i, &j, &k, &combiner);
    if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->filetype));

    MPI_Info_free(&(fd->info));

    /* memory for fd is freed in MPI_File_close */
}
Example #28
0
void ADIOI_XFS_Fcntl(ADIO_File fd, int flag, ADIO_Fcntl_t *fcntl_struct, int *error_code)
{
    MPI_Datatype copy_etype, copy_filetype;
    int combiner, i, j, k, filetype_is_contig, err;
    ADIOI_Flatlist_node *flat_file;
    struct flock64 fl;
#ifndef PRINT_ERR_MSG
    static char myname[] = "ADIOI_XFS_FCNTL";
#endif

    switch(flag) {
    case ADIO_FCNTL_SET_VIEW:
        /* free copies of old etypes and filetypes and delete flattened 
           version of filetype if necessary */

	MPI_Type_get_envelope(fd->etype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->etype));

	ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (!filetype_is_contig) ADIOI_Delete_flattened(fd->filetype);

	MPI_Type_get_envelope(fd->filetype, &i, &j, &k, &combiner);
	if (combiner != MPI_COMBINER_NAMED) MPI_Type_free(&(fd->filetype));

	/* set new info */
	ADIO_SetInfo(fd, fcntl_struct->info, &err);

        /* set new etypes and filetypes */

	MPI_Type_get_envelope(fcntl_struct->etype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) fd->etype = fcntl_struct->etype;
	else {
	    MPI_Type_contiguous(1, fcntl_struct->etype, &copy_etype);
	    MPI_Type_commit(&copy_etype);
	    fd->etype = copy_etype;
	}
	MPI_Type_get_envelope(fcntl_struct->filetype, &i, &j, &k, &combiner);
	if (combiner == MPI_COMBINER_NAMED) 
	    fd->filetype = fcntl_struct->filetype;
	else {
	    MPI_Type_contiguous(1, fcntl_struct->filetype, &copy_filetype);
	    MPI_Type_commit(&copy_filetype);
	    fd->filetype = copy_filetype;
	    ADIOI_Flatten_datatype(fd->filetype);
            /* this function will not flatten the filetype if it turns out
               to be all contiguous. */
	}

	MPI_Type_size(fd->etype, &(fd->etype_size));
	fd->disp = fcntl_struct->disp;

        /* reset MPI-IO file pointer to point to the first byte that can
           be accessed in this view. */

        ADIOI_Datatype_iscontig(fd->filetype, &filetype_is_contig);
	if (filetype_is_contig) fd->fp_ind = fcntl_struct->disp;
	else {
	    flat_file = ADIOI_Flatlist;
	    while (flat_file->type != fd->filetype) 
		flat_file = flat_file->next;
	    for (i=0; i<flat_file->count; i++) {
		if (flat_file->blocklens[i]) {
		    fd->fp_ind = fcntl_struct->disp + flat_file->indices[i];
		    break;
		}
	    }
	}
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_GET_FSIZE:
	fcntl_struct->fsize = lseek64(fd->fd_sys, 0, SEEK_END);
#ifdef PRINT_ERR_MSG
	*error_code = (fcntl_struct->fsize == -1) ? MPI_ERR_UNKNOWN : MPI_SUCCESS;
#else
	if (fcntl_struct->fsize == -1) {
	    *error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	    ADIOI_Error(fd, *error_code, myname);	    
	}
	else *error_code = MPI_SUCCESS;
#endif
	break;

    case ADIO_FCNTL_SET_DISKSPACE:
	i = 0;
	fl.l_start = 0;
	fl.l_whence = SEEK_SET;
	fl.l_len = fcntl_struct->diskspace;
	err = fcntl(fd->fd_sys, F_RESVSP64, &fl);
	if (err) i = 1;
	if (fcntl_struct->diskspace > lseek64(fd->fd_sys, 0, SEEK_END)) {
	    /* also need to set the file size */
	    err = ftruncate64(fd->fd_sys, fcntl_struct->diskspace);
	    if (err) i = 1;
	}
#ifdef PRINT_ERR_MSG
	*error_code = (i == 0) ? MPI_SUCCESS : MPI_ERR_UNKNOWN;
#else
	if (i == 1) {
	    *error_code = MPIR_Err_setmsg(MPI_ERR_IO, MPIR_ADIO_ERROR,
			      myname, "I/O Error", "%s", strerror(errno));
	    ADIOI_Error(fd, *error_code, myname);	    
	}
	else *error_code = MPI_SUCCESS;
#endif
	break;

    case ADIO_FCNTL_SET_IOMODE:
        /* for implementing PFS I/O modes. will not occur in MPI-IO
           implementation.*/
	if (fd->iomode != fcntl_struct->iomode) {
	    fd->iomode = fcntl_struct->iomode;
	    MPI_Barrier(MPI_COMM_WORLD);
	}
	*error_code = MPI_SUCCESS;
	break;

    case ADIO_FCNTL_SET_ATOMICITY:
	fd->atomicity = (fcntl_struct->atomicity == 0) ? 0 : 1;
	*error_code = MPI_SUCCESS;
	break;

    default:
	FPRINTF(stderr, "Unknown flag passed to ADIOI_XFS_Fcntl\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
}
Example #29
0
/*
 * Synopsis
 *
 * This function inverts MPIX_Type_contiguous_x, i.e. it provides
 * the original arguments for that call so that we know how many
 * built-in types are in the user-defined datatype.
 *
 * This function is primarily used inside of BigMPI and does not
 * correspond to an MPI function, so we avoid the use of the
 * MPIX namespace.
 *
 * int BigMPI_Decode_contiguous_x(MPI_Datatype   intype,
 *                                MPI_Count    * count,
 *                                MPI_Datatype * basetype)
 *
 *  Input Parameters
 *
 *   intype            input datatype (handle)
 *
 * Output Parameters
 *
 *   count             replication count (nonnegative integer)
 *   basetype          base datatype (handle)
 *
 */
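/* Encoding assumed here (it matches the asserts below): for
 * count = c * bigmpi_int_max + r, MPIX_Type_contiguous_x builds a struct
 * of two blocks at displacements {0, remdisp}: a vector of c chunks of
 * bigmpi_int_max basetypes and a contiguous remainder of r basetypes
 * (or, with BIGMPI_AVOID_TYPE_CREATE_STRUCT, a single vector whose
 * blocklength equals its stride).  Decoding reverses those calls. */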
int BigMPI_Decode_contiguous_x(MPI_Datatype intype, MPI_Count * count, MPI_Datatype * basetype)
{
    int nint, nadd, ndts, combiner;

    /* Step 1: Decode the type_create_struct call. */

    MPI_Type_get_envelope(intype, &nint, &nadd, &ndts, &combiner);
    assert(combiner==MPI_COMBINER_STRUCT || combiner==MPI_COMBINER_VECTOR);
#ifdef BIGMPI_AVOID_TYPE_CREATE_STRUCT
    if (combiner==MPI_COMBINER_VECTOR) {
        assert(nint==3);
        assert(nadd==0);
        assert(ndts==1);

        int cbs[3]; /* {count,blocklength,stride} */
        MPI_Datatype vbasetype[1];
        MPI_Type_get_contents(intype, 3, 0, 1, cbs, NULL, vbasetype);
        MPI_Count a = cbs[0];   /* count */
        MPI_Count b = cbs[1];   /* blocklength */
        assert(cbs[1]==cbs[2]); /* blocklength==stride */

        *count = a*b;
        *basetype = vbasetype[0];
        return MPI_SUCCESS;
    }
#else
    assert(combiner==MPI_COMBINER_STRUCT);
#endif
    assert(nint==3);
    assert(nadd==2);
    assert(ndts==2);

    int cnbls[3]; /* {count, blocklengths[]} */
    MPI_Aint displacements[2]; /* {0,remdisp} */
    MPI_Datatype types[2]; /* {chunks,remainder} */
    MPI_Type_get_contents(intype, 3, 2, 2, cnbls, displacements, types);
    assert(cnbls[0]==2);
    assert(cnbls[1]==1);
    assert(cnbls[2]==1);
    assert(displacements[0]==0);

    /* Step 2: Decode the type_vector call. */

    MPI_Type_get_envelope(types[0], &nint, &nadd, &ndts, &combiner);
    assert(combiner==MPI_COMBINER_VECTOR);
    assert(nint==3);
    assert(nadd==0);
    assert(ndts==1);

    int cbs[3]; /* {count,blocklength,stride} */
    MPI_Datatype vbasetype[1];
    MPI_Type_get_contents(types[0], 3, 0, 1, cbs, NULL, vbasetype);
    assert(/* blocklength = */ cbs[1]==bigmpi_int_max);
    assert(/* stride = */ cbs[2]==bigmpi_int_max);

    /* chunk count - see above */
    MPI_Count c = cbs[0];

    /* Step 3: Decode the type_contiguous call. */

    MPI_Type_get_envelope(types[1], &nint, &nadd, &ndts, &combiner);
    assert(combiner==MPI_COMBINER_CONTIGUOUS);
    assert(nint==1);
    assert(nadd==0);
    assert(ndts==1);

    int ccc[1]; /* {count} */
    MPI_Datatype cbasetype[1];
    MPI_Type_get_contents(types[1], 1, 0, 1, ccc, NULL, cbasetype);

    /* remainder - see above */
    MPI_Count r = ccc[0];

    /* The underlying type of the vector and contig types must match. */
    assert(cbasetype[0]==vbasetype[0]);
    *basetype = cbasetype[0];

    /* This should not overflow because everything is already MPI_Count type. */
    *count = c*bigmpi_int_max+r;

    return MPI_SUCCESS;
}
Example #30
0
/* indexed_of_basics_test(void)
 *
 * Simple indexed type.
 *
 * Returns number of errors encountered.
 */
int indexed_of_basics_test(void)
{
    MPI_Datatype parent_type;
    int s_count = 3, s_blocklengths[3] = { 3, 2, 1 };
    int s_displacements[3] = { 10, 20, 30 };

    int nints, nadds, ntypes, combiner, *ints;
    MPI_Aint *adds = NULL;
    MPI_Datatype *types;

    int err, errs = 0;

    /* set up type */
    err = MPI_Type_indexed(s_count,
			   s_blocklengths,
			   s_displacements,
			   MPI_INT,
			   &parent_type);

    /* decode */
    err = MPI_Type_get_envelope(parent_type,
				&nints,
				&nadds,
				&ntypes,
				&combiner);

    if (nints != 7) errs++;
    if (nadds != 0) errs++;
    if (ntypes != 1) errs++;
    if (combiner != MPI_COMBINER_INDEXED) errs++;

    if (verbose) {
        if (nints != 7) fprintf(stderr, "nints = %d; should be 7\n", nints);
	if (nadds != 0) fprintf(stderr, "nadds = %d; should be 0\n", nadds);
	if (ntypes != 1) fprintf(stderr, "ntypes = %d; should be 1\n", ntypes);
	if (combiner != MPI_COMBINER_INDEXED)
	    fprintf(stderr, "combiner = %s; should be indexed\n",
		    combiner_to_string(combiner));
    }

    ints = malloc(nints * sizeof(*ints));
    if (nadds) adds = malloc(nadds * sizeof(*adds));
    types = malloc(ntypes *sizeof(*types));

    err = MPI_Type_get_contents(parent_type,
				nints,
				nadds,
				ntypes,
				ints,
				adds,
				types);

    if (ints[0] != s_count) errs++;
    if (ints[1] != s_blocklengths[0]) errs++;
    if (ints[2] != s_blocklengths[1]) errs++;
    if (ints[3] != s_blocklengths[2]) errs++;
    if (ints[4] != s_displacements[0]) errs++;
    if (ints[5] != s_displacements[1]) errs++;
    if (ints[6] != s_displacements[2]) errs++;
    if (types[0] != MPI_INT) errs++;

    if (verbose) {
	if (ints[0] != s_count) 
	    fprintf(stderr, "count = %d; should be %d\n", ints[0], s_count);
	if (ints[1] != s_blocklengths[0]) 
	    fprintf(stderr, "blocklength[0] = %d; should be %d\n", ints[1], s_blocklengths[0]);
	if (ints[2] != s_blocklengths[1]) 
	    fprintf(stderr, "blocklength[1] = %d; should be %d\n", ints[2], s_blocklengths[1]);
	if (ints[3] != s_blocklengths[2]) 
	    fprintf(stderr, "blocklength[2] = %d; should be %d\n", ints[3], s_blocklengths[2]);
	if (ints[4] != s_displacements[0]) 
	    fprintf(stderr, "displacement[0] = %d; should be %d\n", ints[4], s_displacements[0]);
	if (ints[5] != s_displacements[1]) 
	    fprintf(stderr, "displacement[1] = %d; should be %d\n", ints[5], s_displacements[1]);
	if (ints[6] != s_displacements[2]) 
	    fprintf(stderr, "displacement[2] = %d; should be %d\n", ints[6], s_displacements[2]);
	if (types[0] != MPI_INT) fprintf(stderr, "type[0] does not match\n");
    }

    free(ints);
    if (nadds) free(adds);
    free(types);

    MPI_Type_free( &parent_type );
    return errs;
}