Example #1
/*-------------------------------------------------------------------------
 * Function:	H5MF_alloc_overflow
 *
 * Purpose:	Checks if an allocation of file space would cause an overflow.
 *          F is the file whose space is being allocated, SIZE is the amount
 *          of space needed.
 *
 * Return:	0 if no overflow would result
 *          1 if overflow would result (the allocation should not be allowed)
 *
 * Programmer:	James Laird
 *				Nat Furrer
 *              Tuesday, June 1, 2004
 *
 * Modifications:
 *-------------------------------------------------------------------------
 */
hbool_t
H5MF_alloc_overflow(H5F_t *f, hsize_t size)
{
    hsize_t space_needed;       /* Accumulator variable */
    size_t c;                   /* Local index variable */
    hbool_t ret_value;           /* Return value */

    FUNC_ENTER_NOAPI_NOFUNC(H5MF_alloc_overflow)

    /* Start with the current end of the file's address. */
    space_needed = (hsize_t)H5F_get_eoa(f);
    HDassert(H5F_addr_defined(space_needed));

    /* Subtract the file's base address to get the actual amount of
     * space being used:
     * (end of allocated space - beginning of allocated space)
     */
    HDassert(H5F_BASE_ADDR(f) < space_needed);
    space_needed -= (hsize_t)H5F_BASE_ADDR(f);

    /* Add the amount of space requested for this allocation */
    space_needed += size;

    /* Also add space that is "reserved" for data to be flushed
     * to disk (e.g., for object headers and the heap).
     * This is the total amount of file space that will be
     * allocated.
     */
    space_needed += f->shared->lf->reserved_alloc;

    /* Ensure that this final number is less than the file's
     * address space.  We do this by shifting in multiples
     * of 16 bits because some systems will do nothing if
     * we shift by the size of a long long (64 bits) all at
     * once (<cough> Linux <cough>).  Thus, we break one shift
     * into several smaller shifts.
     */
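    /* (For example, with 8-byte addresses the loop below performs four
     * 16-bit shifts, 64 bits in total; with 4-byte addresses it performs
     * two, shifting out everything below 2^32.)
     */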
    for(c=0; c < H5F_SIZEOF_ADDR(f); c += 2)
        space_needed = space_needed >> 16;

    if(space_needed != 0)
        ret_value=TRUE;
    else
        ret_value=FALSE;

    FUNC_LEAVE_NOAPI(ret_value)
}
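The overflow test above is easier to follow with concrete numbers. Below is a hedged, standalone rendering of the same arithmetic in plain C99; the function name would_overflow and the sample values are illustrative only and are not part of the HDF5 source.

/* Standalone sketch of the overflow test in H5MF_alloc_overflow(), assuming a
 * caller that already knows the end-of-allocation address, the base address,
 * the reserved space, and the file's address size in bytes.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int
would_overflow(uint64_t eoa, uint64_t base_addr, uint64_t size,
    uint64_t reserved, size_t sizeof_addr)
{
    /* Total space the file would occupy after the allocation */
    uint64_t space_needed = (eoa - base_addr) + size + reserved;
    size_t c;

    /* Shift right in 16-bit steps so the shift count never equals the
     * operand width (shifting a 64-bit value by 64 is undefined in C).
     */
    for(c = 0; c < sizeof_addr; c += 2)
        space_needed >>= 16;

    /* Any bits left above the addressable range mean the request overflows */
    return space_needed != 0;
}

int
main(void)
{
    /* With 4-byte addresses, a request that crosses 2^32 overflows... */
    printf("%d\n", would_overflow(0xFFFF0000ULL, 0, 0x20000ULL, 0, 4)); /* prints 1 */
    /* ...while a small request well below 2^32 does not */
    printf("%d\n", would_overflow(0x1000ULL, 0, 0x2000ULL, 0, 4));      /* prints 0 */
    return 0;
}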
Example #2
/*-------------------------------------------------------------------------
 * Function:	H5MF_extend
 *
 * Purpose:	Extend a block in the file.
 *
 * Return:	Success:	TRUE(1)/FALSE(0)
 *
 * 		Failure:	FAIL
 *
 * Programmer:	Quincey Koziol
 *              Saturday, June 12, 2004
 *
 * Modifications:
 *
 *-------------------------------------------------------------------------
 */
htri_t
H5MF_extend(H5F_t *f, H5FD_mem_t type, haddr_t addr, hsize_t size, hsize_t extra_requested)
{
    htri_t	ret_value;      /* Return value */

    FUNC_ENTER_NOAPI(H5MF_extend, FAIL);

    /* Make sure there is enough addressable space to satisfy the request */
    if ( H5MF_alloc_overflow(f, extra_requested) )
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate new file memory: out of address space");

    /* Convert old relative address to absolute address */
    addr += H5F_BASE_ADDR(f);

    /* Pass the request down to the virtual file layer */
    if((ret_value = H5FD_extend(f->shared->lf, type, addr, size, extra_requested)) < 0)
        HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "unable to allocate new file memory");

done:
    FUNC_LEAVE_NOAPI(ret_value);
} /* end H5MF_extend() */
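For orientation, here is a minimal caller sketch for H5MF_extend(). It assumes the usual HDF5 private headers are in scope; the wrapper name example_try_extend and its parameters are hypothetical and not part of the library.

/* Hypothetical caller sketch (not library source): try to grow an existing
 * raw-data block in place and pass the tri-state result back to the caller.
 */
static htri_t
example_try_extend(H5F_t *f, haddr_t blk_addr, hsize_t blk_size, hsize_t extra)
{
    htri_t extended;        /* TRUE/FALSE/FAIL from H5MF_extend() */

    /* Ask the file-memory layer whether the block can grow where it sits */
    extended = H5MF_extend(f, H5FD_MEM_DRAW, blk_addr, blk_size, extra);

    /* TRUE:  the block now covers blk_size + extra bytes.
     * FALSE: the space after the block is taken; the caller must allocate a
     *        new block and move the data itself.
     * FAIL:  the address-space check or the file driver reported an error.
     */
    return extended;
}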
Example #3
/*-------------------------------------------------------------------------
 * Function:  H5D_mpio_spaces_xfer
 *
 * Purpose:  Use MPI-IO to transfer data efficiently
 *    directly between app buffer and file.
 *
 * Return:  non-negative on success, negative on failure.
 *
 * Programmer:  rky 980813
 *
 * Notes:
 *      For collective data transfer only, since this routine eventually calls
 *      H5FD_mpio_setup, which in turn calls MPI_File_set_view in
 *      H5FD_mpio_read or H5FD_mpio_write.  MPI_File_set_view is a collective
 *      call, so letting independent data transfers use this route would
 *      result in hanging.
 *
 *      The preconditions for calling this routine are located in the
 *      H5S_mpio_opt_possible() routine, which determines whether this routine
 *      can be called for a given dataset transfer.
 *
 * Modifications:
 *  rky 980918
 *  Added must_convert parameter to let caller know we can't optimize
 *  the xfer.
 *
 *  Albert Cheng, 001123
 *  Include the MPI_type freeing as part of cleanup code.
 *
 *      QAK - 2002/04/02
 *      Removed the must_convert parameter and moved the preconditions to the
 *      H5S_mpio_opt_possible() routine.
 *
 *      QAK - 2002/06/17
 *      Removed the 'disp' parameter from the H5FD_mpio_setup routine and used
 *      the address of the dataset in MPI_File_set_view() calls, as necessary.
 *
 *      QAK - 2002/06/18
 *      Removed the 'dc_plist' parameter, since it was not used.  Also,
 *      switched to getting the 'extra_offset' setting for each selection.
 *
 *-------------------------------------------------------------------------
 */
static herr_t
H5D_mpio_spaces_xfer(H5D_io_info_t *io_info, size_t elmt_size,
    const H5S_t *file_space, const H5S_t *mem_space,
    void *_buf /*out*/, hbool_t do_write )
{
    haddr_t      addr;                  /* Address of dataset (or selection) within file */
    size_t       mpi_buf_count, mpi_file_count;    /* Number of "objects" to transfer */
    hsize_t      mpi_buf_offset, mpi_file_offset;  /* Offset within dataset where selection (i.e., MPI type) begins */
    MPI_Datatype mpi_buf_type, mpi_file_type;      /* MPI types for buffer (memory) and file */
    hbool_t      mbt_is_derived = 0;    /* Whether the buffer (memory) type is derived and needs to be freed */
    hbool_t      mft_is_derived = 0;    /* Whether the file type is derived and needs to be freed */
    hbool_t      plist_is_setup = 0;    /* Whether the dxpl has been customized */
    uint8_t     *buf = (uint8_t *)_buf; /* Alias for pointer arithmetic */
    int          mpi_code;              /* MPI return code */
    herr_t       ret_value = SUCCEED;   /* Return value */

    FUNC_ENTER_NOAPI_NOINIT(H5D_mpio_spaces_xfer);

    /* Check args */
    assert (io_info);
    assert (io_info->dset);
    assert (file_space);
    assert (mem_space);
    assert (buf);
    assert (IS_H5FD_MPIO(io_info->dset->ent.file));
    /* Make certain we have the correct type of property list */
    assert(TRUE==H5P_isa_class(io_info->dxpl_id,H5P_DATASET_XFER));

    /* create the MPI buffer type */
    if(H5S_mpio_space_type(mem_space, elmt_size,
             /* out: */
             &mpi_buf_type,
             &mpi_buf_count,
             &mpi_buf_offset,
             &mbt_is_derived) < 0)
        HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI buf type");

    /* create the MPI file type */
    if(H5S_mpio_space_type(file_space, elmt_size,
             /* out: */
             &mpi_file_type,
             &mpi_file_count,
             &mpi_file_offset,
             &mft_is_derived) < 0)
        HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "couldn't create MPI file type");

    /* Get the base address of the contiguous dataset or the chunk */
    if(io_info->dset->shared->layout.type == H5D_CONTIGUOUS)
        addr = H5D_contig_get_addr(io_info->dset) + mpi_file_offset;
    else {
        haddr_t   chunk_addr; /* for collective chunk IO */

        assert(io_info->dset->shared->layout.type == H5D_CHUNKED);
        chunk_addr = H5D_istore_get_addr(io_info, NULL);
        addr = H5F_BASE_ADDR(io_info->dset->ent.file) + chunk_addr + mpi_file_offset;
    }

    /*
     * Pass buf type, file type to the file driver. Request an MPI type
     * transfer (instead of an elementary byteblock transfer).
     */
    if(H5FD_mpi_setup_collective(io_info->dxpl_id, mpi_buf_type, mpi_file_type) < 0)
        HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O properties");
    plist_is_setup = 1;

    /* Adjust the buffer pointer to the beginning of the selection */
    buf += mpi_buf_offset;

    /* transfer the data */
    if(do_write) {
        if(H5F_block_write(io_info->dset->ent.file, H5FD_MEM_DRAW, addr, mpi_buf_count, io_info->dxpl_id, buf) < 0)
            HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "MPI write failed");
    } else {
        if(H5F_block_read(io_info->dset->ent.file, H5FD_MEM_DRAW, addr, mpi_buf_count, io_info->dxpl_id, buf) < 0)
            HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "MPI read failed");
    }

done:
    /* Reset the dxpl settings */
    if(plist_is_setup) {
        if(H5FD_mpi_teardown_collective(io_info->dxpl_id) < 0)
            HDONE_ERROR(H5E_DATASPACE, H5E_CANTFREE, FAIL, "unable to reset dxpl values");
    } /* end if */

    /* free the MPI buf and file types */
    if(mbt_is_derived) {
        if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&mpi_buf_type)))
            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
    } /* end if */
    if(mft_is_derived) {
        if(MPI_SUCCESS != (mpi_code = MPI_Type_free(&mpi_file_type)))
            HMPI_DONE_ERROR(FAIL, "MPI_Type_free failed", mpi_code);
    } /* end if */

    FUNC_LEAVE_NOAPI(ret_value);
} /* end H5D_mpio_spaces_xfer() */
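Since H5D_mpio_spaces_xfer() is file-scope static, it is normally driven by thin read/write wrappers in the same source file. The sketch below shows the write direction; the wrapper name example_mpio_spaces_write is illustrative, and only H5D_mpio_spaces_xfer() and its argument order come from the listing above.

/* Hypothetical wrapper sketch (not library source): issue a collective write
 * by delegating to H5D_mpio_spaces_xfer() with do_write set to TRUE.
 */
static herr_t
example_mpio_spaces_write(H5D_io_info_t *io_info, size_t elmt_size,
    const H5S_t *file_space, const H5S_t *mem_space, const void *buf)
{
    /* The xfer routine takes a single buffer pointer for both directions, so
     * the const qualifier is cast away here; the buffer is only read when
     * writing.
     */
    return H5D_mpio_spaces_xfer(io_info, elmt_size, file_space, mem_space,
            (void *)buf, TRUE);
}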