Example #1
struct uio *
afsio_partialcopy(struct uio *auio, size_t size)
{
    struct uio *res;
    int i;
    user_addr_t iovaddr;
    user_size_t iovsize;

    if (proc_is64bit(current_proc())) {
	res = uio_create(uio_iovcnt(auio), uio_offset(auio),
			 uio_isuserspace(auio) ? UIO_USERSPACE64 : UIO_SYSSPACE32,
			 uio_rw(auio));
    } else {
	res = uio_create(uio_iovcnt(auio), uio_offset(auio),
			 uio_isuserspace(auio) ? UIO_USERSPACE32 : UIO_SYSSPACE32,
			 uio_rw(auio));
    }

    for (i = 0; i < uio_iovcnt(auio) && size > 0; i++) {
	if (uio_getiov(auio, i, &iovaddr, &iovsize))
	    break;
	if (iovsize > size)
	    iovsize = size;
	if (uio_addiov(res, iovaddr, iovsize))
	    break;
	size -= iovsize;
    }
    return res;
}
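
A minimal usage sketch for the helper above, assuming a hypothetical backend_write() consumer; uio_free() is the standard XNU KPI for releasing a uio obtained from uio_create():

/* Hypothetical caller: hand only the first nbytes of a request to a
 * backend, leaving the caller's uio untouched. */
static int backend_write(struct uio *uio);	/* assumed consumer */

static int
write_prefix(struct uio *auio, size_t nbytes)
{
    struct uio *partial = afsio_partialcopy(auio, nbytes);
    int code;

    if (partial == NULL)    /* uio_create() can fail under memory pressure */
        return ENOMEM;
    code = backend_write(partial);
    uio_free(partial);      /* release the uio built by uio_create() */
    return code;
}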
Example #2
static int
vnop_write_9p(struct vnop_write_args *ap)
{
	vnode_t vp;
	node_9p *np;
	uio_t uio;
	user_ssize_t resid;
	off_t eof, zh, zt, off;
	int e, flag;

	TRACE();
	vp = ap->a_vp;
	uio = ap->a_uio;
	np = NTO9P(vp);

	if (vnode_isdir(vp))
		return EISDIR;
	
	off = uio_offset(uio);
	if (off < 0)
		return EINVAL;
	
	resid = uio_resid(uio);
	if (resid == 0)
		return 0;

	flag = ap->a_ioflag;
	if (ISSET(flag, IO_APPEND)) {
		off = np->dir.length;
		uio_setoffset(uio, off);
	}
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	if (vnode_isnocache(vp) || ISSET(flag, IO_NOCACHE)) {
		ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_PUSHDIRTY|UBC_SYNC);
		ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_INVALIDATE);
		e = nwrite_9p(np, uio);
	} else {
		zh = zt = 0;
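		/* An extending write may leave gaps: ask cluster_write() to
		 * zero-fill from the old EOF to the write start (head) and
		 * from the new EOF out to a page boundary (tail). */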
		eof = MAX(np->dir.length, resid+off);
		if (eof > np->dir.length) {
			if (off > np->dir.length) {
				zh = np->dir.length;
				SET(flag, IO_HEADZEROFILL);
			}
			zt = (eof + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
			if (zt > eof) {
				zt = eof;
				SET(flag, IO_TAILZEROFILL);
			}
		}
		e = cluster_write(vp, uio, np->dir.length, eof, zh, zt, flag);
		if (e==0 && eof>np->dir.length) {
			np->dirtimer = 0;
			np->dir.length = eof;
			ubc_setsize(vp, eof);
		}
	}
	nunlock_9p(np);
	return e;
}
Example #3
File: gfs.c Project: RJVB/zfs
/*
 * gfs_readdir_init: initiate a generic readdir
 *   st		- a pointer to an uninitialized gfs_readdir_state_t structure
 *   name_max	- the directory's maximum file name length
 *   ureclen	- the exported file-space record length (1 for non-legacy FSs)
 *   uiop	- the uiop passed to readdir
 *   parent	- the parent directory's inode
 *   self	- this directory's inode
 *   flags	- flags from VOP_READDIR
 *
 * Returns 0 or a non-zero errno.
 *
 * Typical VOP_READDIR usage of gfs_readdir_*:
 *
 *	if ((error = gfs_readdir_init(...)) != 0)
 *		return (error);
 *	eof = 0;
 *	while ((error = gfs_readdir_pred(..., &voffset)) == 0) {
 *		if (!consumer_entry_at(voffset))
 *			voffset = consumer_next_entry(voffset);
 *		if (consumer_eof(voffset)) {
 *			eof = 1;
 *			break;
 *		}
 *		if ((error = gfs_readdir_emit(..., voffset,
 *		    consumer_ino(voffset), consumer_name(voffset))) != 0)
 *			break;
 *	}
 *	return (gfs_readdir_fini(..., error, eofp, eof));
 *
 * As you can see, a zero result from gfs_readdir_pred() or
 * gfs_readdir_emit() indicates that processing should continue,
 * whereas a non-zero result indicates that the loop should terminate.
 * Most consumers need do nothing more than let gfs_readdir_fini()
 * determine what the cause of failure was and return the appropriate
 * value.
 */
int
gfs_readdir_init(gfs_readdir_state_t *st, int name_max, int ureclen,
    uio_t *uiop, ino64_t parent, ino64_t self, int flags)
{
	size_t dirent_size;
	boolean_t extended = (flags & VNODE_READDIR_EXTENDED);

	if (uio_offset(uiop) < 0 || uio_resid(uiop) <= 0 ||
	    (uio_offset(uiop) % ureclen) != 0)
		return (EINVAL);

	st->grd_ureclen = ureclen;
	st->grd_oresid = uio_resid(uiop);
	st->grd_namlen = name_max;

	dirent_size = DIRENT_RECLEN(st->grd_namlen, extended);

	st->grd_dirent = kmem_zalloc(dirent_size, KM_SLEEP);
	st->grd_parent = parent;
	st->grd_self = self;
	st->grd_flags = flags;

	return (0);
}
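
Putting the pieces together, a consumer of these helpers might look like the sketch below. The entry table, the name_max value, and the gfs_readdir_fini() signature are assumptions for illustration; the init/pred/emit calls mirror the signatures shown in these examples:

/*
 * Hedged sketch (not verbatim from gfs.c) of a VOP_READDIR consumer
 * built on gfs_readdir_init/pred/emit/fini.
 */
struct sample_entry { ino64_t ino; const char *name; };
static struct sample_entry entries[] = {	/* hypothetical directory */
	{ 100, "foo" },
	{ 101, "bar" },
};
#define NENTRIES ((offset_t)(sizeof(entries) / sizeof(entries[0])))

static int
sample_readdir(uio_t *uiop, int *eofp, int *ncookies, u_long **cookies,
    ino64_t parent, ino64_t self, int flags)
{
	gfs_readdir_state_t st;
	offset_t voff;
	int error, eof = 0;

	if ((error = gfs_readdir_init(&st, 255 /* name_max */, 1, uiop,
	    parent, self, flags)) != 0)
		return (error);

	while ((error = gfs_readdir_pred(&st, uiop, &voff,
	    ncookies, cookies)) == 0) {
		if (voff >= NENTRIES) {
			eof = 1;
			break;
		}
		if ((error = gfs_readdir_emit(&st, uiop, voff,
		    entries[voff].ino, entries[voff].name, 0,
		    ncookies, cookies)) != 0)
			break;
	}
	/* assumed signature: gfs_readdir_fini(st, error, eofp, eof) */
	return (gfs_readdir_fini(&st, error, eofp, eof));
}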
Example #4
static int
vnop_read_9p(struct vnop_read_args *ap)
{
	node_9p *np;
	vnode_t vp;
	uio_t uio;
	int e;

	TRACE();
	vp = ap->a_vp;
	uio = ap->a_uio;
	np = NTO9P(vp);

	if (vnode_isdir(vp))
		return EISDIR;

	if (uio_offset(uio) < 0)
		return EINVAL;

	if (uio_resid(uio) == 0)
		return 0;

	nlock_9p(np, NODE_LCK_SHARED);
	if (vnode_isnocache(vp) || ISSET(ap->a_ioflag, IO_NOCACHE)) {
		if (ISSET(np->flags, NODE_MMAPPED))
			ubc_msync(vp, 0, ubc_getsize(vp), NULL, UBC_PUSHDIRTY|UBC_SYNC);
		else
			cluster_push(vp, IO_SYNC);
		ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_INVALIDATE);
		e = nread_9p(np, uio);
	} else
		e = cluster_read(vp, uio, np->dir.length, ap->a_ioflag);
	nunlock_9p(np);
	return e;
}
Example #5
// This function issues as many pmem_partial_read() calls as necessary to
// copy uio->resid bytes of physical memory, starting at the physical
// address given by uio->offset, into the buffer described by the uio.
static kern_return_t pmem_read_memory(struct uio *uio) {
  while (uio_resid(uio) > 0) {
    // Try to read as many times as necessary until the uio is full.
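    // Note: this loop only terminates if pmem_partial_read() advances the
    // uio internally (e.g. via uiomove()); compare Example #6, which
    // advances it explicitly with uio_update().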
    pmem_partial_read(uio, uio_offset(uio),
                      uio_offset(uio) + uio_curriovlen(uio));
  }
  return KERN_SUCCESS;
}
Example #6
// This function issues as many pmem_partial_read() calls as necessary to
// copy uio->resid bytes of physical memory, starting at the physical
// address given by uio->offset, into the buffer described by the uio.
static kern_return_t pmem_read_memory(struct uio *uio) {
    size_t read_bytes = 0;

    while (uio_resid(uio) > 0) {
        uio_update(uio, 0);
        // Try to read as many times as necessary until the uio is full.
        read_bytes = pmem_partial_read(uio, uio_offset(uio),
                                       uio_offset(uio) + uio_curriovlen(uio));
        uio_update(uio, read_bytes);
    }
    return KERN_SUCCESS;
}
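
The decisive difference from Example #5 is the explicit uio_update() call: it advances the current iovec, shrinks uio_resid(), and moves uio_offset() forward, so the loop makes progress. A self-contained sketch of those semantics against the XNU uio KPI (the actual copy step is elided):

#include <sys/uio.h>
#include <sys/param.h>

/* Minimal sketch: walk a single-iovec system-space uio in 512-byte steps. */
static void
consume_in_chunks(char *buf, user_size_t len)
{
    uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);

    if (uio == NULL)
        return;
    if (uio_addiov(uio, CAST_USER_ADDR_T(buf), len)) {
        uio_free(uio);
        return;
    }
    while (uio_resid(uio) > 0) {
        user_size_t chunk = MIN(uio_curriovlen(uio), (user_size_t)512);
        /* ... copy `chunk` bytes at uio_offset(uio) here ... */
        uio_update(uio, chunk);  /* advances offset, shrinks resid */
    }
    uio_free(uio);
}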
Example #7
File: gfs.c Project: RJVB/zfs
/*
 * gfs_readdir_pred: readdir loop predicate
 *   voffp - a pointer in which the next virtual offset should be stored
 *
 * Returns 0 on success, a non-zero errno on failure, or -1 if the
 * readdir loop should terminate.  A non-zero result (either errno or
 * -1) from this function is typically passed directly to
 * gfs_readdir_fini().
 */
int
gfs_readdir_pred(gfs_readdir_state_t *st, uio_t *uiop, offset_t *voffp,
    int *ncookies, u_long **cookies)
{
	offset_t off, voff;
	int error;

top:
	if (uio_resid(uiop) <= 0)
		return (-1);

	off = uio_offset(uiop) / st->grd_ureclen;
	voff = off - 2;
	if (off == 0) {
		if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_self,
		    ".", 0, ncookies, cookies)) == 0)
			goto top;
	} else if (off == 1) {
		dprintf("Sending out .. with id %llu\n",
		    (unsigned long long)st->grd_parent);
		if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_parent,
		    "..", 0, ncookies, cookies)) == 0)
			goto top;
	} else {
		*voffp = voff;
		return (0);
	}

	return (error);
}
Example #8
static int
vnread_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, 
	      vfs_context_t ctx)
{
	u_int32_t		blocksize = vn->sc_secsize;
	int 		error = 0;
	off_t		offset;
	user_ssize_t	resid;
	off_t		orig_offset;
	user_ssize_t	orig_resid;

	orig_resid = resid = uio_resid(uio);
	orig_offset = offset = uio_offset(uio);

	while (resid > 0) {
		u_int32_t		remainder;
		u_int32_t		this_block_number;
		u_int32_t		this_block_count;
		off_t		this_offset;
		user_ssize_t	this_resid;
		struct vnode *	vp;

		/* figure out which blocks to read */
		remainder = block_remainder(offset, blocksize);
		if (shadow_map_read(vn->sc_shadow_map,
				    block_truncate(offset, blocksize),
				    block_round(resid + remainder, blocksize),
				    &this_block_number, &this_block_count)) {
			vp = vn->sc_shadow_vp;
		}
		else {
			vp = vn->sc_vp;
		}

		/* read the blocks (or parts thereof) */
		this_offset = (off_t)this_block_number * blocksize + remainder;
		uio_setoffset(uio, this_offset);
		this_resid = this_block_count * blocksize - remainder;
		if (this_resid > resid) {
			this_resid = resid;
		}
		uio_setresid(uio, this_resid);
		error = VNOP_READ(vp, uio, ioflag, ctx);
		if (error) {
			break;
		}

		/* figure out how much we actually read */
		this_resid -= uio_resid(uio);
		if (this_resid == 0) {
			printf("vn device: vnread_shadow zero length read\n");
			break;
		}
		resid -= this_resid;
		offset += this_resid;
	}
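	/* report the overall progress back through the caller's uio */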
	uio_setresid(uio, resid);
	uio_setoffset(uio, offset);
	return (error);
}
Example #9
static int
nread_9p(node_9p *np, uio_t uio)
{
	openfid_9p *op;
	uint32_t n, l, sz;
	char *p;
	int e;

	TRACE();
	op = &np->openfid[OREAD];
	if (op->fid == NOFID)
		op = &np->openfid[ORDWR];
	if (op->fid == NOFID)
		return EBADF;

	sz = np->iounit;
	if (sz == 0)
		sz = np->nmp->msize-IOHDRSZ;

	p = malloc_9p(sz);
	if (p == NULL)
		return ENOMEM;

	e = 0;
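	/* uiomove() advances uio_offset() and shrinks uio_resid(), so each
	 * read_9p() call picks up where the previous copy left off. */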
	while (uio_resid(uio) > 0) {
		n = MIN(uio_resid(uio), sz);
		if ((e=read_9p(np->nmp, op->fid, p, n, uio_offset(uio), &l)) || l==0)
			break;
		if ((e=uiomove(p, l, uio)))
			break;
	}
	free_9p(p);
	return e;
}
Example #10
__private_extern__
int
fuse_internal_readdir(vnode_t                 vp,
                      uio_t                   uio,
                      vfs_context_t           context,
                      struct fuse_filehandle *fufh,
                      struct fuse_iov        *cookediov,
                      int                    *numdirent)
{
    int err = 0;
    struct fuse_dispatcher fdi;
    struct fuse_read_in   *fri;
    struct fuse_data      *data;

    if (uio_resid(uio) == 0) {
        return 0;
    }

    fdisp_init(&fdi, 0);

    /* Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p I/O). */

    while (uio_resid(uio) > 0) {

        fdi.iosize = sizeof(*fri);
        fdisp_make_vp(&fdi, FUSE_READDIR, vp, context);

        fri = fdi.indata;
        fri->fh = fufh->fh_id;
        fri->offset = uio_offset(uio);
        data = fuse_get_mpdata(vnode_mount(vp));
        fri->size = (typeof(fri->size))min((size_t)uio_resid(uio), data->iosize);

        if ((err = fdisp_wait_answ(&fdi))) {
            goto out;
        }

        if ((err = fuse_internal_readdir_processdata(vp,
                                                     uio,
                                                     fri->size,
                                                     fdi.answ,
                                                     fdi.iosize,
                                                     cookediov,
                                                     numdirent))) {
            break;
        }
    }

/* done: */

    fuse_ticket_drop(fdi.tick);

out:
    return ((err == -1) ? 0 : err);
}
Example #11
int
fuse_internal_readdir(struct vnode *vp,
    struct uio *uio,
    struct fuse_filehandle *fufh,
    struct fuse_iov *cookediov)
{
	int err = 0;
	struct fuse_dispatcher fdi;
	struct fuse_read_in *fri;

	if (uio_resid(uio) == 0) {
		return 0;
	}
	fdisp_init(&fdi, 0);

	/*
	 * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p
	 * I/O).
	 */

	while (uio_resid(uio) > 0) {

		fdi.iosize = sizeof(*fri);
		fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL);

		fri = fdi.indata;
		fri->fh = fufh->fh_id;
		fri->offset = uio_offset(uio);
		fri->size = min(uio_resid(uio), FUSE_DEFAULT_IOSIZE);
		/* mp->max_read */

		if ((err = fdisp_wait_answ(&fdi))) {
			break;
		}
		if ((err = fuse_internal_readdir_processdata(uio, fri->size, fdi.answ,
		    fdi.iosize, cookediov))) {
			break;
		}
	}

	fdisp_destroy(&fdi);
	return ((err == -1) ? 0 : err);
}
Example #12
static int
nwrite_9p(node_9p *np, uio_t uio)
{
	openfid_9p *op;
	user_ssize_t resid;
	uint32_t l, sz;
	off_t off;
	char *p;
	int n, e;

	TRACE();
	op = &np->openfid[OWRITE];
	if (op->fid == NOFID)
		op = &np->openfid[ORDWR];
	if (op->fid == NOFID)
		return EBADF;

	sz = np->iounit;
	if (sz == 0)
		sz = np->nmp->msize-IOHDRSZ;

	p = malloc_9p(sz);
	if (p == NULL)
		return ENOMEM;

	e = 0;
	while (uio_resid(uio) > 0) {
		l = 0;
		off = uio_offset(uio);
		resid = uio_resid(uio);
		n = MIN(resid, sz);
		if ((e=uiomove(p, n, uio)))
			break;
		if ((e=write_9p(np->nmp, op->fid, p, n, off, &l)))
			break;
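		/* uiomove() already advanced the uio by n bytes; rewind it so
		 * offset and resid reflect only the l bytes the server took. */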
		uio_setoffset(uio, off+l);
		uio_setresid(uio, resid-l);
	}
	free_9p(p);
	return e;
}
Example #13
static int
vboxvfs_vnode_readdir(struct vnop_readdir_args *args)
{
    vboxvfs_mount_t *pMount;
    vboxvfs_vnode_t *pVnodeData;
    SHFLDIRINFO     *Info;
    uint32_t         cbInfo;
    mount_t          mp;
    vnode_t          vnode;
    struct uio      *uio;

    int rc = 0, rc2;

    PDEBUG("Reading directory...");

    AssertReturn(args,              EINVAL);
    AssertReturn(args->a_eofflag,   EINVAL);
    AssertReturn(args->a_numdirent, EINVAL);

    uio             = args->a_uio;                             AssertReturn(uio,        EINVAL);
    vnode           = args->a_vp;                              AssertReturn(vnode,      EINVAL); AssertReturn(vnode_isdir(vnode), EINVAL);
    pVnodeData      = (vboxvfs_vnode_t *)vnode_fsnode(vnode);  AssertReturn(pVnodeData, EINVAL);
    mp              = vnode_mount(vnode);                      AssertReturn(mp,         EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);             AssertReturn(pMount,     EINVAL);

    lck_rw_lock_shared(pVnodeData->pLock);

    cbInfo = sizeof(*Info) + MAXPATHLEN; /* struct size, not pointer size */
    Info   = (SHFLDIRINFO *)RTMemAllocZ(cbInfo);
    if (!Info)
    {
        PDEBUG("No memory to allocate internal data");
        lck_rw_unlock_shared(pVnodeData->pLock);
        return ENOMEM;
    }

    uint32_t index = (uint32_t)uio_offset(uio) / (uint32_t)sizeof(struct dirent);
    uint32_t cFiles = 0;

    PDEBUG("Exploring VBoxVFS directory (%s), handle (0x%.8X), offset (0x%X), count (%d)", (char *)pVnodeData->pPath->String.utf8, (int)pVnodeData->pHandle, index, uio_iovcnt(uio));

    /* Currently, there is a problem where VbglR0SfDirInfo() is unable to
     * continue retrieving directory content if the same VBoxVFS handle is
     * reused. The code below therefore opens a fresh handle for each
     * readdir() invocation; the original handle (obtained in the open()
     * callback) is ignored. */

    SHFLHANDLE Handle;
    rc = vboxvfs_open_internal(pMount,
                               pVnodeData->pPath,
                               SHFL_CF_DIRECTORY | SHFL_CF_ACCESS_READ | SHFL_CF_ACT_OPEN_IF_EXISTS | SHFL_CF_ACT_FAIL_IF_NEW,
                               &Handle);
    if (rc != 0)
    {
        PDEBUG("Unable to open dir: %d", rc);
        RTMemFree(Info);
        lck_rw_unlock_shared(pVnodeData->pLock);
        return rc;
    }

#if 0
    rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0, 0, index, &cbInfo, (PSHFLDIRINFO)Info, &cFiles);
#else
    SHFLSTRING *pMask = vboxvfs_construct_shflstring("*", strlen("*"));
    if (pMask)
    {
        for (uint32_t cSkip = 0; (cSkip < index + 1) && (rc == VINF_SUCCESS); cSkip++)
        {
            //rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0 /* pMask */, 0 /* SHFL_LIST_RETURN_ONE */, 0, &cbInfo, (PSHFLDIRINFO)Info, &cFiles);

            uint32_t cbReturned = cbInfo;
            //rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, pMask, SHFL_LIST_RETURN_ONE, 0, &cbReturned, (PSHFLDIRINFO)Info, &cFiles);
            rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0, SHFL_LIST_RETURN_ONE, 0,
                                 &cbReturned, (PSHFLDIRINFO)Info, &cFiles);

        }

        PDEBUG("read %d files", cFiles);
        RTMemFree(pMask);
    }
    else
    {
        PDEBUG("Can't alloc mask");
        rc = ENOMEM;
    }
#endif
    rc2 = vboxvfs_close_internal(pMount, Handle);
    if (rc2 != 0)
    {
        PDEBUG("Unable to close directory: %s: %d",
               pVnodeData->pPath->String.utf8,
               rc2);
    }

    switch (rc)
    {
        case VINF_SUCCESS:
        {
            rc = vboxvfs_vnode_readdir_copy_data((ino_t)(index + 1), Info, uio, args->a_numdirent);
            break;
        }

        case VERR_NO_MORE_FILES:
        {
            PDEBUG("No more entries in directory");
            *(args->a_eofflag) = 1;
            break;
        }

        default:
        {
            PDEBUG("VbglR0SfDirInfo() for item #%d has failed: %d", (int)index, (int)rc);
            rc = EINVAL;
            break;
        }
    }

    RTMemFree(Info);
    lck_rw_unlock_shared(pVnodeData->pLock);

    return rc;
}
Example #14
static int 
vnwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct vfs_context  	context;
	int 			error;
	off_t			offset;
	proc_t			p;
	user_ssize_t		resid;
	struct vn_softc *	vn;
	int 			unit;

	unit = vnunit(dev);
	if (vnunit(dev) >= NVNDEVICE) {
		return (ENXIO);
	}
	p = current_proc();
	vn = vn_table + unit;
	if ((vn->sc_flags & VNF_INITED) == 0) {
		error = ENXIO;
		goto done;
	}
	if (vn->sc_flags & VNF_READONLY) {
		error = EROFS;
		goto done;
	}

	context.vc_thread = current_thread();
	context.vc_ucred = vn->sc_cred;

	error = vnode_getwithvid(vn->sc_vp, vn->sc_vid);
	if (error != 0) {
		/* the vnode is no longer available, abort */
		error = ENXIO;
		vnclear(vn, &context);
		goto done;
	}
	resid = uio_resid(uio);
	offset = uio_offset(uio);

	/*
	 * If out of bounds return an error.  If at the EOF point,
	 * simply write less.
	 */
	if (offset >= (off_t)vn->sc_fsize) {
		if (offset > (off_t)vn->sc_fsize) {
			error = EINVAL;
		}
		goto done;
	}
	/*
	 * If the request crosses EOF, truncate the request.
	 */
	if ((offset + resid) > (off_t)vn->sc_fsize) {
		resid = (off_t)vn->sc_fsize - offset;
		uio_setresid(uio, resid);
	}

	if (vn->sc_shadow_vp != NULL) {
		error = vnode_getwithvid(vn->sc_shadow_vp,
					 vn->sc_shadow_vid);
		if (error != 0) {
			/* the vnode is no longer available, abort */
			error = ENXIO;
			vnode_put(vn->sc_vp);
			vnclear(vn, &context);
			goto done;
		}
		error = vnwrite_shadow(vn, uio, ioflag, &context);
		vnode_put(vn->sc_shadow_vp);
	} else {
		error = VNOP_WRITE(vn->sc_vp, uio, ioflag, &context);
	}
	vnode_put(vn->sc_vp);
 done:
	return (error);
}
Example #15
static int
vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, 
	       vfs_context_t ctx)
{
	u_int32_t		blocksize = vn->sc_secsize;
	int 		error = 0;
	user_ssize_t	resid;
	off_t		offset;

	resid = uio_resid(uio);
	offset = uio_offset(uio);

	while (resid > 0) {
		int		flags = 0;
		u_int32_t		offset_block_number;
		u_int32_t		remainder;
		u_int32_t		resid_block_count;
		u_int32_t		shadow_block_count;
		u_int32_t		shadow_block_number;
		user_ssize_t	this_resid;

		/* figure out which blocks to write */
		offset_block_number = block_truncate(offset, blocksize);
		remainder = block_remainder(offset, blocksize);
		resid_block_count = block_round(resid + remainder, blocksize);
		/* figure out if the first or last blocks are partial writes */
		if (remainder > 0
		    && !shadow_map_is_written(vn->sc_shadow_map,
					      offset_block_number)) {
			/* the first block is a partial write */
			flags |= FLAGS_FIRST_BLOCK_PARTIAL;
		}
		if (resid_block_count > 1
		    && !shadow_map_is_written(vn->sc_shadow_map,
					      offset_block_number
					      + resid_block_count - 1)
		    && block_remainder(offset + resid, blocksize) > 0) {
			/* the last block is a partial write */
			flags |= FLAGS_LAST_BLOCK_PARTIAL;
		}
		if (shadow_map_write(vn->sc_shadow_map,
				     offset_block_number, resid_block_count,
				     &shadow_block_number, 
				     &shadow_block_count)) {
			/* shadow file is growing */
#if 0
			/* truncate the file to its new length before write */
			off_t	size;
			size = (off_t)shadow_map_shadow_size(vn->sc_shadow_map) 
				* vn->sc_secsize;
			vnode_setsize(vn->sc_shadow_vp, size, IO_SYNC, ctx);
#endif
		}
		/* write the blocks (or parts thereof) */
		uio_setoffset(uio, (off_t)
			      shadow_block_number * blocksize + remainder);
		this_resid = (off_t)shadow_block_count * blocksize - remainder;
		if (this_resid >= resid) {
			this_resid = resid;
			if ((flags & FLAGS_LAST_BLOCK_PARTIAL) != 0) {
				/* copy the last block to the shadow */
				u_int32_t 	d;
				u_int32_t	s;

				s = offset_block_number 
					+ resid_block_count - 1;
				d = shadow_block_number 
					+ shadow_block_count - 1;
				error = vncopy_block_to_shadow(vn, ctx, s, d);
				if (error) {
					printf("vnwrite_shadow: failed to copy"
					       " block %u to shadow block %u\n",
					       s, d);
					break;
				}
			}
		}
		uio_setresid(uio, this_resid);
		if ((flags & FLAGS_FIRST_BLOCK_PARTIAL) != 0) {
			/* copy the first block to the shadow */
			error = vncopy_block_to_shadow(vn, ctx,
						       offset_block_number,
						       shadow_block_number);
			if (error) {
				printf("vnwrite_shadow: failed to"
				       " copy block %u to shadow block %u\n", 
				       offset_block_number, 
				       shadow_block_number);
				break;
			}
		}
		error = VNOP_WRITE(vn->sc_shadow_vp, uio, ioflag, ctx);
		if (error) {
			break;
		}
		/* figure out how much we actually wrote */
		this_resid -= uio_resid(uio);
		if (this_resid == 0) {
			printf("vn device: vnwrite_shadow zero length write\n");
			break;
		}
		resid -= this_resid;
		offset += this_resid;
	}
	uio_setresid(uio, resid);
	uio_setoffset(uio, offset);
	return (error);
}
Example #16
int
physio( void (*f_strategy)(buf_t), 
	buf_t bp,
	dev_t dev,
	int flags,
	u_int (*f_minphys)(buf_t),
	struct uio *uio,
	int blocksize)
{
	struct proc *p = current_proc();
	int error, i, buf_allocated, todo, iosize;
	int orig_bflags = 0;
	int64_t done;

	error = 0;
	flags &= B_READ | B_WRITE;
	buf_allocated = 0;

	/*
	 * [check user read/write access to the data buffer]
	 *
	 * Check each iov one by one.  Note that we know if we're reading or
	 * writing, so we ignore the uio's rw parameter.  Also note that if
	 * we're doing a read, that's a *write* to user-space.
	 */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
			user_addr_t base;
			user_size_t len;
			
			if (uio_getiov(uio, i, &base, &len) ||
			    !useracc(base, len,
			        (flags == B_READ) ? B_WRITE : B_READ))
				return (EFAULT);
		}
	}
	/*
	 * Make sure we have a buffer, creating one if necessary.
	 */
	if (bp == NULL) {
		bp = buf_alloc((vnode_t)0);
		buf_allocated = 1;
	} else
	        orig_bflags = buf_flags(bp);
	/*
	 * at this point we should have a buffer
	 * that is marked BL_BUSY... we either 
	 * acquired it via buf_alloc, or it was
	 * passed into us... if it was passed
	 * in, it needs to already be owned by
	 * the caller (i.e. BL_BUSY is set)
	 */
	assert(bp->b_lflags & BL_BUSY);

	/*
	 * [set up the fixed part of the buffer for a transfer]
	 */
	bp->b_dev = dev;
	bp->b_proc = p;

	/*
	 * [mark the buffer busy for physical I/O]
	 * (i.e. set B_PHYS, because it's an I/O to user
	 * memory, and B_RAW, because B_RAW is to be
	 * "Set by physio for raw transfers", in addition
	 * to the read/write flag.)
	 */
        buf_setflags(bp, B_PHYS | B_RAW);

	/*
	 * [while there is data to transfer and no I/O error]
	 * Note that I/O errors are handled with a 'goto' at the bottom
	 * of the 'while' loop.
	 */
	while (uio_resid(uio) > 0) {
			
			if ( (iosize = uio_curriovlen(uio)) > MAXPHYSIO_WIRED)
			        iosize = MAXPHYSIO_WIRED;
			/*
			 * make sure we're set to issue a fresh I/O
			 * in the right direction
			 */
			buf_reset(bp, flags);

			/* [set up the buffer for a maximum-sized transfer] */
 			buf_setblkno(bp, uio_offset(uio) / blocksize);
			buf_setcount(bp, iosize);
			buf_setdataptr(bp, (uintptr_t)CAST_DOWN(caddr_t, uio_curriovbase(uio)));
			
			/*
			 * [call f_minphys to bound the transfer size]
			 * and remember the amount of data to transfer,
			 * for later comparison.
			 */
			(*f_minphys)(bp);
			todo = buf_count(bp);

			/*
			 * [lock the part of the user address space involved
			 *    in the transfer]
			 */

			if(UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
				error = vslock(CAST_USER_ADDR_T(buf_dataptr(bp)),
					       (user_size_t)todo);
				if (error)
					goto done;
			}
			
			/* [call f_strategy to start the transfer] */
			(*f_strategy)(bp);


			/* [wait for the transfer to complete] */
			error = (int)buf_biowait(bp);

			/*
			 * [unlock the part of the address space previously
			 *    locked]
			 */
			if(UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
				vsunlock(CAST_USER_ADDR_T(buf_dataptr(bp)),
					 (user_size_t)todo,
					 (flags & B_READ));

			/*
			 * [deduct the transfer size from the total amount
			 *    of data to transfer]
			 */
			done = buf_count(bp) - buf_resid(bp);
			uio_update(uio, done);

			/*
			 * Now, check for an error.
			 * Also, handle weird end-of-disk semantics.
			 */
			if (error || done < todo)
				goto done;
	}

done:
	if (buf_allocated)
	        buf_free(bp);
	else
		buf_setflags(bp, orig_bflags);

	return (error);
}
Example #17
static int
nullfs_special_readdir(struct vnop_readdir_args * ap)
{
	struct vnode * vp           = ap->a_vp;
	struct uio * uio            = ap->a_uio;
	struct null_mount * null_mp = MOUNTTONULLMOUNT(vnode_mount(vp));
	off_t offset                = uio_offset(uio);
	int error                   = ERANGE;
	int items                   = 0;
	ino_t ino                   = 0;
	const char * name           = NULL;

	if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF))
		return (EINVAL);

	if (offset == 0) {
		/* . case */
		if (vp == null_mp->nullm_rootvp) {
			ino = NULL_ROOT_INO;
		} else /* only get here if vp matches nullm_rootvp or nullm_secondvp */
		{
			ino = NULL_SECOND_INO;
		}
		error = store_entry_special(ino, ".", uio);
		if (error) {
			goto out;
		}
		offset++;
		items++;
	}
	if (offset == 1) {
		/* .. case */
		/* only get here if vp matches nullm_rootvp or nullm_secondvp */
		ino = NULL_ROOT_INO;

		error = store_entry_special(ino, "..", uio);
		if (error) {
			goto out;
		}
		offset++;
		items++;
	}
	if (offset == 2) {
		/* the directory case */
		if (vp == null_mp->nullm_rootvp) {
			ino  = NULL_SECOND_INO;
			name = "d";
		} else /* only get here if vp matches nullm_rootvp or nullm_secondvp */
		{
			ino = NULL_THIRD_INO;
			if (vnode_getwithvid(null_mp->nullm_lowerrootvp, null_mp->nullm_lowerrootvid)) {
				/* In this case the lower file system has been ripped out from under us,
				   but we don't want to error out
				   Instead we just want d to look empty. */
				error = 0;
				goto out;
			}
			name = vnode_getname_printable(null_mp->nullm_lowerrootvp);
		}
		error = store_entry_special(ino, name, uio);

		if (ino == NULL_THIRD_INO) {
			vnode_putname_printable(name);
			vnode_put(null_mp->nullm_lowerrootvp);
		}

		if (error) {
			goto out;
		}
		offset++;
		items++;
	}

out:
	if (error == EMSGSIZE) {
		error = 0; /* return success if we ran out of space, but we wanted to make
		              sure that we didn't update offset and items incorrectly */
	}
	uio_setoffset(uio, offset);
	if (ap->a_numdirent) {
		*ap->a_numdirent = items;
	}
	return error;
}
Example #18
static int
vnop_readdir_9p(struct vnop_readdir_args *ap)
{
	struct direntry de64;
	struct dirent de32;
	vnode_t vp;
	node_9p *np;
	dir_9p *dp;
	fid_9p fid;
	off_t off;
	uio_t uio;
	uint32_t i, nd, nlen, plen;
	void *p;
	int e;
	
	TRACE();
	vp = ap->a_vp;
	uio = ap->a_uio;
	np = NTO9P(vp);

	if (!vnode_isdir(vp))
		return ENOTDIR;

	if (ISSET(ap->a_flags, VNODE_READDIR_REQSEEKOFF))
		return EINVAL;

	off = uio_offset(uio);
	if (off < 0)
		return EINVAL;
	
	if (uio_resid(uio) == 0)
		return 0;

	e = 0;
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	fid = np->openfid[OREAD].fid;
	if (fid == NOFID) {
		e = EBADF;
		goto error;
	}

	if (ap->a_eofflag)
		*ap->a_eofflag = 0;

	if (off == 0 || np->direntries==NULL) {
		if((e=readdirs_9p(np->nmp, fid, &np->direntries, &np->ndirentries)))
			goto error;
		if (np->ndirentries && np->direntries==NULL)
			panic("bug in readdir");
	}
	
	dp = np->direntries;
	nd = np->ndirentries;
	for (i=off; i<nd; i++) {
		if (ISSET(ap->a_flags, VNODE_READDIR_EXTENDED)) {
			bzero(&de64, sizeof(de64));
			de64.d_ino = QTOI(dp[i].qid);
			de64.d_type = dp[i].mode&DMDIR? DT_DIR: DT_REG;
			nlen = strlen(dp[i].name);
			de64.d_namlen = MIN(nlen, sizeof(de64.d_name)-1);
			bcopy(dp[i].name, de64.d_name, de64.d_namlen);
			de64.d_reclen = DIRENT64_LEN(de64.d_namlen);
			plen = de64.d_reclen;
			p = &de64;
		} else {
			bzero(&de32, sizeof(de32));
			de32.d_ino = QTOI(dp[i].qid);
			de32.d_type = dp[i].mode&DMDIR? DT_DIR: DT_REG;
			nlen = strlen(dp[i].name);
			de32.d_namlen = MIN(nlen, sizeof(de32.d_name)-1);
			bcopy(dp[i].name, de32.d_name, de32.d_namlen);
			de32.d_reclen = DIRENT32_LEN(de32.d_namlen);
			plen = de32.d_reclen;
			p = &de32;
		}

		if (uio_resid(uio) < plen)
			break;

		if ((e=uiomove(p, plen, uio)))
			goto error;
	}

	uio_setoffset(uio, i);
	if (ap->a_numdirent)
		*ap->a_numdirent = i - off;
	if (i==nd && ap->a_eofflag) {
		*ap->a_eofflag = 1;
		free_9p(np->direntries);
		np->direntries = NULL;
		np->ndirentries = 0;
	}

error:
	nunlock_9p(np);
	return e;
}