Example #1
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	if (uio_resid(uio) <= 0)
		panic("ureadc: non-positive resid");
	uio_update(uio, 0);
	if (uio->uio_iovcnt == 0)
		panic("ureadc: non-positive iovcnt");
	if (uio_curriovlen(uio) <= 0)
		panic("ureadc: non-positive iovlen");

	switch ((int) uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
	case UIO_USERISPACE32:
	case UIO_USERISPACE:
	case UIO_USERSPACE64:
	case UIO_USERISPACE64:
		if (subyte((user_addr_t)uio->uio_iovs.uiovp->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		*(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base)) = c;
		break;

	default:
		break;
	}
	uio_update(uio, 1);
	return (0);
}
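
ureadc() moves a single byte into the current iovec and advances the uio by one via uio_update(uio, 1). Below is a minimal sketch of how a character driver's read routine might drain a kernel FIFO one byte at a time with it; the mydev_read() entry point and the myfifo_pop() helper are hypothetical, not part of the example above:

#include <sys/types.h>
#include <sys/uio.h>
#include <sys/errno.h>

/* Hypothetical helper: returns the next buffered byte, or -1 when empty. */
extern int myfifo_pop(void);

static int
mydev_read(dev_t dev, struct uio *uio, int ioflag)
{
	int c, error = 0;

	/* ureadc() panics on a non-positive resid, so check before each call. */
	while (uio_resid(uio) > 0) {
		if ((c = myfifo_pop()) < 0)
			break;			/* no more buffered data */
		if ((error = ureadc(c, uio)) != 0)
			break;			/* EFAULT from subyte() */
	}
	return (error);
}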
Example #2
// This function issues as many pmem_partial_read() calls as necessary to
// copy uio->resid bytes of physical memory, starting at the physical
// address given by uio->offset, into the buffer(s) described by the uio.
static kern_return_t pmem_read_memory(struct uio *uio) {
    size_t read_bytes = 0;

    while (uio_resid(uio) > 0) {
        uio_update(uio, 0);
        // Read the current iovec's worth; repeat until the request is satisfied.
        read_bytes = pmem_partial_read(uio, uio_offset(uio),
                                       uio_offset(uio) + uio_curriovlen(uio));
        uio_update(uio, read_bytes);
    }
    return KERN_SUCCESS;
}
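
Callers normally reach pmem_read_memory() from a character-device read entry point, where the uio already describes the user buffer and uio_offset() doubles as the physical address. The following is a hedged sketch of building an equivalent kernel-space uio by hand, using the same uio_create()/uio_addiov() pattern seen in Example #3; read_phys_into() is a hypothetical name:

#include <sys/uio.h>
#include <mach/kern_return.h>

/* Illustrative only: read len bytes of physical memory starting at
 * phys into the kernel buffer dst via pmem_read_memory() above. */
static kern_return_t
read_phys_into(void *dst, size_t len, off_t phys)
{
	uio_t uio;
	kern_return_t kr;

	uio = uio_create(1, phys, UIO_SYSSPACE, UIO_READ);
	if (uio == NULL)
		return KERN_RESOURCE_SHORTAGE;
	uio_addiov(uio, CAST_USER_ADDR_T(dst), len);
	kr = pmem_read_memory(uio);
	uio_free(uio);
	return kr;
}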
Example #3
File: vnops.c  Project: aredridel/mac9p
static int
vnop_strategy_9p(struct vnop_strategy_args *ap)
{
	mount_t mp;
	struct buf *bp;
	node_9p *np;
	caddr_t addr;
	uio_t uio;
	int e, flags;

	TRACE();
	bp = ap->a_bp;
	np = NTO9P(buf_vnode(bp));
	flags = buf_flags(bp);
	uio = NULL;
	addr = NULL;

	mp = vnode_mount(buf_vnode(bp));
	if (mp == NULL)
		return ENXIO;

	if ((e = buf_map(bp, &addr)))
		goto error;

	uio = uio_create(1, buf_blkno(bp) * vfs_statfs(mp)->f_bsize, UIO_SYSSPACE,
					 ISSET(flags, B_READ)? UIO_READ: UIO_WRITE);
	if (uio == NULL) {
		e = ENOMEM;
		goto error;
	}
	
	uio_addiov(uio, CAST_USER_ADDR_T(addr), buf_count(bp));
	if (ISSET(flags, B_READ)) {
		if ((e = nread_9p(np, uio)))
			goto error;
		/* zero the rest of the page if we reached EOF */
		if (uio_resid(uio) > 0) {
			bzero(addr+buf_count(bp)-uio_resid(uio), uio_resid(uio));
			uio_update(uio, uio_resid(uio));
		}
	} else {
		if ((e = nwrite_9p(np, uio)))
			goto error;
	}
	buf_setresid(bp, uio_resid(uio));
error:
	if (uio)
		uio_free(uio);
	if (addr)
		buf_unmap(bp);
	buf_seterror(bp, e);
	buf_biodone(bp);
	return e;
}
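
The EOF handling above is worth isolating: after a short read, the tail of the mapped buffer is zeroed and the uio is marked fully consumed, so the later buf_setresid() call reports no residual. A sketch of that pattern as a standalone helper (zero_fill_tail() is a hypothetical name):

#include <sys/types.h>
#include <sys/uio.h>
#include <string.h>

/* Hypothetical helper: zero the unread tail of a mapped buffer and
 * consume the remaining uio so the caller sees resid == 0. */
static void
zero_fill_tail(caddr_t addr, int count, uio_t uio)
{
	user_ssize_t resid = uio_resid(uio);

	if (resid > 0) {
		/* count - resid bytes were actually read; zero the rest. */
		bzero(addr + count - resid, (size_t)resid);
		uio_update(uio, (user_size_t)resid);
	}
}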
Example #4
/*
 * Returns:	0			Success
 *		EFAULT
 *	copyout:EFAULT
 *	copyin:EFAULT
 *	copywithin:EFAULT
 *	copypv:EFAULT
 */
int
uiomove64(const addr64_t c_cp, int n, struct uio *uio)
{
	addr64_t cp = c_cp;
	uint64_t acnt;
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); 
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		uio_update(uio, 0);
		acnt = uio_curriovlen(uio);
		if (acnt == 0) {
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;

		switch ((int) uio->uio_segflg) {

		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0,0);

					error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.uiovp->iov_base, acnt );

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0,0);

			        error = copyin(uio->uio_iovs.uiovp->iov_base, CAST_DOWN(caddr_t, cp), acnt);

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base),
						   acnt);
			else
				error = copywithin(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base), CAST_DOWN(caddr_t, cp),
						   acnt);
			break;

		case UIO_PHYS_USERSPACE64:
		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.uiovp->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1,0);

				error = copypv(uio->uio_iovs.uiovp->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_PHYS_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2,0);

					error = copypv((addr64_t)cp, uio->uio_iovs.kiovp->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2,0);

					error = copypv(uio->uio_iovs.kiovp->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2,0);
			  }
			if (error)
				return (error);
			break;

		default:
			break;
		}
		uio_update(uio, acnt);
		cp += acnt;
		n -= acnt;
	}
	return (error);
}
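
Most code reaches uiomove64() through the plain uiomove() wrapper, letting it do the per-iovec walk and uio_update() bookkeeping. A minimal sketch of a read(2) handler copying a kernel buffer out to whatever address space the uio describes (the status_buf device state is hypothetical):

#include <sys/types.h>
#include <sys/uio.h>

/* Hypothetical device state exposed to userland via read(2). */
static char status_buf[256];
static size_t status_len;

static int
status_read(dev_t dev, struct uio *uio, int ioflag)
{
	off_t off = uio_offset(uio);

	if (off < 0 || (size_t)off >= status_len)
		return (0);		/* EOF */

	/* uiomove() copies min(n, uio_resid(uio)) bytes and advances
	 * the uio's offset and iovecs via uio_update(). */
	return (uiomove(status_buf + off, (int)(status_len - (size_t)off), uio));
}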
Example #5
int
physio( void (*f_strategy)(buf_t), 
	buf_t bp,
	dev_t dev,
	int flags,
	u_int (*f_minphys)(buf_t),
	struct uio *uio,
	int blocksize)
{
	struct proc *p = current_proc();
	int error, i, buf_allocated, todo, iosize;
	int orig_bflags = 0;
	int64_t done;

	error = 0;
	flags &= B_READ | B_WRITE;
	buf_allocated = 0;

	/*
	 * [check user read/write access to the data buffer]
	 *
	 * Check each iov one by one.  Note that we know if we're reading or
	 * writing, so we ignore the uio's rw parameter.  Also note that if
	 * we're doing a read, that's a *write* to user-space.
	 */
	for (i = 0; i < uio->uio_iovcnt; i++) {
		if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
			user_addr_t base;
			user_size_t len;
			
			if (uio_getiov(uio, i, &base, &len) ||
				!useracc(base,
					len,
		    		(flags == B_READ) ? B_WRITE : B_READ))
			return (EFAULT);
		}
	}
	/*
	 * Make sure we have a buffer, creating one if necessary.
	 */
	if (bp == NULL) {
		bp = buf_alloc((vnode_t)0);
		buf_allocated = 1;
	} else
	        orig_bflags = buf_flags(bp);
	/*
	 * at this point we should have a buffer
	 * that is marked BL_BUSY... we either 
	 * acquired it via buf_alloc, or it was
	 * passed into us... if it was passed
	 * in, it needs to already be owned by
	 * the caller (i.e. BL_BUSY is set)
	 */
	assert(bp->b_lflags & BL_BUSY);

	/*
	 * [set up the fixed part of the buffer for a transfer]
	 */
	bp->b_dev = dev;
	bp->b_proc = p;

	/*
	 * [mark the buffer busy for physical I/O]
	 * (i.e. set B_PHYS, because it's an I/O to user
	 * memory, and B_RAW, because B_RAW is to be
	 * "Set by physio for raw transfers", in addition
	 * to the read/write flag.)
	 */
        buf_setflags(bp, B_PHYS | B_RAW);

	/*
	 * [while there is data to transfer and no I/O error]
	 * Note that I/O errors are handled with a 'goto' at the bottom
	 * of the 'while' loop.
	 */
	while (uio_resid(uio) > 0) {
			
			if ( (iosize = uio_curriovlen(uio)) > MAXPHYSIO_WIRED)
			        iosize = MAXPHYSIO_WIRED;
			/*
			 * make sure we're set to issue a fresh I/O
			 * in the right direction
			 */
			buf_reset(bp, flags);

			/* [set up the buffer for a maximum-sized transfer] */
 			buf_setblkno(bp, uio_offset(uio) / blocksize);
			buf_setcount(bp, iosize);
			buf_setdataptr(bp, (uintptr_t)CAST_DOWN(caddr_t, uio_curriovbase(uio)));
			
			/*
			 * [call f_minphys to bound the transfer size]
			 * and remember the amount of data to transfer,
			 * for later comparison.
			 */
			(*f_minphys)(bp);
			todo = buf_count(bp);

			/*
			 * [lock the part of the user address space involved
			 *    in the transfer]
			 */

			if(UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
				error = vslock(CAST_USER_ADDR_T(buf_dataptr(bp)),
					       (user_size_t)todo);
				if (error)
					goto done;
			}
			
			/* [call f_strategy to start the transfer] */
			(*f_strategy)(bp);


			/* [wait for the transfer to complete] */
			error = (int)buf_biowait(bp);

			/*
			 * [unlock the part of the address space previously
			 *    locked]
			 */
			if(UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
				vsunlock(CAST_USER_ADDR_T(buf_dataptr(bp)),
					 (user_size_t)todo,
					 (flags & B_READ));

			/*
			 * [deduct the transfer size from the total
			 *    amount of data to transfer]
			 */
			done = buf_count(bp) - buf_resid(bp);
			uio_update(uio, done);

			/*
			 * Now, check for an error.
			 * Also, handle weird end-of-disk semantics.
			 */
			if (error || done < todo)
				goto done;
	}

done:
	if (buf_allocated)
	        buf_free(bp);
	else
		buf_setflags(bp, orig_bflags);

	return (error);
}
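
A raw character device typically wires physio() into its read/write entry points, supplying its strategy routine and a minphys clamp. A minimal sketch under those assumptions (mydev_strategy, mydev_read, and MYDEV_BLKSIZE are hypothetical):

#include <sys/buf.h>
#include <sys/uio.h>

#define MYDEV_BLKSIZE	512	/* hypothetical device sector size */

extern void mydev_strategy(buf_t bp);	/* hypothetical strategy routine */

/* physio() calls this once per iteration to bound each transfer. */
static u_int
mydev_minphys(buf_t bp)
{
	if (buf_count(bp) > 64 * 1024)
		buf_setcount(bp, 64 * 1024);
	return (buf_count(bp));
}

static int
mydev_read(dev_t dev, struct uio *uio, int ioflag)
{
	/* Passing bp == NULL makes physio() allocate and free the buf. */
	return (physio(mydev_strategy, NULL, dev, B_READ,
	    mydev_minphys, uio, MYDEV_BLKSIZE));
}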