Example #1
/*
 * Read from a shadowed vn device: each run of blocks is served either
 * from the shadow file (if already written there) or from the original
 * backing file, as decided by the shadow map.
 */
static int
vnread_shadow(struct vn_softc * vn, struct uio *uio, int ioflag,
	      vfs_context_t ctx)
{
	u_int32_t		blocksize = vn->sc_secsize;
	int 		error = 0;
	off_t		offset;
	user_ssize_t	resid;
	off_t		orig_offset;
	user_ssize_t	orig_resid;

	orig_resid = resid = uio_resid(uio);
	orig_offset = offset = uio_offset(uio);

	while (resid > 0) {
		u_int32_t		remainder;
		u_int32_t		this_block_number;
		u_int32_t		this_block_count;
		off_t		this_offset;
		user_ssize_t	this_resid;
		struct vnode *	vp;

		/* figure out which blocks to read */
		remainder = block_remainder(offset, blocksize);
		if (shadow_map_read(vn->sc_shadow_map,
				    block_truncate(offset, blocksize),
				    block_round(resid + remainder, blocksize),
				    &this_block_number, &this_block_count)) {
			vp = vn->sc_shadow_vp;
		}
		else {
			vp = vn->sc_vp;
		}

		/* read the blocks (or parts thereof) */
		this_offset = (off_t)this_block_number * blocksize + remainder;
		uio_setoffset(uio, this_offset);
		this_resid = this_block_count * blocksize - remainder;
		if (this_resid > resid) {
			this_resid = resid;
		}
		uio_setresid(uio, this_resid);
		error = VNOP_READ(vp, uio, ioflag, ctx);
		if (error) {
			break;
		}

		/* figure out how much we actually read */
		this_resid -= uio_resid(uio);
		if (this_resid == 0) {
			printf("vn device: vnread_shadow zero length read\n");
			break;
		}
		resid -= this_resid;
		offset += this_resid;
	}
	uio_setresid(uio, resid);
	uio_setoffset(uio, offset);
	return (error);
}
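
The loop above leans on three block-arithmetic helpers that are not shown on this page. A minimal sketch consistent with how they are used here (split an offset into a block number plus an in-block remainder, and round a byte count up to whole blocks):

static __inline__ off_t
block_truncate(off_t o, u_int32_t blocksize)
{
	return (o / blocksize);			/* block containing byte offset o */
}

static __inline__ off_t
block_round(off_t o, u_int32_t blocksize)
{
	return ((o + blocksize - 1) / blocksize);	/* blocks needed to cover o bytes */
}

static __inline__ u_int32_t
block_remainder(off_t o, u_int32_t blocksize)
{
	return ((u_int32_t)(o % blocksize));	/* byte position within its block */
}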
Example #2
/*
 * Get next character written in by user from uio.
 */
int
uwritec(uio_t uio)
{
	int c = 0;

	if (uio_resid(uio) <= 0)
		return (-1);
again:
	if (uio->uio_iovcnt <= 0)
		panic("uwritec: non-positive iovcnt");

	if (uio_iov_len(uio) == 0) {
		uio_next_iov(uio);
		if (--uio->uio_iovcnt == 0)
			return (-1);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
		c = fubyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	case UIO_USERSPACE64:
		c = fubyte((user_addr_t)uio->uio_iovs.iov64p->iov_base); 
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		c = *((caddr_t)uio->uio_iovs.iov32p->iov_base) & 0377;
		break;

	case UIO_USERISPACE32:
	case UIO_USERISPACE:
		c = fuibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
		break;

	default:
		c = 0;	/* avoid uninitialized variable warning */
		panic("uwritec: bogus uio_segflg");
		break;
	}
	if (c < 0)
		return (-1);
	uio_iov_base_add(uio, 1);
	uio_iov_len_add(uio, -1);
	uio_setresid(uio, (uio_resid(uio) - 1));
	uio->uio_offset++;
	return (c);
}
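
For context, a hypothetical caller (not part of the sources quoted here): a character-device write routine can drain its input one byte at a time, since uwritec itself advances the iovec, residual count, and offset on every successful call:

static int
sink_write(__unused dev_t dev, struct uio *uio, __unused int ioflag)
{
	int c;

	while (uio_resid(uio) > 0) {
		c = uwritec(uio);	/* consumes one byte and updates the uio */
		if (c < 0)
			return (EFAULT);	/* user fault or exhausted iovecs */
		/* ... process c ... */
	}
	return (0);
}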
Example #3
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
	if (uio_resid(uio) <= 0)
		panic("ureadc: non-positive resid");
again:
	if (uio->uio_iovcnt <= 0)
		panic("ureadc: non-positive iovcnt");
	if (uio_iov_len(uio) <= 0) {
		uio->uio_iovcnt--;
		uio_next_iov(uio);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE32:
	case UIO_USERSPACE:
		if (subyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
			return (EFAULT);
		break;

	case UIO_USERSPACE64:
		if (subyte((user_addr_t)uio->uio_iovs.iov64p->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE32:
	case UIO_SYSSPACE:
		*((caddr_t)uio->uio_iovs.iov32p->iov_base) = c;
		break;

	case UIO_USERISPACE32:
	case UIO_USERISPACE:
		if (suibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
			return (EFAULT);
		break;

	default:
		break;
	}
	uio_iov_base_add(uio, 1);
	uio_iov_len_add(uio, -1);
	uio_setresid(uio, (uio_resid(uio) - 1));
	uio->uio_offset++;
	return (0);
}
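
The read side is symmetric. A hypothetical device read routine (again, not from the quoted sources) can hand bytes to the user with ureadc until the caller's buffer is full or a fault is reported:

static int
pattern_read(__unused dev_t dev, struct uio *uio, __unused int ioflag)
{
	int error = 0;
	unsigned char c = 'A';

	while (uio_resid(uio) > 0 && error == 0)
		error = ureadc(c, uio);	/* delivers one byte, decrements resid */
	return (error);
}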
Example #4
int st_readwrite(dev_t dev, struct uio *uio, int ioflag)
{
	IOSCSITape			*st			= IOSCSITape::devices[minor(dev)];
	IOMemoryDescriptor	*dataBuffer	= IOMemoryDescriptorFromUIO(uio);
	int					status		= ENOSYS;
	IOReturn			opStatus	= kIOReturnError;
	int					lastRealizedBytes = 0;
	
	if (dataBuffer == 0)
		return ENOMEM;
	
	dataBuffer->prepare();		/* wire the buffer's pages for I/O */
	
	opStatus = st->ReadWrite(dataBuffer, &lastRealizedBytes);
	
	dataBuffer->complete();		/* unwire the pages */
	dataBuffer->release();
	
	if (opStatus == kIOReturnSuccess)
	{
		uio_setresid(uio, uio_resid(uio) - lastRealizedBytes);	/* charge only the bytes actually transferred */
		
		if (st->blkno != -1)
		{
			if (st->IsFixedBlockSize())
				st->blkno += (lastRealizedBytes / st->blksize);
			else
				st->blkno++;
		}

		status = KERN_SUCCESS;	/* KERN_SUCCESS == 0, so it doubles as errno success */
	}
	else if (st->sense_flags & SENSE_FILEMARK)
	{
		if (st->fileno != -1)
		{
			st->fileno++;
			st->blkno = 0;
		}
		
		status = KERN_SUCCESS;
	}
	
	return status;
}
Example #5
static int
nwrite_9p(node_9p *np, uio_t uio)
{
	openfid_9p *op;
	user_ssize_t resid;
	uint32_t l, sz;
	off_t off;
	char *p;
	int n, e;

	TRACE();
	/* prefer the write-only fid; fall back to the read/write fid */
	op = &np->openfid[OWRITE];
	if (op->fid == NOFID)
		op = &np->openfid[ORDWR];
	if (op->fid == NOFID)
		return EBADF;

	/* chunk size: the negotiated iounit, else the message size less the 9P I/O header */
	sz = np->iounit;
	if (sz == 0)
		sz = np->nmp->msize-IOHDRSZ;

	p = malloc_9p(sz);
	if (p == NULL)
		return ENOMEM;

	e = 0;
	while (uio_resid(uio) > 0) {
		l = 0;
		off = uio_offset(uio);
		resid = uio_resid(uio);
		n = MIN(resid, sz);
		if ((e=uiomove(p, n, uio)))	/* copies n bytes out and advances the uio by n */
			break;
		if ((e=write_9p(np->nmp, op->fid, p, n, off, &l)))
			break;
		/* rewind for a possible short write: only l of the n bytes went out */
		uio_setoffset(uio, off+l);
		uio_setresid(uio, resid-l);
	}
	free_9p(p);
	return e;
}
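
The rewind at the bottom of that loop is the essential use of uio_setresid here: uiomove optimistically advances the uio by n, and if the server accepts only l < n bytes, the offset and residual are pulled back so the next iteration retries exactly the unwritten tail. The same idiom in isolation, under hypothetical names:

/* after copying 'n' bytes out of the uio but transferring only 'l' of them */
static void
uio_rewind_short_write(uio_t uio, off_t off, user_ssize_t resid, uint32_t l)
{
	uio_setoffset(uio, off + l);	/* advance only past what really went out */
	uio_setresid(uio, resid - l);	/* put the untransferred bytes back */
}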
Example #6
/* write entry point for the vn (vnode disk) pseudo-device */
static int
vnwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct vfs_context  	context;
	int 			error;
	off_t			offset;
	proc_t			p;
	user_ssize_t		resid;
	struct vn_softc *	vn;
	int 			unit;

	unit = vnunit(dev);
	if (unit >= NVNDEVICE) {
		return (ENXIO);
	}
	p = current_proc();
	vn = vn_table + unit;
	if ((vn->sc_flags & VNF_INITED) == 0) {
		error = ENXIO;
		goto done;
	}
	if (vn->sc_flags & VNF_READONLY) {
		error = EROFS;
		goto done;
	}

	context.vc_thread = current_thread();
	context.vc_ucred = vn->sc_cred;

	error = vnode_getwithvid(vn->sc_vp, vn->sc_vid);
	if (error != 0) {
		/* the vnode is no longer available, abort */
		error = ENXIO;
		vnclear(vn, &context);
		goto done;
	}
	resid = uio_resid(uio);
	offset = uio_offset(uio);

	/*
	 * If out of bounds return an error.  If at the EOF point,
	 * simply write less.
	 */
	if (offset >= (off_t)vn->sc_fsize) {
		if (offset > (off_t)vn->sc_fsize) {
			error = EINVAL;
		}
		goto done;
	}
	/*
	 * If the request crosses EOF, truncate the request.
	 */
	if ((offset + resid) > (off_t)vn->sc_fsize) {
		resid = (off_t)vn->sc_fsize - offset;
		uio_setresid(uio, resid);
	}

	if (vn->sc_shadow_vp != NULL) {
		error = vnode_getwithvid(vn->sc_shadow_vp,
					 vn->sc_shadow_vid);
		if (error != 0) {
			/* the vnode is no longer available, abort */
			error = ENXIO;
			vnode_put(vn->sc_vp);
			vnclear(vn, &context);
			goto done;
		}
		error = vnwrite_shadow(vn, uio, ioflag, &context);
		vnode_put(vn->sc_shadow_vp);
	} else {
		error = VNOP_WRITE(vn->sc_vp, uio, ioflag, &context);
	}
	vnode_put(vn->sc_vp);
 done:
	return (error);
}
Example #7
/*
 * Write to a shadowed vn device: writes always land in the shadow file.
 * When the first or last block of the request is only partially covered
 * and has not been written to the shadow yet, that block is first copied
 * over from the backing file so the shadow block stays whole.
 */
static int
vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag,
	       vfs_context_t ctx)
{
	u_int32_t		blocksize = vn->sc_secsize;
	int 		error = 0;
	user_ssize_t	resid;
	off_t		offset;

	resid = uio_resid(uio);
	offset = uio_offset(uio);

	while (resid > 0) {
		int		flags = 0;
		u_int32_t		offset_block_number;
		u_int32_t		remainder;
		u_int32_t		resid_block_count;
		u_int32_t		shadow_block_count;
		u_int32_t		shadow_block_number;
		user_ssize_t	this_resid;

		/* figure out which blocks to write */
		offset_block_number = block_truncate(offset, blocksize);
		remainder = block_remainder(offset, blocksize);
		resid_block_count = block_round(resid + remainder, blocksize);
		/* figure out if the first or last blocks are partial writes */
		if (remainder > 0
		    && !shadow_map_is_written(vn->sc_shadow_map,
					      offset_block_number)) {
			/* the first block is a partial write */
			flags |= FLAGS_FIRST_BLOCK_PARTIAL;
		}
		if (resid_block_count > 1
		    && !shadow_map_is_written(vn->sc_shadow_map,
					      offset_block_number
					      + resid_block_count - 1)
		    && block_remainder(offset + resid, blocksize) > 0) {
			/* the last block is a partial write */
			flags |= FLAGS_LAST_BLOCK_PARTIAL;
		}
		if (shadow_map_write(vn->sc_shadow_map,
				     offset_block_number, resid_block_count,
				     &shadow_block_number, 
				     &shadow_block_count)) {
			/* shadow file is growing */
#if 0
			/* truncate the file to its new length before write */
			off_t	size;
			size = (off_t)shadow_map_shadow_size(vn->sc_shadow_map) 
				* vn->sc_secsize;
			vnode_setsize(vn->sc_shadow_vp, size, IO_SYNC, ctx);
#endif
		}
		/* write the blocks (or parts thereof) */
		uio_setoffset(uio, (off_t)
			      shadow_block_number * blocksize + remainder);
		this_resid = (off_t)shadow_block_count * blocksize - remainder;
		if (this_resid >= resid) {
			this_resid = resid;
			if ((flags & FLAGS_LAST_BLOCK_PARTIAL) != 0) {
				/* copy the last block to the shadow */
				u_int32_t 	d;
				u_int32_t	s;

				s = offset_block_number 
					+ resid_block_count - 1;
				d = shadow_block_number 
					+ shadow_block_count - 1;
				error = vncopy_block_to_shadow(vn, ctx, s, d);
				if (error) {
					printf("vnwrite_shadow: failed to copy"
					       " block %u to shadow block %u\n",
					       s, d);
					break;
				}
			}
		}
		uio_setresid(uio, this_resid);
		if ((flags & FLAGS_FIRST_BLOCK_PARTIAL) != 0) {
			/* copy the first block to the shadow */
			error = vncopy_block_to_shadow(vn, ctx,
						       offset_block_number,
						       shadow_block_number);
			if (error) {
				printf("vnwrite_shadow: failed to"
				       " copy block %u to shadow block %u\n", 
				       offset_block_number, 
				       shadow_block_number);
				break;
			}
		}
		error = VNOP_WRITE(vn->sc_shadow_vp, uio, ioflag, ctx);
		if (error) {
			break;
		}
		/* figure out how much we actually wrote */
		this_resid -= uio_resid(uio);
		if (this_resid == 0) {
			printf("vn device: vnwrite_shadow zero length write\n");
			break;
		}
		resid -= this_resid;
		offset += this_resid;
	}
	uio_setresid(uio, resid);
	uio_setoffset(uio, offset);
	return (error);
}
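
FLAGS_FIRST_BLOCK_PARTIAL and FLAGS_LAST_BLOCK_PARTIAL are private to this driver and their definitions are not shown on this page; from their use as independent bits, plausible definitions are simply:

#define FLAGS_FIRST_BLOCK_PARTIAL	0x1	/* request starts mid-block */
#define FLAGS_LAST_BLOCK_PARTIAL	0x2	/* request ends mid-block */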
Example #8
	// LP64todo - fix this! 'n' should be int64_t?
/*
 * Move up to 'n' bytes between the (kernel or physical) address 'cp' and
 * the current uio, dispatching on uio_segflg to pick the copy primitive.
 */
int
uiomove64(const addr64_t c_cp, int n, struct uio *uio)
{
	addr64_t cp = c_cp;
#if LP64KERN
	uint64_t acnt;
#else
	u_int acnt;
#endif
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); 
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		acnt = uio_iov_len(uio);
		if (acnt == 0) {
			uio_next_iov(uio);
			uio->uio_iovcnt--;
			continue;
		}
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);

					error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.iov64p->iov_base, acnt );

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);

			        error = copyin(uio->uio_iovs.iov64p->iov_base, CAST_DOWN(caddr_t, cp), acnt);

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);

					error = copyout( CAST_DOWN(caddr_t, cp), CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), acnt );

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);

			        error = copyin(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), CAST_DOWN(caddr_t, cp), acnt);

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), (caddr_t)uio->uio_iovs.iov32p->iov_base,
						   acnt);
			else
				error = copywithin((caddr_t)uio->uio_iovs.iov32p->iov_base, CAST_DOWN(caddr_t, cp),
						   acnt);
			break;

		case UIO_PHYS_USERSPACE64:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov64p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);

				error = copypv(uio->uio_iovs.iov64p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, (addr64_t)uio->uio_iovs.iov32p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);

				error = copypv((addr64_t)uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_PHYS_SYSSPACE32:
		case UIO_PHYS_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov32p->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
					 (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);
			  }
			else
			  {
			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);

				error = copypv(uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

			        KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
					 (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);
			  }
			if (error)
				return (error);
			break;

		default:
			break;
		}
		uio_iov_base_add(uio, acnt);
#if LP64KERN
		uio_iov_len_add(uio, -((int64_t)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int64_t)acnt)));
#else
		uio_iov_len_add(uio, -((int)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int)acnt)));
#endif
		uio->uio_offset += acnt;
		cp += acnt;
		n -= acnt;
	}
	return (error);
}
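
For completeness: in this era of the kernel the common entry point uiomove() was a thin wrapper that funneled into uiomove64. A sketch of such a wrapper (the exact cast varies by release):

int
uiomove(const char *cp, int n, struct uio *uio)
{
	return uiomove64((addr64_t)(uintptr_t)cp, n, uio);
}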