Example #1
/*
 * General purpose yield system call
 *
 * MPSAFE
 */
int
sys_yield(struct yield_args *uap) 
{
	uap->sysmsg_result = 0;
	lwkt_user_yield();
	return(0);
}
Example #2
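/*
 * Yield the cpu and, at most once per second of wall-clock time,
 * check whether the current thread has a signal pending so that a
 * long-running HAMMER2 operation can be aborted with EINTR.
 */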
int
hammer2_signal_check(time_t *timep)
{
	int error = 0;

	lwkt_user_yield();
	if (*timep != time_second) {
		*timep = time_second;
		if (CURSIG(curthread->td_lwp) != 0)
			error = EINTR;
	}
	return error;
}
Example #3
/*
 * Check for a user signal interrupting a long operation
 *
 * MPSAFE
 */
int
hammer_signal_check(hammer_mount_t hmp)
{
	int sig;

	lwkt_user_yield();
	if (++hmp->check_interrupt < 100)
		return(0);
	hmp->check_interrupt = 0;

	if ((sig = CURSIG(curthread->td_lwp)) != 0)
		return(EINTR);
	return(0);
}
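Both signal-check helpers above are meant to be polled from a long-running kernel loop, aborting the loop when they return EINTR. A minimal sketch of such a caller, where work_remaining() and process_one_item() are hypothetical stand-ins and not part of the kernel source:

/*
 * Hypothetical long-running loop polling hammer_signal_check();
 * work_remaining() and process_one_item() are illustrative stand-ins.
 */
static int
process_all_items(hammer_mount_t hmp)
{
	int error = 0;

	while (work_remaining()) {
		process_one_item();
		/* yields, and every 100th call checks for a pending signal */
		error = hammer_signal_check(hmp);
		if (error)		/* EINTR: interrupted by the user */
			break;
	}
	return (error);
}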
Example #4
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we check
 * bwillread()/bwillwrite() before calling vn_rdwr().  We also call lwkt_user_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 *
 * MPSAFE
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
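		 *
		 * For example, with MAXBSIZE 65536 and a starting offset of
		 * 10000, the first chunk is 65536 - 10000 = 55536 bytes and
		 * every later chunk (except possibly the last) starts on a
		 * MAXBSIZE boundary.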
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (vp->v_type == VREG) {
			switch(rw) {
			case UIO_READ:
				bwillread(chunk);
				break;
			case UIO_WRITE:
				bwillwrite(chunk);
				break;
			}
		}
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		lwkt_user_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}
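The comment above explains why vn_rdwr_inchunks() splits large transfers and yields between chunks. A sketch of a caller writing a large kernel buffer this way; the helper name and the IO_UNIT flag choice are illustrative assumptions, not taken from the kernel source:

/*
 * Hypothetical helper: write a large kernel-space buffer to a vnode in
 * MAXBSIZE-aligned chunks (illustration only).
 */
static int
write_large_buffer(struct vnode *vp, caddr_t buf, int len, off_t offset,
		   struct ucred *cred)
{
	int resid = 0;
	int error;

	error = vn_rdwr_inchunks(UIO_WRITE, vp, buf, len, offset,
				 UIO_SYSSPACE, IO_UNIT, cred, &resid);
	if (error == 0 && resid != 0)
		error = EIO;		/* short write */
	return (error);
}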
Example #5
/*
 * UIO_READ:	copy the kernelspace cp to the user or kernelspace UIO
 * UIO_WRITE:	copy the user or kernelspace UIO to the kernelspace cp
 *
 * For userspace UIO's, uio_td must be the current thread.
 *
 * The syscall interface is responsible for limiting the length to
 * ssize_t for things like read() or write() which return the bytes
 * read or written as ssize_t.  These functions work with unsigned
 * lengths.
 */
int
uiomove(caddr_t cp, size_t n, struct uio *uio)
{
	thread_t td = curthread;
	struct iovec *iov;
	size_t cnt;
	size_t tot;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
	    ("uiomove proc"));

	crit_enter();
	save = td->td_flags & TDF_DEADLKTREAT;
	td->td_flags |= TDF_DEADLKTREAT;
	crit_exit();

	tot = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		tot += cnt;

		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			if (tot > 1024*1024)
				lwkt_user_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}

		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	crit_enter();
	td->td_flags = (td->td_flags & ~TDF_DEADLKTREAT) | save;
	crit_exit();

	return (error);
}
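The header comment describes uiomove() as the single copy primitive for both directions and both address spaces. A typical read-side caller copies out a kernel buffer starting at uio_offset; the sketch below uses hypothetical names and is an illustration only:

/*
 * Hypothetical read handler: copy out as much of a kernel buffer as
 * the uio asks for, starting at uio_offset.
 */
static int
kbuf_read(char *kbuf, size_t kbuf_len, struct uio *uio)
{
	size_t off = (size_t)uio->uio_offset;
	size_t n;

	if (uio->uio_offset < 0 || off >= kbuf_len)
		return (0);			/* EOF */
	n = kbuf_len - off;
	if (n > uio->uio_resid)
		n = uio->uio_resid;
	/* uiomove() picks copyout() vs bcopy() and advances the uio */
	return (uiomove(kbuf + off, n, uio));
}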
Example #6
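/*
 * Start a raw read of up to len bytes at loffset, bypassing the buffer
 * cache: translate the logical offset via VOP_BMAP(), map the user
 * buffer directly into the pbuf, and issue the I/O against the device
 * vnode.  A hole in the file is zero-filled instead of read from disk.
 */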
static int
ffs_rawread_readahead(struct vnode *vp, caddr_t udata, off_t loffset,
		      size_t len, struct buf *bp)
{
	int error;
	int iolen;
	int blockoff;
	int bsize;
	struct vnode *dp;
	int bforwards;
	
	bsize = vp->v_mount->mnt_stat.f_iosize;

	/*
	 * Make sure it fits into the pbuf
	 */
	iolen = (int)(intptr_t)udata & PAGE_MASK;
	if (len + iolen > bp->b_kvasize) {
		len = bp->b_kvasize;
		if (iolen != 0)
			len -= PAGE_SIZE;
	}

	/*
	 * Raw disk address is in bio2, but we wait for it to
	 * chain to bio1.
	 */
	bp->b_flags &= ~B_ERROR;
	bp->b_loffset = loffset;
	bp->b_bio2.bio_offset = NOOFFSET;
	bp->b_bio1.bio_done = biodone_sync;
	bp->b_bio1.bio_flags |= BIO_SYNC;

	blockoff = (loffset % bsize) / DEV_BSIZE;

	error = VOP_BMAP(vp, bp->b_loffset, &bp->b_bio2.bio_offset,
			 &bforwards, NULL, BUF_CMD_READ);
	if (error != 0)
		return error;
	dp = VTOI(vp)->i_devvp;
	if (bp->b_bio2.bio_offset == NOOFFSET) {
		/* 
		 * Fill holes with NULs to preserve semantics 
		 */
		if (len + blockoff * DEV_BSIZE > bsize)
			len = bsize - blockoff * DEV_BSIZE;
		
		if (vmapbuf(bp, udata, len) < 0)
			return EFAULT;
		
		lwkt_user_yield();
		bzero(bp->b_data, bp->b_bcount);

		/* Mark operation completed (similar to bufdone()) */

		bp->b_resid = 0;
		return 0;
	}
	
	if (len + blockoff * DEV_BSIZE > bforwards)
		len = bforwards - blockoff * DEV_BSIZE;
	bp->b_bio2.bio_offset += blockoff * DEV_BSIZE;
	
	if (vmapbuf(bp, udata, len) < 0)
		return EFAULT;
	
	/*
	 * Access the block device layer using the device vnode (dp) and
	 * the translated block number (bio2) instead of the logical block
	 * number (bio1).
	 *
	 * Even though we are bypassing the vnode layer, we still
	 * want the vnode state to indicate that an I/O on its behalf
	 * is in progress.
	 */
	bp->b_cmd = BUF_CMD_READ;
	bio_start_transaction(&bp->b_bio1, &vp->v_track_read);
	vn_strategy(dp, &bp->b_bio2);
	return 0;
}
Example #7
/*
 * UIO_READ:	copy the kernelspace cp to the user or kernelspace UIO
 * UIO_WRITE:	copy the user or kernelspace UIO to the kernelspace cp
 *
 * For userspace UIO's, uio_td must be the current thread.
 *
 * The syscall interface is responsible for limiting the length to
 * ssize_t for things like read() or write() which return the bytes
 * read or written as ssize_t.  These functions work with unsigned
 * lengths.
 */
int
uiomove(caddr_t cp, size_t n, struct uio *uio)
{
	struct iovec *iov;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));

	if (curproc) {
		save = curproc->p_flag & P_DEADLKTREAT;
		curproc->p_flag |= P_DEADLKTREAT;
	}

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
			lwkt_user_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	if (curproc)
		curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
	return (error);
}