Example #1
/*
 * readdir returns directory entries from ptyfsnode (vp).
 *
 * the strategy here with ptyfs is to generate a single
 * directory entry at a time (struct dirent) and then
 * copy that out to userland using uiomove.  a more efficient,
 * though more complex, implementation would try to minimize
 * the number of calls to uiomove().  for ptyfs, this is
 * hardly worth the added code complexity.
 *
 * this should just be done through read()
 */
int
ptyfs_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	struct dirent *dp;
	struct ptyfsnode *ptyfs;
	off_t i;
	int error;
	off_t *cookies = NULL;
	int ncookies;
	struct vnode *vp;
	int nc = 0;

	vp = ap->a_vp;
	ptyfs = VTOPTYFS(vp);

	if (uio->uio_resid < UIO_MX)
		return EINVAL;
	if (uio->uio_offset < 0)
		return EINVAL;

	dp = malloc(sizeof(struct dirent), M_PTYFSTMP, M_WAITOK | M_ZERO);

	error = 0;
	i = uio->uio_offset;
	dp->d_reclen = UIO_MX;
	ncookies = uio->uio_resid / UIO_MX;

	if (ptyfs->ptyfs_type != PTYFSroot) {
		error = ENOTDIR;
		goto out;
	}

	if (i >= npty)
		goto out;

	if (ap->a_ncookies) {
		ncookies = min(ncookies, (npty + 2 - i));
		cookies = malloc(ncookies * sizeof (off_t),
		    M_TEMP, M_WAITOK);
		*ap->a_cookies = cookies;
	}

	for (; i < 2; i++) {
		/* `.' and/or `..' */
		dp->d_fileno = PTYFS_FILENO(0, PTYFSroot);
		dp->d_namlen = i + 1;
		(void)memcpy(dp->d_name, "..", dp->d_namlen);
		dp->d_name[i + 1] = '\0';
		dp->d_type = DT_DIR;
		if ((error = uiomove(dp, UIO_MX, uio)) != 0)
			goto out;
		if (cookies)
			*cookies++ = i + 1;
		nc++;
	}
	for (; uio->uio_resid >= UIO_MX && i < npty; i++) {
		/* check for used ptys */
		if (pty_isfree(i - 2, 1))
			continue;

		dp->d_fileno = PTYFS_FILENO(i - 2, PTYFSpts);
		dp->d_namlen = snprintf(dp->d_name, sizeof(dp->d_name),
		    "%lld", (long long)(i - 2));
		dp->d_type = DT_CHR;
		if ((error = uiomove(dp, UIO_MX, uio)) != 0)
			goto out;
		if (cookies)
			*cookies++ = i + 1;
		nc++;
	}

out:
	/* not pertinent in error cases */
	ncookies = nc;

	if (ap->a_ncookies) {
		if (error) {
			if (cookies)
				free(*ap->a_cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		} else
			*ap->a_ncookies = ncookies;
	}
	uio->uio_offset = i;
	free(dp, M_PTYFSTMP);
	return error;
}
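The pattern above (fill one struct dirent, then push it to userland with uiomove()) is the simplest correct readdir shape. A minimal sketch of just that core loop, assuming the BSD-style uiomove(void *, size_t, struct uio *) and the UIO_MX record size used by ptyfs_readdir(); nentries and the numeric entry names are hypothetical:

/*
 * Minimal sketch of the one-entry-at-a-time readdir loop above.
 * Assumes the BSD kernel uiomove() and UIO_MX record size from
 * ptyfs_readdir(); nentries and the naming scheme are hypothetical.
 */
static int
emit_dirents(struct uio *uio, off_t nentries)
{
	struct dirent d;
	off_t i;
	int error = 0;

	for (i = uio->uio_offset; i < nentries; i++) {
		if (uio->uio_resid < UIO_MX)
			break;		/* no room for a full record */
		memset(&d, 0, sizeof(d));
		d.d_fileno = i + 1;
		d.d_reclen = UIO_MX;
		d.d_type = DT_UNKNOWN;
		d.d_namlen = snprintf(d.d_name, sizeof(d.d_name),
		    "%lld", (long long)i);
		/* copy one whole record out; consumes UIO_MX of resid */
		if ((error = uiomove(&d, UIO_MX, uio)) != 0)
			break;
	}
	/* Like ptyfs, the offset is an entry index, not a byte count. */
	uio->uio_offset = i;
	return error;
}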
Example #2
int
bpf_movein(struct uio *uio, u_int linktype, struct mbuf **mp,
    struct sockaddr *sockp, struct bpf_insn *filter)
{
	struct mbuf *m;
	struct m_tag *mtag;
	int error;
	u_int hlen;
	u_int len;
	u_int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
		break;

	case DLT_IEEE802_11:
	case DLT_IEEE802_11_RADIO:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12; 	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	default:
		return (EIO);
	}

	if (uio->uio_resid > MCLBYTES)
		return (EIO);
	len = uio->uio_resid;

	MGETHDR(m, M_WAIT, MT_DATA);
	m->m_pkthdr.rcvif = 0;
	m->m_pkthdr.len = len - hlen;

	if (len > MHLEN) {
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;

	error = uiomove(mtod(m, caddr_t), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(filter, mtod(m, u_char *), len, len);
	if (slen < len) {
		error = EPERM;
		goto bad;
	}

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}
	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */
	}

	/*
	 * Prepend the data link type as a mbuf tag
	 */
	mtag = m_tag_get(PACKET_TAG_DLT, sizeof(u_int), M_NOWAIT);
	if (mtag == NULL) {
		error = ENOMEM;
		goto bad;
	}
	*(u_int *)(mtag + 1) = linktype;
	m_tag_prepend(m, mtag);

	return (0);
 bad:
	m_freem(m);
	return (error);
}
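Stripped of the link-header bookkeeping, the copy-in core of bpf_movein() is a common shape: bound the request, grab an mbuf (plus a cluster if needed), and let uiomove() fill it. A reduced sketch under the same mbuf macros; link-header and filter handling are omitted:

/*
 * Reduced sketch of the copy-in path above: one user write
 * becomes one mbuf.  Uses the same mbuf macros as bpf_movein().
 */
static int
user_to_mbuf(struct uio *uio, struct mbuf **mp)
{
	struct mbuf *m;
	u_int len = uio->uio_resid;
	int error;

	if (len > MCLBYTES)
		return (EIO);		/* too big for one cluster */

	MGETHDR(m, M_WAIT, MT_DATA);
	if (len > MHLEN) {
		MCLGET(m, M_WAIT);	/* attach cluster storage */
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
	m->m_len = m->m_pkthdr.len = len;

	error = uiomove(mtod(m, caddr_t), len, uio);
	if (error) {
		m_freem(m);
		return (error);
	}
	*mp = m;
	return (0);
}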
Example #3
/*ARGSUSED*/
int
maxrtc_write(dev_t dev, struct uio *uio, int flags)
{
	struct maxrtc_softc *sc;
	u_int8_t cmdbuf[2];
	int a, error, sverror;

	if ((sc = device_lookup_private(&maxrtc_cd, minor(dev))) == NULL)
		return (ENXIO);

	if (uio->uio_offset >= MAX6900_RAM_BYTES)
		return (EINVAL);

	if ((error = iic_acquire_bus(sc->sc_tag, 0)) != 0)
		return (error);

	/* Start by clearing the control register's write-protect bit. */
	cmdbuf[0] = MAX6900_REG_CONTROL | MAX6900_CMD_WRITE;
	cmdbuf[1] = 0;

	if ((error = iic_exec(sc->sc_tag, I2C_OP_WRITE, sc->sc_address,
			      cmdbuf, 1, &cmdbuf[1], 1, 0)) != 0) {
		iic_release_bus(sc->sc_tag, 0);
		aprint_error_dev(sc->sc_dev,
		    "maxrtc_write: failed to clear WP bit\n");
		return (error);
	}

	while (uio->uio_resid && uio->uio_offset < MAX6900_RAM_BYTES) {
		a = (int)uio->uio_offset;

		cmdbuf[0] = MAX6900_REG_RAM(a) | MAX6900_CMD_WRITE;
		if ((error = uiomove(&cmdbuf[1], 1, uio)) != 0)
			break;

		if ((error = iic_exec(sc->sc_tag, I2C_OP_WRITE, sc->sc_address,
				      cmdbuf, 1, &cmdbuf[1], 1, 0)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "maxrtc_write: write failed at 0x%x\n", a);
			break;
		}
	}

	/* Set the write-protect bit again. */
	cmdbuf[0] = MAX6900_REG_CONTROL | MAX6900_CMD_WRITE;
	cmdbuf[1] = MAX6900_CONTROL_WP;

	sverror = error;
	if ((error = iic_exec(sc->sc_tag, I2C_OP_WRITE_WITH_STOP,
			      sc->sc_address, cmdbuf, 1,
			      &cmdbuf[1], 1, 0)) != 0) {
		if (sverror != 0)
			error = sverror;
		aprint_error_dev(sc->sc_dev,
		    "maxrtc_write: failed to set WP bit\n");
	}

	iic_release_bus(sc->sc_tag, 0);

	return (error);
}
Example #4
static int
tmpfs_write (struct vop_write_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct tmpfs_node *node;
	boolean_t extended;
	off_t oldsize;
	int error;
	off_t base_offset;
	size_t offset;
	size_t len;
	struct rlimit limit;
	int trivial = 0;
	int kflags = 0;

	error = 0;
	if (uio->uio_resid == 0) {
		return error;
	}

	node = VP_TO_TMPFS_NODE(vp);

	if (vp->v_type != VREG)
		return (EINVAL);

	lwkt_gettoken(&vp->v_mount->mnt_token);

	oldsize = node->tn_size;
	if (ap->a_ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;

	/*
	 * Check for illegal write offsets.
	 */
	if (uio->uio_offset + uio->uio_resid >
	  VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize) {
		lwkt_reltoken(&vp->v_mount->mnt_token);
		return (EFBIG);
	}

	if (vp->v_type == VREG && td != NULL) {
		error = kern_getrlimit(RLIMIT_FSIZE, &limit);
		if (error != 0) {
			lwkt_reltoken(&vp->v_mount->mnt_token);
			return error;
		}
		if (uio->uio_offset + uio->uio_resid > limit.rlim_cur) {
			ksignal(td->td_proc, SIGXFSZ);
			lwkt_reltoken(&vp->v_mount->mnt_token);
			return (EFBIG);
		}
	}


	/*
	 * Extend the file's size if necessary
	 */
	extended = ((uio->uio_offset + uio->uio_resid) > node->tn_size);

	while (uio->uio_resid > 0) {
		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & BMASK;
		base_offset = (off_t)uio->uio_offset - offset;
		len = BSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;

		if ((uio->uio_offset + len) > node->tn_size) {
			trivial = (uio->uio_offset <= node->tn_size);
			error = tmpfs_reg_resize(vp, uio->uio_offset + len, trivial);
			if (error)
				break;
		}

		/*
		 * Read to fill in any gaps.  Theoretically we could
		 * optimize this if the write covers the entire buffer
		 * and is not a UIO_NOCOPY write, however this can lead
		 * to a security violation exposing random kernel memory
		 * (whatever junk was in the backing VM pages before).
		 *
		 * So just use bread() to do the right thing.
		 */
		error = bread(vp, base_offset, BSIZE, &bp);
		if (error) {
			kprintf("tmpfs_write bread error %d\n", error);
			brelse(bp);
			break;
		}
		error = uiomove((char *)bp->b_data + offset, len, uio);
		if (error) {
			kprintf("tmpfs_write uiomove error %d\n", error);
			brelse(bp);
			break;
		}

		if (uio->uio_offset > node->tn_size) {
			node->tn_size = uio->uio_offset;
			kflags |= NOTE_EXTEND;
		}
		kflags |= NOTE_WRITE;

		/*
		 * Always try to flush the page if the request is coming
		 * from the pageout daemon (IO_ASYNC), else buwrite() the
		 * buffer.
		 *
		 * buwrite() dirties the underlying VM pages instead of
		 * dirtying the buffer, releasing the buffer as a clean
		 * buffer.  This allows tmpfs to use essentially all
		 * available memory to cache file data.  If we used bdwrite()
		 * the buffer cache would wind up flushing the data to
		 * swap too quickly.
		 */
		bp->b_flags |= B_AGE;
		if (ap->a_ioflag & IO_ASYNC) {
			bawrite(bp);
		} else {
			buwrite(bp);
		}

		if (bp->b_error) {
			kprintf("tmpfs_write bwrite error %d\n", bp->b_error);
			break;
		}
	}

	if (error) {
		if (extended) {
			(void)tmpfs_reg_resize(vp, oldsize, trivial);
			kflags &= ~NOTE_EXTEND;
		}
		goto done;
	}

	/*
	 * Currently we don't set the mtime on files modified via mmap()
	 * because we can't tell the difference between those modifications
	 * and an attempt by the pageout daemon to flush tmpfs pages to
	 * swap.
	 *
	 * This is because in order to defer flushes as long as possible
	 * buwrite() works by marking the underlying VM pages dirty in
	 * order to be able to dispose of the buffer cache buffer without
	 * flushing it.
	 */
	TMPFS_NODE_LOCK(node);
	if (uio->uio_segflg != UIO_NOCOPY)
		node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED;
	if (extended)
		node->tn_status |= TMPFS_NODE_CHANGED;

	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(ap->a_cred, PRIV_VFS_RETAINSUGID, 0))
			node->tn_mode &= ~(S_ISUID | S_ISGID);
	}
	TMPFS_NODE_UNLOCK(node);
done:

	tmpfs_knote(vp, kflags);


	lwkt_reltoken(&vp->v_mount->mnt_token);
	return(error);
}
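The heart of the loop above is a read-modify-write against the buffer cache: bread() the block so untouched bytes survive, uiomove() the user data over the right slice, then write the buffer back. A minimal sketch of one iteration under the same BSIZE/BMASK constants; plain bwrite() stands in for the buwrite()/bawrite() policy chosen above:

/*
 * Sketch of one read-modify-write step from the loop above.
 * Caller must clamp len to BSIZE - offset.  bwrite() stands in
 * for the buwrite()/bawrite() policy decision made above.
 */
static int
write_one_block(struct vnode *vp, struct uio *uio, size_t len)
{
	struct buf *bp;
	size_t offset = (size_t)uio->uio_offset & BMASK;
	off_t base_offset = (off_t)uio->uio_offset - offset;
	int error;

	error = bread(vp, base_offset, BSIZE, &bp);	/* keep old bytes */
	if (error) {
		brelse(bp);
		return error;
	}
	error = uiomove((char *)bp->b_data + offset, len, uio);
	if (error) {
		brelse(bp);
		return error;
	}
	bwrite(bp);		/* synchronous write-back */
	return 0;
}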
Example #5
/* ARGSUSED */
static int
udsir_read(void *h, struct uio *uio, int flag)
{
	struct udsir_softc *sc = h;
	int s;
	int error;
	u_int uframelen;

	DPRINTFN(1, ("%s: sc=%p\n", __func__, sc));

	if (sc->sc_dying)
		return EIO;

#ifdef DIAGNOSTIC
	if (sc->sc_rd_buf == NULL)
		return EINVAL;
#endif

	sc->sc_refcnt++;

	if (!sc->sc_rd_readinprogress && !UDSIR_BLOCK_RX_DATA(sc))
		/* Possibly wake up polling thread */
		wakeup(&sc->sc_thread);

	do {
		s = splusb();
		while (sc->sc_ur_framelen == 0) {
			DPRINTFN(5, ("%s: calling tsleep()\n", __func__));
			error = tsleep(&sc->sc_ur_framelen, PZERO | PCATCH,
				       "usirrd", 0);
			if (sc->sc_dying)
				error = EIO;
			if (error) {
				splx(s);
				DPRINTFN(0, ("%s: tsleep() = %d\n",
					     __func__, error));
				goto ret;
			}
		}
		splx(s);

		uframelen = sc->sc_ur_framelen;
		DPRINTFN(1, ("%s: sc=%p framelen=%u, hdr=0x%02x\n",
			     __func__, sc, uframelen, sc->sc_ur_buf[0]));
		if (uframelen > uio->uio_resid)
			error = EINVAL;
		else
			error = uiomove(sc->sc_ur_buf, uframelen, uio);
		sc->sc_ur_framelen = 0;

		if (deframe_rd_ur(sc) == 0 && uframelen > 0) {
			/*
			 * Need to wait for another read to obtain a
			 * complete frame...  If we also obtained
			 * actual data, wake up the possibly sleeping
			 * thread immediately...
			 */
			wakeup(&sc->sc_thread);
		}
	} while (uframelen == 0);

	DPRINTFN(1, ("%s: return %d\n", __func__, error));

 ret:
	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);
	return error;
}
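udsir_read() delivers data a frame at a time: sleep until the poll thread posts a complete frame, refuse callers whose buffer is smaller than the frame, and hand the whole frame over in one uiomove(). A skeleton of that contract, with hypothetical softc fields standing in for sc_ur_framelen/sc_ur_buf; the splusb() locking is omitted for brevity:

/*
 * Skeleton of the frame-at-a-time read above.  frame_len and
 * frame_buf are hypothetical stand-ins for sc_ur_framelen and
 * sc_ur_buf; interrupt protection is omitted.
 */
struct frame_softc {
	u_int	frame_len;	/* 0 means no frame pending */
	u_char	frame_buf[4096];
};

static int
frame_read(struct frame_softc *sc, struct uio *uio)
{
	int error;

	while (sc->frame_len == 0) {
		error = tsleep(&sc->frame_len, PZERO | PCATCH, "frmrd", 0);
		if (error)
			return error;	/* signal caught */
	}
	if (sc->frame_len > uio->uio_resid)
		return EINVAL;		/* never split a frame */

	error = uiomove(sc->frame_buf, sc->frame_len, uio);
	sc->frame_len = 0;		/* frame consumed, even on error */
	return error;
}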
Example #6
int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int flags, error = 0;
	char buf[UGEN_BBSIZE];
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", sc->sc_dev.dv_xname, endpt));

	if (usbd_is_dying(sc->sc_udev))
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif
	flags = USBD_SYNCHRONOUS;
	if (sce->timeout == 0)
		flags |= USBD_CATCH;

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomove(buf, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
			    flags, sce->timeout, NULL);
			err = usbd_transfer(xfer);
			if (err) {
				usbd_clear_endpoint_stall(sce->pipeh);
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomove(buf, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
			    flags, sce->timeout, NULL);
			err = usbd_transfer(xfer);
			if (err) {
				usbd_clear_endpoint_stall(sce->pipeh);
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
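Both arms of the switch above are the same bounce-buffer loop: peel at most a buffer's worth off the uio with uiomove(), ship it, and repeat until uio_resid hits zero. The shape in isolation, with a hypothetical device_out() in place of the usbd_setup_xfer()/usbd_transfer() pair:

/*
 * The bounce-buffer write loop shared by both transfer types
 * above.  device_out() is hypothetical.
 */
static int device_out(const void *buf, u_int32_t len);	/* hypothetical */

static int
chunked_write(struct uio *uio)
{
	char buf[UGEN_BBSIZE];
	u_int32_t n;
	int error = 0;

	while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
		error = uiomove(buf, n, uio);	/* user -> bounce buffer */
		if (error)
			break;
		error = device_out(buf, n);	/* bounce buffer -> device */
		if (error)
			break;
	}
	return (error);
}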
Example #7
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_long c = 0, v;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr, eaddr;

	GIANT_REQUIRED;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (minor(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;
kmemphys:
			o = v & PAGE_MASK;
			c = min(uio->uio_resid, (u_int)(PAGE_SIZE - o));
			error = uiomove((void *)PHYS_TO_DMAP(v), (int)c, uio);
			continue;
		}
		else if (minor(dev) == CDEV_MINOR_KMEM) {
			v = uio->uio_offset;

			if (v >= DMAP_MIN_ADDRESS && v < DMAP_MAX_ADDRESS) {
				v = DMAP_TO_PHYS(v);
				goto kmemphys;
			}

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(v);
			eaddr = round_page(v + c);

			if (addr < VM_MIN_KERNEL_ADDRESS)
				return (EFAULT);
			for (; addr < eaddr; addr += PAGE_SIZE) 
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);

			if (!kernacc((caddr_t)(long)v, c,
			    uio->uio_rw == UIO_READ ? 
			    VM_PROT_READ : VM_PROT_WRITE))
				return (EFAULT);

			error = uiomove((caddr_t)(long)v, (int)c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}
Example #8
int
ttyinq_read_uio(struct ttyinq *ti, struct tty *tp, struct uio *uio,
    size_t rlen, size_t flen)
{

	MPASS(rlen <= uio->uio_resid);

	while (rlen > 0) {
		int error;
		struct ttyinq_block *tib;
		size_t cbegin, cend, clen;

		/* See if there still is data. */
		if (ti->ti_begin == ti->ti_linestart)
			return (0);
		tib = ti->ti_firstblock;
		if (tib == NULL)
			return (0);

		/*
		 * The end address should be the lowest of these three:
		 * - The write pointer
		 * - The blocksize - we can't read beyond the block
		 * - The end address if we could perform the full read
		 */
		cbegin = ti->ti_begin;
		cend = MIN(MIN(ti->ti_linestart, ti->ti_begin + rlen),
		    TTYINQ_DATASIZE);
		clen = cend - cbegin;
		MPASS(clen >= flen);
		rlen -= clen;

		/*
		 * We can prevent buffering in some cases:
		 * - We need to read the block until the end.
		 * - We don't need to read the block until the end, but
		 *   there is no data beyond it, which allows us to move
		 *   the write pointer to a new block.
		 */
		if (cend == TTYINQ_DATASIZE || cend == ti->ti_end) {
			/*
			 * Fast path: zero copy. Remove the first block,
			 * so we can unlock the TTY temporarily.
			 */
			TTYINQ_REMOVE_HEAD(ti);
			ti->ti_begin = 0;

			/*
			 * Because we remove the first block, we must
			 * fix up the block offsets.
			 */
#define CORRECT_BLOCK(t) do {			\
	if (t <= TTYINQ_DATASIZE)		\
		t = 0;				\
	else					\
		t -= TTYINQ_DATASIZE;		\
} while (0)
			CORRECT_BLOCK(ti->ti_linestart);
			CORRECT_BLOCK(ti->ti_reprint);
			CORRECT_BLOCK(ti->ti_end);
#undef CORRECT_BLOCK

			/*
			 * Temporary unlock and copy the data to
			 * userspace. We may need to flush trailing
			 * bytes, like EOF characters.
			 */
			tty_unlock(tp);
			error = uiomove(tib->tib_data + cbegin,
			    clen - flen, uio);
			tty_lock(tp);

			/* Block can now be readded to the list. */
			TTYINQ_RECYCLE(ti, tib);
		} else {
			char ob[TTYINQ_DATASIZE - 1];

			/*
			 * Slow path: store data in a temporary buffer.
			 */
			memcpy(ob, tib->tib_data + cbegin, clen - flen);
			ti->ti_begin += clen;
			MPASS(ti->ti_begin < TTYINQ_DATASIZE);

			/* Temporary unlock and copy the data to userspace. */
			tty_unlock(tp);
			error = uiomove(ob, clen - flen, uio);
			tty_lock(tp);
		}

		if (error != 0)
			return (error);
		if (tty_gone(tp))
			return (ENXIO);
	}

	return (0);
}
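Both paths above obey the same rule: never call uiomove() while holding the tty lock, because copying to userspace can fault and sleep. Either unhook the block first (fast path) or snapshot into a stack buffer (slow path). The slow-path shape in isolation, assuming the tty_lock()/tty_unlock() calls used in this example:

/*
 * The slow-path copy above, reduced to its locking shape: data
 * is snapshotted under the lock, then copied out unlocked since
 * uiomove() may fault and sleep.  Caller guarantees
 * len <= TTYINQ_DATASIZE and holds the tty lock on entry.
 */
static int
copyout_unlocked(struct tty *tp, const void *src, size_t len,
    struct uio *uio)
{
	char tmp[TTYINQ_DATASIZE];
	int error;

	memcpy(tmp, src, len);		/* snapshot while locked */

	tty_unlock(tp);
	error = uiomove(tmp, len, uio);	/* may sleep; lock is dropped */
	tty_lock(tp);

	return (error);
}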
Example #9
static int
ams_read(struct cdev *dev, struct uio *uio, int flag)
{
	struct adb_mouse_softc *sc;
	size_t len;
	int8_t outpacket[8];
	int error;

	sc = CDEV_GET_SOFTC(dev);
	if (sc == NULL)
		return (EIO);

	if (uio->uio_resid <= 0)
		return (0);

	mtx_lock(&sc->sc_mtx);

	if (!sc->packet_read_len) {
		if (sc->xdelta == 0 && sc->ydelta == 0 && 
		   sc->buttons == sc->last_buttons) {

			if (flag & O_NONBLOCK) {
				mtx_unlock(&sc->sc_mtx);
				return (EWOULDBLOCK);
			}

			/* Otherwise, block on new data */
			error = cv_wait_sig(&sc->sc_cv, &sc->sc_mtx);
			if (error) {
				mtx_unlock(&sc->sc_mtx);
				return (error);
			}
		}

		sc->packet[0] = 1 << 7;
		sc->packet[0] |= (!(sc->buttons & 1)) << 2;
		sc->packet[0] |= (!(sc->buttons & 4)) << 1;
		sc->packet[0] |= (!(sc->buttons & 2));

		if (sc->xdelta > 127) {
			sc->packet[1] = 127;
			sc->packet[3] = sc->xdelta - 127;
		} else if (sc->xdelta < -127) {
			sc->packet[1] = -127;
			sc->packet[3] = sc->xdelta + 127;
		} else {
			sc->packet[1] = sc->xdelta;
			sc->packet[3] = 0;
		}

		if (sc->ydelta > 127) {
			sc->packet[2] = 127;
			sc->packet[4] = sc->ydelta - 127;
		} else if (sc->ydelta < -127) {
			sc->packet[2] = -127;
			sc->packet[4] = sc->ydelta + 127;
		} else {
			sc->packet[2] = sc->ydelta;
			sc->packet[4] = 0;
		}

		/* No Z movement */
		sc->packet[5] = 0;
		sc->packet[6] = 0; 

		sc->packet[7] = ~((uint8_t)(sc->buttons >> 3)) & 0x7f;


		sc->last_buttons = sc->buttons;
		sc->xdelta = 0;
		sc->ydelta = 0;

		sc->packet_read_len = sc->mode.packetsize;
	}

	len = (sc->packet_read_len > uio->uio_resid) ? 
		uio->uio_resid : sc->packet_read_len;

	memcpy(outpacket, sc->packet +
	    (sc->mode.packetsize - sc->packet_read_len), len);
	sc->packet_read_len -= len;

	mtx_unlock(&sc->sc_mtx);

	error = uiomove(outpacket, len, uio);

	return (error);
}
Example #10
int
gsc_config(dev_t devno, int cmd, struct uio * uiop)
{
    struct gsc_ddsinfo ddsinfo;
    gsc_softc_t *sp;
    int result, i, unit;
    extern int nodev();
    static struct devsw gsc_dsw = {
	gsc_open,	/* entry point for open routine */
	gsc_close,	/* entry point for close routine */
	nodev,		/* entry point for read routine */
	nodev,		/* entry point for write routine */
	gsc_ioctl,	/* entry point for ioctl routine */
	nodev,		/* entry point for strategy routine */
	0,		/* pointer to tty device structure */
	nodev,		/* entry point for select routine */
	gsc_config,	/* entry point for config routine */
	nodev,		/* entry point for print routine */
	nodev,		/* entry point for dump routine */
	nodev,		/* entry point for mpx routine */
	nodev,		/* entry point for revoke routine */
	NULL,		/* pointer to device specific data */
	NULL,		/* select pointer */
	DEV_MPSAFE
    };

    if (lockl(&config_lock, LOCK_SHORT) != LOCK_SUCC) {
	return (EINVAL);
    }
    unit = minor(devno);
    if (unit < 0 || unit >= MAX_UNITS) {
	Trace2(0, "%d: bad unit %d", __LINE__, unit);
	result = EINVAL;
	unlockl(&config_lock);
	return (result);
    }

    switch (cmd) {
    case CFG_INIT:
	Trace2(2, "CFG_INIT: unit %d nunit %d\n", unit, nunits);
	/*
	 * Initialize softinfo, first time around.
	 */
	if (nunits == 0) {
	    memset(softinfo, 0, sizeof (softinfo));
	}
	/*
	 * Copy in DDS information
	 */
	uiomove((caddr_t) &ddsinfo, sizeof ddsinfo, UIO_WRITE, uiop);
	sp = &softinfo[unit];
	if (sp->iscfg) {
	    Trace1(0, "CFG_INIT: unit %d already configd", unit);
	    result = EBUSY;
	    break;
	}
	lock_alloc(&sp->dd_lock, LOCK_ALLOC_PIN, DD_LOCK, -1);
	lock_alloc(&sp->buf_lock, LOCK_ALLOC_PIN, DD_LOCK, -1);
	simple_lock_init(&sp->dd_lock);
	sp->dev = ddsinfo.busid;
	sp->tgt = ddsinfo.target;
	sp->lun = ddsinfo.lun;
	sp->cbuf.index = sp->rbuf.index = unit;
	/*
	 * If this is the first time through:
	 *   Add entry to the device switch table to call this driver
	 *   Pin driver code.
	 */
	if (nunits == 0) {
	    result = devswadd(devno, &gsc_dsw);
	    if (result != 0) {
		Trace1(0, "CFG_INIT: devswadd result: %d", result);
		break;
	    }
	    result = pincode((int (*) ()) gscdd_intr);
	    if (result) {
		Trace1(0, "CFG_INIT: pincode result: %d", result);
		devswdel(devno);
		break;
	    }
	}
	sp->iscfg = 1;
	result = gsopen(sp);
	if (result) {
	    Trace2(0, "CFG_INIT: gsopen returns %d for unit %d", result, unit);
	    sp->iscfg = 0;
	    gsclose(sp, devno);
	    break;
	}
	if (nunits <= unit)
	    nunits = unit + 1;
	sp->iscfg = 1;
	break;

    case CFG_TERM:
	Trace1(2, "CFG_TERM unit %d", unit);
	result = 0;
	sp = &softinfo[unit];
	if (sp->iscfg == 0) {
	    Trace1(0, "CFG_TERM: unit %d not already configd", unit);
	    result = ENXIO;
	    break;
	} else if (sp->isopen) {
	    Trace1(0, "CFG_TERM: unit %d open", unit);
	    result = EBUSY;
	    break;
	}
	sp->iscfg = 0;	/* block further actions */
	gsclose(sp, devno);
	break;

    default:
	result = EINVAL;
	break;
    }
    unlockl(&config_lock);
    return (result);
}
Example #11
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct dirent de;
	struct componentname cn;
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error;
	long offset, limit;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);
	offset = uio->uio_offset / DE_SIZE;	/* offset in the directory */
	limit = uio->uio_resid / DE_SIZE;
	if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0)
		return EINVAL;
	while (limit && offset < 2) {
		limit--;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = (offset == 0) ? np->n_ino :
		    (np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2);
		if (de.d_fileno == 0)
			de.d_fileno = 0x7ffffffd + offset;
		de.d_namlen = offset + 1;
		de.d_name[0] = '.';
		de.d_name[1] = '.';
		de.d_name[offset + 1] = '\0';
		de.d_type = DT_DIR;
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			return error;
		offset++;
		uio->uio_offset += DE_SIZE;
	}
	if (limit == 0)
		return 0;
	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("can not open search, error = %d", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else
		ctx = np->n_dirseq;
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs++, &scred);
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	for (; limit; limit--, offset++) {
		error = smbfs_findnext(ctx, limit, &scred);
		if (error)
			break;
		np->n_dirofs++;
		bzero((caddr_t)&de, DE_SIZE);
		de.d_reclen = DE_SIZE;
		de.d_fileno = ctx->f_attr.fa_ino;
		de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG;
		de.d_namlen = ctx->f_nmlen;
		bcopy(ctx->f_name, de.d_name, de.d_namlen);
		de.d_name[de.d_namlen] = '\0';
		if (smbfs_fastlookup) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error) {
				cn.cn_nameptr = de.d_name;
				cn.cn_namelen = de.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			}
		}
		error = uiomove(&de, DE_SIZE, uio);
		if (error)
			break;
	}
	if (error == ENOENT)
		error = 0;
	uio->uio_offset = offset * DE_SIZE;
	return error;
}
Example #12
/*
 * Do I/O to a block of a file that doesn't cover the whole block.  We
 * need to read in the original block first, even if we're writing, so
 * we don't clobber the portion of the block we're not intending to
 * write over.
 *
 * skipstart is the number of bytes to skip past at the beginning of
 * the sector; len is the number of bytes to actually read or write.
 * uio is the area to do the I/O into.
 */
static
int
sfs_partialio(struct sfs_vnode *sv, struct uio *uio,
	      u_int32_t skipstart, u_int32_t len)
{
	/*
	 * I/O buffer for handling partial sectors.
	 *
	 * Note: in real life (and when you've done the fs assignment)
	 * you would get space from the disk buffer cache for this,
	 * not use a static area.
	 */
	static char iobuf[SFS_BLOCKSIZE];

	struct sfs_fs *sfs = sv->sv_v.vn_fs->fs_data;
	u_int32_t diskblock;
	u_int32_t fileblock;
	int result;
	
	/* Allocate missing blocks if and only if we're writing */
	int doalloc = (uio->uio_rw==UIO_WRITE);

	assert(skipstart + len <= SFS_BLOCKSIZE);

	/* Compute the block offset of this block in the file */
	fileblock = uio->uio_offset / SFS_BLOCKSIZE;

	/* Get the disk block number */
	result = sfs_bmap(sv, fileblock, doalloc, &diskblock);
	if (result) {
		return result;
	}

	if (diskblock == 0) {
		/*
		 * There was no block mapped at this point in the file.
		 * Zero the buffer.
		 */
		assert(uio->uio_rw == UIO_READ);
		bzero(iobuf, sizeof(iobuf));
	}
	else {
		/*
		 * Read the block.
		 */
		result = sfs_rblock(sfs, iobuf, diskblock);
		if (result) {
			return result;
		}
	}

	/*
	 * Now perform the requested operation into/out of the buffer.
	 */
	result = uiomove(iobuf+skipstart, len, uio);
	if (result) {
		return result;
	}

	/*
	 * If it was a write, write back the modified block.
	 */
	if (uio->uio_rw == UIO_WRITE) {
		result = sfs_wblock(sfs, iobuf, diskblock);
		if (result) {
			return result;
		}
	}

	return 0;
}
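A caller typically brackets whole-block transfers with sfs_partialio() for the unaligned ends. A sketch of that split, assuming SFS_BLOCKSIZE as above; sfs_io_block() is a hypothetical whole-block helper:

/*
 * Sketch: carve a request into the leading partial block, whole
 * middle blocks, and trailing partial block.  sfs_io_block() is
 * hypothetical.
 */
static int sfs_io_block(struct sfs_vnode *sv, struct uio *uio);	/* hypothetical */

static int
sfs_io_span(struct sfs_vnode *sv, struct uio *uio)
{
	u_int32_t skipstart, len;
	int result;

	/* Leading partial block, if the offset is unaligned. */
	skipstart = uio->uio_offset % SFS_BLOCKSIZE;
	if (skipstart != 0) {
		len = SFS_BLOCKSIZE - skipstart;
		if (len > uio->uio_resid)
			len = uio->uio_resid;
		result = sfs_partialio(sv, uio, skipstart, len);
		if (result)
			return result;
	}

	/* Whole blocks in the middle. */
	while (uio->uio_resid >= SFS_BLOCKSIZE) {
		result = sfs_io_block(sv, uio);
		if (result)
			return result;
	}

	/* Trailing partial block starts at offset 0 in its block. */
	if (uio->uio_resid > 0)
		return sfs_partialio(sv, uio, 0, uio->uio_resid);
	return 0;
}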
Example #13
/* ARGSUSED */
static int
evtchndrv_read(dev_t dev, struct uio *uio, cred_t *cr)
{
	int rc = 0;
	ssize_t count;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof (evtchn_port_t) - 1);

	if (count == 0)
		return (0);

	if (count > PAGESIZE)
		count = PAGESIZE;

	mutex_enter(&ep->evtchn_lock);
	for (;;) {
		if (ep->ring_overflow) {
			rc = EFBIG;
			goto done;
		}

		if ((c = ep->ring_cons) != (p = ep->ring_prod))
			break;

		if (uio->uio_fmode & O_NONBLOCK) {
			rc = EAGAIN;
			goto done;
		}

		if (cv_wait_sig(&ep->evtchn_wait, &ep->evtchn_lock) == 0) {
			rc = EINTR;
			goto done;
		}
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
		    sizeof (evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof (evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof (evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	if (uiomove(&ep->ring[EVTCHN_RING_MASK(c)], bytes1, UIO_READ, uio) ||
	    ((bytes2 != 0) && uiomove(&ep->ring[0], bytes2, UIO_READ, uio))) {
		rc = EFAULT;
		goto done;
	}

	ep->ring_cons += (bytes1 + bytes2) / sizeof (evtchn_port_t);
done:
	mutex_exit(&ep->evtchn_lock);
	return (rc);
}
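The two-chunk arithmetic above is the standard way to drain a power-of-two ring with free-running indices: at most one uiomove() up to the wrap point and one for the remainder. A compact version, using the 4-argument Solaris-style uiomove(addr, len, rwflag, uio) from this example; RING_SIZE is a hypothetical power-of-two stand-in for EVTCHN_RING_SIZE:

/*
 * Drain a power-of-two ring with at most two uiomove() calls:
 * one to the wrap point, one for the wrapped remainder.
 * cons/prod are free-running, so prod - cons is the fill level.
 */
static int
ring_copyout(char *ring, size_t cons, size_t prod, struct uio *uio)
{
	size_t avail = prod - cons;
	size_t off = cons & (RING_SIZE - 1);
	size_t chunk1 = RING_SIZE - off;	/* up to the wrap point */
	int rc;

	if (chunk1 > avail)
		chunk1 = avail;
	rc = uiomove(ring + off, chunk1, UIO_READ, uio);
	if (rc == 0 && avail > chunk1)		/* wrapped remainder */
		rc = uiomove(ring, avail - chunk1, UIO_READ, uio);
	return rc;
}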
Example #14
int
tpmread(dev_t dev, struct uio *uio, int flags)
{
	struct tpm_softc *sc = TPMSOFTC(dev);
	u_int8_t buf[TPM_BUFSIZ], *p;
	size_t cnt;
	int n, len, rv, s;

	if (!sc)
		return ENXIO;

	s = spltty();
	if ((rv = (sc->sc_start)(sc, UIO_READ))) {
		splx(s);
		return rv;
	}

#ifdef TPM_DEBUG
	printf("tpmread: getting header\n");
#endif
	if ((rv = (sc->sc_read)(sc, buf, TPM_HDRSIZE, &cnt, 0))) {
		(sc->sc_end)(sc, UIO_READ, rv);
		splx(s);
		return rv;
	}

	len = (buf[2] << 24) | (buf[3] << 16) | (buf[4] << 8) | buf[5];
#ifdef TPM_DEBUG
	printf("tpmread: len %d, io count %d\n", len, uio->uio_resid);
#endif
	if (len > uio->uio_resid) {
		rv = EIO;
		(sc->sc_end)(sc, UIO_READ, rv);
#ifdef TPM_DEBUG
		printf("tpmread: bad residual io count 0x%x\n", uio->uio_resid);
#endif
		splx(s);
		return rv;
	}

	/* Copy out header. */
	if ((rv = uiomove((caddr_t)buf, cnt, uio))) {
		(sc->sc_end)(sc, UIO_READ, rv);
		splx(s);
		return rv;
	}

	/* Get remaining part of the answer (if anything is left). */
	for (len -= cnt, p = buf, n = sizeof(buf); len > 0; p = buf, len -= n,
	    n = sizeof(buf)) {
		n = MIN(n, len);
#ifdef TPM_DEBUG
		printf("tpmread: n %d len %d\n", n, len);
#endif
		if ((rv = (sc->sc_read)(sc, p, n, NULL, TPM_PARAM_SIZE))) {
			(sc->sc_end)(sc, UIO_READ, rv);
			splx(s);
			return rv;
		}
		p += n;
		if ((rv = uiomove((caddr_t)buf, p - buf, uio))) {
			(sc->sc_end)(sc, UIO_READ, rv);
			splx(s);
			return rv;
		}
	}

	rv = (sc->sc_end)(sc, UIO_READ, rv);
	splx(s);
	return rv;
}
Example #15
static int
ptsdev_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct tty *tp = fp->f_data;
	struct pts_softc *psc = tty_softc(tp);
	char ib[256], *ibstart;
	size_t iblen, rintlen;
	int error = 0;

	if (uio->uio_resid == 0)
		return (0);

	for (;;) {
		ibstart = ib;
		iblen = MIN(uio->uio_resid, sizeof ib);
		error = uiomove(ib, iblen, uio);

		tty_lock(tp);
		if (error != 0) {
			iblen = 0;
			goto done;
		}

		/*
		 * When possible, avoid the slow path. rint_bypass()
		 * copies all input to the input queue at once.
		 */
		MPASS(iblen > 0);
		do {
			rintlen = ttydisc_rint_simple(tp, ibstart, iblen);
			ibstart += rintlen;
			iblen -= rintlen;
			if (iblen == 0) {
				/* All data written. */
				break;
			}

			/* Maybe the device isn't used anyway. */
			if (psc->pts_flags & PTS_FINISHED) {
				error = EIO;
				goto done;
			}

			/* Wait for more data. */
			if (fp->f_flag & O_NONBLOCK) {
				error = EWOULDBLOCK;
				goto done;
			}

			/* Wake up users on the slave side. */
			ttydisc_rint_done(tp);
			error = cv_wait_sig(&psc->pts_inwait, tp->t_mtx);
			if (error != 0)
				goto done;
		} while (iblen > 0);

		if (uio->uio_resid == 0)
			break;
		tty_unlock(tp);
	}

done:	ttydisc_rint_done(tp);
	tty_unlock(tp);

	/*
	 * Don't account for the part of the buffer that we couldn't
	 * pass to the TTY.
	 */
	uio->uio_resid += iblen;
	return (error);
}
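The uio_resid += iblen at the end is the key trick: ptsdev_write() pulls data out of the uio optimistically, so anything the line discipline never accepted has to be credited back for the caller's short-write count to be right. The same shape with a hypothetical consume() sink:

/*
 * Optimistic copy-in with residual put-back: whatever the sink
 * rejects is credited back to uio_resid so the caller sees an
 * accurate short-write count.  consume() is hypothetical and
 * returns how many bytes it accepted.
 */
static size_t consume(const void *buf, size_t len);	/* hypothetical */

static int
write_with_putback(struct uio *uio)
{
	char ib[256];
	size_t iblen, done;
	int error;

	while (uio->uio_resid > 0) {
		iblen = MIN(uio->uio_resid, sizeof ib);
		error = uiomove(ib, iblen, uio);	/* consumes resid */
		if (error)
			return (error);
		done = consume(ib, iblen);
		if (done < iblen) {
			/* Un-account the part that was never written. */
			uio->uio_resid += iblen - done;
			return (EWOULDBLOCK);
		}
	}
	return (0);
}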
Example #16
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	boolean_t allowed;
	int error = 0, c;
	vaddr_t v;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

/* minor device 0 is physical memory */
		case 0:
			v = uio->uio_offset;
			c = iov->iov_len;
			if (v + c > ptoa(physmem))
				return (EFAULT);
			v = (vaddr_t)PHYS_TO_XKPHYS(v, CCA_NONCOHERENT);
			error = uiomove((caddr_t)v, c, uio);
			continue;

/* minor device 1 is kernel memory */
		case 1:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);

			/* Allow access to RAM through XKPHYS... */
			if (IS_XKPHYS(v) && IS_XKPHYS(v + (vsize_t)c) &&
			    XKPHYS_TO_PHYS(v + (vsize_t)c) <= ptoa(physmem))
				allowed = TRUE;
			/* ...or through KSEG0... */
			else if (v >= KSEG0_BASE &&
			    v + (vsize_t)c < KSEG0_BASE + KSEG_SIZE &&
			    (physmem >= atop(KSEG_SIZE) ||
			     v + (vsize_t)c <= KSEG0_BASE + ptoa(physmem)))
				allowed = TRUE;
			/* ...or through KSEG1... */
			else if (v >= KSEG1_BASE &&
			    v + (vsize_t)c < KSEG1_BASE + KSEG_SIZE &&
			    (physmem >= atop(KSEG_SIZE) ||
			     v + c <= KSEG1_BASE + ptoa(physmem)))
				allowed = TRUE;
			/* ...otherwise, check it's within kernel kvm limits. */
			else
				allowed = uvm_kernacc((caddr_t)v, c,
				    uio->uio_rw == UIO_READ ? B_READ : B_WRITE);

			if (allowed) {
				error = uiomove((caddr_t)v, c, uio);
				continue;
			} else {
				return (EFAULT);
			}

/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

/* minor device 12 (/dev/zero) is source of nulls on read, rathole on write */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zeropage == NULL)
				zeropage = malloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			continue;

		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	return error;
}
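The /dev/zero read case above never allocates per-request memory: a single zero page is created on first use and fed to uiomove() once per page-sized slice. In isolation, using the same malloc(9) flags as mmrw():

/*
 * The /dev/zero pattern above: one shared zero page, handed to
 * uiomove() repeatedly.  A real driver would guard the lazy
 * allocation against concurrent first readers.
 */
static caddr_t zpage;

static int
zero_read(struct uio *uio)
{
	size_t c;
	int error = 0;

	if (zpage == NULL)
		zpage = malloc(PAGE_SIZE, M_TEMP, M_WAITOK | M_ZERO);

	while (uio->uio_resid > 0 && error == 0) {
		c = uio->uio_resid;
		if (c > PAGE_SIZE)
			c = PAGE_SIZE;
		error = uiomove(zpage, c, uio);	/* same page every pass */
	}
	return (error);
}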
Example #17
int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	char buf[UGEN_BBSIZE];
	struct usbd_xfer *xfer;
	usbd_status err;
	int s;
	int flags, error = 0;
	u_char buffer[UGEN_CHUNK];

	DPRINTFN(5, ("%s: ugenread: %d\n", sc->sc_dev.dv_xname, endpt));

	if (usbd_is_dying(sc->sc_udev))
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		s = splusb();
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri",
			    (sce->timeout * hz) / 1000);
			sce->state &= ~UGEN_ASLP;
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (usbd_is_dying(sc->sc_udev))
				error = EIO;
			if (error == EWOULDBLOCK) {	/* timeout, return 0 */
				error = 0;
				break;
			}
			if (error)
				break;
		}
		splx(s);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(buffer))
				n = sizeof(buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		flags = USBD_SYNCHRONOUS;
		if (sce->state & UGEN_SHORT_OK)
			flags |= USBD_SHORT_XFER_OK;
		if (sce->timeout == 0)
			flags |= USBD_CATCH;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
			    flags, sce->timeout, NULL);
			err = usbd_transfer(xfer);
			if (err) {
				usbd_clear_endpoint_stall(sce->pipeh);
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			usbd_get_xfer_status(xfer, NULL, NULL, &tn, NULL);
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(buf, tn, uio);
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		s = splusb();
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri",
			    (sce->timeout * hz) / 1000);
			sce->state &= ~UGEN_ASLP;
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (usbd_is_dying(sc->sc_udev))
				error = EIO;
			if (error == EWOULDBLOCK) {	/* timeout, return 0 */
				error = 0;
				break;
			}
			if (error)
				break;
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if (sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		splx(s);
		break;


	default:
		return (ENXIO);
	}
	return (error);
}
Example #18
static	int
ptcread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tty *tp = dev->si_tty;
	struct pt_ioctl *pti = dev->si_drv1;
	char buf[BUFSIZ];
	int error = 0, cc;

	lwkt_gettoken(&tty_token);
	/*
	 * We want to block until the slave
	 * is open, and there's something to read;
	 * but if we lost the slave or we're NBIO,
	 * then return the appropriate error instead.
	 */
	for (;;) {
		if (tp->t_state&TS_ISOPEN) {
			if ((pti->pt_flags & PF_PKT) && pti->pt_send) {
				error = ureadc((int)pti->pt_send, ap->a_uio);
				if (error) {
					lwkt_reltoken(&tty_token);
					return (error);
				}
				if (pti->pt_send & TIOCPKT_IOCTL) {
					cc = (int)szmin(ap->a_uio->uio_resid,
							sizeof(tp->t_termios));
					uiomove((caddr_t)&tp->t_termios, cc,
						ap->a_uio);
				}
				pti->pt_send = 0;
				lwkt_reltoken(&tty_token);
				return (0);
			}
			if ((pti->pt_flags & PF_UCNTL) && pti->pt_ucntl) {
				error = ureadc((int)pti->pt_ucntl, ap->a_uio);
				if (error) {
					lwkt_reltoken(&tty_token);
					return (error);
				}
				pti->pt_ucntl = 0;
				lwkt_reltoken(&tty_token);
				return (0);
			}
			if (tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0)
				break;
		}
		if ((tp->t_state & TS_CONNECTED) == 0) {
			lwkt_reltoken(&tty_token);
			return (0);	/* EOF */
		}
		if (ap->a_ioflag & IO_NDELAY) {
			lwkt_reltoken(&tty_token);
			return (EWOULDBLOCK);
		}
		error = tsleep(TSA_PTC_READ(tp), PCATCH, "ptcin", 0);
		if (error) {
			lwkt_reltoken(&tty_token);
			return (error);
		}
	}
	if (pti->pt_flags & (PF_PKT|PF_UCNTL))
		error = ureadc(0, ap->a_uio);
	while (ap->a_uio->uio_resid > 0 && error == 0) {
		cc = q_to_b(&tp->t_outq, buf,
			    (int)szmin(ap->a_uio->uio_resid, BUFSIZ));
		if (cc <= 0)
			break;
		error = uiomove(buf, (size_t)cc, ap->a_uio);
	}
	ttwwakeup(tp);
	lwkt_reltoken(&tty_token);
	return (error);
}
Example #19
int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd,
	      caddr_t addr, int flag, struct proc *p)
{
	struct ugen_endpoint *sce;
	int err;
	struct usbd_interface *iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (usbd_is_dying(sc->sc_udev))
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL)
			return (EINVAL);
		sce->timeout = *(int *)addr;
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL)
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	default:
		break;
	}

	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
			  ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = usbd_get_cdesc(sc->sc_udev, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP, 0);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP, 0);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
			*usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = usbd_get_cdesc(sc->sc_udev, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP, 0);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = usbd_get_cdesc(sc->sc_udev, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP, 0);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP, 0);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = usbd_get_cdesc(sc->sc_udev, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
					alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP, 0);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP, 0);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = usbd_get_cdesc(sc->sc_udev, fd->ufd_config_index, &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (caddr_t)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_procp = p;
		error = uiomove((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP, 0);
		return (error);
	}
	case USB_GET_STRING_DESC:
	{
		int len;
		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev, si->usd_string_index,
			si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (caddr_t)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw =
				ur->ucr_request.bmRequestType & UT_READ ?
				UIO_READ : UIO_WRITE;
			uio.uio_procp = p;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		err = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
			  ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (err) {
			error = EIO;
			goto ret;
		}
		/* Only if USBD_SHORT_XFER_OK is set. */
		if (len > ur->ucr_actlen)
			len = ur->ucr_actlen;
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				error = uiomove(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP, 0);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
				     (struct usb_device_info *)addr, 1);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
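USB_GET_FULL_DESC and USB_DO_REQUEST above show a useful idiom: when an ioctl carries a user pointer, build an iovec and struct uio by hand so the ordinary uiomove() machinery does the user copy. The copy-out direction in isolation, following the OpenBSD uio layout used in this example:

/*
 * Wrap a user pointer in an iovec + struct uio so uiomove() can
 * copy a kernel buffer out to it, as USB_GET_FULL_DESC does.
 */
static int
copyout_via_uio(void *kbuf, size_t len, caddr_t ubuf, struct proc *p)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = ubuf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = len;
	uio.uio_offset = 0;
	uio.uio_segflg = UIO_USERSPACE;
	uio.uio_rw = UIO_READ;		/* kernel -> user direction */
	uio.uio_procp = p;

	return uiomove(kbuf, len, &uio);
}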
Example #20
/*
 * I/O ops
 */
static	int
ptcwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tty *tp = dev->si_tty;
	u_char *cp = NULL;
	int cc = 0;
	u_char locbuf[BUFSIZ];
	int cnt = 0;
	struct pt_ioctl *pti = dev->si_drv1;
	int error = 0;

	lwkt_gettoken(&tty_token);
again:
	if ((tp->t_state&TS_ISOPEN) == 0)
		goto block;
	if (pti->pt_flags & PF_REMOTE) {
		if (tp->t_canq.c_cc)
			goto block;
		while ((ap->a_uio->uio_resid > 0 || cc > 0) &&
		       tp->t_canq.c_cc < TTYHOG - 1) {
			if (cc == 0) {
				cc = (int)szmin(ap->a_uio->uio_resid, BUFSIZ);
				cc = imin(cc, TTYHOG - 1 - tp->t_canq.c_cc);
				cp = locbuf;
				error = uiomove(cp, (size_t)cc, ap->a_uio);
				if (error) {
					lwkt_reltoken(&tty_token);
					return (error);
				}
				/* check again for safety */
				if ((tp->t_state & TS_ISOPEN) == 0) {
					/* adjust as usual */
					ap->a_uio->uio_resid += cc;
					lwkt_reltoken(&tty_token);
					return (EIO);
				}
			}
			if (cc > 0) {
				cc = b_to_q((char *)cp, cc, &tp->t_canq);
				/*
				 * XXX we don't guarantee that the canq size
				 * is >= TTYHOG, so the above b_to_q() may
				 * leave some bytes uncopied.  However, space
				 * is guaranteed for the null terminator if
				 * we don't fail here since (TTYHOG - 1) is
				 * not a multiple of CBSIZE.
				 */
				if (cc > 0)
					break;
			}
		}
		/* adjust for data copied in but not written */
		ap->a_uio->uio_resid += cc;
		clist_putc(0, &tp->t_canq);
		ttwakeup(tp);
		wakeup(TSA_PTS_READ(tp));
		lwkt_reltoken(&tty_token);
		return (0);
	}
	while (ap->a_uio->uio_resid > 0 || cc > 0) {
		if (cc == 0) {
			cc = (int)szmin(ap->a_uio->uio_resid, BUFSIZ);
			cp = locbuf;
			error = uiomove(cp, (size_t)cc, ap->a_uio);
			if (error) {
				lwkt_reltoken(&tty_token);
				return (error);
			}
			/* check again for safety */
			if ((tp->t_state & TS_ISOPEN) == 0) {
				/* adjust for data copied in but not written */
				ap->a_uio->uio_resid += cc;
				lwkt_reltoken(&tty_token);
				return (EIO);
			}
		}
		while (cc > 0) {
			if ((tp->t_rawq.c_cc + tp->t_canq.c_cc) >= TTYHOG - 2 &&
			   (tp->t_canq.c_cc > 0 || !(tp->t_lflag&ICANON))) {
				wakeup(TSA_HUP_OR_INPUT(tp));
				goto block;
			}
			(*linesw[tp->t_line].l_rint)(*cp++, tp);
			cnt++;
			cc--;
		}
		cc = 0;
	}
	lwkt_reltoken(&tty_token);
	return (0);
block:
	/*
	 * Come here to wait for slave to open, for space
	 * in outq, or space in rawq, or an empty canq.
	 */
	if ((tp->t_state & TS_CONNECTED) == 0) {
		/* adjust for data copied in but not written */
		ap->a_uio->uio_resid += cc;
		lwkt_reltoken(&tty_token);
		return (EIO);
	}
	if (ap->a_ioflag & IO_NDELAY) {
		/* adjust for data copied in but not written */
		ap->a_uio->uio_resid += cc;
		if (cnt == 0) {
			lwkt_reltoken(&tty_token);
			return (EWOULDBLOCK);
		}
		lwkt_reltoken(&tty_token);
		return (0);
	}
	error = tsleep(TSA_PTC_WRITE(tp), PCATCH, "ptcout", 0);
	if (error) {
		/* adjust for data copied in but not written */
		ap->a_uio->uio_resid += cc;
		lwkt_reltoken(&tty_token);
		return (error);
	}
	goto again;
}
Example #21
static int
tmpfs_read (struct vop_read_args *ap)
{
	struct buf *bp;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct tmpfs_node *node;
	off_t base_offset;
	size_t offset;
	size_t len;
	int error;

	error = 0;
	if (uio->uio_resid == 0) {
		return error;
	}

	node = VP_TO_TMPFS_NODE(vp);

	if (uio->uio_offset < 0)
		return (EINVAL);
	if (vp->v_type != VREG)
		return (EINVAL);

	while (uio->uio_resid > 0 && uio->uio_offset < node->tn_size) {
		/*
		 * Use buffer cache I/O (via tmpfs_strategy)
		 */
		offset = (size_t)uio->uio_offset & BMASK;
		base_offset = (off_t)uio->uio_offset - offset;
		bp = getcacheblk(vp, base_offset, BSIZE, 0);
		if (bp == NULL) {
			lwkt_gettoken(&vp->v_mount->mnt_token);
			error = bread(vp, base_offset, BSIZE, &bp);
			if (error) {
				brelse(bp);
				lwkt_reltoken(&vp->v_mount->mnt_token);
				kprintf("tmpfs_read bread error %d\n", error);
				break;
			}
			lwkt_reltoken(&vp->v_mount->mnt_token);
		}

		/*
		 * Figure out how many bytes we can actually copy this loop.
		 */
		len = BSIZE - offset;
		if (len > uio->uio_resid)
			len = uio->uio_resid;
		if (len > node->tn_size - uio->uio_offset)
			len = (size_t)(node->tn_size - uio->uio_offset);

		error = uiomove((char *)bp->b_data + offset, len, uio);
		bqrelse(bp);
		if (error) {
			kprintf("tmpfs_read uiomove error %d\n", error);
			break;
		}
	}

	TMPFS_NODE_LOCK(node);
	node->tn_status |= TMPFS_NODE_ACCESSED;
	TMPFS_NODE_UNLOCK(node);

	return(error);
}
Example #22
int
reiserfs_readdir(struct vop_readdir_args  /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
		int *a_eofflag;
		int *a_ncookies;
		u_long **a_cookies;
	} */*ap)
{
	int error = 0;
	struct dirent dstdp;
	struct uio *uio = ap->a_uio;

	off_t next_pos;
	struct buf *bp;
	struct item_head *ih;
	struct cpu_key pos_key;
	const struct key *rkey;
	struct reiserfs_node *ip;
	struct reiserfs_dir_entry de;
	INITIALIZE_PATH(path_to_entry);
	int entry_num, item_num, search_res;

	/* The NFS part */
	int ncookies = 0;
	u_long *cookies = NULL;

	/*
	 * Form key for search the next directory entry using f_pos field of
	 * file structure
	 */
	ip = VTOI(ap->a_vp);
	make_cpu_key(&pos_key,
	    ip, uio->uio_offset ? uio->uio_offset : DOT_OFFSET,
	    TYPE_DIRENTRY, 3);
	next_pos = cpu_key_k_offset(&pos_key);

	reiserfs_log(LOG_DEBUG, "listing entries for "
	    "(objectid=%d, dirid=%d)\n",
	    pos_key.on_disk_key.k_objectid, pos_key.on_disk_key.k_dir_id);
	reiserfs_log(LOG_DEBUG, "uio_offset = %jd, uio_resid = %d\n",
	    (intmax_t)uio->uio_offset, uio->uio_resid);

	if (ap->a_ncookies && ap->a_cookies) {
		cookies = (u_long *)malloc(
		    uio->uio_resid / 16 * sizeof(u_long),
		    M_REISERFSCOOKIES, M_WAITOK);
	}

	while (1) {
		/*
		 * Search the directory item, containing entry with
		 * specified key
		 */
		reiserfs_log(LOG_DEBUG, "search directory to read\n");
		search_res = search_by_entry_key(ip->i_reiserfs, &pos_key,
		    &path_to_entry, &de);
		if (search_res == IO_ERROR) {
			error = EIO;
			goto out;
		}

		entry_num = de.de_entry_num;
		item_num  = de.de_item_num;
		bp = de.de_bp;
		ih = de.de_ih;

		if (search_res == POSITION_FOUND ||
		    entry_num < I_ENTRY_COUNT(ih)) {
			/*
			 * Go through all entries in the directory item
			 * beginning from the entry, that has been found.
			 */
			struct reiserfs_de_head *deh = B_I_DEH(bp, ih) +
			    entry_num;

			if (ap->a_ncookies == NULL)
				cookies = NULL;

			reiserfs_log(LOG_DEBUG,
			    "walking through directory entries\n");
			for (; entry_num < I_ENTRY_COUNT(ih);
			    entry_num++, deh++) {
				int d_namlen;
				char *d_name;
				off_t d_off;
				ino_t d_ino;

				if (!de_visible(deh)) {
					/* It is hidden entry */
					continue;
				}

				d_namlen = entry_length(bp, ih, entry_num);
				d_name   = B_I_DEH_ENTRY_FILE_NAME(bp, ih, deh);
				if (!d_name[d_namlen - 1])
					d_namlen = strlen(d_name);
				reiserfs_log(LOG_DEBUG, "  - `%s' (len=%d)\n",
				    d_name, d_namlen);

				if (d_namlen > REISERFS_MAX_NAME(
				    ip->i_reiserfs->s_blocksize)) {
					/* Too big to send back to VFS */
					continue;
				}

#if 0
				/* Ignore the .reiserfs_priv entry */
				if (reiserfs_xattrs(ip->i_reiserfs) &&
				    !old_format_only(ip->i_reiserfs) &&
				    filp->f_dentry == ip->i_reiserfs->s_root &&
				    REISERFS_SB(ip->i_reiserfs)->priv_root &&
				    REISERFS_SB(ip->i_reiserfs)->priv_root->d_inode &&
				    deh_objectid(deh) ==
				    le32toh(INODE_PKEY(REISERFS_SB(
				    ip->i_reiserfs)->priv_root->d_inode)->k_objectid)) {
					continue;
				}
#endif

				d_off = deh_offset(deh);
				d_ino = deh_objectid(deh);
				uio->uio_offset = d_off;

				/* Copy to user land */
				dstdp.d_fileno = d_ino;
				dstdp.d_type   = DT_UNKNOWN;
				dstdp.d_namlen = d_namlen;
				dstdp.d_reclen = GENERIC_DIRSIZ(&dstdp);
				bcopy(d_name, dstdp.d_name, dstdp.d_namlen);
				bzero(dstdp.d_name + dstdp.d_namlen,
				    dstdp.d_reclen -
				    offsetof(struct dirent, d_name) -
				    dstdp.d_namlen);

				if (d_namlen > 0) {
					if (dstdp.d_reclen <= uio->uio_resid) {
						reiserfs_log(LOG_DEBUG, "     copying to user land\n");
						error = uiomove(&dstdp,
						    dstdp.d_reclen, uio);
						if (error)
							goto end;
						if (cookies != NULL) {
							cookies[ncookies] =
							    d_off;
							ncookies++;
						}
					} else
						break;
				} else {
					error = EIO;
					break;
				}

				next_pos = deh_offset(deh) + 1;
			}
			reiserfs_log(LOG_DEBUG, "...done\n");
		}

		reiserfs_log(LOG_DEBUG, "checking item num (%d == %d ?)\n",
		    item_num, B_NR_ITEMS(bp) - 1);
		if (item_num != B_NR_ITEMS(bp) - 1) {
			/* End of directory has been reached */
			reiserfs_log(LOG_DEBUG, "end reached\n");
			if (ap->a_eofflag)
				*ap->a_eofflag = 1;
			goto end;
		}

		/*
		 * Item we went through is last item of node. Using right
		 * delimiting key check is it directory end
		 */
		reiserfs_log(LOG_DEBUG, "get right key\n");
		rkey = get_rkey(&path_to_entry, ip->i_reiserfs);
		reiserfs_log(LOG_DEBUG, "right key = (objectid=%d, dirid=%d)\n",
		    rkey->k_objectid, rkey->k_dir_id);

		reiserfs_log(LOG_DEBUG, "compare it to MIN_KEY\n");
		reiserfs_log(LOG_DEBUG, "MIN KEY = (objectid=%d, dirid=%d)\n",
		    MIN_KEY.k_objectid, MIN_KEY.k_dir_id);
		if (comp_le_keys(rkey, &MIN_KEY) == 0) {
			/* Set pos_key to key, that is the smallest and greater
			 * that key of the last entry in the item */
			reiserfs_log(LOG_DEBUG, "continuing on the right\n");
			set_cpu_key_k_offset(&pos_key, next_pos);
			continue;
		}

		reiserfs_log(LOG_DEBUG, "compare it to pos_key\n");
		reiserfs_log(LOG_DEBUG, "pos key = (objectid=%d, dirid=%d)\n",
		    pos_key.on_disk_key.k_objectid,
		    pos_key.on_disk_key.k_dir_id);
		if (COMP_SHORT_KEYS(rkey, &pos_key)) {
			/* End of directory has been reached */
			reiserfs_log(LOG_DEBUG, "end reached (right)\n");
			if (ap->a_eofflag)
				*ap->a_eofflag = 1;
			goto end;
		}

		/* Directory continues in the right neighboring block */
		reiserfs_log(LOG_DEBUG, "continuing with a new offset\n");
		set_cpu_key_k_offset(&pos_key,
		    le_key_k_offset(KEY_FORMAT_3_5, rkey));
		reiserfs_log(LOG_DEBUG,
		    "new pos key = (objectid=%d, dirid=%d)\n",
		    pos_key.on_disk_key.k_objectid,
		    pos_key.on_disk_key.k_dir_id);
	}

end:
	uio->uio_offset = next_pos;
	pathrelse(&path_to_entry);
	reiserfs_check_path(&path_to_entry);
out:
	if (error && cookies != NULL) {
		free(cookies, M_REISERFSCOOKIES);
	} else if (ap->a_ncookies != NULL && ap->a_cookies != NULL) {
		*ap->a_ncookies = ncookies;
		*ap->a_cookies  = cookies;
	}
	return (error);
}
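
The error path at the end of this example encodes the usual cookie ownership rule: on failure the filesystem frees the array and zeroes the caller's pointers, on success it hands the array and count back and the caller frees them. A minimal sketch of that hand-off as a helper, assuming FreeBSD-style u_long cookies; the helper name and parameters are illustrative, not part of the example:

static void
readdir_finish_cookies(struct vop_readdir_args *ap, int error,
    u_long *cookies, int ncookies, struct malloc_type *tag)
{
	if (ap->a_ncookies == NULL || ap->a_cookies == NULL)
		return;
	if (error) {
		/* nothing useful was produced; release the array */
		if (cookies != NULL)
			free(cookies, tag);
		*ap->a_ncookies = 0;
		*ap->a_cookies = NULL;
	} else {
		/* the caller (e.g. the NFS server) now owns the array */
		*ap->a_ncookies = ncookies;
		*ap->a_cookies = cookies;
	}
}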
Example #23
/*
 * Return directory entries.
 */
static int
pfs_readdir(struct vop_readdir_args *va)
{
	struct vnode *vn = va->a_vp;
	struct pfs_vdata *pvd = vn->v_data;
	struct pfs_node *pd = pvd->pvd_pn;
	pid_t pid = pvd->pvd_pid;
	struct proc *p, *proc;
	struct pfs_node *pn;
	struct dirent *entry;
	struct uio *uio;
	off_t offset;
	int error, i, resid;
	char *buf, *ent;

	KASSERT(pd->pn_info == vn->v_mount->mnt_data,
	    ("%s(): pn_info does not match mountpoint", __func__));
	PFS_TRACE(("%s pid %lu", pd->pn_name, (unsigned long)pid));
	pfs_assert_not_owned(pd);

	if (vn->v_type != VDIR)
		PFS_RETURN (ENOTDIR);
	KASSERT_PN_IS_DIR(pd);
	uio = va->a_uio;

	/* only allow reading entire entries */
	offset = uio->uio_offset;
	resid = uio->uio_resid;
	if (offset < 0 || offset % PFS_DELEN != 0 ||
	    (resid && resid < PFS_DELEN))
		PFS_RETURN (EINVAL);
	if (resid == 0)
		PFS_RETURN (0);

	/* can't do this while holding the proc lock... */
	buf = malloc(resid, M_IOV, M_WAITOK | M_ZERO);
	sx_slock(&allproc_lock);
	pfs_lock(pd);

	/* check if the directory is visible to the caller */
	if (!pfs_visible(curthread, pd, pid, &proc)) {
		sx_sunlock(&allproc_lock);
		pfs_unlock(pd);
		free(buf, M_IOV);
		PFS_RETURN (ENOENT);
	}
	KASSERT(pid == NO_PID || proc != NULL,
	    ("%s(): no process for pid %lu", __func__, (unsigned long)pid));

	/* skip unwanted entries */
	for (pn = NULL, p = NULL; offset > 0; offset -= PFS_DELEN) {
		if (pfs_iterate(curthread, proc, pd, &pn, &p) == -1) {
			/* nothing left... */
			if (proc != NULL)
				PROC_UNLOCK(proc);
			pfs_unlock(pd);
			sx_sunlock(&allproc_lock);
			free(buf, M_IOV);
			PFS_RETURN (0);
		}
	}

	/* fill in entries */
	ent = buf;
	while (pfs_iterate(curthread, proc, pd, &pn, &p) != -1 &&
	    resid >= PFS_DELEN) {
		entry = (struct dirent *)ent;
		entry->d_reclen = PFS_DELEN;
		entry->d_fileno = pn_fileno(pn, pid);
		/* PFS_DELEN was picked to fit PFS_NAMELEN */
		for (i = 0; i < PFS_NAMELEN - 1 && pn->pn_name[i] != '\0'; ++i)
			entry->d_name[i] = pn->pn_name[i];
		entry->d_name[i] = 0;
		entry->d_namlen = i;
		switch (pn->pn_type) {
		case pfstype_procdir:
			KASSERT(p != NULL,
			    ("reached procdir node with p == NULL"));
			entry->d_namlen = snprintf(entry->d_name,
			    PFS_NAMELEN, "%d", p->p_pid);
			/* fall through */
		case pfstype_root:
		case pfstype_dir:
		case pfstype_this:
		case pfstype_parent:
			entry->d_type = DT_DIR;
			break;
		case pfstype_file:
			entry->d_type = DT_REG;
			break;
		case pfstype_symlink:
			entry->d_type = DT_LNK;
			break;
		default:
			panic("%s has unexpected node type: %d", pn->pn_name, pn->pn_type);
		}
		PFS_TRACE(("%s", entry->d_name));
		offset += PFS_DELEN;
		resid -= PFS_DELEN;
		ent += PFS_DELEN;
	}
	if (proc != NULL)
		PROC_UNLOCK(proc);
	pfs_unlock(pd);
	sx_sunlock(&allproc_lock);
	PFS_TRACE(("%zd bytes", ent - buf));
	error = uiomove(buf, ent - buf, uio);
	free(buf, M_IOV);
	PFS_RETURN (error);
}
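
Because every record pfs_readdir emits is padded to the constant PFS_DELEN, a directory offset is valid exactly when it is a whole number of records; that is all the offset/resid check at the top of the function does. A small stand-alone sketch of the invariant, with RECSZ standing in for PFS_DELEN:

#define RECSZ	64	/* stand-in for PFS_DELEN; any constant works */

static int
dir_offset_ok(off_t offset, ssize_t resid)
{
	if (offset < 0 || offset % RECSZ != 0)
		return (0);	/* negative or torn offset */
	if (resid != 0 && resid < RECSZ)
		return (0);	/* buffer cannot hold one full record */
	return (1);	/* offset / RECSZ is the index of the next entry */
}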
Example #24
int
mbpp_rw(dev_t dev, struct uio *uio, int flag)
{
	int card = MAGMA_CARD(dev);
	int port = MAGMA_PORT(dev);
	struct mbpp_softc *ms = device_lookup_private(&mbpp_cd, card);
	struct mbpp_port *mp = &ms->ms_port[port];
	char *buffer, *ptr;
	int buflen, cnt, len;
	int s, error = 0;
	int gotdata = 0;

	if( uio->uio_resid == 0 )
		return(0);

	buflen = min(uio->uio_resid, mp->mp_burst);
	buffer = malloc(buflen, M_DEVBUF, M_WAITOK);
	if( buffer == NULL )
		return(ENOMEM);

	SET(mp->mp_flags, MBPPF_UIO);

	/*
	 * start timeout, if needed
	 */
	if( mp->mp_timeout > 0 ) {
		SET(mp->mp_flags, MBPPF_TIMEOUT);
		callout_reset(&mp->mp_timeout_ch, mp->mp_timeout,
		    mbpp_timeout, mp);
	}

	len = cnt = 0;
	while( uio->uio_resid > 0 ) {
		len = min(buflen, uio->uio_resid);
		ptr = buffer;

		if( uio->uio_rw == UIO_WRITE ) {
			error = uiomove(ptr, len, uio);
			if( error ) break;
		}
again:		/* partial writes restart here */
		/* timed out?  */
		if( !ISSET(mp->mp_flags, MBPPF_UIO) )
			break;

		/*
		 * perform the operation
		 */
		if( uio->uio_rw == UIO_WRITE ) {
			cnt = mbpp_send(mp, ptr, len);
		} else {
			cnt = mbpp_recv(mp, ptr, len);
		}

		if( uio->uio_rw == UIO_READ ) {
			if( cnt ) {
				error = uiomove(ptr, cnt, uio);
				if( error ) break;
				gotdata++;
			}
			else if( gotdata )	/* consider us done */
				break;
		}

		/* timed out?  */
		if( !ISSET(mp->mp_flags, MBPPF_UIO) )
			break;

		/*
		 * poll delay?
		 */
		if( mp->mp_delay > 0 ) {
			s = splsoftclock();
			SET(mp->mp_flags, MBPPF_DELAY);
			callout_reset(&mp->mp_start_ch, mp->mp_delay,
			    mbpp_start, mp);
			error = tsleep(mp, PCATCH | PZERO, "mbppdelay", 0);
			splx(s);
			if( error ) break;
		}

		/*
	 * don't call uiomove again until we've used all the data we grabbed
		 */
		if( uio->uio_rw == UIO_WRITE && cnt != len ) {
			ptr += cnt;
			len -= cnt;
			cnt = 0;
			goto again;
		}
	}

	/*
	 * clear timeouts
	 */
	s = splsoftclock();
	if( ISSET(mp->mp_flags, MBPPF_TIMEOUT) ) {
		callout_stop(&mp->mp_timeout_ch);
		CLR(mp->mp_flags, MBPPF_TIMEOUT);
	}
	if( ISSET(mp->mp_flags, MBPPF_DELAY) ) {
		callout_stop(&mp->mp_start_ch);
		CLR(mp->mp_flags, MBPPF_DELAY);
	}
	splx(s);

	/*
	 * adjust for those chars that we uiomoved but never actually wrote
	 */
	if( uio->uio_rw == UIO_WRITE && cnt != len ) {
		uio->uio_resid += (len - cnt);
	}

	free(buffer, M_DEVBUF);
	return(error);
}
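
The write side of mbpp_rw stages data with uiomove() before the hardware has accepted it, so uio_resid must be repaired by hand when the port takes less than was staged. A condensed sketch of that stage-then-give-back pattern; dev_send is a hypothetical stand-in for mbpp_send(), and a real driver would retry the remainder first, as the again: label above does:

static int
staged_write(struct uio *uio, char *stage, size_t stagelen,
    ssize_t (*dev_send)(const char *, size_t))
{
	size_t len;
	ssize_t cnt;
	int error;

	while (uio->uio_resid > 0) {
		len = MIN(stagelen, (size_t)uio->uio_resid);
		error = uiomove(stage, len, uio);	/* consumes resid */
		if (error)
			return (error);
		cnt = (*dev_send)(stage, len);
		if (cnt < (ssize_t)len) {
			/* give back what uiomove took but we never sent */
			uio->uio_resid += len - (cnt < 0 ? 0 : (size_t)cnt);
			return (cnt < 0 ? EIO : 0);
		}
	}
	return (0);
}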
Example #25
static int
nwfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct nwmount *nmp = VTONWFS(vp);
	int error, count, i;
	struct dirent dp;
	struct nwnode *np = VTONW(vp);
	struct nw_entry_info fattr;
	struct vnode *newvp;
	struct componentname cn;
	ncpfid fid;

	NCPVNDEBUG("dirname='%s'\n", np->n_name);
	if (uio->uio_resid < DE_SIZE || (uio->uio_offset < 0))
		return (EINVAL);
	error = 0;
	count = 0;
	i = uio->uio_offset / DE_SIZE; /* offset in directory */
	if (i == 0) {
		error = ncp_initsearch(vp, uio->uio_td, cred);
		if (error) {
			NCPVNDEBUG("cannot initialize search, error=%d",error);
			return( error );
		}
	}

	for (; uio->uio_resid >= DE_SIZE; i++) {
		bzero((char *) &dp, DE_SIZE);
		dp.d_reclen = DE_SIZE;
		switch (i) {
		    case 0:		/* `.' */
		    case 1:		/* `..' */
			dp.d_fileno = (i == 0) ? np->n_fid.f_id : np->n_parent.f_id;
			if (!dp.d_fileno) dp.d_fileno = NWFS_ROOT_INO;
			dp.d_namlen = i + 1;
			dp.d_name[0] = '.';
			dp.d_name[1] = '.';
			dp.d_name[i + 1] = '\0';
			dp.d_type = DT_DIR;
			break;
		    default:
			error = ncp_search_for_file_or_subdir(nmp, &np->n_seq, &fattr, uio->uio_td, cred);
			if (error && error < 0x80) break;
			dp.d_fileno = fattr.dirEntNum;
			dp.d_type = (fattr.attributes & aDIR) ? DT_DIR : DT_REG;
			dp.d_namlen = fattr.nameLen;
			bcopy(fattr.entryName, dp.d_name, dp.d_namlen);
			dp.d_name[dp.d_namlen] = '\0';
#if 0
			if (error && eofflag) {
			/*	*eofflag = 1;*/
				break;
			}
#endif
			break;
		}
		if (nwfs_fastlookup && !error && i > 1) {
			fid.f_id = fattr.dirEntNum;
			fid.f_parent = np->n_fid.f_id;
			error = nwfs_nget(vp->v_mount, fid, &fattr, vp, &newvp);
			if (!error) {
				VTONW(newvp)->n_ctime = VTONW(newvp)->n_vattr.va_ctime.tv_sec;
				cn.cn_nameptr = dp.d_name;
				cn.cn_namelen = dp.d_namlen;
				cache_enter(vp, newvp, &cn);
				vput(newvp);
			} else
				error = 0;
		}
		if (error >= 0x80) {
		    error = 0;
		    break;
		}
		if ((error = uiomove(&dp, DE_SIZE, uio)))
			break;
	}

	uio->uio_offset = i * DE_SIZE;
	return (error);
}
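
The `.'/`..' arm above shares one body for both entries by exploiting the slot index: writing ".." and terminating at i + 1 produces "." for slot 0 and ".." for slot 1 with no branch on the name. The same trick appears in several readdir implementations; isolated as a sketch (fill_dots is an illustrative name):

static void
fill_dots(struct dirent *dp, int i, ino_t self, ino_t parent)
{
	/* valid for i == 0 (".") and i == 1 ("..") only */
	dp->d_fileno = (i == 0) ? self : parent;
	dp->d_namlen = i + 1;
	dp->d_name[0] = '.';
	dp->d_name[1] = '.';
	dp->d_name[i + 1] = '\0';
	dp->d_type = DT_DIR;
}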
Example #26
/*ARGSUSED*/
static int
bootfs_readdir(vnode_t *vp, struct uio *uiop, cred_t *cr, int *eofp,
    caller_context_t *ct, int flags)
{
	bootfs_node_t *bnp = (bootfs_node_t *)vp->v_data;
	dirent64_t *dp;
	void *buf;
	ulong_t bsize, brem;
	offset_t coff, roff;
	int dlen, ret;
	bootfs_node_t *dnp;
	boolean_t first = B_TRUE;

	if (uiop->uio_loffset >= MAXOFF_T) {
		if (eofp != NULL)
			*eofp = 1;
		return (0);
	}

	if (uiop->uio_iovcnt != 1)
		return (EINVAL);

	if (!(uiop->uio_iov->iov_len > 0))
		return (EINVAL);

	if (vp->v_type != VDIR)
		return (ENOTDIR);

	roff = uiop->uio_loffset;
	coff = 0;
	brem = bsize = uiop->uio_iov->iov_len;
	buf = kmem_alloc(bsize, KM_SLEEP);
	dp = buf;

	/*
	 * Recall that offsets here advance by the length of the dirent
	 * name, excluding the null terminator. Therefore `.' is always at
	 * 0, `..' is always at 1, and the first real dirent is at 3. That
	 * offset is what's actually stored when we update the offset in
	 * the structure.
	 */
	if (roff == 0) {
		dlen = DIRENT64_RECLEN(1);
		if (first == B_TRUE) {
			if (dlen > brem) {
				kmem_free(buf, bsize);
				return (EINVAL);
			}
			first = B_FALSE;
		}
		dp->d_ino = (ino64_t)bnp->bvn_attr.va_nodeid;
		dp->d_off = 0;
		dp->d_reclen = (ushort_t)dlen;
		(void) strncpy(dp->d_name, ".", DIRENT64_NAMELEN(dlen));
		dp = (struct dirent64 *)((uintptr_t)dp + dp->d_reclen);
		brem -= dlen;
	}

	if (roff <= 1) {
		dlen = DIRENT64_RECLEN(2);
		if (first == B_TRUE) {
			if (dlen > brem) {
				kmem_free(buf, bsize);
				return (EINVAL);
			}
			first = B_FALSE;
		}
		dp->d_ino = (ino64_t)bnp->bvn_parent->bvn_attr.va_nodeid;
		dp->d_off = 1;
		dp->d_reclen = (ushort_t)dlen;
		(void) strncpy(dp->d_name, "..", DIRENT64_NAMELEN(dlen));
		dp = (struct dirent64 *)((uintptr_t)dp + dp->d_reclen);
		brem -= dlen;
	}

	coff = 3;
	for (dnp = avl_first(&bnp->bvn_dir); dnp != NULL;
	    dnp = AVL_NEXT(&bnp->bvn_dir, dnp)) {
		size_t nlen = strlen(dnp->bvn_name);

		if (roff > coff) {
			coff += nlen;
			continue;
		}

		dlen = DIRENT64_RECLEN(nlen);
		if (dlen > brem) {
			if (first == B_TRUE) {
				kmem_free(buf, bsize);
				return (EINVAL);
			}
			break;
		}
		first = B_FALSE;

		dp->d_ino = (ino64_t)dnp->bvn_attr.va_nodeid;
		dp->d_off = coff;
		dp->d_reclen = (ushort_t)dlen;
		(void) strncpy(dp->d_name, dnp->bvn_name,
		    DIRENT64_NAMELEN(dlen));
		dp = (struct dirent64 *)((uintptr_t)dp + dp->d_reclen);
		brem -= dlen;
		coff += nlen;
	}

	ret = uiomove(buf, (bsize - brem), UIO_READ, uiop);

	if (ret == 0) {
		if (dnp == NULL) {
			coff++;
			if (eofp != NULL)
				*eofp = 1;
		} else if (eofp != NULL) {
			*eofp = 0;
		}
		uiop->uio_loffset = coff;
	}
	gethrestime(&bnp->bvn_attr.va_atime);
	kmem_free(buf, bsize);
	return (ret);
}
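
bootfs_readdir packs variable-length records: each dirent64 is sized by DIRENT64_RECLEN(namlen) and the loop stops as soon as the next record no longer fits. A reduced sketch of that packing step, with my_dirent and RECLEN as illustrative stand-ins for dirent64_t and DIRENT64_RECLEN:

struct my_dirent {
	uint64_t d_ino;
	int64_t	 d_off;
	uint16_t d_reclen;
	char	 d_name[1];	/* name actually extends past here */
};

/* header plus name and NUL, rounded up to 8-byte alignment */
#define RECLEN(namlen) \
	(((offsetof(struct my_dirent, d_name) + (namlen) + 1) + 7) & ~7UL)

/* returns bytes consumed, or 0 if the record does not fit */
static size_t
pack_entry(char *buf, size_t brem, uint64_t ino, int64_t off,
    const char *name)
{
	size_t dlen = RECLEN(strlen(name));
	struct my_dirent *dp = (struct my_dirent *)buf;

	if (dlen > brem)
		return (0);
	dp->d_ino = ino;
	dp->d_off = off;
	dp->d_reclen = (uint16_t)dlen;
	(void) strcpy(dp->d_name, name);
	return (dlen);
}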
Example #27
/*
 *  bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev_t dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int s;

	d = bpfilter_lookup(minor(dev));
	if (d == NULL || d->bd_bif == NULL)
		return (ENXIO);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splnet();

	D_GET(d);

	/*
	 * bd_rdStart is set when we start the read, iff there's a timeout;
	 * it lets us figure out when we're done reading.
	 */
	if (d->bd_rtout != -1 && d->bd_rdStart == 0)
		d->bd_rdStart = ticks;
	else
		d->bd_rdStart = 0;

	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_bif == NULL) {
			/* interface is gone */
			if (d->bd_slen == 0) {
				D_PUT(d);
				splx(s);
				return (EIO);
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if ((d->bd_rtout != -1) ||
		    (d->bd_rdStart + d->bd_rtout) < ticks) {
			error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf",
			    d->bd_rtout);
		} else {
			if (d->bd_rtout == -1) {
				/* User requested non-blocking I/O */
				error = EWOULDBLOCK;
			} else
				error = 0;
		}
		if (error == EINTR || error == ERESTART) {
			D_PUT(d);
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				D_PUT(d);
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	s = splnet();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;

	D_PUT(d);
	splx(s);

	return (error);
}
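
ROTATE_BUFFERS() is used above without its definition. The usual bpf arrangement is three buffers -- store (being filled by capture), hold (being drained by readers) and free -- and rotation is only legal when the hold slot is empty, which both call sites above guarantee. A sketch under that assumption:

struct dbuf {
	char	*sbuf, *hbuf, *fbuf;	/* store, hold, free */
	int	 slen, hlen;		/* bytes used in store/hold */
};

static void
rotate_buffers(struct dbuf *d)
{
	/* caller must have checked that d->hbuf is empty (NULL) */
	d->hbuf = d->sbuf;	/* captured data becomes readable */
	d->hlen = d->slen;
	d->sbuf = d->fbuf;	/* spare buffer takes over capture */
	d->slen = 0;
	d->fbuf = NULL;		/* returned when the read completes */
}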
Example #28
/*ARGSUSED*/
static int
bootfs_read(vnode_t *vp, struct uio *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	int err;
	ssize_t sres = uiop->uio_resid;
	bootfs_node_t *bnp = vp->v_data;

	if (vp->v_type == VDIR)
		return (EISDIR);

	if (vp->v_type != VREG)
		return (EINVAL);

	if (uiop->uio_loffset < 0)
		return (EINVAL);

	if (uiop->uio_loffset >= bnp->bvn_size)
		return (0);

	err = 0;
	while (uiop->uio_resid != 0) {
		caddr_t base;
		long offset, frem;
		ulong_t poff, segoff;
		size_t bytes;
		int relerr;

		offset = uiop->uio_loffset;
		poff = offset & PAGEOFFSET;
		bytes = MIN(PAGESIZE - poff, uiop->uio_resid);

		frem = bnp->bvn_size - offset;
		if (frem <= 0) {
			err = 0;
			break;
		}

		/* Don't read past EOF */
		bytes = MIN(bytes, frem);

		/*
		 * Segmaps are likely larger than our page size, so make sure we
		 * have the proper offset into the resulting segmap data.
		 */
		segoff = (offset & PAGEMASK) & MAXBOFFSET;

		base = segmap_getmapflt(segkmap, vp, offset & MAXBMASK, bytes,
		    1, S_READ);

		err = uiomove(base + segoff + poff, bytes, UIO_READ, uiop);
		relerr = segmap_release(segkmap, base, 0);

		if (err == 0)
			err = relerr;

		if (err != 0)
			break;
	}

	/* Even if we had an error in a partial read, return success */
	if (uiop->uio_resid < sres)
		err = 0;

	gethrestime(&bnp->bvn_attr.va_atime);

	return (err);
}
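
The address arithmetic in bootfs_read splits a file offset three ways: the MAXBSIZE-aligned window that segmap_getmapflt() maps, the page within that window, and the byte within the page. A sketch of just that decomposition, assuming the illumos PAGEOFFSET/PAGEMASK/MAXBOFFSET/MAXBMASK definitions (split_offset is an illustrative name):

static void
split_offset(u_offset_t offset, u_offset_t *winbase, ulong_t *segoff,
    ulong_t *poff)
{
	*poff = offset & PAGEOFFSET;		/* byte within the page */
	*segoff = (offset & PAGEMASK) & MAXBOFFSET; /* page within window */
	*winbase = offset & MAXBMASK;		/* base passed to segmap */
	/* the uiomove source above is base + *segoff + *poff */
}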
Example #29
int
uhid_do_read(struct uhid_softc *sc, struct uio *uio, int flag)
{
	int s;
	int error = 0;
	size_t length;
	u_char buffer[UHID_CHUNK];
	usbd_status err;

	DPRINTFN(1, ("uhidread\n"));
	if (sc->sc_state & UHID_IMMED) {
		DPRINTFN(1, ("uhidread immed\n"));
		
		err = usbd_get_report(sc->sc_iface, UHID_INPUT_REPORT,
			  sc->sc_iid, buffer, sc->sc_isize);
		if (err)
			return (EIO);
		return (uiomove(buffer, sc->sc_isize, uio));
	}

	s = splusb();
	while (sc->sc_q.c_cc == 0) {
		if (flag & IO_NDELAY) {
			splx(s);
			return (EWOULDBLOCK);
		}
		sc->sc_state |= UHID_ASLP;
		DPRINTFN(5, ("uhidread: sleep on %p\n", &sc->sc_q));
		error = tsleep(&sc->sc_q, PZERO | PCATCH, "uhidrea", 0);
		DPRINTFN(5, ("uhidread: woke, error=%d\n", error));
		if (sc->sc_dying)
			error = EIO;
		if (error) {
			sc->sc_state &= ~UHID_ASLP;
			break;
		}
		if (sc->sc_state & UHID_NEEDCLEAR) {
			DPRINTFN(-1,("uhidread: clearing stall\n"));
			sc->sc_state &= ~UHID_NEEDCLEAR;
			usbd_clear_endpoint_stall(sc->sc_intrpipe);
		}
	}
	splx(s);

	/* Transfer as many chunks as possible. */
	while (sc->sc_q.c_cc > 0 && uio->uio_resid > 0 && !error) {
		length = min(sc->sc_q.c_cc, uio->uio_resid);
		if (length > sizeof(buffer))
			length = sizeof(buffer);

		/* Remove a small chunk from the input queue. */
		(void) q_to_b(&sc->sc_q, buffer, length);
		DPRINTFN(5, ("uhidread: got %lu chars\n", (u_long)length));

		/* Copy the data to the user process. */
		if ((error = uiomove(buffer, length, uio)) != 0)
			break;
	}

	return (error);
}
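
The drain loop at the bottom serves an arbitrarily large read from a small fixed buffer: each pass moves min(queued, wanted, chunk size) bytes from the clist to user space. The same shape, extracted as a sketch (drain_queue is an illustrative name; q_to_b and the NetBSD-style uiomove are as used above):

static int
drain_queue(struct clist *q, struct uio *uio, u_char *chunk,
    size_t chunksz)
{
	size_t length;
	int error = 0;

	while (q->c_cc > 0 && uio->uio_resid > 0 && !error) {
		length = MIN((size_t)q->c_cc, (size_t)uio->uio_resid);
		if (length > chunksz)
			length = chunksz;	/* bound by staging buffer */
		(void) q_to_b(q, chunk, length);
		error = uiomove(chunk, length, uio);
	}
	return (error);
}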
Example #30
int
midiwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct midi_softc *sc;
	struct midi_buffer *mb;
	size_t count;
	int error;

	sc = (struct midi_softc *)device_lookup(&midi_cd, minor(dev));
	if (sc == NULL)
		return ENXIO;
	if (!(sc->flags & FWRITE)) {
		error = ENXIO;
		goto done;
	}
	mb = &sc->outbuf;

	/*
	 * If the IO_NDELAY flag is set then check if there is enough room
	 * in the buffer to store at least one byte. If not then don't
	 * start the write process.
	 */
	error = 0;
	mtx_enter(&audio_lock);
	if ((ioflag & IO_NDELAY) && MIDIBUF_ISFULL(mb) &&
	    (uio->uio_resid > 0)) {
		mtx_leave(&audio_lock);
		error = EWOULDBLOCK;
		goto done;
	}

	while (uio->uio_resid > 0) {
		while (MIDIBUF_ISFULL(mb)) {
			if (ioflag & IO_NDELAY) {
				/*
				 * At this stage at least one byte has already
				 * been moved, so we do not return EWOULDBLOCK
				 */
				mtx_leave(&audio_lock);
				goto done;
			}
			sc->wchan = 1;
			error = msleep(&sc->wchan, &audio_lock,
			    PWAIT | PCATCH, "mid_wr", 0);
			if (!(sc->dev.dv_flags & DVF_ACTIVE))
				error = EIO;
			if (error) {
				mtx_leave(&audio_lock);
				goto done;
			}
		}

		count = MIDIBUF_SIZE - MIDIBUF_END(mb);
		if (count > MIDIBUF_AVAIL(mb))
			count = MIDIBUF_AVAIL(mb);
		if (count > uio->uio_resid)
			count = uio->uio_resid;
		mtx_leave(&audio_lock);
		error = uiomove(mb->data + MIDIBUF_END(mb), count, uio);
		if (error)
			goto done;
		mtx_enter(&audio_lock);
		mb->used += count;
		midi_out_start(sc);
	}
	mtx_leave(&audio_lock);
done:
	device_unref(&sc->dev);
	return error;
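
The count computation in midiwrite clamps each copy three ways: to the contiguous span before the ring wraps, to the free space, and to what the writer still has. Extracted as a sketch (ring_span is an illustrative name; the three inputs correspond to MIDIBUF_SIZE - MIDIBUF_END(mb), MIDIBUF_AVAIL(mb) and uio_resid above):

static size_t
ring_span(size_t bufsize, size_t end, size_t avail, size_t resid)
{
	size_t count = bufsize - end;	/* bytes before the wrap point */

	if (count > avail)
		count = avail;		/* free space in the ring */
	if (count > resid)
		count = resid;		/* bytes the writer still has */
	return (count);
}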
}