Example #1
static void
selectDefaultTileVecLen(
    Tile *tile,
    TileCreationFlags tflags,
    const BlasGenSettings *gset,
    BlasFunctionID funcID,
    MatrixRole mrole)
{
    if (tflags & TILE_WITH_FETCH_VECLEN) {
        tile->vecLen = getVecLen(gset, funcID, mrole);
    }
    else {
        size_t w;

        w = (tile->trans) ? tile->nrRows : tile->nrCols;
        if (tile->packed) {
            size_t wpad, height;

            wpad = roundUpPow2(w);
            height = (tile->trans) ? tile->nrCols : tile->nrRows;
            tile->vecLen = (unsigned int)szmin(height * wpad, MAX_TILE_VECLEN);
        }
        else {
            tile->vecLen = (unsigned int)roundUpPow2(w);
            tile->vecLen = (unsigned int)szmin(tile->vecLen, MAX_TILE_VECLEN);
        }
    }
}
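
Every example in this section clamps a size or residual byte count with szmin(), and this first one also rounds widths with roundUpPow2(). Neither helper is shown here, so below is a minimal sketch of plausible definitions, for orientation only; DragonFly's own szmin() lives in <sys/systm.h>, and the real bodies may differ in detail.

#include <stddef.h>

/* Unsigned minimum over size_t values; sidesteps the signed min()/imin(). */
static inline size_t
szmin(size_t a, size_t b)
{
	return (a < b) ? a : b;
}

/*
 * Round v up to the nearest power of two.  Sketch only: no overflow
 * guard for v > SIZE_MAX / 2 + 1.
 */
static inline size_t
roundUpPow2(size_t v)
{
	size_t p = 1;

	if (v == 0)
		return (0);
	while (p < v)
		p <<= 1;
	return (p);
}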
Example #2
/*
 * hpfs_read(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	     struct ucred *a_cred)
 */
static int
hpfs_read(struct vop_read_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct hpfsnode *hp = VTOHP(vp);
	struct uio *uio = ap->a_uio;
	struct buf *bp;
	u_int xfersz, toread;
	u_int off;
	daddr_t lbn, bn;
	int resid;
	int runl;
	int error = 0;

	resid = (int)szmin(uio->uio_resid, hp->h_fn.fn_size - uio->uio_offset);

	dprintf(("hpfs_read(0x%x, off: %d resid: %d, segflg: %d): "
		 "[resid: 0x%lx]\n",
		 hp->h_no, (u_int32_t)uio->uio_offset,
		 uio->uio_resid, uio->uio_segflg, resid));

	while (resid) {
		lbn = uio->uio_offset >> DEV_BSHIFT;
		off = uio->uio_offset & (DEV_BSIZE - 1);
		dprintf(("hpfs_read: resid: 0x%lx lbn: 0x%x off: 0x%x\n",
			uio->uio_resid, lbn, off));
		error = hpfs_hpbmap(hp, lbn, &bn, &runl);
		if (error)
			return (error);

		toread = min(off + resid, min(DFLTPHYS, (runl+1)*DEV_BSIZE));
		xfersz = (toread + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		dprintf(("hpfs_read: bn: 0x%x (0x%x) toread: 0x%x (0x%x)\n",
			bn, runl, toread, xfersz));

		if (toread == 0) 
			break;

		error = bread(hp->h_devvp, dbtodoff(bn), xfersz, &bp);
		if (error) {
			brelse(bp);
			break;
		}

		error = uiomove(bp->b_data + off, (size_t)(toread - off), uio);
		if (error) {
			brelse(bp);
			break;
		}
		brelse(bp);
		resid -= toread;
	}
	dprintf(("hpfs_read: successful\n"));
	return (error);
}
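
hpfs_read() rounds the transfer up to a whole device block with (toread + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); the mask trick is only valid because DEV_BSIZE is a power of two. Here is that idiom in isolation, as a runnable user-space sketch (the names are hypothetical):

#include <assert.h>
#include <stdint.h>

/* Round x up to a multiple of bsize, where bsize is a power of two. */
static uint32_t
round_up_blk(uint32_t x, uint32_t bsize)
{
	return (x + bsize - 1) & ~(bsize - 1);
}

int
main(void)
{
	assert(round_up_blk(1, 512) == 512);	/* partial block -> full block */
	assert(round_up_blk(512, 512) == 512);	/* exact multiple unchanged */
	assert(round_up_blk(513, 512) == 1024);	/* spills into the next block */
	return (0);
}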
Example #3
static __inline int
smb_smb_read(struct smb_share *ssp, u_int16_t fid,
	int *len, int *rresid, struct uio *uio, struct smb_cred *scred)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	u_int16_t resid, bc;
	u_int8_t wc;
	int error, rlen, blksz;

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_READ, scred, &rqp);
	if (error)
		return error;

	blksz = SSTOVC(ssp)->vc_txmax - SMB_HDRLEN - 16;
	rlen = *len = min(blksz, *len);

	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_mem(mbp, (caddr_t)&fid, sizeof(fid), MB_MSYSTEM);
	mb_put_uint16le(mbp, rlen);
	mb_put_uint32le(mbp, uio->uio_offset);
	mb_put_uint16le(mbp, (unsigned short)szmin(uio->uio_resid, 0xffff));
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	smb_rq_bend(rqp);
	do {
		error = smb_rq_simple(rqp);
		if (error)
			break;
		smb_rq_getreply(rqp, &mdp);
		md_get_uint8(mdp, &wc);
		if (wc != 5) {
			error = EBADRPC;
			break;
		}
		md_get_uint16le(mdp, &resid);
		md_get_mem(mdp, NULL, 4 * 2, MB_MSYSTEM);
		md_get_uint16le(mdp, &bc);
		md_get_uint8(mdp, NULL);		/* ignore buffer type */
		md_get_uint16le(mdp, &resid);
		if (resid == 0) {
			*rresid = resid;
			break;
		}
		error = md_get_uio(mdp, uio, resid);
		if (error)
			break;
		*rresid = resid;
	} while(0);
	smb_rq_done(rqp);
	return error;
}
Example #4
static __inline int
smb_smb_write(struct smb_share *ssp, u_int16_t fid, int *len, int *rresid,
	struct uio *uio, struct smb_cred *scred)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	u_int16_t resid;
	u_int8_t wc;
	int error, blksz;

	/* write data must be real */
	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	blksz = SSTOVC(ssp)->vc_txmax - SMB_HDRLEN - 16;
	if (blksz > 0xffff)
		blksz = 0xffff;

	resid = *len = min(blksz, *len);

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_WRITE, scred, &rqp);
	if (error)
		return error;
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_mem(mbp, (caddr_t)&fid, sizeof(fid), MB_MSYSTEM);
	mb_put_uint16le(mbp, resid);
	mb_put_uint32le(mbp, uio->uio_offset);
	mb_put_uint16le(mbp, (unsigned short)szmin(uio->uio_resid, 0xffff));
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	mb_put_uint8(mbp, SMB_DT_DATA);
	mb_put_uint16le(mbp, resid);
	do {
		error = mb_put_uio(mbp, uio, resid);
		if (error)
			break;
		smb_rq_bend(rqp);
		error = smb_rq_simple(rqp);
		if (error)
			break;
		smb_rq_getreply(rqp, &mdp);
		md_get_uint8(mdp, &wc);
		if (wc != 1) {
			error = EBADRPC;
			break;
		}
		md_get_uint16le(mdp, &resid);
		*rresid = resid;
	} while(0);
	smb_rq_done(rqp);
	return error;
}
Example #5
static int
snpwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct snoop *snp;
	struct tty *tp;
	int error, i, len;
	unsigned char c[SNP_INPUT_BUF];

	lwkt_gettoken(&tty_token);
	snp = dev->si_drv1;
	tp = snp->snp_tty;
	if (tp == NULL) {
		lwkt_reltoken(&tty_token);
		return (EIO);
	}
	if ((tp->t_sc == snp) && (tp->t_state & TS_SNOOP) &&
	    tp->t_line == snooplinedisc)
		goto tty_input;

	kprintf("Snoop: attempt to write to bad tty.\n");
	lwkt_reltoken(&tty_token);
	return (EIO);

tty_input:
	if (!(tp->t_state & TS_ISOPEN)) {
		lwkt_reltoken(&tty_token);
		return (EIO);
	}

	while (uio->uio_resid > 0) {
		len = (int)szmin(uio->uio_resid, SNP_INPUT_BUF);
		if ((error = uiomove(c, (size_t)len, uio)) != 0) {
			lwkt_reltoken(&tty_token);
			return (error);
		}
		for (i=0; i < len; i++) {
			if (ttyinput(c[i], tp)) {
				lwkt_reltoken(&tty_token);
				return (EIO);
			}
		}
	}
	lwkt_reltoken(&tty_token);
	return (0);
}
Example #6
static int
snplwrite(struct tty *tp, struct uio *uio, int flag)
{
	struct iovec iov;
	struct uio uio2;
	struct snoop *snp;
	int error, ilen;
	char *ibuf;

	lwkt_gettoken(&tty_token);
	error = 0;
	ibuf = NULL;
	snp = tp->t_sc;
	while (uio->uio_resid > 0) {
		ilen = (int)szmin(512, uio->uio_resid);
		ibuf = kmalloc(ilen, M_SNP, M_WAITOK);
		error = uiomove(ibuf, (size_t)ilen, uio);
		if (error != 0)
			break;
		snp_in(snp, ibuf, ilen);
		/* Hackish, but probably the least of all evils. */
		iov.iov_base = ibuf;
		iov.iov_len = ilen;
		uio2.uio_iov = &iov;
		uio2.uio_iovcnt = 1;
		uio2.uio_offset = 0;
		uio2.uio_resid = ilen;
		uio2.uio_segflg = UIO_SYSSPACE;
		uio2.uio_rw = UIO_WRITE;
		uio2.uio_td = uio->uio_td;
		error = ttwrite(tp, &uio2, flag);
		if (error != 0)
			break;
		kfree(ibuf, M_SNP);
		ibuf = NULL;
	}
	if (ibuf != NULL)
		kfree(ibuf, M_SNP);
	lwkt_reltoken(&tty_token);
	return (error);
}
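
The temporary uio2 that snplwrite() builds is a recurring kernel pattern: wrap a kernel buffer in a single-iovec, UIO_SYSSPACE uio so it can be handed to a routine (here ttwrite()) that only consumes uios. A sketch of that setup as a standalone helper follows; init_sys_write_uio is a hypothetical name, and the field list assumes the in-kernel struct uio / struct iovec definitions used throughout these examples.

/*
 * Hypothetical helper (not in the tree): point auio/aiov at a kernel
 * buffer so uio-consuming code can read len bytes from buf, exactly
 * as the inline uio2 setup in snplwrite() above does.
 */
static void
init_sys_write_uio(struct uio *auio, struct iovec *aiov,
		   void *buf, size_t len, struct thread *td)
{
	aiov->iov_base = buf;
	aiov->iov_len = len;
	auio->uio_iov = aiov;
	auio->uio_iovcnt = 1;
	auio->uio_offset = 0;
	auio->uio_resid = len;
	auio->uio_segflg = UIO_SYSSPACE;
	auio->uio_rw = UIO_WRITE;
	auio->uio_td = td;
}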
Example #7
/*ARGSUSED*/
static	int
logread(struct dev_read_args *ap)
{
	struct uio *uio = ap->a_uio;
	struct msgbuf *mbp = msgbufp;
	long l;
	int error = 0;

	crit_enter();
	while (mbp->msg_bufr == mbp->msg_bufx) {
		if (ap->a_ioflag & IO_NDELAY) {
			crit_exit();
			return (EWOULDBLOCK);
		}
		logsoftc.sc_state |= LOG_RDWAIT;
		if ((error = tsleep((caddr_t)mbp, PCATCH, "klog", 0))) {
			crit_exit();
			return (error);
		}
	}
	crit_exit();
	logsoftc.sc_state &= ~LOG_RDWAIT;

	while (uio->uio_resid > 0) {
		l = (long)mbp->msg_bufx - (long)mbp->msg_bufr;
		if (l < 0)
			l = mbp->msg_size - mbp->msg_bufr;
		l = (long)szmin(l, uio->uio_resid);
		if (l == 0)
			break;
		error = uiomove((caddr_t)msgbufp->msg_ptr + mbp->msg_bufr,
				(size_t)l, uio);
		if (error)
			break;
		mbp->msg_bufr += l;
		if (mbp->msg_bufr >= mbp->msg_size)
			mbp->msg_bufr = 0;
	}
	return (error);
}
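
logread() treats the message buffer as a ring: when the write index msg_bufx has wrapped behind the read index msg_bufr, only the run from msg_bufr to the end of the buffer is contiguous, and the read index resets to 0 on the next pass. The run computation in isolation, as a runnable sketch with hypothetical names:

#include <assert.h>

/*
 * Largest contiguous readable run in a ring buffer of the given size,
 * mirroring the l computation in logread() above.
 */
static long
ring_contig(long bufr, long bufx, long size)
{
	long l = bufx - bufr;

	if (l < 0)			/* writer wrapped around */
		l = size - bufr;
	return (l);
}

int
main(void)
{
	assert(ring_contig(10, 50, 100) == 40);	/* no wrap: 40 bytes */
	assert(ring_contig(80, 20, 100) == 20);	/* wrapped: read to end */
	return (0);
}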
Example #8
int
puffs_biowrite(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred)
{
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	struct buf *bp;
	struct vattr vattr;
	off_t loffset, fsize;
	int boff, bytes;
	int error = 0;
	int bcount;
	int trivial;

	KKASSERT(uio->uio_rw == UIO_WRITE);
	KKASSERT(vp->v_type == VREG);

	if (uio->uio_offset < 0)
		return EINVAL;
	if (uio->uio_resid == 0)
		return 0;

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 *
	 * We need to obtain an exclusive lock if we intend to modify file
	 * size in order to guarantee the append point with multiple contending
	 * writers.
	 */
	if (ioflag & IO_APPEND) {
		/* XXXDF relock if necessary */
		KKASSERT(vn_islocked(vp) == LK_EXCLUSIVE);
		error = VOP_GETATTR(vp, &vattr);
		if (error)
			return error;
		uio->uio_offset = puffs_meta_getsize(vp);
	}

	do {
		boff = uio->uio_offset & (biosize-1);
		loffset = uio->uio_offset - boff;
		bytes = (int)szmin((unsigned)(biosize - boff), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.  When extending B_CACHE will be
		 * set if possible.  See UIO_NOCOPY note below.
		 */
		fsize = puffs_meta_getsize(vp);
		if (uio->uio_offset + bytes > fsize) {
			trivial = (uio->uio_segflg != UIO_NOCOPY &&
			    uio->uio_offset <= fsize);
			puffs_meta_setsize(vp, uio->uio_offset + bytes,
			    trivial);
		}
		bp = getblk(vp, loffset, biosize, 0, 0);
		if (bp == NULL) {
			error = EINTR;
			break;
		}

		/*
		 * Actual bytes in buffer which we care about
		 */
		if (loffset + biosize < fsize)
			bcount = biosize;
		else
			bcount = (int)(fsize - loffset);

		/*
		 * Avoid a read by setting B_CACHE where the data we
		 * intend to write covers the entire buffer.  Note
		 * that the buffer may have been set to B_CACHE by
		 * puffs_meta_setsize() above or otherwise inherited the
		 * flag, but if B_CACHE isn't set the buffer may be
	 * uninitialized and must be zero'd to accommodate
		 * future seek+write's.
		 *
		 * See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * When doing a UIO_NOCOPY write the buffer is not
		 * overwritten and we cannot just set B_CACHE unconditionally
		 * for full-block writes.
		 */
		if (boff == 0 && bytes == biosize &&
		    uio->uio_segflg != UIO_NOCOPY) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~(B_ERROR | B_INVAL);
		}

		/*
		 * b_resid may be set due to file EOF if we extended out.
		 * The NFS bio code will zero the difference anyway so
	 * just acknowledge the fact and set b_resid to 0.
		 */
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_cmd = BUF_CMD_READ;
			bp->b_bio2.bio_done = puffs_iodone;
			bp->b_bio2.bio_flags |= BIO_SYNC;
			vfs_busy_pages(vp, bp);
			error = puffs_doio(vp, &bp->b_bio2, uio->uio_td);
			if (error) {
				brelse(bp);
				break;
			}
			bp->b_resid = 0;
		}

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			kprintf("PUFFS append race @%08llx:%d\n",
			    (long long)bp->b_bio2.bio_offset,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
	 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
		if (bp->b_dirtyend > 0 &&
		    (boff > bp->b_dirtyend ||
		    (boff + bytes) < bp->b_dirtyoff)
		   ) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove(bp->b_data + boff, bytes, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 *
		 * The underlying VM pages have been marked valid by
		 * virtue of acquiring the bp.  Because the entire buffer
		 * is marked dirty we do not have to worry about cleaning
		 * out the related dirty bits (and wouldn't really know
		 * how to deal with byte ranges anyway)
		 */
		if (bytes) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = imin(boff, bp->b_dirtyoff);
				bp->b_dirtyend = imax(boff + bytes,
				    bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = boff;
				bp->b_dirtyend = boff + bytes;
			}
		}

		if (ioflag & IO_SYNC) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && bytes > 0);

	return error;
}
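
The b_dirtyoff/b_dirtyend bookkeeping above follows one rule: a new write [boff, boff + bytes) may be merged into the existing dirty range only if the two touch or overlap; a disjoint write forces the old range out first, so a buffer never records a dirty range with a hole in it. That merge decision in isolation, as a runnable sketch (hypothetical names; dirtyend == 0 means nothing is dirty, as in the driver):

#include <assert.h>
#include <stdbool.h>

/* May [off, off+len) merge with [doff, dend) without leaving a hole? */
static bool
dirty_mergeable(int doff, int dend, int off, int len)
{
	if (dend == 0)
		return (true);		/* nothing dirty yet */
	return (!(off > dend || off + len < doff));
}

/* Extend the dirty range the way puffs_biowrite() does. */
static void
dirty_merge(int *doff, int *dend, int off, int len)
{
	if (*dend > 0) {
		*doff = (off < *doff) ? off : *doff;
		*dend = (off + len > *dend) ? off + len : *dend;
	} else {
		*doff = off;
		*dend = off + len;
	}
}

int
main(void)
{
	int doff = 100, dend = 200;

	assert(dirty_mergeable(doff, dend, 150, 100));	/* overlap */
	assert(dirty_mergeable(doff, dend, 200, 50));	/* touching end */
	assert(!dirty_mergeable(doff, dend, 300, 10));	/* hole: flush first */
	dirty_merge(&doff, &dend, 150, 100);
	assert(doff == 100 && dend == 250);
	return (0);
}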
Example #9
int
uriowrite(struct dev_write_args *ap)
{
    cdev_t dev = ap->a_head.a_dev;
    struct uio *uio = ap->a_uio;
#if (USBDI >= 1)
    struct urio_softc * sc;
    usbd_xfer_handle reqh;
#else
    usbd_request_handle reqh;
#endif
    int unit = URIOUNIT(dev);
    usbd_status r;
    char buf[URIO_BBSIZE];
    u_int32_t n;
    int error = 0;

    sc = devclass_get_softc(urio_devclass, unit);

    DPRINTFN(5, ("uriowrite: %d\n", unit));
    if (!sc->sc_opened)
        return EIO;

#if (USBDI >= 1)
    sc->sc_refcnt++;
    reqh = usbd_alloc_xfer(sc->sc_udev);
#else
    reqh = usbd_alloc_request();
#endif
    if (reqh == 0)
        return EIO;
    while ((n = szmin(URIO_BBSIZE, uio->uio_resid)) != 0) {
        error = uiomove(buf, n, uio);
        if (error)
            break;
        DPRINTFN(1, ("uriowrite: transfer %d bytes\n", n));
#if (USBDI >= 1)
        usbd_setup_xfer(reqh, sc->sc_pipeh_out, 0, buf, n,
                        0, RIO_RW_TIMEOUT, 0);
#else
        r = usbd_setup_request(reqh, sc->sc_pipeh_out, 0, buf, n,
                               0, RIO_RW_TIMEOUT, 0);
        if (r != USBD_NORMAL_COMPLETION) {
            error = EIO;
            break;
        }
#endif
        r = usbd_sync_transfer(reqh);
        if (r != USBD_NORMAL_COMPLETION) {
            DPRINTFN(1, ("uriowrite: error=%d\n", r));
            usbd_clear_endpoint_stall(sc->sc_pipeh_out);
            error = EIO;
            break;
        }
#if (USBDI >= 1)
        usbd_get_xfer_status(reqh, 0, 0, 0, 0);
#endif
    }

#if (USBDI >= 1)
    usbd_free_xfer(reqh);
#else
    usbd_free_request(reqh);
#endif

    return error;
}
Example #10
/*
 * p->p_token is held on entry.
 */
static int
procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio)
{
	int error;
	int writing;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;

	/*
	 * if the vmspace is in the midst of being allocated or deallocated,
	 * or the process is exiting, don't try to grab anything.  The
	 * page table usage in that process may be messed up.
	 */
	vm = p->p_vmspace;
	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return EFAULT;
	if ((p->p_flags & (P_WEXIT | P_INEXEC)) ||
	    sysref_isinactive(&vm->vm_sysref))
		return EFAULT;

	/*
	 * The map we want...
	 */
	vmspace_hold(vm);
	map = &vm->vm_map;

	writing = (uio->uio_rw == UIO_WRITE);
	reqprot = VM_PROT_READ;
	if (writing)
		reqprot |= VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE;

	kva = kmem_alloc_pageable(&kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_offset_t uva;
		vm_offset_t page_offset;	/* offset into page */
		size_t len;
		vm_page_t m;

		uva = (vm_offset_t) uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = szmin(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		m = vm_fault_page(map, pageno, reqprot,
				  VM_FAULT_NORMAL, &error);
		if (error) {
			KKASSERT(m == NULL);
			error = EFAULT;
			break;
		}

		/*
		 * Create a temporary KVA mapping for the page and do the
		 * I/O through it.  We can switch between cpus so don't
		 * bother synchronizing across all cores.
		 */
		pmap_kenter_quick(kva, VM_PAGE_TO_PHYS(m));
		error = uiomove((caddr_t)(kva + page_offset), len, uio);
		pmap_kremove_quick(kva);

		/*
		 * release the page and we are done
		 */
		vm_page_unhold(m);
	} while (error == 0 && uio->uio_resid > 0);

	vmspace_drop(vm);
	kmem_free(&kernel_map, kva, PAGE_SIZE);

	return (error);
}
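
procfs_rwmem() clips every pass to the remainder of the current page: trunc_page() gives the page base, the in-page offset is the difference, and the copy length is the smaller of "rest of this page" and "rest of the request". That per-pass arithmetic as a runnable sketch (hypothetical names, 4 KiB pages assumed):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define trunc_page(x)	((x) & ~(PAGE_SIZE - 1))

/* Bytes to copy this pass: rest of the page, capped by the request. */
static size_t
page_chunk(size_t uva, size_t resid)
{
	size_t page_offset = uva - trunc_page(uva);
	size_t left = PAGE_SIZE - page_offset;

	return ((resid < left) ? resid : left);
}

int
main(void)
{
	assert(page_chunk(0x1000, 10000) == 4096);	/* aligned: whole page */
	assert(page_chunk(0x1ff0, 10000) == 16);	/* stops at page end */
	assert(page_chunk(0x1ff0, 8) == 8);		/* short request wins */
	return (0);
}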
Example #11
static int
snpread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	struct snoop *snp;
	int error, len, n, nblen;
	caddr_t from;
	char *nbuf;

	lwkt_gettoken(&tty_token);
	snp = dev->si_drv1;
	KASSERT(snp->snp_len + snp->snp_base <= snp->snp_blen,
	    ("snoop buffer error"));

	if (snp->snp_tty == NULL) {
		lwkt_reltoken(&tty_token);
		return (EIO);
	}

	snp->snp_flags &= ~SNOOP_RWAIT;

	do {
		if (snp->snp_len == 0) {
			if (ap->a_ioflag & IO_NDELAY) {
				lwkt_reltoken(&tty_token);
				return (EWOULDBLOCK);
			}
			snp->snp_flags |= SNOOP_RWAIT;
			error = tsleep((caddr_t)snp, PCATCH, "snprd", 0);
			if (error != 0) {
				lwkt_reltoken(&tty_token);
				return (error);
			}
		}
	} while (snp->snp_len == 0);

	n = snp->snp_len;

	error = 0;
	while (snp->snp_len > 0 && uio->uio_resid > 0 && error == 0) {
		len = (int)szmin(uio->uio_resid, snp->snp_len);
		from = (caddr_t)(snp->snp_buf + snp->snp_base);
		if (len == 0)
			break;

		error = uiomove(from, (size_t)len, uio);
		snp->snp_base += len;
		snp->snp_len -= len;
	}
	if ((snp->snp_flags & SNOOP_OFLOW) && (n < snp->snp_len)) {
		snp->snp_flags &= ~SNOOP_OFLOW;
	}
	crit_enter();
	nblen = snp->snp_blen;
	if (((nblen / 2) >= SNOOP_MINLEN) && (nblen / 2) >= snp->snp_len) {
		while (nblen / 2 >= snp->snp_len && nblen / 2 >= SNOOP_MINLEN)
			nblen = nblen / 2;
		if ((nbuf = kmalloc(nblen, M_SNP, M_NOWAIT)) != NULL) {
			bcopy(snp->snp_buf + snp->snp_base, nbuf, snp->snp_len);
			kfree(snp->snp_buf, M_SNP);
			snp->snp_buf = nbuf;
			snp->snp_blen = nblen;
			snp->snp_base = 0;
		}
	}
	crit_exit();

	lwkt_reltoken(&tty_token);
	return (error);
}
Example #12
/*
 * I/O ops
 */
static	int
ptcwrite(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tty *tp = dev->si_tty;
	u_char *cp = NULL;
	int cc = 0;
	u_char locbuf[BUFSIZ];
	int cnt = 0;
	struct pt_ioctl *pti = dev->si_drv1;
	int error = 0;

	lwkt_gettoken(&tty_token);
again:
	if ((tp->t_state&TS_ISOPEN) == 0)
		goto block;
	if (pti->pt_flags & PF_REMOTE) {
		if (tp->t_canq.c_cc)
			goto block;
		while ((ap->a_uio->uio_resid > 0 || cc > 0) &&
		       tp->t_canq.c_cc < TTYHOG - 1) {
			if (cc == 0) {
				cc = (int)szmin(ap->a_uio->uio_resid, BUFSIZ);
				cc = imin(cc, TTYHOG - 1 - tp->t_canq.c_cc);
				cp = locbuf;
				error = uiomove(cp, (size_t)cc, ap->a_uio);
				if (error) {
					lwkt_reltoken(&tty_token);
					return (error);
				}
				/* check again for safety */
				if ((tp->t_state & TS_ISOPEN) == 0) {
					/* adjust as usual */
					ap->a_uio->uio_resid += cc;
					lwkt_reltoken(&tty_token);
					return (EIO);
				}
			}
			if (cc > 0) {
				cc = b_to_q((char *)cp, cc, &tp->t_canq);
				/*
				 * XXX we don't guarantee that the canq size
				 * is >= TTYHOG, so the above b_to_q() may
				 * leave some bytes uncopied.  However, space
				 * is guaranteed for the null terminator if
				 * we don't fail here since (TTYHOG - 1) is
				 * not a multiple of CBSIZE.
				 */
				if (cc > 0)
					break;
			}
		}
		/* adjust for data copied in but not written */
		ap->a_uio->uio_resid += cc;
		clist_putc(0, &tp->t_canq);
		ttwakeup(tp);
		wakeup(TSA_PTS_READ(tp));
		lwkt_reltoken(&tty_token);
		return (0);
	}
	while (ap->a_uio->uio_resid > 0 || cc > 0) {
		if (cc == 0) {
			cc = (int)szmin(ap->a_uio->uio_resid, BUFSIZ);
			cp = locbuf;
			error = uiomove(cp, (size_t)cc, ap->a_uio);
			if (error) {
				lwkt_reltoken(&tty_token);
				return (error);
			}
			/* check again for safety */
			if ((tp->t_state & TS_ISOPEN) == 0) {
				/* adjust for data copied in but not written */
				ap->a_uio->uio_resid += cc;
				lwkt_reltoken(&tty_token);
				return (EIO);
			}
		}
		while (cc > 0) {
			if ((tp->t_rawq.c_cc + tp->t_canq.c_cc) >= TTYHOG - 2 &&
			   (tp->t_canq.c_cc > 0 || !(tp->t_lflag&ICANON))) {
				wakeup(TSA_HUP_OR_INPUT(tp));
				goto block;
			}
			(*linesw[tp->t_line].l_rint)(*cp++, tp);
			cnt++;
			cc--;
		}
		cc = 0;
	}
	lwkt_reltoken(&tty_token);
	return (0);
block:
	/*
	 * Come here to wait for slave to open, for space
	 * in outq, or space in rawq, or an empty canq.
	 */
	if ((tp->t_state & TS_CONNECTED) == 0) {
		/* adjust for data copied in but not written */
		ap->a_uio->uio_resid += cc;
		lwkt_reltoken(&tty_token);
		return (EIO);
	}
	if (ap->a_ioflag & IO_NDELAY) {
		/* adjust for data copied in but not written */
		ap->a_uio->uio_resid += cc;
		if (cnt == 0) {
			lwkt_reltoken(&tty_token);
			return (EWOULDBLOCK);
		}
		lwkt_reltoken(&tty_token);
		return (0);
	}
	error = tsleep(TSA_PTC_WRITE(tp), PCATCH, "ptcout", 0);
	if (error) {
		/* adjust for data copied in but not written */
		ap->a_uio->uio_resid += cc;
		lwkt_reltoken(&tty_token);
		return (error);
	}
	goto again;
}
Example #13
static	int
ptcread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tty *tp = dev->si_tty;
	struct pt_ioctl *pti = dev->si_drv1;
	char buf[BUFSIZ];
	int error = 0, cc;

	lwkt_gettoken(&tty_token);
	/*
	 * We want to block until the slave
	 * is open, and there's something to read;
	 * but if we lost the slave or we're NBIO,
	 * then return the appropriate error instead.
	 */
	for (;;) {
		if (tp->t_state&TS_ISOPEN) {
			if ((pti->pt_flags & PF_PKT) && pti->pt_send) {
				error = ureadc((int)pti->pt_send, ap->a_uio);
				if (error) {
					lwkt_reltoken(&tty_token);
					return (error);
				}
				if (pti->pt_send & TIOCPKT_IOCTL) {
					cc = (int)szmin(ap->a_uio->uio_resid,
							sizeof(tp->t_termios));
					uiomove((caddr_t)&tp->t_termios, cc,
						ap->a_uio);
				}
				pti->pt_send = 0;
				lwkt_reltoken(&tty_token);
				return (0);
			}
			if ((pti->pt_flags & PF_UCNTL) && pti->pt_ucntl) {
				error = ureadc((int)pti->pt_ucntl, ap->a_uio);
				if (error) {
					lwkt_reltoken(&tty_token);
					return (error);
				}
				pti->pt_ucntl = 0;
				lwkt_reltoken(&tty_token);
				return (0);
			}
			if (tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0)
				break;
		}
		if ((tp->t_state & TS_CONNECTED) == 0) {
			lwkt_reltoken(&tty_token);
			return (0);	/* EOF */
		}
		if (ap->a_ioflag & IO_NDELAY) {
			lwkt_reltoken(&tty_token);
			return (EWOULDBLOCK);
		}
		error = tsleep(TSA_PTC_READ(tp), PCATCH, "ptcin", 0);
		if (error) {
			lwkt_reltoken(&tty_token);
			return (error);
		}
	}
	if (pti->pt_flags & (PF_PKT|PF_UCNTL))
		error = ureadc(0, ap->a_uio);
	while (ap->a_uio->uio_resid > 0 && error == 0) {
		cc = q_to_b(&tp->t_outq, buf,
			    (int)szmin(ap->a_uio->uio_resid, BUFSIZ));
		if (cc <= 0)
			break;
		error = uiomove(buf, (size_t)cc, ap->a_uio);
	}
	ttwwakeup(tp);
	lwkt_reltoken(&tty_token);
	return (error);
}
Example #14
/*
 * Implement receive operations on a socket.
 *
 * We depend on the way that records are added to the signalsockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 *
 * Although the signalsockbuf is locked, new data may still be appended.
 * A token inside the ssb_lock deals with MP issues and still allows
 * the network to access the socket if we block in a uio.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
	  struct sockbuf *sio, struct mbuf **controlp, int *flagsp)
{
	struct mbuf *m, *n;
	struct mbuf *free_chain = NULL;
	int flags, len, error, offset;
	struct protosw *pr = so->so_proto;
	int moff, type = 0;
	size_t resid, orig_resid;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = (size_t)(sio->sb_climit - sio->sb_cc);
	orig_resid = resid;

	if (psa)
		*psa = NULL;
	if (controlp)
		*controlp = NULL;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(MB_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		if (sio) {
			do {
				sbappend(sio, m);
				KKASSERT(resid >= (size_t)m->m_len);
				resid -= (size_t)m->m_len;
			} while (resid > 0 && m);
		} else {
			do {
				uio->uio_resid = resid;
				error = uiomove(mtod(m, caddr_t),
						(int)szmin(resid, m->m_len),
						uio);
				resid = uio->uio_resid;
				m = m_free(m);
			} while (uio->uio_resid && error == 0 && m);
		}
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if ((so->so_state & SS_ISCONFIRMING) && resid)
		so_pru_rcvd(so, 0);

	/*
	 * The token interlocks against the protocol thread while
	 * ssb_lock is a blocking lock against other userland entities.
	 */
	lwkt_gettoken(&so->so_rcv.ssb_token);
restart:
	error = ssb_lock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		goto done;

	m = so->so_rcv.ssb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
	    (size_t)so->so_rcv.ssb_cc < resid) &&
	    (so->so_rcv.ssb_cc < so->so_rcv.ssb_lowat ||
	    ((flags & MSG_WAITALL) && resid <= (size_t)so->so_rcv.ssb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != NULL || !so->so_rcv.ssb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next) {
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.ssb_mb;
				goto dontblock;
			}
		}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (pr->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (resid == 0)
			goto release;
		if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		ssb_unlock(&so->so_rcv);
		error = ssb_wait(&so->so_rcv);
		if (error)
			goto done;
		goto restart;
	}
dontblock:
	if (uio && uio->uio_td && uio->uio_td->td_proc)
		uio->uio_td->td_lwp->lwp_ru.ru_msgrcv++;

	/*
	 * note: m should be == sb_mb here.  Cache the next record while
	 * cleaning up.  Note that calling m_free*() will break out critical
	 * section.
	 */
	KKASSERT(m == so->so_rcv.ssb_mb);

	/*
	 * Skip any address mbufs prepending the record.
	 */
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *));
		if (flags & MSG_PEEK)
			m = m->m_next;
		else
			m = sbunlinkmbuf(&so->so_rcv.sb, m, &free_chain);
	}
Example #15
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
	struct mbuf *top, struct mbuf *control, int flags,
	struct thread *td)
{
	struct mbuf **mp;
	struct mbuf *m;
	size_t resid;
	int space, len;
	int clen = 0, error, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;
	int pru_flags;

	if (uio) {
		resid = uio->uio_resid;
	} else {
		resid = (size_t)top->m_pkthdr.len;
#ifdef INVARIANTS
		len = 0;
		for (m = top; m; m = m->m_next)
			len += m->m_len;
		KKASSERT(top->m_pkthdr.len == len);
#endif
	}

	/*
	 * WARNING!  resid is unsigned, space and len are signed.  space
	 * 	     can wind up negative if the sockbuf is overcommitted.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (so->so_type == SOCK_STREAM && (flags & MSG_EOR)) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (td->td_lwp != NULL)
		td->td_lwp->lwp_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	gotoerr(errcode)	{ error = errcode; goto release; }

restart:
	error = ssb_lock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;

	do {
		if (so->so_state & SS_CANTSENDMORE)
			gotoerr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					gotoerr(ENOTCONN);
			} else if (addr == 0)
			    gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				   ENOTCONN : EDESTADDRREQ);
		}
		if ((atomic && resid > so->so_snd.ssb_hiwat) ||
		    clen > so->so_snd.ssb_hiwat) {
			gotoerr(EMSGSIZE);
		}
		space = ssb_space(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((space < 0 || (size_t)space < resid + clen) && uio &&
		    (atomic || space < so->so_snd.ssb_lowat || space < clen)) {
			if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
				gotoerr(EWOULDBLOCK);
			ssb_unlock(&so->so_snd);
			error = ssb_wait(&so->so_snd);
			if (error)
				goto out;
			goto restart;
		}
		mp = &top;
		space -= clen;
		do {
		    if (uio == NULL) {
			/*
			 * Data is prepackaged in "top".
			 */
			resid = 0;
			if (flags & MSG_EOR)
				top->m_flags |= M_EOR;
		    } else do {
			if (resid > INT_MAX)
				resid = INT_MAX;
			m = m_getl((int)resid, MB_WAIT, MT_DATA,
				   top == NULL ? M_PKTHDR : 0, &mlen);
			if (top == NULL) {
				m->m_pkthdr.len = 0;
				m->m_pkthdr.rcvif = NULL;
			}
			len = imin((int)szmin(mlen, resid), space);
			if (resid < MINCLSIZE) {
				/*
				 * For datagram protocols, leave room
				 * for protocol headers in first mbuf.
				 */
				if (atomic && top == 0 && len < mlen)
					MH_ALIGN(m, len);
			}
			space -= len;
			error = uiomove(mtod(m, caddr_t), (size_t)len, uio);
			resid = uio->uio_resid;
			m->m_len = len;
			*mp = m;
			top->m_pkthdr.len += len;
			if (error)
				goto release;
			mp = &m->m_next;
			if (resid == 0) {
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
				break;
			}
		    } while (space > 0 && atomic);
		    if (dontroute)
			    so->so_options |= SO_DONTROUTE;
		    if (flags & MSG_OOB) {
		    	    pru_flags = PRUS_OOB;
		    } else if ((flags & MSG_EOF) &&
		    	       (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			       (resid == 0)) {
			    /*
			     * If the user set MSG_EOF, the protocol
			     * understands this flag and nothing left to
			     * send then use PRU_SEND_EOF instead of PRU_SEND.
			     */
		    	    pru_flags = PRUS_EOF;
		    } else if (resid > 0 && space > 0) {
			    /* If there is more to send, set PRUS_MORETOCOME */
		    	    pru_flags = PRUS_MORETOCOME;
		    } else {
		    	    pru_flags = 0;
		    }
		    /*
		     * XXX all the SS_CANTSENDMORE checks previously
		     * done could be out of date.  We could have received
		     * a reset packet in an interrupt or maybe we slept
		     * while doing page faults in uiomove() etc. We could
		     * probably recheck again inside the splnet() protection
		     * here, but there are probably other places that this
		     * also happens.  We must rethink this.
		     */
		    error = so_pru_send(so, pru_flags, top, addr, control, td);
		    if (dontroute)
			    so->so_options &= ~SO_DONTROUTE;
		    clen = 0;
		    control = 0;
		    top = NULL;
		    mp = &top;
		    if (error)
			    goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	ssb_unlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
Example #16
/*
 * hpfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 */
static int
hpfs_write(struct vop_write_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct hpfsnode *hp = VTOHP(vp);
	struct uio *uio = ap->a_uio;
	struct buf *bp;
	u_int xfersz, towrite;
	u_int off;
	daddr_t lbn, bn;
	int runl;
	int error = 0;

	dprintf(("hpfs_write(0x%x, off: %d resid: %ld, segflg: %d):\n",
		hp->h_no, (u_int32_t)uio->uio_offset,
		uio->uio_resid, uio->uio_segflg));

	if (ap->a_ioflag & IO_APPEND) {
		dprintf(("hpfs_write: APPEND mode\n"));
		uio->uio_offset = hp->h_fn.fn_size;
	}
	if (uio->uio_offset + uio->uio_resid > hp->h_fn.fn_size) {
		error = hpfs_extend(hp, uio->uio_offset + uio->uio_resid);
		if (error) {
			kprintf("hpfs_write: hpfs_extend FAILED %d\n", error);
			return (error);
		}
	}

	while (uio->uio_resid) {
		lbn = uio->uio_offset >> DEV_BSHIFT;
		off = uio->uio_offset & (DEV_BSIZE - 1);
		dprintf(("hpfs_write: resid: 0x%lx lbn: 0x%x off: 0x%x\n",
			uio->uio_resid, lbn, off));
		error = hpfs_hpbmap(hp, lbn, &bn, &runl);
		if (error)
			return (error);

		towrite = szmin(off + uio->uio_resid,
				min(DFLTPHYS, (runl+1)*DEV_BSIZE));
		xfersz = (towrite + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		dprintf(("hpfs_write: bn: 0x%x (0x%x) towrite: 0x%x (0x%x)\n",
			bn, runl, towrite, xfersz));

		/*
		 * We do not have to issue a read-before-write if the xfer
		 * size does not cover the whole block.
		 *
		 * In the UIO_NOCOPY case, however, we are not overwriting
		 * anything and must do a read-before-write to fill in
		 * any missing pieces.
		 */
		if (off == 0 && towrite == xfersz &&
		    uio->uio_segflg != UIO_NOCOPY) {
			bp = getblk(hp->h_devvp, dbtodoff(bn), xfersz, 0, 0);
			clrbuf(bp);
		} else {
			error = bread(hp->h_devvp, dbtodoff(bn), xfersz, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		error = uiomove(bp->b_data + off, (size_t)(towrite - off), uio);
		if (error) {
			brelse(bp);
			return (error);
		}

		if (ap->a_ioflag & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}

	dprintf(("hpfs_write: successful\n"));
	return (0);
}