Example #1
/* --------------------------------------------------------------------- */
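/*
 * Read at most one page (tlen bytes at the given in-page offset) from the
 * tmpfs backing object tobj into the uio, paging the data in from the
 * pager or zero-filling it if the page is not yet fully valid.
 */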
static int
tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
    vm_offset_t offset, size_t tlen, struct uio *uio)
{
	vm_page_t	m;
	int		error;

	VM_OBJECT_LOCK(tobj);
	vm_object_pip_add(tobj, 1);
	m = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO | VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
			error = vm_pager_get_pages(tobj, &m, 1, 0);
			if (error != 0) {
				printf("tmpfs get pages from pager error [read]\n");
				goto out;
			}
		} else
			vm_page_zero_invalid(m, TRUE);
	}
	VM_OBJECT_UNLOCK(tobj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	VM_OBJECT_LOCK(tobj);
out:
	vm_page_lock(m);
	vm_page_unwire(m, TRUE);
	vm_page_unlock(m);
	vm_page_wakeup(m);
	vm_object_pip_subtract(tobj, 1);
	VM_OBJECT_UNLOCK(tobj);

	return (error);
}
Example #2
/* --------------------------------------------------------------------- */
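/*
 * Variant of tmpfs_nocacheread() in which a failed pager read frees the
 * page and returns EIO instead of propagating the raw pager return value.
 */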
static int
tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
    vm_offset_t offset, size_t tlen, struct uio *uio)
{
	vm_page_t	m;
	int		error, rv;

	VM_OBJECT_LOCK(tobj);
	m = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(tobj, &m, 1, 0);
			if (rv != VM_PAGER_OK) {
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_UNLOCK(tobj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
	}
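	/*
	 * The object lock is dropped across uiomove_fromphys(), which may
	 * fault on the user buffer; the page stays wired in the meantime.
	 */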
	VM_OBJECT_UNLOCK(tobj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	VM_OBJECT_LOCK(tobj);
	vm_page_lock(m);
	vm_page_unwire(m, TRUE);
	vm_page_unlock(m);
	vm_page_wakeup(m);
	VM_OBJECT_UNLOCK(tobj);

	return (error);
}
Example #3
/*
 * Copy data between an XIO and a UIO.  If the UIO represents userspace it
 * must be relative to the current context.
 *
 * uoffset is the abstracted starting offset in the XIO, not the actual
 * offset, and usually starts at 0.
 *
 * The XIO is not modified.  The UIO is updated to reflect the copy.
 *
 * UIO_READ	xio -> uio
 * UIO_WRITE	uio -> xio
 */
int
xio_uio_copy(xio_t xio, int uoffset, struct uio *uio, size_t *sizep)
{
    size_t bytes;
    int error;

    bytes = xio->xio_bytes - uoffset;
    if (bytes > uio->uio_resid)
	bytes = uio->uio_resid;
    KKASSERT(bytes >= 0);
    error = uiomove_fromphys(xio->xio_pages, xio->xio_offset + uoffset, 
				bytes, uio);
    if (error == 0)
	*sizep = bytes;
    else
	*sizep = 0;
    return(error);
}
Example #4
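/*
 * Copy len bytes from the kernel virtual buffer src into the physical
 * address range starting at dst.
 */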
int
physcopyin(void *src, vm_paddr_t dst, size_t len)
{
    vm_page_t m[PHYS_PAGE_COUNT(len)];
    struct iovec iov[1];
    struct uio uio;
    int i;

    iov[0].iov_base = src;
    iov[0].iov_len = len;
    uio.uio_iov = iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = 0;
    uio.uio_resid = len;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = UIO_WRITE;
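    /*
     * Collect the vm_page for each destination page.  Advancing dst by
     * whole pages leaves its low bits untouched, so "dst & PAGE_MASK"
     * below is still the starting in-page offset.
     */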
    for (i = 0; i < PHYS_PAGE_COUNT(len); i++, dst += PAGE_SIZE)
        m[i] = PHYS_TO_VM_PAGE(dst);
    return (uiomove_fromphys(m, dst & PAGE_MASK, len, &uio));
}
Example #5
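/*
 * Copy len bytes from the physical address range starting at src into the
 * kernel virtual buffer dst; the mirror image of physcopyin().
 */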
int
physcopyout(vm_paddr_t src, void *dst, size_t len)
{
    vm_page_t m[PHYS_PAGE_COUNT(len)];
    struct iovec iov[1];
    struct uio uio;
    int i;

    iov[0].iov_base = dst;
    iov[0].iov_len = len;
    uio.uio_iov = iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = 0;
    uio.uio_resid = len;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = UIO_READ;
    for (i = 0; i < PHYS_PAGE_COUNT(len); i++, src += PAGE_SIZE)
        m[i] = PHYS_TO_VM_PAGE(src);
    return (uiomove_fromphys(m, src & PAGE_MASK, len, &uio));
}
Example #6
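/*
 * Copy len bytes of DDP payload, described by the mbuf's page gather
 * list, into the uio.
 */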
int
t3_ddp_copy(const struct mbuf *m, int offset, struct uio *uio, int len)
{
	int resid_init, err;
	struct ddp_gather_list *gl = (struct ddp_gather_list *)m->m_ddp_gl;
	
	resid_init = uio->uio_resid;
	
	if (!gl->dgl_pages)
		panic("pages not set\n");

	CTR4(KTR_TOM, "t3_ddp_copy: offset=%d dgl_offset=%d cur_offset=%d len=%d",
	    offset, gl->dgl_offset, m->m_cur_offset, len);
	offset += gl->dgl_offset + m->m_cur_offset;
	KASSERT(len <= gl->dgl_length,
	    ("len=%d > dgl_length=%d in ddp_copy\n", len, gl->dgl_length));


	err = uiomove_fromphys(gl->dgl_pages, offset, len, uio);
	return (err);
}
Example #7
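/*
 * /dev/mem and /dev/kmem handler: /dev/mem offsets are physical addresses
 * copied a page at a time through uiomove_fromphys() on a stack vm_page,
 * while /dev/kmem offsets are kernel virtual addresses copied with
 * uiomove() after checking that the range is resident and accessible.
 */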
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	GIANT_REQUIRED;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

			off = uio->uio_offset & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			m.phys_addr = trunc_page(v);
			marr = &m;
			error = uiomove_fromphys(&marr, off, cnt, uio);
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/* 
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */
			if (va >= VM_MIN_KERNEL_ADDRESS &&
			    eva <= VM_MAX_KERNEL_ADDRESS) {
				for (; va < eva; va += PAGE_SIZE)
					if (pmap_extract(kernel_pmap, va) == 0)
						return (EFAULT);

				prot = (uio->uio_rw == UIO_READ)
				    ? VM_PROT_READ : VM_PROT_WRITE;

				va = uio->uio_offset;
				if (kernacc((void *) va, iov->iov_len, prot)
				    == FALSE)
					return (EFAULT);
			}

			va = uio->uio_offset;
			error = uiomove((void *)va, iov->iov_len, uio);
			continue;
		}
	}

	return (error);
}
Example #8
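/*
 * Write at most one page of data from the uio into the tmpfs node: if the
 * page is resident and fully valid in the vnode object vobj it is written
 * there and then copied whole into the backing object's page, otherwise
 * the backing object's page is written directly.
 */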
static int
tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
{
	vm_pindex_t	idx;
	vm_page_t	vpg, tpg;
	vm_offset_t	offset;
	off_t		addr;
	size_t		tlen;
	int		error, rv;

	error = 0;
	
	addr = uio->uio_offset;
	idx = OFF_TO_IDX(addr);
	offset = addr & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	if ((vobj == NULL) ||
	    (vobj->resident_page_count == 0 && vobj->cache == NULL)) {
		vpg = NULL;
		goto nocache;
	}

	VM_OBJECT_LOCK(vobj);
lookupvpg:
	if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
	    vm_page_is_valid(vpg, offset, tlen)) {
		if ((vpg->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(vpg);
			vm_page_sleep(vpg, "tmfsmw");
			goto lookupvpg;
		}
		vm_page_busy(vpg);
		vm_page_undirty(vpg);
		VM_OBJECT_UNLOCK(vobj);
		error = uiomove_fromphys(&vpg, offset, tlen, uio);
	} else {
		if (__predict_false(vobj->cache != NULL))
			vm_page_cache_free(vobj, idx, idx + 1);
		VM_OBJECT_UNLOCK(vobj);
		vpg = NULL;
	}
nocache:
	VM_OBJECT_LOCK(tobj);
	tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (tpg->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(tobj, &tpg, 1, 0);
			if (rv != VM_PAGER_OK) {
				vm_page_lock(tpg);
				vm_page_free(tpg);
				vm_page_unlock(tpg);
				error = EIO;
				goto out;
			}
		} else
			vm_page_zero_invalid(tpg, TRUE);
	}
	VM_OBJECT_UNLOCK(tobj);
	if (vpg == NULL)
		error = uiomove_fromphys(&tpg, offset, tlen, uio);
	else {
		KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
		pmap_copy_page(vpg, tpg);
	}
	VM_OBJECT_LOCK(tobj);
	if (error == 0) {
		KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
		    ("parts of tpg invalid"));
		vm_page_dirty(tpg);
	}
	vm_page_lock(tpg);
	vm_page_unwire(tpg, TRUE);
	vm_page_unlock(tpg);
	vm_page_wakeup(tpg);
out:
	VM_OBJECT_UNLOCK(tobj);
	if (vpg != NULL) {
		VM_OBJECT_LOCK(vobj);
		vm_page_wakeup(vpg);
		VM_OBJECT_UNLOCK(vobj);
	}

	return	(error);
}
Example #9
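/*
 * Read at most one page of the tmpfs node into the uio: the read is served
 * from a resident, valid page of the vnode object when possible (including
 * a UIO_NOCOPY path for sendfile), otherwise it falls back to
 * tmpfs_nocacheread() on the backing object.
 */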
static int
tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
{
	struct sf_buf	*sf;
	vm_pindex_t	idx;
	vm_page_t	m;
	vm_offset_t	offset;
	off_t		addr;
	size_t		tlen;
	char		*ma;
	int		error;

	addr = uio->uio_offset;
	idx = OFF_TO_IDX(addr);
	offset = addr & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	if ((vobj == NULL) ||
	    (vobj->resident_page_count == 0 && vobj->cache == NULL))
		goto nocache;

	VM_OBJECT_LOCK(vobj);
lookupvpg:
	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
	    vm_page_is_valid(m, offset, tlen)) {
		if ((m->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(m);
			vm_page_sleep(m, "tmfsmr");
			goto lookupvpg;
		}
		vm_page_busy(m);
		VM_OBJECT_UNLOCK(vobj);
		error = uiomove_fromphys(&m, offset, tlen, uio);
		VM_OBJECT_LOCK(vobj);
		vm_page_wakeup(m);
		VM_OBJECT_UNLOCK(vobj);
		return	(error);
	} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
		KASSERT(offset == 0,
		    ("unexpected offset in tmpfs_mappedread for sendfile"));
		if ((m->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(m);
			vm_page_sleep(m, "tmfsmr");
			goto lookupvpg;
		}
		vm_page_busy(m);
		VM_OBJECT_UNLOCK(vobj);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		ma = (char *)sf_buf_kva(sf);
		error = tmpfs_nocacheread_buf(tobj, idx, 0, tlen, ma);
		if (error == 0) {
			if (tlen != PAGE_SIZE)
				bzero(ma + tlen, PAGE_SIZE - tlen);
			uio->uio_offset += tlen;
			uio->uio_resid -= tlen;
		}
		sf_buf_free(sf);
		sched_unpin();
		VM_OBJECT_LOCK(vobj);
		if (error == 0)
			m->valid = VM_PAGE_BITS_ALL;
		vm_page_wakeup(m);
		VM_OBJECT_UNLOCK(vobj);
		return	(error);
	}
	VM_OBJECT_UNLOCK(vobj);
nocache:
	error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);

	return	(error);
}
Example #10
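/*
 * Move one page's worth of data between the uio and the backing VM object
 * obj; the page is held rather than wired across the copy and its LRU
 * position is refreshed afterwards.
 */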
static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	VM_OBJECT_WLOCK(obj);

	/*
	 * Parallel reads of the page content from disk are prevented
	 * by exclusive busy.
	 *
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out tobj's pages because tobj is a OBJT_SWAP
	 * type object.
	 */
	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL);
	if (m->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(obj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(obj, &m, 1, 0);
			m = vm_page_lookup(obj, idx);
			if (m == NULL) {
				printf(
		    "uiomove_object: vm_obj %p idx %jd null lookup rv %d\n",
				    obj, idx, rv);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
			if (rv != VM_PAGER_OK) {
				printf(
	    "uiomove_object: vm_obj %p idx %jd valid %x pager error %d\n",
				    obj, idx, m->valid, rv);
				vm_page_lock(m);
				vm_page_free(m);
				vm_page_unlock(m);
				VM_OBJECT_WUNLOCK(obj);
				return (EIO);
			}
		} else
			vm_page_zero_invalid(m, TRUE);
	}
	vm_page_xunbusy(m);
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
	VM_OBJECT_WUNLOCK(obj);
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0) {
		VM_OBJECT_WLOCK(obj);
		vm_page_dirty(m);
		VM_OBJECT_WUNLOCK(obj);
	}
	vm_page_lock(m);
	vm_page_unhold(m);
	if (m->queue == PQ_NONE) {
		vm_page_deactivate(m);
	} else {
		/* Requeue to maintain LRU ordering. */
		vm_page_requeue(m);
	}
	vm_page_unlock(m);

	return (error);
}
Example #11
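/*
 * memrw() for a machine that may have a hardware direct map: physical
 * ranges covered by the direct map are copied with uiomove() on the
 * direct-mapped address, anything else goes through uiomove_fromphys() on
 * a stack vm_page; /dev/kmem offsets outside the kernel virtual range are
 * translated with DMAP_TO_PHYS and handled the same way.
 */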
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

kmem_direct_mapped:	off = v & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			if (mem_valid(v, cnt)) {
				error = EFAULT;
				break;
			}
	
			if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
				error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
				    uio);
			} else {
				m.phys_addr = trunc_page(v);
				marr = &m;
				error = uiomove_fromphys(&marr, off, cnt, uio);
			}
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			if ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end)) {
				v = DMAP_TO_PHYS(va);
				goto kmem_direct_mapped;
			}

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/* 
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */

			for (; va < eva; va += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;

			va = uio->uio_offset;
			if (kernacc((void *) va, iov->iov_len, prot)
			    == FALSE)
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);

			continue;
		}
	}

	return (error);
}
Example #12
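/*
 * memrw() for a platform whose RAM is covered by the DMAP: both /dev/mem
 * and /dev/kmem prefer a plain uiomove() on a direct-mapped address and
 * fall back to uiomove_fromphys() on a stack vm_page otherwise.
 */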
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	struct vm_page m;
	vm_page_t marr;
	vm_offset_t off, v;
	u_int cnt;
	int error;

	error = 0;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		v = uio->uio_offset;
		off = v & PAGE_MASK;
		cnt = ulmin(iov->iov_len, PAGE_SIZE - (u_int)off);
		if (cnt == 0)
			continue;

		switch(dev2unit(dev)) {
		case CDEV_MINOR_KMEM:
			/* If the address is in the DMAP just copy it */
			if (VIRT_IN_DMAP(v)) {
				error = uiomove((void *)v, cnt, uio);
				break;
			}

			if (!kernacc((void *)v, cnt, uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				break;
			}

			/* Get the physical address to read */
			v = pmap_extract(kernel_pmap, v);
			if (v == 0) {
				error = EFAULT;
				break;
			}

			/* FALLTHROUGH */
		case CDEV_MINOR_MEM:
			/* If within the DMAP use this to copy from */
			if (PHYS_IN_DMAP(v)) {
				v = PHYS_TO_DMAP(v);
				error = uiomove((void *)v, cnt, uio);
				break;
			}

			/* Have uiomove_fromphys handle the data */
			m.phys_addr = trunc_page(v);
			marr = &m;
			error = uiomove_fromphys(&marr, off, cnt, uio);
			break;
		}
	}

	return (error);
}
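All of the examples share one core pattern: find or fabricate the vm_page(s) backing the data, compute the offset within the first page, and let uiomove_fromphys() perform the copy in the direction given by uio_rw. Below is a minimal sketch of that pattern, modeled on the memrw() examples above; the helper name phys_page_uiomove is made up for illustration, and the stack vm_page trick assumes the same FreeBSD-era vm_page/uio APIs the examples use.

static int
phys_page_uiomove(vm_paddr_t pa, size_t len, struct uio *uio)
{
	struct vm_page m;	/* throwaway vm_page describing the frame */
	vm_page_t marr;
	vm_offset_t off;
	size_t cnt;

	off = pa & PAGE_MASK;			/* offset within the page */
	cnt = MIN(len, PAGE_SIZE - off);	/* never cross a page boundary */

	/* Mirror the memrw() trick: a stack vm_page with only phys_addr set. */
	m.phys_addr = trunc_page(pa);
	marr = &m;
	return (uiomove_fromphys(&marr, off, cnt, uio));
}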