Example #1
/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}
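The comment in Example #1 notes that the sf_buf mapping exists precisely so the page can be handed to the VOP_READ/VOP_WRITE routines. As a hedged illustration (not taken from the source), a write-side counterpart of the routine above might look like the following; the helper name and its contract ("size" already clamped to EOF, object lock dropped) are hypothetical, only the sf_buf and uio idioms are copied from the code above.
/*
 * Hypothetical write-side counterpart of vnode_pager_input_old():
 * push one page back to the vnode through a temporary sf_buf
 * mapping.  Assumes the caller has clamped "size" to EOF and does
 * not hold the VM object lock.
 */
static int
vnode_pager_output_old_sketch(struct vnode *vp, vm_page_t m, int size)
{
	struct uio auio;
	struct iovec aiov;
	struct sf_buf *sf;
	int error;

	sf = sf_buf_alloc(m, 0);
	aiov.iov_base = (caddr_t)sf_buf_kva(sf);
	aiov.iov_len = size;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = IDX_TO_OFF(m->pindex);
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = size;
	auio.uio_td = curthread;

	error = VOP_WRITE(vp, &auio, 0, curthread->td_ucred);
	sf_buf_free(sf);
	return (error);
}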
Example #2
/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

	if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
	    VM_PROT_WRITE, &pp, 1) < 0)
		return (NULL);
	vm_page_lock(pp);
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock(pp);
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}
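zbuf_page_free() is called above but not shown. A plausible implementation, assuming it follows the unwire-and-maybe-free pattern that also appears in the error path of Example #9 (socow_setup) below, would be:
/*
 * Sketch of zbuf_page_free() (assumed, not taken from the source):
 * drop the wiring acquired in zbuf_sfbuf_get() and free the page if
 * it has been orphaned from its object in the meantime.
 */
static void
zbuf_page_free(vm_page_t pp)
{

	vm_page_lock(pp);
	vm_page_unwire(pp, 0);
	if (pp->wire_count == 0 && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock(pp);
}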
Example #3
/**
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(vm_page_t m)
{
	char temp[64];
	char *vaddr;
	struct sf_buf *sf;
	int i;

	/* XXXKIB sleep */
	sf = sf_buf_alloc(m, 0);
	vaddr = (char *)sf_buf_kva(sf);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	sf_buf_free(sf);
}
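The swizzle routine shows the bare map/operate/unmap idiom. A hypothetical helper that factors this idiom out (the name with_page_kva and its callback interface are illustrative, not part of the driver) could read:
/*
 * Hypothetical helper: run "fn" on a temporary kernel mapping of
 * page "m".  Flags 0 allow sf_buf_alloc() to sleep, so this must
 * only be used from a sleepable context.
 */
static void
with_page_kva(vm_page_t m, void (*fn)(char *vaddr, void *arg), void *arg)
{
	struct sf_buf *sf;

	sf = sf_buf_alloc(m, 0);
	fn((char *)sf_buf_kva(sf), arg);
	sf_buf_free(sf);
}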
Example #4
static void
i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
    unsigned num_entries, vm_page_t *pages, uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	struct sf_buf *sf;
	unsigned act_pd, first_pte;
	unsigned last_pte, i;
	vm_paddr_t page_addr;

	act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	first_pte = first_entry % I915_PPGTT_PT_ENTRIES;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		sched_pin();
		sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
		pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);

		for (i = first_pte; i < last_pte; i++) {
			page_addr = VM_PAGE_TO_PHYS(*pages);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[i] = pte | pte_flags;

			pages++;
		}

		sf_buf_free(sf);
		sched_unpin();

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}
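The division and modulo at the top of the routine split a linear GTT entry index into a page-directory slot and a PTE slot within that table. As an illustration only, assuming I915_PPGTT_PT_ENTRIES is 1024 (one 4 KB page table holding 1024 32-bit PTEs; the driver header is authoritative):
/*
 * Illustration of the index split used above (hypothetical helper;
 * assumes I915_PPGTT_PT_ENTRIES == 1024).  For first_entry == 2500
 * this yields act_pd == 2 and first_pte == 452.
 */
static void
ppgtt_split_index(unsigned first_entry, unsigned *act_pd, unsigned *first_pte)
{

	*act_pd = first_entry / 1024;
	*first_pte = first_entry % 1024;
}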
Example #5
/* PPGTT support for Sandybridge/Gen6 and later */
static void
i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
    unsigned first_entry, unsigned num_entries)
{
	uint32_t *pt_vaddr;
	uint32_t scratch_pte;
	struct sf_buf *sf;
	unsigned act_pd, first_pte, last_pte, i;

	act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	first_pte = first_entry % I915_PPGTT_PT_ENTRIES;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		sched_pin();
		sf = sf_buf_alloc(ppgtt->pt_pages[act_pd], SFB_CPUPRIVATE);
		pt_vaddr = (uint32_t *)(uintptr_t)sf_buf_kva(sf);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		sf_buf_free(sf);
		sched_unpin();

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}
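Insertion and clearing are complementary: clearing points the range back at the scratch page, which is why scratch_pte carries the same GEN6_PTE_VALID and cache bits a live PTE would. A hedged sketch of how a bind/unbind path might pair the two routines (the wrapper names and the choice of pte_flags are hypothetical):
/* Hypothetical pairing of the two routines above. */
static void
ppgtt_bind_sketch(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
    unsigned num_pages, vm_page_t *pages)
{

	i915_ppgtt_insert_pages(ppgtt, first_entry, num_pages, pages,
	    GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC);
}

static void
ppgtt_unbind_sketch(struct i915_hw_ppgtt *ppgtt, unsigned first_entry,
    unsigned num_pages)
{

	/* Re-point the range at the scratch page. */
	i915_ppgtt_clear_range(ppgtt, first_entry, num_pages);
}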
Example #6
/*
 * Implement uiomove(9) from physical memory using a combination
 * of the direct mapping and sf_bufs to reduce the creation and
 * destruction of ephemeral mappings.  
 */
int
uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
{
	struct sf_buf *sf;
	struct thread *td = curthread;
	struct iovec *iov;
	void *cp;
	vm_offset_t page_offset;
	vm_paddr_t pa;
	vm_page_t m;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove_fromphys: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove_fromphys proc"));
	save = td->td_pflags & TDP_DEADLKTREAT;
	td->td_pflags |= TDP_DEADLKTREAT;
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		page_offset = offset & PAGE_MASK;
		cnt = ulmin(cnt, PAGE_SIZE - page_offset);
		m = ma[offset >> PAGE_SHIFT];
		pa = VM_PAGE_TO_PHYS(m);
		if (m->md.color != DCACHE_COLOR(pa)) {
			sf = sf_buf_alloc(m, 0);
			cp = (char *)sf_buf_kva(sf) + page_offset;
		} else {
			sf = NULL;
			cp = (char *)TLB_PHYS_TO_DIRECT(pa) + page_offset;
		}
		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error) {
				if (sf != NULL)
					sf_buf_free(sf);
				goto out;
			}
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		if (sf != NULL)
			sf_buf_free(sf);
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		offset += cnt;
		n -= cnt;
	}
out:
	if (save == 0)
		td->td_pflags &= ~TDP_DEADLKTREAT;
	return (error);
}
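uiomove_fromphys() expects a fully initialized uio. A hedged example of a kernel-internal caller (the helper name and arguments are hypothetical; the uio setup mirrors Example #1) that pulls bytes out of a page run into a kernel buffer:
/*
 * Hypothetical caller: copy "len" bytes, starting "off" bytes into
 * the page run "pages[]", into the kernel buffer "dst".  With
 * UIO_SYSSPACE and UIO_READ, uiomove_fromphys() copies from the
 * pages into iov_base.
 */
static int
copy_from_pages(vm_page_t pages[], vm_offset_t off, void *dst, int len)
{
	struct iovec aiov;
	struct uio auio;

	aiov.iov_base = dst;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = curthread;

	return (uiomove_fromphys(pages, off, len, &auio));
}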
Example #7
/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	vm_page_bits_t bits;
	int error, i;

	error = 0;
	vp = object->handle;
	if (vp->v_iflag & VI_DOOMED)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = getpbuf(&vnode_pbuf_freecnt);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_vp = vp;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			atomic_add_long(&runningbufspace, bp->b_runningbufspace);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			bp->b_vp = NULL;
			pbrelbo(bp);
			relpbuf(bp, &vnode_pbuf_freecnt);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		VM_OBJECT_WLOCK(object);
		m->valid |= bits;
		VM_OBJECT_WUNLOCK(object);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}
Example #8
/*
 * Implement uiomove(9) from physical memory using a combination
 * of the direct mapping and sf_bufs to reduce the creation and
 * destruction of ephemeral mappings.  
 */
int
uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
{
	struct sf_buf *sf;
	struct thread *td = curthread;
	struct iovec *iov;
	void *cp;
	vm_offset_t page_offset;
	vm_paddr_t pa;
	vm_page_t m;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove_fromphys: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove_fromphys proc"));
	save = td->td_pflags & TDP_DEADLKTREAT;
	td->td_pflags |= TDP_DEADLKTREAT;
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		page_offset = offset & PAGE_MASK;
		cnt = ulmin(cnt, PAGE_SIZE - page_offset);
		m = ma[offset >> PAGE_SHIFT];
		pa = VM_PAGE_TO_PHYS(m);
		if (MIPS_DIRECT_MAPPABLE(pa)) {
			sf = NULL;
			cp = (char *)MIPS_PHYS_TO_DIRECT(pa) + page_offset;
			/*
			 * flush all mappings to this page, KSEG0 address first
			 * in order to get it overwritten by correct data
			 */
			mips_dcache_wbinv_range((vm_offset_t)cp, cnt);
			pmap_flush_pvcache(m);
		} else {
			sf = sf_buf_alloc(m, 0);
			cp = (char *)sf_buf_kva(sf) + page_offset;
		}
		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			maybe_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error) {
				if (sf != NULL)
					sf_buf_free(sf);
				goto out;
			}
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		if (sf != NULL)
			sf_buf_free(sf);
		else
			mips_dcache_wbinv_range((vm_offset_t)cp, cnt);
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		offset += cnt;
		n -= cnt;
	}
out:
	if (save == 0)
		td->td_pflags &= ~TDP_DEADLKTREAT;
	return (error);
}
Example #9
int
socow_setup(struct mbuf *m0, struct uio *uio)
{
	struct sf_buf *sf;
	vm_page_t pp;
	struct iovec *iov;
	struct vmspace *vmspace;
	struct vm_map *map;
	vm_offset_t offset, uva;

	socow_stats.attempted++;
	vmspace = curproc->p_vmspace;
	map = &vmspace->vm_map;
	uva = (vm_offset_t) uio->uio_iov->iov_base;
	offset = uva & PAGE_MASK;

	/*
	 * Verify that access to the given address is allowed from user-space.
	 */
	if (vm_fault_quick((caddr_t)uva, VM_PROT_READ) < 0)
		return (0);

	/*
	 * verify page is mapped & not already wired for i/o
	 */
	pp = pmap_extract_and_hold(map->pmap, uva, VM_PROT_READ);
	if (pp == NULL) {
		socow_stats.fail_not_mapped++;
		return(0);
	}

	/* 
	 * set up COW
	 */
	vm_page_lock(pp);
	if (vm_page_cowsetup(pp) != 0) {
		vm_page_unhold(pp);
		vm_page_unlock(pp);
		return (0);
	}

	/*
	 * wire the page for I/O
	 */
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock(pp);
	/*
	 * Allocate an sf buf
	 */
	sf = sf_buf_alloc(pp, SFB_CATCH);
	if (sf == NULL) {
		vm_page_lock(pp);
		vm_page_cowclear(pp);
		vm_page_unwire(pp, 0);
		/*
		 * Check for the object going away on us. This can
		 * happen since we don't hold a reference to it.
		 * If so, we're responsible for freeing the page.
		 */
		if (pp->wire_count == 0 && pp->object == NULL)
			vm_page_free(pp);
		vm_page_unlock(pp);
		socow_stats.fail_sf_buf++;
		return(0);
	}
	/* 
	 * attach to mbuf
	 */
	MEXTADD(m0, sf_buf_kva(sf), PAGE_SIZE, socow_iodone,
	    (void*)sf_buf_kva(sf), sf, M_RDONLY, EXT_SFBUF);
	m0->m_len = PAGE_SIZE - offset;
	m0->m_data = (caddr_t)sf_buf_kva(sf) + offset;
	socow_stats.success++;

	iov = uio->uio_iov;
	iov->iov_base = (char *)iov->iov_base + m0->m_len;
	iov->iov_len -= m0->m_len;
	uio->uio_resid -= m0->m_len;
	uio->uio_offset += m0->m_len;
	if (iov->iov_len == 0) {
		uio->uio_iov++;
		uio->uio_iovcnt--;
	}

	return(m0->m_len);
}
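socow_iodone() is registered with MEXTADD() above but not shown. Its likely shape can be reconstructed from the error path in socow_setup() (this is an assumption, not the verbatim routine): release the sf_buf, clear the COW marking, unwire the page, and free it if its object has gone away.
/* Assumed shape of the MEXTADD free callback registered above. */
static void
socow_iodone_sketch(void *addr, void *args)
{
	struct sf_buf *sf;
	vm_page_t pp;

	sf = args;
	pp = sf_buf_page(sf);
	sf_buf_free(sf);
	vm_page_lock(pp);
	vm_page_cowclear(pp);
	vm_page_unwire(pp, 0);
	/* Free the page if its object went away (see socow_setup()). */
	if (pp->wire_count == 0 && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock(pp);
}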
Example #10
int
exec_map_first_page(struct image_params *imgp)
{
	int rv, i, after, initial_pagein;
	vm_page_t ma[VM_INITIAL_PAGEIN];
	vm_object_t object;

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	object = imgp->vp->v_object;
	if (object == NULL)
		return (EACCES);
	VM_OBJECT_WLOCK(object);
#if VM_NRESERVLEVEL > 0
	vm_object_color(object, 0);
#endif
	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
	if (ma[0]->valid != VM_PAGE_BITS_ALL) {
		vm_page_xbusy(ma[0]);
		if (!vm_pager_has_page(object, 0, NULL, &after)) {
			vm_page_lock(ma[0]);
			vm_page_free(ma[0]);
			vm_page_unlock(ma[0]);
			VM_OBJECT_WUNLOCK(object);
			return (EIO);
		}
		initial_pagein = min(after, VM_INITIAL_PAGEIN);
		KASSERT(initial_pagein <= object->size,
		    ("%s: initial_pagein %d object->size %ju",
		    __func__, initial_pagein, (uintmax_t)object->size));
		for (i = 1; i < initial_pagein; i++) {
			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
				if (ma[i]->valid)
					break;
				if (!vm_page_tryxbusy(ma[i]))
					break;
			} else {
				ma[i] = vm_page_alloc(object, i,
				    VM_ALLOC_NORMAL);
				if (ma[i] == NULL)
					break;
			}
		}
		initial_pagein = i;
		rv = vm_pager_get_pages(object, ma, initial_pagein, NULL, NULL);
		if (rv != VM_PAGER_OK) {
			for (i = 0; i < initial_pagein; i++) {
				vm_page_lock(ma[i]);
				vm_page_free(ma[i]);
				vm_page_unlock(ma[i]);
			}
			VM_OBJECT_WUNLOCK(object);
			return (EIO);
		}
		vm_page_xunbusy(ma[0]);
		for (i = 1; i < initial_pagein; i++)
			vm_page_readahead_finish(ma[i]);
	}
	vm_page_lock(ma[0]);
	vm_page_hold(ma[0]);
	vm_page_activate(ma[0]);
	vm_page_unlock(ma[0]);
	VM_OBJECT_WUNLOCK(object);

	imgp->firstpage = sf_buf_alloc(ma[0], 0);
	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

	return (0);
}
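exec_map_first_page() leaves the first page held and mapped through imgp->firstpage, so a matching unmap step must exist. A sketch of what it might look like, reconstructed only from the state set up above (the authoritative routine lives in kern_exec.c):
/* Sketch of the unmap path implied by the function above. */
static void
exec_unmap_first_page_sketch(struct image_params *imgp)
{
	vm_page_t m;

	if (imgp->firstpage != NULL) {
		m = sf_buf_page(imgp->firstpage);
		sf_buf_free(imgp->firstpage);
		imgp->firstpage = NULL;
		vm_page_lock(m);
		vm_page_unhold(m);
		vm_page_unlock(m);
	}
}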
Example #11
static int
tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
{
	struct sf_buf	*sf;
	vm_pindex_t	idx;
	vm_page_t	m;
	vm_offset_t	offset;
	off_t		addr;
	size_t		tlen;
	char		*ma;
	int		error;

	addr = uio->uio_offset;
	idx = OFF_TO_IDX(addr);
	offset = addr & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	if ((vobj == NULL) ||
	    (vobj->resident_page_count == 0 && vobj->cache == NULL))
		goto nocache;

	VM_OBJECT_LOCK(vobj);
lookupvpg:
	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
	    vm_page_is_valid(m, offset, tlen)) {
		if ((m->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(m);
			vm_page_sleep(m, "tmfsmr");
			goto lookupvpg;
		}
		vm_page_busy(m);
		VM_OBJECT_UNLOCK(vobj);
		error = uiomove_fromphys(&m, offset, tlen, uio);
		VM_OBJECT_LOCK(vobj);
		vm_page_wakeup(m);
		VM_OBJECT_UNLOCK(vobj);
		return	(error);
	} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
		KASSERT(offset == 0,
		    ("unexpected offset in tmpfs_mappedread for sendfile"));
		if ((m->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(m);
			vm_page_sleep(m, "tmfsmr");
			goto lookupvpg;
		}
		vm_page_busy(m);
		VM_OBJECT_UNLOCK(vobj);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		ma = (char *)sf_buf_kva(sf);
		error = tmpfs_nocacheread_buf(tobj, idx, 0, tlen, ma);
		if (error == 0) {
			if (tlen != PAGE_SIZE)
				bzero(ma + tlen, PAGE_SIZE - tlen);
			uio->uio_offset += tlen;
			uio->uio_resid -= tlen;
		}
		sf_buf_free(sf);
		sched_unpin();
		VM_OBJECT_LOCK(vobj);
		if (error == 0)
			m->valid = VM_PAGE_BITS_ALL;
		vm_page_wakeup(m);
		VM_OBJECT_UNLOCK(vobj);
		return	(error);
	}
	VM_OBJECT_UNLOCK(vobj);
nocache:
	error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);

	return	(error);
}
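The SFB_CPUPRIVATE path in Examples #4, #5, and #11 always brackets the mapping with sched_pin()/sched_unpin(), because a CPU-private sf_buf mapping is only guaranteed valid on the CPU that created it. A condensed, hypothetical form of that idiom:
/*
 * Hypothetical helper showing the CPU-private mapping idiom: the
 * thread stays pinned for as long as the KVA is in use.
 */
static void
zero_page_cpuprivate(vm_page_t m)
{
	struct sf_buf *sf;

	sched_pin();
	sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
	bzero((void *)sf_buf_kva(sf), PAGE_SIZE);
	sf_buf_free(sf);
	sched_unpin();
}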