Example No. 1
0
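/*
 * shm_dotruncate() resizes the VM object backing a POSIX shared memory
 * descriptor.  Shrinking is refused while the object is mapped into the
 * kernel; otherwise the tail of the last partial page is zeroed (paging it
 * in first if it lives only in swap), the now-unused resident pages and
 * swap blocks are discarded, and the swap accounting charge is released.
 * Growing only reserves swap for the new range.  The size, ctime and mtime
 * are then updated under the appropriate locks.
 */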
static int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	vm_object_t object;
	vm_page_t m, ma[1];
	vm_pindex_t idx, nobjsize;
	vm_ooffset_t delta;
	int base, rv;

	object = shmfd->shm_object;
	VM_OBJECT_LOCK(object);
	if (length == shmfd->shm_size) {
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0) {
			VM_OBJECT_UNLOCK(object);
			return (EBUSY);
		}

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			idx = OFF_TO_IDX(length);
retry:
			m = vm_page_lookup(object, idx);
			if (m != NULL) {
				if ((m->oflags & VPO_BUSY) != 0 ||
				    m->busy != 0) {
					vm_page_sleep(m, "shmtrc");
					goto retry;
				}
			} else if (vm_pager_has_page(object, idx, NULL, NULL)) {
				m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
				if (m == NULL) {
					VM_OBJECT_UNLOCK(object);
					VM_WAIT;
					VM_OBJECT_LOCK(object);
					goto retry;
				} else if (m->valid != VM_PAGE_BITS_ALL) {
					ma[0] = m;
					rv = vm_pager_get_pages(object, ma, 1,
					    0);
					m = vm_page_lookup(object, idx);
				} else
					/* A cached page was reactivated. */
					rv = VM_PAGER_OK;
				vm_page_lock(m);
				if (rv == VM_PAGER_OK) {
					vm_page_deactivate(m);
					vm_page_unlock(m);
					vm_page_wakeup(m);
				} else {
					vm_page_free(m);
					vm_page_unlock(m);
					VM_OBJECT_UNLOCK(object);
					return (EIO);
				}
			}
			if (m != NULL) {
				pmap_zero_page_area(m, base, PAGE_SIZE - base);
				KASSERT(m->valid == VM_PAGE_BITS_ALL,
				    ("shm_dotruncate: page %p is invalid", m));
				vm_page_dirty(m);
				vm_pager_page_unswapped(m);
			}
		}
		delta = ptoa(object->size - nobjsize);

		/* Toss in memory pages. */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Toss pages from swap. */
		if (object->type == OBJT_SWAP)
			swap_pager_freespace(object, nobjsize, delta);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
		object->charge -= delta;
	} else {
		/* Attempt to reserve the swap */
		delta = ptoa(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred)) {
			VM_OBJECT_UNLOCK(object);
			return (ENOMEM);
		}
		object->charge += delta;
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	VM_OBJECT_UNLOCK(object);
	return (0);
}
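A quick userland sketch of the path that ends up here: ftruncate(2) on a descriptor returned by shm_open(2) is what drives shm_dotruncate() to grow and then shrink the object. The object name "/demo" and the sizes are illustrative assumptions, not taken from the example.

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	fd = shm_open("/demo", O_RDWR | O_CREAT, 0600);
	if (fd < 0)
		return (1);
	if (ftruncate(fd, 4096) != 0)	/* grow: swap is reserved for the object */
		return (1);
	if (ftruncate(fd, 100) != 0)	/* shrink: the tail of the last page is zeroed */
		return (1);
	close(fd);
	shm_unlink("/demo");
	return (0);
}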
Example No. 2
0
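/*
 * tmpfs_mappedwrite() copies at most one page of a write into the tmpfs
 * node's backing object (tobj).  If the file is also mapped through the
 * vnode's VM object (vobj) and the target page is resident and valid, the
 * user data is copied into that page first; the corresponding page in tobj
 * is then grabbed wired, paged in (or zero-filled) if invalid, and either
 * filled from user space with uiomove_fromphys() or copied from the vnode
 * page with pmap_copy_page(), and finally dirtied.
 */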
static int
tmpfs_mappedwrite(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
{
	vm_pindex_t	idx;
	vm_page_t	vpg, tpg;
	vm_offset_t	offset;
	off_t		addr;
	size_t		tlen;
	int		error, rv;

	error = 0;
	
	addr = uio->uio_offset;
	idx = OFF_TO_IDX(addr);
	offset = addr & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	if ((vobj == NULL) ||
	    (vobj->resident_page_count == 0 && vobj->cache == NULL)) {
		vpg = NULL;
		goto nocache;
	}

	VM_OBJECT_LOCK(vobj);
lookupvpg:
	if (((vpg = vm_page_lookup(vobj, idx)) != NULL) &&
	    vm_page_is_valid(vpg, offset, tlen)) {
		if ((vpg->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(vpg);
			vm_page_sleep(vpg, "tmfsmw");
			goto lookupvpg;
		}
		vm_page_busy(vpg);
		vm_page_undirty(vpg);
		VM_OBJECT_UNLOCK(vobj);
		error = uiomove_fromphys(&vpg, offset, tlen, uio);
	} else {
		if (__predict_false(vobj->cache != NULL))
			vm_page_cache_free(vobj, idx, idx + 1);
		VM_OBJECT_UNLOCK(vobj);
		vpg = NULL;
	}
nocache:
	VM_OBJECT_LOCK(tobj);
	tpg = vm_page_grab(tobj, idx, VM_ALLOC_WIRED |
	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (tpg->valid != VM_PAGE_BITS_ALL) {
		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
			rv = vm_pager_get_pages(tobj, &tpg, 1, 0);
			if (rv != VM_PAGER_OK) {
				vm_page_lock(tpg);
				vm_page_free(tpg);
				vm_page_unlock(tpg);
				error = EIO;
				goto out;
			}
		} else
			vm_page_zero_invalid(tpg, TRUE);
	}
	VM_OBJECT_UNLOCK(tobj);
	if (vpg == NULL)
		error = uiomove_fromphys(&tpg, offset, tlen, uio);
	else {
		KASSERT(vpg->valid == VM_PAGE_BITS_ALL, ("parts of vpg invalid"));
		pmap_copy_page(vpg, tpg);
	}
	VM_OBJECT_LOCK(tobj);
	if (error == 0) {
		KASSERT(tpg->valid == VM_PAGE_BITS_ALL,
		    ("parts of tpg invalid"));
		vm_page_dirty(tpg);
	}
	vm_page_lock(tpg);
	vm_page_unwire(tpg, TRUE);
	vm_page_unlock(tpg);
	vm_page_wakeup(tpg);
out:
	VM_OBJECT_UNLOCK(tobj);
	if (vpg != NULL) {
		VM_OBJECT_LOCK(vobj);
		vm_page_wakeup(vpg);
		VM_OBJECT_UNLOCK(vobj);
	}

	return	(error);
}
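Both tmpfs helpers (this one and tmpfs_mappedread() in Example No. 4) start by splitting the uio offset into a page index, an in-page offset and a per-page transfer length. Below is a minimal, self-contained sketch of that arithmetic, assuming 4 KiB pages; the macro definitions are illustrative stand-ins, not the kernel's own headers.

#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(PAGE_SIZE - 1)
#define OFF_TO_IDX(off)	((uintmax_t)(off) >> PAGE_SHIFT)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	off_t addr = 8300;			/* byte offset of the request */
	size_t len = 10000;			/* requested length */
	uintmax_t idx = OFF_TO_IDX(addr);	/* page index: 2 */
	size_t offset = addr & PAGE_MASK;	/* 108 bytes into that page */
	size_t tlen = MIN(PAGE_SIZE - offset, len); /* 3988: capped at the page end */

	printf("idx=%ju offset=%zu tlen=%zu\n", idx, offset, tlen);
	return (0);
}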
Example No. 3
0
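/*
 * ttm_bo_vm_fault() is the page-fault callback for a TTM buffer object
 * mapped into user space.  It reserves the buffer (yielding while it is
 * contended), waits out any pipelined move, takes the memory-type IO lock
 * and resolves the faulting page: a fictitious page for iomem placements,
 * or a populated ttm->pages[] entry otherwise.  The page's cache attribute
 * is set to match the placement, the page is inserted into the VM object
 * and returned busied in *mres; the previous *mres page, if any, is freed.
 */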
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_remove(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_WUNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			lwkt_yield();
			goto reserve;
		}
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
		case -EINTR:
			lwkt_yield();
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninterruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		lockmgr(&bdev->fence_lock, LK_RELEASE);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		lockmgr(&bdev->fence_lock, LK_RELEASE);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all pages at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_WLOCK(vm_obj);
	if ((m->flags & PG_BUSY) != 0) {
#if 0
		vm_page_sleep(m, "ttmpbs");
#endif
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	vm_page_busy_try(m, FALSE);

	if (oldm != NULL) {
		vm_page_free(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_WLOCK(vm_obj);
	goto out_unlock1;
}
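For context, a handler with this signature is normally installed as the cdev_pg_fault callback of a managed-device pager when the buffer object is mmap'ed. The sketch below shows that wiring in the usual FreeBSD-derived form; the ops structure name and the commented allocation call are assumptions about the surrounding file, not taken from the example, and the ctor/dtor callbacks are omitted.

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,	/* the handler shown above */
};

/*
 * In the driver's mmap path, an OBJT_MGTDEVICE object routes page
 * faults to that callback (size and prot come from the mapping):
 *
 *	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
 *	    size, prot, 0, curthread->td_ucred);
 */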
Example No. 4
0
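/*
 * tmpfs_mappedread() reads at most one page at the uio offset.  If the
 * page is resident and valid in the vnode's VM object it is copied out
 * directly with uiomove_fromphys().  For sendfile(2) (UIO_NOCOPY) the
 * resident vnode page is instead filled from the backing tmpfs object
 * through a CPU-private sf_buf mapping and marked fully valid.  In all
 * other cases the read falls back to tmpfs_nocacheread() on tobj.
 */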
static int
tmpfs_mappedread(vm_object_t vobj, vm_object_t tobj, size_t len, struct uio *uio)
{
	struct sf_buf	*sf;
	vm_pindex_t	idx;
	vm_page_t	m;
	vm_offset_t	offset;
	off_t		addr;
	size_t		tlen;
	char		*ma;
	int		error;

	addr = uio->uio_offset;
	idx = OFF_TO_IDX(addr);
	offset = addr & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	if ((vobj == NULL) ||
	    (vobj->resident_page_count == 0 && vobj->cache == NULL))
		goto nocache;

	VM_OBJECT_LOCK(vobj);
lookupvpg:
	if (((m = vm_page_lookup(vobj, idx)) != NULL) &&
	    vm_page_is_valid(m, offset, tlen)) {
		if ((m->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(m);
			vm_page_sleep(m, "tmfsmr");
			goto lookupvpg;
		}
		vm_page_busy(m);
		VM_OBJECT_UNLOCK(vobj);
		error = uiomove_fromphys(&m, offset, tlen, uio);
		VM_OBJECT_LOCK(vobj);
		vm_page_wakeup(m);
		VM_OBJECT_UNLOCK(vobj);
		return	(error);
	} else if (m != NULL && uio->uio_segflg == UIO_NOCOPY) {
		KASSERT(offset == 0,
		    ("unexpected offset in tmpfs_mappedread for sendfile"));
		if ((m->oflags & VPO_BUSY) != 0) {
			/*
			 * Reference the page before unlocking and sleeping so
			 * that the page daemon is less likely to reclaim it.  
			 */
			vm_page_reference(m);
			vm_page_sleep(m, "tmfsmr");
			goto lookupvpg;
		}
		vm_page_busy(m);
		VM_OBJECT_UNLOCK(vobj);
		sched_pin();
		sf = sf_buf_alloc(m, SFB_CPUPRIVATE);
		ma = (char *)sf_buf_kva(sf);
		error = tmpfs_nocacheread_buf(tobj, idx, 0, tlen, ma);
		if (error == 0) {
			if (tlen != PAGE_SIZE)
				bzero(ma + tlen, PAGE_SIZE - tlen);
			uio->uio_offset += tlen;
			uio->uio_resid -= tlen;
		}
		sf_buf_free(sf);
		sched_unpin();
		VM_OBJECT_LOCK(vobj);
		if (error == 0)
			m->valid = VM_PAGE_BITS_ALL;
		vm_page_wakeup(m);
		VM_OBJECT_UNLOCK(vobj);
		return	(error);
	}
	VM_OBJECT_UNLOCK(vobj);
nocache:
	error = tmpfs_nocacheread(tobj, idx, offset, tlen, uio);

	return	(error);
}
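A userland sketch of the request that exercises the UIO_NOCOPY branch above: FreeBSD's sendfile(2) on a tmpfs-backed file asks the kernel to fill the vnode's own pages rather than copy through a user buffer, which is how tmpfs_mappedread() ends up being called with uio_segflg == UIO_NOCOPY. The socket is assumed to be already connected and the function name is illustrative.

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <fcntl.h>
#include <unistd.h>

/* Send len bytes of a (tmpfs-backed) file over an already-connected socket. */
int
send_tmpfs_file(int sock, const char *path, size_t len)
{
	off_t sbytes = 0;
	int fd, error;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return (-1);
	/* Zero-copy path: the kernel pulls the data through the vnode's VM pages. */
	error = sendfile(fd, sock, 0, len, NULL, &sbytes, 0);
	close(fd);
	return (error);
}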