Example #1
/*
 * p->p_token is held on entry.
 */
static int
procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio)
{
	int error;
	int writing;
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	vm_offset_t kva;

	/*
	 * If the vmspace is in the midst of being allocated or
	 * deallocated, or the process is exiting, don't try to grab
	 * anything.  The page tables in that process may be in an
	 * inconsistent state.
	 */
	vm = p->p_vmspace;
	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return EFAULT;
	if ((p->p_flags & (P_WEXIT | P_INEXEC)) ||
	    sysref_isinactive(&vm->vm_sysref))
		return EFAULT;

	/*
	 * The map we want.  Hold the vmspace so it cannot be torn
	 * down underneath us while we fault pages from it.
	 */
	vmspace_hold(vm);
	map = &vm->vm_map;

	/*
	 * Writes also request VM_PROT_OVERRIDE_WRITE, which allows the
	 * fault to succeed even on pages the target has mapped
	 * read-only, e.g. a debugger writing a breakpoint into a
	 * text page.
	 */
	writing = (uio->uio_rw == UIO_WRITE);
	reqprot = VM_PROT_READ;
	if (writing)
		reqprot |= VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE;

	kva = kmem_alloc_pageable(&kernel_map, PAGE_SIZE);

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * keeps the loop trivial.
	 */
	do {
		vm_offset_t uva;
		vm_offset_t page_offset;	/* offset into page */
		size_t len;
		vm_page_t m;

		uva = (vm_offset_t) uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = szmin(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process.  On success
		 * the returned page is held for us.
		 */
		m = vm_fault_page(map, pageno, reqprot,
				  VM_FAULT_NORMAL, &error);
		if (error) {
			KKASSERT(m == NULL);
			error = EFAULT;
			break;
		}

		/*
		 * Create a temporary KVA mapping for the page and do
		 * the I/O, then remove the mapping.  The _quick pmap
		 * calls do not bother synchronizing the mapping across
		 * all cores.
		 */
		pmap_kenter_quick(kva, VM_PAGE_TO_PHYS(m));
		error = uiomove((caddr_t)(kva + page_offset), len, uio);
		pmap_kremove_quick(kva);

		/*
		 * Release our hold on the page and we are done with it.
		 */
		vm_page_unhold(m);
	} while (error == 0 && uio->uio_resid > 0);

	vmspace_drop(vm);
	kmem_free(&kernel_map, kva, PAGE_SIZE);

	return (error);
}
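
The per-page chunking in the loop above is the reusable idea: trunc_page() yields the page base, the remainder is the offset into that page, and the copy length is clamped to both the page boundary and the bytes remaining. Below is a minimal userland sketch of the same arithmetic; PAGE_SIZE, trunc_page() and szmin() here are local stand-ins for the kernel definitions, not the real headers.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE	4096UL
#define trunc_page(x)	((x) & ~(PAGE_SIZE - 1))

/* stand-in for the kernel's szmin() */
static size_t
szmin(size_t a, size_t b)
{
	return (a < b) ? a : b;
}

int
main(void)
{
	unsigned long uva = 0x12345;	/* unaligned start address */
	size_t resid = 10000;		/* bytes left to transfer */

	while (resid > 0) {
		unsigned long pageno = trunc_page(uva);
		unsigned long page_offset = uva - pageno;
		size_t len = szmin(PAGE_SIZE - page_offset, resid);

		printf("page %#lx offset %#lx len %zu\n",
		       pageno, page_offset, len);
		uva += len;
		resid -= len;
	}
	return 0;
}

The first chunk only runs up to the next page boundary; every later chunk is a full page except possibly the last, which is exactly how the do/while in procfs_rwmem() advances through the buffer via uiomove().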
Example #2
/*
 * The process only has its hold count bumped; we need its token
 * to safely scan the LWPs.
 */
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	struct vmspace *vm;
	struct lwp *lp;
	segsz_t pgs;
	int pri;

	/*
	 * We only care about processes in swap-wait.  The first test is
	 * done without the token; if the flag is found set, retest it
	 * with the token held to interlock against it changing.
	 */
	if ((p->p_flags & P_SWAPWAIT) == 0)
		return 0;
	lwkt_gettoken_shared(&p->p_token);
	if ((p->p_flags & P_SWAPWAIT) == 0) {
		lwkt_reltoken(&p->p_token);
		return 0;
	}

	/*
	 * Calculate priority for swap-in
	 */
	pri = 0;
	FOREACH_LWP_IN_PROC(lp, p) {
		/* XXX lwp might need a different metric */
		pri += lp->lwp_slptime;
	}
	pri += p->p_swtime - p->p_nice * 8;

	/*
	 * The more pages that were paged out while we were swapped,
	 * the more work we have to do to get up and running again and
	 * the lower our wakeup priority.
	 *
	 * Each second of sleep time offsets roughly 1MB of paged-out
	 * data.
	 */
	if ((vm = p->p_vmspace) != NULL) {
		vmspace_hold(vm);
		pgs = vmspace_resident_count(vm);
		if (pgs < vm->vm_swrss) {
			pri -= (vm->vm_swrss - pgs) /
			       (1024 * 1024 / PAGE_SIZE);
		}
		vmspace_drop(vm);
	}
	lwkt_reltoken(&p->p_token);

	/*
	 * If this process has a higher priority than the previous
	 * selection, replace the previous selection with this one.
	 */
	if (pri > info->ppri) {
		if (info->pp)
			PRELE(info->pp);
		PHOLD(p);
		info->pp = p;
		info->ppri = pri;
	}
	return 0;
}
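
Stripped of the process bookkeeping, the wakeup priority above is plain arithmetic: the summed LWP sleep time plus swapped-out time, minus a nice penalty, minus one point per megabyte that is still paged out beyond what is currently resident. Here is a standalone sketch of just that math, assuming a 4KB page size; swapin_pri() and all of the numbers are illustrative, not kernel API.

#include <stdio.h>

#define PAGE_SIZE	4096L

static int
swapin_pri(int slptime_sum, int swtime, int nice,
	   long resident_pgs, long swrss_pgs)
{
	int pri = slptime_sum + swtime - nice * 8;

	/* each MB still paged out costs one point of priority */
	if (resident_pgs < swrss_pgs)
		pri -= (swrss_pgs - resident_pgs) / (1024 * 1024 / PAGE_SIZE);
	return pri;
}

int
main(void)
{
	/* 5s of LWP sleep, swapped out 20s, nice 0, 8MB of 24MB resident */
	long resident = 8L * 1024 * 1024 / PAGE_SIZE;
	long swrss = 24L * 1024 * 1024 / PAGE_SIZE;

	printf("pri = %d\n", swapin_pri(5, 20, 0, resident, swrss));
	return 0;
}

With 16MB still paged out the penalty is 16, so pri = 5 + 20 - 16 = 9; sixteen more seconds of sleep time would cancel the penalty entirely, which is the "each second of sleep time is worth ~1MB" rule of thumb from the comment.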