Example #1
0
/*
 * Mark process p as swapped out.
 *
 * Bumps the ru_nswap accounting counter, snapshots the current resident
 * page count into vm_swrss (read later by the swap-in scheduler to
 * weight wakeup priority), sets P_SWAPPEDOUT and restarts the
 * swapped-out timer (p_swtime).
 *
 * The caller must hold p->p_token
 */
static void
swapout(struct proc *p)
{
#ifdef INVARIANTS
	/* debug trace only in INVARIANTS kernels */
	if (swap_debug)
		kprintf("swapping out %d (%s)\n", p->p_pid, p->p_comm);
#endif
	++p->p_ru.ru_nswap;

	/*
	 * Remember the process resident count at swap-out time so the
	 * swap-in path can estimate how many pages were paged out while
	 * the process was swapped.
	 */
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	p->p_flags |= P_SWAPPEDOUT;
	p->p_swtime = 0;
}
Example #2
0
/*
 * allproc-scan callback: pick a swap-in candidate.
 *
 * For each process parked in P_SWAPWAIT, compute a wakeup priority from
 * the LWP sleep times, time spent swapped out, the nice value, and the
 * number of pages paged out while swapped, then remember the
 * highest-priority candidate (with a hold ref) in *info.
 *
 * NOTE(review): p->p_flags and the LWP list are examined here without
 * taking p->p_token -- confirm the caller provides the necessary
 * interlock (compare with the tokenized variant of this function).
 */
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	struct lwp *lp;
	segsz_t pgs;
	int pri;

	if (p->p_flags & P_SWAPWAIT) {
		/* base priority: accumulated sleep time of all LWPs... */
		pri = 0;
		FOREACH_LWP_IN_PROC(lp, p) {
			/* XXX lwp might need a different metric */
			pri += lp->lwp_slptime;
		}
		/* ...plus swapped-out time, biased against nice processes */
		pri += p->p_swtime - p->p_nice * 8;

		/*
		 * The more pages paged out while we were swapped,
		 * the more work we have to do to get up and running
		 * again and the lower our wakeup priority.
		 *
		 * Each second of sleep time is worth ~1MB
		 */
		lwkt_gettoken(&p->p_vmspace->vm_map.token);
		pgs = vmspace_resident_count(p->p_vmspace);
		if (pgs < p->p_vmspace->vm_swrss) {
			pri -= (p->p_vmspace->vm_swrss - pgs) /
				(1024 * 1024 / PAGE_SIZE);
		}
		lwkt_reltoken(&p->p_vmspace->vm_map.token);

		/*
		 * If this process is higher priority and there is
		 * enough space, then select this process instead of
		 * the previous selection.
		 */
		if (pri > info->ppri) {
			/* the hold ref moves to the new selection */
			if (info->pp)
				PRELE(info->pp);
			PHOLD(p);
			info->pp = p;
			info->ppri = pri;
		}
	}
Example #3
0
/*
 * Original vm_pageout_oom, will be called if LRU pageout_oom fails.
 *
 * Scans all processes and selects the largest eligible one ("bigproc")
 * -- presumably the OOM kill victim; the action taken on bigproc lies
 * beyond this excerpt, so confirm against the full function.  A
 * process's size is its swap usage, plus its resident page count when
 * the shortage is VM_OOM_MEM.
 */
static void
original_vm_pageout_oom(int shortage)
{
	struct proc *p, *bigproc;
	vm_offset_t size, bigsize;
	struct thread *td;
	struct vmspace *vm;

	/*
	 * We keep the process bigproc locked once we find it to keep anyone
	 * from messing with it; however, there is a possibility of
	 * deadlock if process B is bigproc and one of its child processes
	 * attempts to propagate a signal to B while we are waiting for A's
	 * lock while walking this list.  To avoid this, we don't block on
	 * the process lock but just skip a process if it is already locked.
	 */
	bigproc = NULL;
	bigsize = 0;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		int breakout;

		/* don't block on the proc lock -- see deadlock note above */
		if (PROC_TRYLOCK(p) == 0)
			continue;
		/*
		 * If this is a system, protected or killed process, skip it.
		 * Low-pid processes (< 48) are also spared while swap space
		 * remains available.
		 */
		if (p->p_state != PRS_NORMAL ||
		    (p->p_flag & (P_INEXEC | P_PROTECTED | P_SYSTEM)) ||
		    (p->p_pid == 1) || P_KILLED(p) ||
		    ((p->p_pid < 48) && (swap_pager_avail != 0))) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * If the process is in a non-running type state,
		 * don't touch it.  Check all the threads individually.
		 */
		breakout = 0;
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (!TD_ON_RUNQ(td) &&
			    !TD_IS_RUNNING(td) &&
			    !TD_IS_SLEEPING(td)) {
				thread_unlock(td);
				breakout = 1;
				break;
			}
			thread_unlock(td);
		}
		if (breakout) {
			PROC_UNLOCK(p);
			continue;
		}
		/*
		 * get the process size; skip the process if the vmspace
		 * is already being torn down or the map is contended
		 */
		vm = vmspace_acquire_ref(p);
		if (vm == NULL) {
			PROC_UNLOCK(p);
			continue;
		}
		if (!vm_map_trylock_read(&vm->vm_map)) {
			vmspace_free(vm);
			PROC_UNLOCK(p);
			continue;
		}
		size = vmspace_swap_count(vm);
		vm_map_unlock_read(&vm->vm_map);
		/* for a pure memory shortage, resident pages count too */
		if (shortage == VM_OOM_MEM)
			size += vmspace_resident_count(vm);
		vmspace_free(vm);
		/*
		 * if this process is bigger than the biggest one,
		 * remember it (keeping it locked; unlock the loser).
		 */
		if (size > bigsize) {
			if (bigproc != NULL)
				PROC_UNLOCK(bigproc);
			bigproc = p;
			bigsize = size;
		} else
			PROC_UNLOCK(p);
	}
Example #4
0
/*
 * allproc-scan callback: pick a swap-in candidate.
 *
 * The process only has its hold count bumped; take the per-process
 * token (shared) so the LWP list can be walked safely.
 *
 * Computes a wakeup weight for any process sitting in P_SWAPWAIT and
 * records the best candidate seen so far, with a hold ref, in the
 * scheduler_info structure.  Always returns 0 so the scan continues.
 */
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *si = data;
	struct vmspace *vms;
	struct lwp *lwp;
	segsz_t resident;
	int weight;

	/*
	 * Only swap-wait processes are of interest.  Test once without
	 * the token, then retest under the shared token to interlock
	 * against the flag changing.
	 */
	if ((p->p_flags & P_SWAPWAIT) == 0)
		return 0;
	lwkt_gettoken_shared(&p->p_token);
	if ((p->p_flags & P_SWAPWAIT) == 0) {
		lwkt_reltoken(&p->p_token);
		return 0;
	}

	/*
	 * Base weight: time spent swapped out, biased against nice
	 * processes, plus the accumulated sleep time of every LWP.
	 */
	weight = p->p_swtime - p->p_nice * 8;
	FOREACH_LWP_IN_PROC(lwp, p) {
		/* XXX lwp might need a different metric */
		weight += lwp->lwp_slptime;
	}

	/*
	 * The more pages paged out while we were swapped, the more work
	 * we have to do to get up and running again and the lower our
	 * wakeup weight.
	 *
	 * Each second of sleep time is worth ~1MB
	 */
	vms = p->p_vmspace;
	if (vms != NULL) {
		vmspace_hold(vms);
		resident = vmspace_resident_count(vms);
		if (resident < vms->vm_swrss) {
			weight -= (vms->vm_swrss - resident) /
				  (1024 * 1024 / PAGE_SIZE);
		}
		vmspace_drop(vms);
	}
	lwkt_reltoken(&p->p_token);

	/*
	 * Replace the previous selection if this candidate weighs more,
	 * transferring the hold ref to the new choice.
	 */
	if (weight > si->ppri) {
		if (si->pp)
			PRELE(si->pp);
		PHOLD(p);
		si->pp = p;
		si->ppri = weight;
	}
	return 0;
}
Example #5
0
/*
 * Fill in a struct kinfo_proc.
 *
 * NOTE!  We may be asked to fill in kinfo_proc for a zombied process, and
 * the process may be in the middle of being deallocated.  Check all pointers
 * for NULL.
 *
 * Caller must hold p->p_token
 */
void
fill_kinfo_proc(struct proc *p, struct kinfo_proc *kp)
{
	struct session *sess;
	struct pgrp *pgrp;
	struct vmspace *vm;

	pgrp = p->p_pgrp;
	sess = pgrp ? pgrp->pg_session : NULL;

	bzero(kp, sizeof(*kp));

	kp->kp_paddr = (uintptr_t)p;
	kp->kp_fd = (uintptr_t)p->p_fd;

	kp->kp_flags = p->p_flags;
	kp->kp_stat = p->p_stat;
	kp->kp_lock = p->p_lock;
	kp->kp_acflag = p->p_acflag;
	kp->kp_traceflag = p->p_traceflag;
	kp->kp_siglist = p->p_siglist;
	if (p->p_sigacts) {
		kp->kp_sigignore = p->p_sigignore;	/* p_sigacts-> */
		kp->kp_sigcatch = p->p_sigcatch;	/* p_sigacts-> */
		kp->kp_sigflag = p->p_sigacts->ps_flag;
	}
	kp->kp_start = p->p_start;

	strncpy(kp->kp_comm, p->p_comm, sizeof(kp->kp_comm) - 1);
	kp->kp_comm[sizeof(kp->kp_comm) - 1] = 0;

	if (p->p_ucred) {
		kp->kp_uid = p->p_ucred->cr_uid;
		kp->kp_ngroups = p->p_ucred->cr_ngroups;
		if (p->p_ucred->cr_groups) {
			bcopy(p->p_ucred->cr_groups, kp->kp_groups,
			      NGROUPS * sizeof(kp->kp_groups[0]));
		}
		kp->kp_ruid = p->p_ucred->cr_ruid;
		kp->kp_svuid = p->p_ucred->cr_svuid;
		kp->kp_rgid = p->p_ucred->cr_rgid;
		kp->kp_svgid = p->p_ucred->cr_svgid;
	}

	kp->kp_pid = p->p_pid;
	if (p->p_oppid != 0)
		kp->kp_ppid = p->p_oppid;
	else
		kp->kp_ppid = p->p_pptr != NULL ? p->p_pptr->p_pid : -1;
	if (pgrp) {
		kp->kp_pgid = pgrp->pg_id;
		kp->kp_jobc = pgrp->pg_jobc;
	}
	if (sess) {
		kp->kp_sid = sess->s_sid;
		bcopy(sess->s_login, kp->kp_login, MAXLOGNAME);
		if (sess->s_ttyvp != NULL)
			kp->kp_auxflags |= KI_CTTY;
		if ((p->p_session != NULL) && SESS_LEADER(p))
			kp->kp_auxflags |= KI_SLEADER;
	}
	if (sess && (p->p_flags & P_CONTROLT) != 0 && sess->s_ttyp != NULL) {
		kp->kp_tdev = dev2udev(sess->s_ttyp->t_dev);
		if (sess->s_ttyp->t_pgrp != NULL)
			kp->kp_tpgid = sess->s_ttyp->t_pgrp->pg_id;
		else
			kp->kp_tpgid = -1;
		if (sess->s_ttyp->t_session != NULL)
			kp->kp_tsid = sess->s_ttyp->t_session->s_sid;
		else
			kp->kp_tsid = -1;
	} else {
		kp->kp_tdev = NOUDEV;
	}
	kp->kp_exitstat = p->p_xstat;
	kp->kp_nthreads = p->p_nthreads;
	kp->kp_nice = p->p_nice;
	kp->kp_swtime = p->p_swtime;

	if ((vm = p->p_vmspace) != NULL) {
#ifdef _KERNEL
		/*sysref_get(&vm->vm_sysref);*/
		/*lwkt_gettoken(&vm->vm_map.token);*/
#endif
		kp->kp_vm_map_size = vm->vm_map.size;
		kp->kp_vm_rssize = vmspace_resident_count(vm);
#ifdef _KERNEL
		/*XXX MP RACES */
		/*kp->kp_vm_prssize = vmspace_president_count(vm);*/
#endif
		kp->kp_vm_swrss = vm->vm_swrss;
		kp->kp_vm_tsize = vm->vm_tsize;
		kp->kp_vm_dsize = vm->vm_dsize;
		kp->kp_vm_ssize = vm->vm_ssize;
#ifdef _KERNEL
		/*lwkt_reltoken(&vm->vm_map.token);*/
		/*sysref_put(&vm->vm_sysref);*/
#endif
	}

	if (p->p_ucred && jailed(p->p_ucred))
		kp->kp_jailid = p->p_ucred->cr_prison->pr_id;

	kp->kp_ru = p->p_ru;
	kp->kp_cru = p->p_cru;
}