Example no. 1
	/* Fragment: tail of pick_outtask(); the full function appears in Example no. 4. */
	/* only swap out if there are at least min_active_tasks */
	if (nactive < min_active_tasks) {
		if (target_task != TASK_NULL) {
			task_deallocate(target_task);
			target_task = TASK_NULL;
		}
	}
	return(target_task);
}

#if	TASK_SW_DEBUG
void print_pid(task_t task, unsigned long n1, unsigned long n2,
	       const char *comp, const char *inout);	/* forward */
void
print_pid(
	task_t task,
	unsigned long n1,
	unsigned long n2,
	const char *comp,
	const char *inout)
{
	long rescount;
	task_lock(task);
	rescount = pmap_resident_count(task->map->pmap);
	task_unlock(task);
	printf("task_swapper: swapped %s task %x; %d %s %d; res=%d\n",
		inout, task, n1, comp, n2, rescount);
}
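A hypothetical call site for this debug helper (not taken from the original source), reusing the rescount/target_rss comparison that pick_outtask() performs in Example no. 4; the placement and arguments are illustrative only.

#if	TASK_SW_DEBUG
			/*
			 * Hypothetical call site: report a candidate task
			 * whose resident size beat the best seen so far.
			 */
			if (task_swap_debug)
				print_pid(task, rescount, target_rss, ">", "out");
#endif	/* TASK_SW_DEBUG */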
Example no. 2
/*
 * Update statistics after fault resolution.
 * - maxrss
 */
void
uvmfault_update_stats(struct uvm_faultinfo *ufi)
{
	struct vm_map		*map;
	struct proc		*p;
	vsize_t			 res;
#ifndef pmap_resident_count
	struct vmspace		*vm;
#endif

	map = ufi->orig_map;

	/*
	 * Update the maxrss for the process.
	 */
	if (map->flags & VM_MAP_ISVMSPACE) {
		p = curproc;
		KASSERT(p != NULL && &p->p_vmspace->vm_map == map);

#ifdef pmap_resident_count
		res = pmap_resident_count(map->pmap);
#else
		/*
		 * Rather inaccurate, but this is the current anon size
		 * of the vmspace.  It's basically the resident size
		 * minus the mmapped in files/text.
		 */
		vm = (struct vmspace*)map;
		res = vm->dsize;
#endif

		/* Convert res from pages to kilobytes. */
		res <<= (PAGE_SHIFT - 10);

		if (p->p_ru.ru_maxrss < res)
			p->p_ru.ru_maxrss = res;
	}
}
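The shift by (PAGE_SHIFT - 10) converts a page count to kilobytes. A minimal standalone sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12); the values are illustrative:

#include <assert.h>

int
main(void)
{
	unsigned long pages = 300;		/* resident page count */
	unsigned long kb = pages << (12 - 10);	/* same as pages * 4096 / 1024 */

	assert(kb == pages * 4096 / 1024);	/* 300 pages == 1200 KB */
	return 0;
}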
Example no. 3
/*
 * Return an array of virtual pages that are mapped to a task.
 */
kern_return_t
vm32_mapped_pages_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY page_address_array_t	*pages,
	__DEBUG_ONLY mach_msg_type_number_t	*pages_count)
{
#if !MACH_VM_DEBUG
        return KERN_FAILURE;
#else
	pmap_t		pmap;
	vm_size_t	size, size_used;
	unsigned int	actual, space;
	page_address_array_t list;
	vm_offset_t	addr = 0;

	if (map == VM_MAP_NULL)
	    return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = vm_map_round_page(size,
				 VM_MAP_PAGE_MASK(ipc_kernel_map));

	for (;;) {
	    (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
	    (void) vm_map_unwire(
		    ipc_kernel_map,
		    vm_map_trunc_page(addr,
				      VM_MAP_PAGE_MASK(ipc_kernel_map)),
		    vm_map_round_page(addr + size,
				      VM_MAP_PAGE_MASK(ipc_kernel_map)),
		    FALSE);

	    list = (page_address_array_t) addr;
	    space = (unsigned int) (size / sizeof(vm_offset_t));

	    actual = pmap_list_resident_pages(pmap,
					list,
					space);
	    if (actual <= space)
		break;

	    /*
	     * Free memory if not enough
	     */
	    (void) kmem_free(ipc_kernel_map, addr, size);

	    /*
	     * Try again with a buffer sized for the reported count
	     */
	    size = vm_map_round_page(actual * sizeof(vm_offset_t),
				     VM_MAP_PAGE_MASK(ipc_kernel_map));
	}
	if (actual == 0) {
	    *pages = 0;
	    *pages_count = 0;
	    (void) kmem_free(ipc_kernel_map, addr, size);
	}
	else {
	    vm_size_t vmsize_used;
	    *pages_count = actual;
	    size_used = (actual * sizeof(vm_offset_t));
	    vmsize_used = vm_map_round_page(size_used,
					    VM_MAP_PAGE_MASK(ipc_kernel_map));
	    (void) vm_map_wire(
		    ipc_kernel_map,
		    vm_map_trunc_page(addr,
				      VM_MAP_PAGE_MASK(ipc_kernel_map)),
		    vm_map_round_page(addr + size,
				      VM_MAP_PAGE_MASK(ipc_kernel_map)), 
		    VM_PROT_READ|VM_PROT_WRITE,
		    FALSE);
	    (void) vm_map_copyin(ipc_kernel_map,
				(vm_map_address_t)addr,
				(vm_map_size_t)size_used,
				TRUE,
				(vm_map_copy_t *)pages);
	    if (vmsize_used != size) {
		(void) kmem_free(ipc_kernel_map,
				addr + vmsize_used,
				size - vmsize_used);
	    }
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}
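The loop above sizes its buffer from pmap_resident_count(), asks pmap_list_resident_pages() to fill it, and retries with a larger buffer whenever the reported count exceeds the space it provided. A minimal user-space sketch of the same size/fill/retry pattern; fill_entries() and TOTAL_ENTRIES are hypothetical stand-ins, not part of the original interface:

#include <stdio.h>
#include <stdlib.h>

#define TOTAL_ENTRIES	100u	/* stand-in for the pmap's resident count */

/*
 * Hypothetical producer (analogue of pmap_list_resident_pages()): writes up
 * to 'space' entries into 'list' and returns how many entries exist in all.
 */
static unsigned int
fill_entries(unsigned long *list, unsigned int space)
{
	unsigned int i;

	for (i = 0; i < TOTAL_ENTRIES && i < space; i++)
		list[i] = i;
	return TOTAL_ENTRIES;
}

int
main(void)
{
	unsigned int space = 64, actual;
	unsigned long *list;

	for (;;) {
		list = malloc(space * sizeof(*list));
		if (list == NULL)
			return 1;
		actual = fill_entries(list, space);
		if (actual <= space)
			break;
		/* Buffer too small: free it and retry with the reported size. */
		free(list);
		space = actual;
	}
	printf("collected %u entries\n", actual);
	free(list);
	return 0;
}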
Example no. 4
task_t
pick_outtask(void)
{
	register task_t		task;
	register task_t		target_task = TASK_NULL;
	unsigned long		task_rss;
	unsigned long		target_rss = 0;
	boolean_t		wired;
	boolean_t		active;
	int			nactive = 0;

	task_swapout_lock();
	if (queue_empty(&eligible_tasks)) {
		/* not likely to happen */
		task_swapout_unlock();
		return(TASK_NULL);
	}
	task = (task_t)queue_first(&eligible_tasks);
	while (!queue_end(&eligible_tasks, (queue_entry_t)task)) {
		int s;
		register thread_act_t thr_act;
		thread_t th;
		

		task_lock(task);
#if	MACH_RT
		/*
		 * Don't swap real-time tasks.
		 * XXX Should we enforce that or can we let really critical
		 * tasks use task_swappable() to make sure they never end up
		 * on the eligible list?
		 */
		if (task->policy & POLICYCLASS_FIXEDPRI) {
			goto tryagain;
		}
#endif	/* MACH_RT */
		if (!task->active) {
			TASK_STATS_INCR(inactive_task_count);
			goto tryagain;
		}
		if (task->res_act_count == 0) {
			TASK_STATS_INCR(empty_task_count);
			goto tryagain;
		}
		assert(!queue_empty(&task->thr_acts));
		thr_act = (thread_act_t)queue_first(&task->thr_acts);
		active = FALSE;
		th = act_lock_thread(thr_act);
		s = splsched();
		if (th != THREAD_NULL)
			thread_lock(th);
		if ((th == THREAD_NULL) ||
		    (th->state == TH_RUN) ||
		    (th->state & TH_WAIT)) {
		if ((th == THREAD_NULL) ||
		    (th->state == TH_RUN) ||
		    (th->state & TH_WAIT)) {
			/*
			 * thread is "active": either runnable
			 * or sleeping.  Count it and examine
			 * it further below.
			 */
			nactive++;
			active = TRUE;
		}
		if (th != THREAD_NULL)
			thread_unlock(th);
		splx(s);
		act_unlock_thread(thr_act);
		if (active &&
		    (task->swap_state == TASK_SW_IN) &&
		    ((sched_tick - task->swap_stamp) > min_res_time)) {
			long rescount = pmap_resident_count(task->map->pmap);
			/*
			 * thread must be "active", task must be swapped
			 * in and resident for at least min_res_time
			 */
#if 0
/* DEBUG Test round-robin strategy.  Picking biggest task could cause extreme
 * unfairness to such large interactive programs as xterm.  Instead, pick the
 * first task that has any pages resident:
 */
			if (rescount > 1) {
				task->ref_count++;
				target_task = task;
				task_unlock(task);
				task_swapout_unlock();
				return(target_task);
			}
#else
			if (rescount > target_rss) {
				/*
				 * task is not swapped, and it has the
				 * largest rss seen so far.
				 */
				task->ref_count++;
				target_rss = rescount;
				assert(target_task != task);
				if (target_task != TASK_NULL)
					task_deallocate(target_task);
				target_task = task;
			}
#endif
		}
tryagain:
		task_unlock(task);
		task = (task_t)queue_next(&task->swapped_tasks);
	}
	task_swapout_unlock();
	/* only swap out if there are at least min_active_tasks */
	if (nactive < min_active_tasks) {
		if (target_task != TASK_NULL) {
			task_deallocate(target_task);
			target_task = TASK_NULL;
		}
	}
	return(target_task);
}
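A small sketch (my own naming, not from the original source) of the reference-counting idiom pick_outtask() relies on: take a reference on the new best candidate while it is locked, and drop the reference held on the candidate it replaces, so the winner remains valid after the list locks are released.

#include <stddef.h>
#include <stdio.h>

struct item {
	int		ref_count;
	unsigned long	score;		/* analogue of the resident page count */
};

static void
consider(struct item *it, struct item **bestp, unsigned long *best_score)
{
	if (it->score > *best_score) {
		it->ref_count++;		/* keep the new candidate alive */
		if (*bestp != NULL)
			(*bestp)->ref_count--;	/* release the previous best */
		*bestp = it;
		*best_score = it->score;
	}
}

int
main(void)
{
	struct item items[3] = { { 1, 10 }, { 1, 42 }, { 1, 7 } };
	struct item *best = NULL;
	unsigned long best_score = 0;
	int i;

	for (i = 0; i < 3; i++)
		consider(&items[i], &best, &best_score);
	printf("best score %lu, ref_count %d\n", best->score, best->ref_count);
	return 0;
}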
Example no. 5
kern_return_t
task_swapin(task_t task, boolean_t make_unswappable)
{
	register queue_head_t	*list;
	register thread_act_t	thr_act, next;
	thread_t		thread;
	int			s;
	boolean_t		swappable = TRUE;

	task_lock(task);
	switch (task->swap_state) {
	    case TASK_SW_OUT:
			{
			vm_map_t map = task->map;
			/*
			 * Task has made it all the way out, which means
			 * that vm_map_res_deallocate has been done; set 
			 * state to TASK_SW_COMING_IN, then bring map
			 * back in.  We could actually be racing with
			 * the thread_swapout_enqueue, which does the
			 * vm_map_res_deallocate, but that race is covered.
			 */
			task->swap_state = TASK_SW_COMING_IN;
			assert(task->swap_ast_waiting == 0);
			assert(map->res_count >= 0);
			task_unlock(task);
			mutex_lock(&map->s_lock);
			vm_map_res_reference(map);
			mutex_unlock(&map->s_lock);
			task_lock(task);
			assert(task->swap_state == TASK_SW_COMING_IN);
			}
			break;

	    case TASK_SW_GOING_OUT:
			/*
			 * Task isn't all the way out yet.  There is
			 * still at least one thread not swapped, and
			 * vm_map_res_deallocate has not been done.
			 */
			task->swap_state = TASK_SW_COMING_IN;
			assert(task->swap_ast_waiting > 0 ||
			       (task->swap_ast_waiting == 0 &&
				task->thr_act_count == 0));
			assert(task->map->res_count > 0);
			TASK_STATS_INCR(task_sw_race_going_out);
			break;
	    case TASK_SW_IN:
			assert(task->map->res_count > 0);
#if	TASK_SW_DEBUG
			task_swapper_lock();
			if (task_swap_debug && on_swapped_list(task)) {
				printf("task 0x%X on list, state is SW_IN\n",
					task);
				Debugger("");
			}
			task_swapper_unlock();
#endif	/* TASK_SW_DEBUG */
			TASK_STATS_INCR(task_sw_race_in);
			if (make_unswappable) {
				task->swap_state = TASK_SW_UNSWAPPABLE;
				task_unlock(task);
				task_swapout_ineligible(task);
			} else
				task_unlock(task);
			return(KERN_SUCCESS);
	    case TASK_SW_COMING_IN:
			/* 
			 * Raced with another task_swapin and lost;
			 * wait for other one to complete first
			 */
			assert(task->map->res_count >= 0);
			/*
			 * set MAKE_UNSWAPPABLE so that whoever is swapping
			 * the task in will make it unswappable, and return
			 */
			if (make_unswappable)
				task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE;
			task->swap_flags |= TASK_SW_WANT_IN;
			assert_wait((event_t)&task->swap_state, FALSE);
			task_unlock(task);
			thread_block((void (*)(void)) 0);
			TASK_STATS_INCR(task_sw_race_coming_in);
			return(KERN_SUCCESS);
	    case TASK_SW_UNSWAPPABLE:
			/* 
			 * This can happen, since task_terminate 
			 * unconditionally calls task_swapin.
			 */
			task_unlock(task);
			return(KERN_SUCCESS);
	    default:
			panic("task_swapin bad state");
			break;
	}
	if (make_unswappable)
		task->swap_flags |= TASK_SW_MAKE_UNSWAPPABLE;
	assert(task->swap_state == TASK_SW_COMING_IN);
	task_swapper_lock();
#if	TASK_SW_DEBUG
	if (task_swap_debug && !on_swapped_list(task)) {
		printf("task 0x%X not on list\n", task);
		Debugger("");
	}
#endif	/* TASK_SW_DEBUG */
	queue_remove(&swapped_tasks, task, task_t, swapped_tasks);
	tasks_swapped_out--;
	task_swapins++;
	task_swapper_unlock();

	/*
	 * Iterate through all threads for this task and 
	 * release them, as required.  They may not have been swapped
	 * out yet.  The task remains locked throughout.
	 */
	list = &task->thr_acts;
	thr_act = (thread_act_t) queue_first(list);
	while (!queue_end(list, (queue_entry_t) thr_act)) {
		boolean_t need_to_release;
		next = (thread_act_t) queue_next(&thr_act->thr_acts);
		/*
		 * Keep task_swapper_lock across thread handling
		 * to synchronize with task_swap_swapout_thread
		 */
		task_swapper_lock();
		thread = act_lock_thread(thr_act);
		s = splsched();
		if (thr_act->ast & AST_SWAPOUT) {
			/* thread hasn't gotten the AST yet, just clear it */
			thread_ast_clear(thr_act, AST_SWAPOUT);
			need_to_release = FALSE;
			TASK_STATS_INCR(task_sw_before_ast);
			splx(s);
			act_unlock_thread(thr_act);
		} else {
			/*
			 * If AST_SWAPOUT was cleared, then thread_hold,
			 * or equivalent was done.
			 */
			need_to_release = TRUE;
			/*
			 * Thread has hit AST, but it may not have
			 * been dequeued yet, so we need to check.
			 * NOTE: the thread may have been dequeued, but
			 * has not yet been swapped (the task_swapper_lock
			 * has been dropped, but the thread is not yet
			 * locked), and the TH_SW_TASK_SWAPPING flag may 
			 * not have been cleared.  In this case, we will do 
			 * an extra remque, which the task_swap_swapout_thread
			 * has made safe, and clear the flag, which is also
			 * checked by the t_s_s_t before doing the swapout.
			 */
			if (thread)
				thread_lock(thread);
			if (thr_act->swap_state & TH_SW_TASK_SWAPPING) {
				/* 
				 * hasn't yet been dequeued for swapout,
				 * so clear flags and dequeue it first.
				 */
				thr_act->swap_state &= ~TH_SW_TASK_SWAPPING;
				assert(thr_act->thread == THREAD_NULL || 
				       !(thr_act->thread->state &
					 TH_SWAPPED_OUT));
				queue_remove(&swapout_thread_q, thr_act,
					     thread_act_t, swap_queue);
				TASK_STATS_INCR(task_sw_before_swap);
			} else {
				TASK_STATS_INCR(task_sw_after_swap);
				/*
				 * It's possible that the thread was
				 * made unswappable before hitting the
				 * AST, in which case it's still running.
				 */
				if (thr_act->swap_state == TH_SW_UNSWAPPABLE) {
					need_to_release = FALSE;
					TASK_STATS_INCR(task_sw_unswappable);
				}
			}
			if (thread)
				thread_unlock(thread);
			splx(s);
			act_unlock_thread(thr_act);
		}
		task_swapper_unlock();

		/* 
		 * thread_release will swap in the thread if it's been
		 * swapped out.
		 */
		if (need_to_release) {
			act_lock_thread(thr_act);
			thread_release(thr_act);
			act_unlock_thread(thr_act);
		}
		thr_act = next;
	}

	if (task->swap_flags & TASK_SW_MAKE_UNSWAPPABLE) {
		task->swap_flags &= ~TASK_SW_MAKE_UNSWAPPABLE;
		task->swap_state = TASK_SW_UNSWAPPABLE;
		swappable = FALSE;
	} else {
		task->swap_state = TASK_SW_IN;
	}

	task_swaprss_in += pmap_resident_count(task->map->pmap);
	task_swap_total_time += sched_tick - task->swap_stamp;
	/* note when task came back in */
	task->swap_stamp = sched_tick;
	if (task->swap_flags & TASK_SW_WANT_IN) {
		task->swap_flags &= ~TASK_SW_WANT_IN;
		thread_wakeup((event_t)&task->swap_state);
	}
	assert((task->swap_flags & TASK_SW_ELIGIBLE) == 0);
	task_unlock(task);
#if	TASK_SW_DEBUG
	task_swapper_lock();
	if (task_swap_debug && on_swapped_list(task)) {
		printf("task 0x%X on list at end of swap in\n", task);
		Debugger("");
	}
	task_swapper_unlock();
#endif	/* TASK_SW_DEBUG */
	/*
	 * Make the task eligible to be swapped again
	 */
	if (swappable)
		task_swapout_eligible(task);
	return(KERN_SUCCESS);
}
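For orientation, a compact summary of the swap-state transitions implied by task_swapin() and task_swapout(); this is reconstructed from the code and comments shown here, not copied from the original headers:

/*
 * Swap-state transitions implied by task_swapin()/task_swapout():
 *
 *   TASK_SW_IN         --task_swapout()-->           TASK_SW_GOING_OUT
 *   TASK_SW_GOING_OUT  --last thread swapped out-->  TASK_SW_OUT
 *                        (map residence released)
 *   TASK_SW_OUT        --task_swapin()-->            TASK_SW_COMING_IN
 *                        (map residence re-acquired)
 *   TASK_SW_GOING_OUT  --task_swapin(), race-->      TASK_SW_COMING_IN
 *   TASK_SW_COMING_IN  --task_swapin() completes-->  TASK_SW_IN, or
 *                        TASK_SW_UNSWAPPABLE if TASK_SW_MAKE_UNSWAPPABLE is set
 */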
Example no. 6
/*
 *	task_swapout:
 * 	A reference to the task must be held.
 *
 *	Start swapping out a task by sending an AST_SWAPOUT to each thread.
 *	When the threads reach a clean point, they queue themselves up on the
 *	swapout_thread_q to be swapped out by the task_swap_swapout_thread.
 *	The task can be swapped in at any point in this process.
 *
 *	A task will not be fully swapped out (i.e. its map residence count
 *	at zero) until all currently-swapped threads run and reach
 *	a clean point, at which time they will be swapped again,
 *	decrementing the swap_ast_waiting count on the task.
 *
 *	Locking: no locks held upon entry and exit.
 *		 Task_lock is held throughout this function.
 */
kern_return_t
task_swapout(task_t task)
{
	thread_act_t thr_act;
	thread_t thread;
	queue_head_t *list;
	int s;

	task_swapout_lock();
	task_lock(task);
	/*
	 * NOTE: look into turning these into assertions if they
	 * are invariants.
	 */
	if ((task->swap_state != TASK_SW_IN) || (!task->active)) {
		task_unlock(task);
		task_swapout_unlock();
		return(KERN_FAILURE);
	}
	if (task->swap_flags & TASK_SW_ELIGIBLE) {
		queue_remove(&eligible_tasks, task, task_t, swapped_tasks);
		task->swap_flags &= ~TASK_SW_ELIGIBLE;
	}
	task_swapout_unlock();

	/* set state to avoid races with task_swappable(FALSE) */
	task->swap_state = TASK_SW_GOING_OUT;
	task->swap_rss = pmap_resident_count(task->map->pmap);
	task_swaprss_out += task->swap_rss;
	task->swap_ast_waiting = task->thr_act_count;

	/*
	 * halt all threads in this task:
	 * We don't need the thread list lock for traversal.
	 */
	list = &task->thr_acts;
	thr_act = (thread_act_t) queue_first(list);
	while (!queue_end(list, (queue_entry_t) thr_act)) {
		boolean_t swappable;
		thread_act_t ract;

		thread = act_lock_thread(thr_act);
		s = splsched();
		if (!thread)
			swappable = (thr_act->swap_state != TH_SW_UNSWAPPABLE);
		else {
			thread_lock(thread);
			swappable = TRUE;
			for (ract = thread->top_act; ract; ract = ract->lower)
				if (ract->swap_state == TH_SW_UNSWAPPABLE) {
					swappable = FALSE;
					break;
				}
		}
		if (swappable)
			thread_ast_set(thr_act, AST_SWAPOUT);
		if (thread)
			thread_unlock(thread);
		splx(s);
		assert((thr_act->ast & AST_TERMINATE) == 0);
		act_unlock_thread(thr_act);
		thr_act = (thread_act_t) queue_next(&thr_act->thr_acts);
	}

	task->swap_stamp = sched_tick;
	task->swap_nswap++;
	assert((task->swap_flags&TASK_SW_WANT_IN) == 0);
	/* put task on the queue of swapped out tasks */
	task_swapper_lock();
#if	TASK_SW_DEBUG
	if (task_swap_debug && on_swapped_list(task)) {
		printf("task 0x%X already on list\n", task);
		Debugger("");
	}
#endif	/* TASK_SW_DEBUG */
	queue_enter(&swapped_tasks, task, task_t, swapped_tasks);
	tasks_swapped_out++;
	task_swapouts++;
	task_swapper_unlock();
	task_unlock(task);

	return(KERN_SUCCESS);
}
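A user-space analogue (a sketch with my own names, not from the original source) of the AST_SWAPOUT handshake the header comment describes: the swapper marks every worker, each worker notices the mark at its next clean point and enqueues itself, and a dedicated thread waits until all workers have checked in.

#include <pthread.h>
#include <stdio.h>

#define NWORKERS	4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ast_pending[NWORKERS];	/* per-worker AST_SWAPOUT analogue */
static int nqueued;			/* workers that reached a clean point */

static void *
worker(void *arg)
{
	int id = (int)(long)arg;

	/* ... run until the next clean point ... */
	pthread_mutex_lock(&lock);
	if (ast_pending[id]) {		/* notice the "AST" at the clean point */
		ast_pending[id] = 0;
		nqueued++;		/* enqueue self for the swapout thread */
		pthread_cond_signal(&cv);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *
swapout_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (nqueued < NWORKERS)	/* analogue of draining swapout_thread_q */
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
	printf("all workers reached a clean point; task fully swapped out\n");
	return NULL;
}

int
main(void)
{
	pthread_t th[NWORKERS], sw;
	int i;

	for (i = 0; i < NWORKERS; i++)
		ast_pending[i] = 1;	/* "send" AST_SWAPOUT to every worker */
	pthread_create(&sw, NULL, swapout_thread, NULL);
	for (i = 0; i < NWORKERS; i++)
		pthread_create(&th[i], NULL, worker, (void *)(long)i);
	for (i = 0; i < NWORKERS; i++)
		pthread_join(th[i], NULL);
	pthread_join(sw, NULL);
	return 0;
}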
Example no. 7
/*
 * Read proc's from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
    struct kinfo_proc *bp, int maxcnt)
{
	int cnt = 0;
	struct kinfo_proc kinfo_proc, *kp;
	struct pgrp pgrp;
	struct session sess;
	struct cdev t_cdev;
	struct tty tty;
	struct vmspace vmspace;
	struct sigacts sigacts;
#if 0
	struct pstats pstats;
#endif
	struct ucred ucred;
	struct prison pr;
	struct thread mtd;
	struct proc proc;
	struct proc pproc;
	struct sysentvec sysent;
	char svname[KI_EMULNAMELEN];

	kp = &kinfo_proc;
	kp->ki_structsize = sizeof(kinfo_proc);
	/*
	 * Loop on the processes.  This is completely broken because we need
	 * to be able to loop on the threads and somehow merge the ones that
	 * belong to the same process.
	 */
	for (; cnt < maxcnt && p != NULL; p = LIST_NEXT(&proc, p_list)) {
		memset(kp, 0, sizeof *kp);
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			return (-1);
		}
		if (proc.p_state == PRS_NEW)
			continue;
		if (proc.p_state != PRS_ZOMBIE) {
			if (KREAD(kd, (u_long)TAILQ_FIRST(&proc.p_threads),
			    &mtd)) {
				_kvm_err(kd, kd->program,
				    "can't read thread at %p",
				    TAILQ_FIRST(&proc.p_threads));
				return (-1);
			}
		}
		if (KREAD(kd, (u_long)proc.p_ucred, &ucred) == 0) {
			kp->ki_ruid = ucred.cr_ruid;
			kp->ki_svuid = ucred.cr_svuid;
			kp->ki_rgid = ucred.cr_rgid;
			kp->ki_svgid = ucred.cr_svgid;
			kp->ki_cr_flags = ucred.cr_flags;
			if (ucred.cr_ngroups > KI_NGROUPS) {
				kp->ki_ngroups = KI_NGROUPS;
				kp->ki_cr_flags |= KI_CRF_GRP_OVERFLOW;
			} else
				kp->ki_ngroups = ucred.cr_ngroups;
			kvm_read(kd, (u_long)ucred.cr_groups, kp->ki_groups,
			    kp->ki_ngroups * sizeof(gid_t));
			kp->ki_uid = ucred.cr_uid;
			if (ucred.cr_prison != NULL) {
				if (KREAD(kd, (u_long)ucred.cr_prison, &pr)) {
					_kvm_err(kd, kd->program,
					    "can't read prison at %p",
					    ucred.cr_prison);
					return (-1);
				}
				kp->ki_jid = pr.pr_id;
			}
		}

		switch(what & ~KERN_PROC_INC_THREAD) {

		case KERN_PROC_GID:
			if (kp->ki_groups[0] != (gid_t)arg)
				continue;
			break;

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_RGID:
			if (kp->ki_rgid != (gid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (kp->ki_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (kp->ki_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather kinfo_proc
		 */
		kp->ki_paddr = p;
		kp->ki_addr = 0;	/* XXX uarea */
		/* kp->ki_kstack = proc.p_thread.td_kstack; XXXKSE */
		kp->ki_args = proc.p_args;
		kp->ki_tracep = proc.p_tracevp;
		kp->ki_textvp = proc.p_textvp;
		kp->ki_fd = proc.p_fd;
		kp->ki_vmspace = proc.p_vmspace;
		if (proc.p_sigacts != NULL) {
			if (KREAD(kd, (u_long)proc.p_sigacts, &sigacts)) {
				_kvm_err(kd, kd->program,
				    "can't read sigacts at %p", proc.p_sigacts);
				return (-1);
			}
			kp->ki_sigignore = sigacts.ps_sigignore;
			kp->ki_sigcatch = sigacts.ps_sigcatch;
		}
#if 0
		if ((proc.p_flag & P_INMEM) && proc.p_stats != NULL) {
			if (KREAD(kd, (u_long)proc.p_stats, &pstats)) {
				_kvm_err(kd, kd->program,
				    "can't read stats at %x", proc.p_stats);
				return (-1);
			}
			kp->ki_start = pstats.p_start;

			/*
			 * XXX: The times here are probably zero and need
			 * to be calculated from the raw data in p_rux and
			 * p_crux.
			 */
			kp->ki_rusage = pstats.p_ru;
			kp->ki_childstime = pstats.p_cru.ru_stime;
			kp->ki_childutime = pstats.p_cru.ru_utime;
			/* Some callers want child-times in a single value */
			timeradd(&kp->ki_childstime, &kp->ki_childutime,
			    &kp->ki_childtime);
		}
#endif
		if (proc.p_oppid)
			kp->ki_ppid = proc.p_oppid;
		else if (proc.p_pptr) {
			if (KREAD(kd, (u_long)proc.p_pptr, &pproc)) {
				_kvm_err(kd, kd->program,
				    "can't read pproc at %p", proc.p_pptr);
				return (-1);
			}
			kp->ki_ppid = pproc.p_pid;
		} else
			kp->ki_ppid = 0;
		if (proc.p_pgrp == NULL)
			goto nopgrp;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %p",
				 proc.p_pgrp);
			return (-1);
		}
		kp->ki_pgid = pgrp.pg_id;
		kp->ki_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %p",
				pgrp.pg_session);
			return (-1);
		}
		kp->ki_sid = sess.s_sid;
		(void)memcpy(kp->ki_login, sess.s_login,
						sizeof(kp->ki_login));
		kp->ki_kiflag = sess.s_ttyvp ? KI_CTTY : 0;
		if (sess.s_leader == p)
			kp->ki_kiflag |= KI_SLEADER;
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
					 "can't read tty at %p", sess.s_ttyp);
				return (-1);
			}
			if (tty.t_dev != NULL) {
				if (KREAD(kd, (u_long)tty.t_dev, &t_cdev)) {
					_kvm_err(kd, kd->program,
						 "can't read cdev at %p",
						tty.t_dev);
					return (-1);
				}
#if 0
				kp->ki_tdev = t_cdev.si_udev;
#else
				kp->ki_tdev = NODEV;
#endif
			}
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
						 "can't read tpgrp at %p",
						tty.t_pgrp);
					return (-1);
				}
				kp->ki_tpgid = pgrp.pg_id;
			} else
				kp->ki_tpgid = -1;
			if (tty.t_session != NULL) {
				if (KREAD(kd, (u_long)tty.t_session, &sess)) {
					_kvm_err(kd, kd->program,
					    "can't read session at %p",
					    tty.t_session);
					return (-1);
				}
				kp->ki_tsid = sess.s_sid;
			}
		} else {
nopgrp:
			kp->ki_tdev = NODEV;
		}
		if ((proc.p_state != PRS_ZOMBIE) && mtd.td_wmesg)
			(void)kvm_read(kd, (u_long)mtd.td_wmesg,
			    kp->ki_wmesg, WMESGLEN);

		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&vmspace, sizeof(vmspace));
		kp->ki_size = vmspace.vm_map.size;
		/*
		 * Approximate the kernel's method of calculating
		 * this field.
		 */
#define		pmap_resident_count(pm) ((pm)->pm_stats.resident_count)
		kp->ki_rssize = pmap_resident_count(&vmspace.vm_pmap);
		kp->ki_swrss = vmspace.vm_swrss;
		kp->ki_tsize = vmspace.vm_tsize;
		kp->ki_dsize = vmspace.vm_dsize;
		kp->ki_ssize = vmspace.vm_ssize;

		switch (what & ~KERN_PROC_INC_THREAD) {

		case KERN_PROC_PGRP:
			if (kp->ki_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_SESSION:
			if (kp->ki_sid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			     kp->ki_tdev != (dev_t)arg)
				continue;
			break;
		}
		if (proc.p_comm[0] != 0)
			strlcpy(kp->ki_comm, proc.p_comm, MAXCOMLEN);
		(void)kvm_read(kd, (u_long)proc.p_sysent, (char *)&sysent,
		    sizeof(sysent));
		(void)kvm_read(kd, (u_long)sysent.sv_name, (char *)&svname,
		    sizeof(svname));
		if (svname[0] != 0)
			strlcpy(kp->ki_emul, svname, KI_EMULNAMELEN);
		if ((proc.p_state != PRS_ZOMBIE) &&
		    (mtd.td_blocked != 0)) {
			kp->ki_kiflag |= KI_LOCKBLOCK;
			if (mtd.td_lockname)
				(void)kvm_read(kd,
				    (u_long)mtd.td_lockname,
				    kp->ki_lockname, LOCKNAMELEN);
			kp->ki_lockname[LOCKNAMELEN] = 0;
		}
		kp->ki_runtime = cputick2usec(proc.p_rux.rux_runtime);
		kp->ki_pid = proc.p_pid;
		kp->ki_siglist = proc.p_siglist;
		SIGSETOR(kp->ki_siglist, mtd.td_siglist);
		kp->ki_sigmask = mtd.td_sigmask;
		kp->ki_xstat = KW_EXITCODE(proc.p_xexit, proc.p_xsig);
		kp->ki_acflag = proc.p_acflag;
		kp->ki_lock = proc.p_lock;
		if (proc.p_state != PRS_ZOMBIE) {
			kp->ki_swtime = (ticks - proc.p_swtick) / hz;
			kp->ki_flag = proc.p_flag;
			kp->ki_sflag = 0;
			kp->ki_nice = proc.p_nice;
			kp->ki_traceflag = proc.p_traceflag;
			if (proc.p_state == PRS_NORMAL) {
				if (TD_ON_RUNQ(&mtd) ||
				    TD_CAN_RUN(&mtd) ||
				    TD_IS_RUNNING(&mtd)) {
					kp->ki_stat = SRUN;
				} else if (mtd.td_state ==
				    TDS_INHIBITED) {
					if (P_SHOULDSTOP(&proc)) {
						kp->ki_stat = SSTOP;
					} else if (
					    TD_IS_SLEEPING(&mtd)) {
						kp->ki_stat = SSLEEP;
					} else if (TD_ON_LOCK(&mtd)) {
						kp->ki_stat = SLOCK;
					} else {
						kp->ki_stat = SWAIT;
					}
				}
			} else {
				kp->ki_stat = SIDL;
			}
			/* Stuff from the thread */
			kp->ki_pri.pri_level = mtd.td_priority;
			kp->ki_pri.pri_native = mtd.td_base_pri;
			kp->ki_lastcpu = mtd.td_lastcpu;
			kp->ki_wchan = mtd.td_wchan;
			kp->ki_oncpu = mtd.td_oncpu;
			if (mtd.td_name[0] != '\0')
				strlcpy(kp->ki_tdname, mtd.td_name, sizeof(kp->ki_tdname));
			kp->ki_pctcpu = 0;
			kp->ki_rqindex = 0;

			/*
			 * Note: legacy fields; clamp to NOCPU_OLD or
			 * MAXCPU_OLD as appropriate.
			 */
			if (mtd.td_lastcpu == NOCPU)
				kp->ki_lastcpu_old = NOCPU_OLD;
			else if (mtd.td_lastcpu > MAXCPU_OLD)
				kp->ki_lastcpu_old = MAXCPU_OLD;
			else
				kp->ki_lastcpu_old = mtd.td_lastcpu;

			if (mtd.td_oncpu == NOCPU)
				kp->ki_oncpu_old = NOCPU_OLD;
			else if (mtd.td_oncpu > MAXCPU_OLD)
				kp->ki_oncpu_old = MAXCPU_OLD;
			else
				kp->ki_oncpu_old = mtd.td_oncpu;
		} else {
			kp->ki_stat = SZOMB;
		}
		kp->ki_tdev_freebsd11 = kp->ki_tdev; /* truncate */
		bcopy(&kinfo_proc, bp, sizeof(kinfo_proc));
		++bp;
		++cnt;
	}
	return (cnt);
}
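kvm_proclist() is the helper libkvm uses when it reads processes out of a kernel memory image; application code normally reaches its results through the public kvm_getprocs() interface. A minimal sketch of that consumer side, printing each process's resident set size from ki_rssize (the field filled via the pmap_resident_count() macro above); this example queries the running kernel rather than a crash dump:

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <paths.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;
	struct kinfo_proc *kp;
	int i, cnt;

	/* Open the running kernel; pass kernel/core paths for a crash dump. */
	kd = kvm_openfiles(NULL, _PATH_DEVNULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	kp = kvm_getprocs(kd, KERN_PROC_PROC, 0, &cnt);
	if (kp == NULL) {
		fprintf(stderr, "kvm_getprocs: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	for (i = 0; i < cnt; i++)
		printf("%5d %-19s rss=%ld pages\n",
		    kp[i].ki_pid, kp[i].ki_comm, (long)kp[i].ki_rssize);
	kvm_close(kd);
	return (0);
}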