Code example #1
File: vm_glue.c  Project: kusumi/DragonFlyBSD
static void
scheduler(void *dummy)
{
	struct scheduler_info info;
	struct proc *p;

	KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
	scheduler_notify = 0;
	/*
	 * Don't try to swap anything in if we are low on memory.
	 */
	if (vm_page_count_severe()) {
		vm_wait(0);
		goto loop;
	}

	/*
	 * Look for a good candidate to wake up
	 *
	 * XXX we should make the schedule thread pcpu and then use a
	 * segmented allproc scan.
	 */
	info.pp = NULL;
	info.ppri = INT_MIN;
	allproc_scan(scheduler_callback, &info, 0);

	/*
	 * Nothing to do, back to sleep for at least 1/10 of a second.  If
	 * we are woken up, immediately process the next request.  If
	 * multiple requests have built up, the first is processed
	 * immediately and the rest are staggered.
	 */
	if ((p = info.pp) == NULL) {
		tsleep(&proc0, 0, "nowork", hz / 10);
		if (scheduler_notify == 0)
			tsleep(&scheduler_notify, 0, "nowork", 0);
		goto loop;
	}

	/*
	 * Fault the selected process in, then wait for a short period of
	 * time and loop up.
	 *
	 * XXX we need a heuristic to get a measure of system stress and
	 * then adjust our stagger wakeup delay accordingly.
	 */
	lwkt_gettoken(&p->p_token);
	faultin(p);
	p->p_swtime = 0;
	lwkt_reltoken(&p->p_token);
	PRELE(p);
	tsleep(&proc0, 0, "swapin", hz / 10);
	goto loop;
}
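
The pattern worth noting above is the allproc_scan(scheduler_callback, &info, 0) pass: every process is visited once and the callback keeps only the single best swap-in candidate in info.pp, with info.ppri starting at the INT_MIN sentinel. The user-space sketch below shows the same select-the-best-candidate scan in isolation; the struct and function names are illustrative stand-ins, not the actual kernel callback or the allproc_scan() API.

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for struct proc and struct scheduler_info. */
struct fake_proc {
	int pid;
	int swapped_out;	/* only swapped-out processes are candidates */
	int pri;		/* higher value == better candidate */
};

struct scan_info {
	struct fake_proc *pp;	/* best candidate found so far */
	int ppri;		/* its priority, starts at INT_MIN */
};

/* Visited once per process, mirroring what scheduler_callback() does. */
static void
scan_callback(struct fake_proc *p, void *data)
{
	struct scan_info *info = data;

	if (p->swapped_out && p->pri > info->ppri) {
		info->pp = p;
		info->ppri = p->pri;
	}
}

int
main(void)
{
	struct fake_proc procs[] = {
		{ 100, 1, 5 }, { 101, 0, 9 }, { 102, 1, 7 }
	};
	struct scan_info info = { NULL, INT_MIN };
	size_t i;

	/* Stand-in for allproc_scan(scheduler_callback, &info, 0). */
	for (i = 0; i < sizeof(procs) / sizeof(procs[0]); i++)
		scan_callback(&procs[i], &info);

	if (info.pp != NULL)
		printf("best candidate: pid %d (pri %d)\n",
		    info.pp->pid, info.ppri);
	return 0;
}

Run as written, this picks pid 102: pid 101 has a higher priority but is not swapped out, so it is skipped, much as the kernel callback only considers processes that need to be faulted back in.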
Code example #2
File: vm_glue.c  Project: mihaicarabas/dragonfly
/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 *
 * No requirements.
 */
void
vm_fork(struct proc *p1, struct proc *p2, int flags)
{
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared, essentially
		 * this changes shared memory amongst threads, into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_sysref.refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
		return;
	}

	if (flags & RFMEM) {
		vmspace_ref(p1->p_vmspace);
		p2->p_vmspace = p1->p_vmspace;
	}

	while (vm_page_count_severe()) {
		vm_wait(0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_init_proc(p2);
}
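
vm_fork() above reduces to a three-way decision on the flags: with RFPROC clear it only adjusts the current address space (un-sharing it into COW when RFMEM is also clear) and calls cpu_fork(); with RFPROC and RFMEM both set the child just takes a reference on the parent's vmspace; and with RFPROC alone the vmspace is copied via vmspace_fork(), plus shmfork() when shared memory is attached. The sketch below only restates that decision table in user space; the flag values and helper name are made up for the example and are not the system's definitions.

#include <stdio.h>

/* Illustrative flag values; the real RFPROC/RFMEM come from system headers. */
#define X_RFPROC 0x1	/* create a new process */
#define X_RFMEM  0x2	/* share the address space instead of copying it */

/* Label the path the vm_fork() logic above would take for the given flags. */
static const char *
fork_path(int flags)
{
	if ((flags & X_RFPROC) == 0) {
		if ((flags & X_RFMEM) == 0)
			return "same proc: un-share vmspace into COW, cpu_fork()";
		return "same proc: keep shared vmspace, cpu_fork()";
	}
	if (flags & X_RFMEM)
		return "new proc: reference the parent's vmspace";
	return "new proc: copy the vmspace via vmspace_fork()";
}

int
main(void)
{
	printf("%s\n", fork_path(X_RFPROC | X_RFMEM));	/* thread-style share */
	printf("%s\n", fork_path(X_RFPROC));		/* classic fork */
	printf("%s\n", fork_path(0));			/* divorce shared memory */
	return 0;
}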