/*
 *	swapin_thread: [exported]
 *
 *	Entry point for the kernel thread that services task swap-ins.
 *	Wires this thread's kernel stack down, then transfers control to
 *	the continuation, which loops forever and never returns here.
 */
void
swapin_thread(void)
{
	stack_privilege(current_thread());

	swapin_thread_continue();
	/*NOTREACHED*/
}
/*
 *	task_swap_swapout_thread: [exported]
 *
 *	Executes as a separate kernel thread.
 *	Its job is to swap out threads that have been halted by AST_SWAPOUT.
 *
 *	Loops forever: drains swapout_thread_q, swapping out each queued
 *	activation via thread_swapout(), then blocks until re-awakened.
 */
void
task_swap_swapout_thread(void)
{
	thread_act_t	thr_act;
	thread_t	thread, nthread;
	task_t		task;
	int		s;		/* saved spl level */

	/* this daemon itself must never be swapped; wire its stack */
	thread_swappable(current_act(), FALSE);
	stack_privilege(current_thread());
	spllo();

	while (TRUE) {
		task_swapper_lock();
		while (! queue_empty(&swapout_thread_q)) {
			queue_remove_first(&swapout_thread_q, thr_act,
					   thread_act_t, swap_queue);
			/*
			 * If we're racing with task_swapin, we need
			 * to make it safe for it to do remque on the
			 * thread, so make its links point to itself.
			 * Allowing this ugliness is cheaper than
			 * making task_swapin search the entire queue.
			 */
			act_lock(thr_act);
			queue_init((queue_t) &thr_act->swap_queue);
			act_unlock(thr_act);
			task_swapper_unlock();
			/*
			 * Wait for thread's RUN bit to be deasserted.
			 */
			thread = act_lock_thread(thr_act);
			if (thread == THREAD_NULL)
				act_unlock_thread(thr_act);
			else {
				boolean_t r;

				/*
				 * Take a ref and hold the activation so
				 * neither can disappear while we sleep in
				 * thread_stop_wait() with no locks held.
				 */
				thread_reference(thread);
				thread_hold(thr_act);
				act_unlock_thread(thr_act);
				r = thread_stop_wait(thread);
				nthread = act_lock_thread(thr_act);
				thread_release(thr_act);
				thread_deallocate(thread);
				act_unlock_thread(thr_act);
				/*
				 * If the stop failed, or a different thread
				 * is now bound to the activation, give up on
				 * this entry and go back to the queue.
				 */
				if (!r || nthread != thread) {
					task_swapper_lock();
					continue;
				}
			}
			task = thr_act->task;
			task_lock(task);
			/*
			 * we can race with swapin, which would set the
			 * state to TASK_SW_IN.
			 */
			if ((task->swap_state != TASK_SW_OUT) &&
			    (task->swap_state != TASK_SW_GOING_OUT)) {
				task_unlock(task);
				task_swapper_lock();
				TASK_STATS_INCR(task_sw_race_in_won);
				if (thread != THREAD_NULL)
					thread_unstop(thread);
				continue;
			}
			/*
			 * Re-validate the act/thread binding under the task
			 * lock; bail if the act died or was re-bound.
			 */
			nthread = act_lock_thread(thr_act);
			if (nthread != thread || thr_act->active == FALSE) {
				act_unlock_thread(thr_act);
				task_unlock(task);
				task_swapper_lock();
				TASK_STATS_INCR(task_sw_act_inactive);
				if (thread != THREAD_NULL)
					thread_unstop(thread);
				continue;
			}
			s = splsched();
			if (thread != THREAD_NULL)
				thread_lock(thread);
			/*
			 * Thread cannot have been swapped out yet because
			 * TH_SW_TASK_SWAPPING was set in AST.  If task_swapin
			 * beat us here, we either wouldn't have found it on
			 * the queue, or the task->swap_state would have
			 * changed.  The synchronization is on the
			 * task's swap_state and the task_lock.
			 * The thread can't be swapped in any other way
			 * because its task has been swapped.
			 */
			assert(thr_act->swap_state & TH_SW_TASK_SWAPPING);
			assert(thread == THREAD_NULL ||
			       !(thread->state & (TH_SWAPPED_OUT|TH_RUN)));
			assert((thr_act->swap_state & TH_SW_STATE) == TH_SW_IN);
			/* assert(thread->state & TH_HALTED); */
			/* this also clears TH_SW_TASK_SWAPPING flag */
			thr_act->swap_state = TH_SW_GOING_OUT;
			if (thread != THREAD_NULL) {
				if (thread->top_act == thr_act) {
					thread->state |= TH_SWAPPED_OUT;
					/*
					 * Once we unlock the task, things can happen
					 * to the thread, so make sure it's consistent
					 * for thread_swapout.
					 */
				}
				/* extra ref keeps thread alive across thread_swapout */
				thread->ref_count++;
				thread_unlock(thread);
				thread_unstop(thread);
			}
			splx(s);
			act_locked_act_reference(thr_act);
			act_unlock_thread(thr_act);
			task_unlock(task);

			thread_swapout(thr_act);	/* do the work */

			/* drop the references taken above */
			if (thread != THREAD_NULL)
				thread_deallocate(thread);
			act_deallocate(thr_act);
			task_swapper_lock();
		}
		/* queue drained: sleep until more work is enqueued */
		assert_wait((event_t)&swapout_thread_q, FALSE);
		task_swapper_unlock();
		thread_block((void (*)(void)) 0);
	}
}
void task_swapper(void) { task_t outtask, intask; int timeout; int loopcnt = 0; boolean_t start_swapping; boolean_t stop_swapping; int local_page_free_avg; extern int hz; thread_swappable(current_act(), FALSE); stack_privilege(current_thread()); spllo(); for (;;) { local_page_free_avg = vm_page_free_avg; while (TRUE) { #if 0 if (task_swap_debug) printf("task_swapper: top of loop; cnt = %d\n",loopcnt); #endif intask = pick_intask(); start_swapping = ((vm_pageout_rate_avg > swap_start_pageout_rate) || (vm_grab_rate_avg > max_grab_rate)); stop_swapping = (vm_pageout_rate_avg < swap_stop_pageout_rate); /* * If a lot of paging is going on, or another task should come * in but memory is tight, find something to swap out and start * it. Don't swap any task out if task swapping is disabled. * vm_page_queue_free_lock protects the vm globals. */ outtask = TASK_NULL; if (start_swapping || (!stop_swapping && intask && ((local_page_free_avg / AVE_SCALE) < vm_page_free_target)) ) { if (task_swap_enable && (outtask = pick_outtask()) && (task_swapout(outtask) == KERN_SUCCESS)) { unsigned long rss; #if TASK_SW_DEBUG if (task_swap_debug) print_pid(outtask, local_page_free_avg / AVE_SCALE, vm_page_free_target, "<", "out"); #endif rss = outtask->swap_rss; if (outtask->swap_nswap == 1) rss /= 2; /* divide by 2 if never out */ local_page_free_avg += (rss/short_avg_interval) * AVE_SCALE; } if (outtask != TASK_NULL) task_deallocate(outtask); } /* * If there is an eligible task to bring in and there are at * least vm_page_free_target free pages, swap it in. If task * swapping has been disabled, bring the task in anyway. 
*/ if (intask && ((local_page_free_avg / AVE_SCALE) >= vm_page_free_target || stop_swapping || !task_swap_enable)) { if (task_swapin(intask, FALSE) == KERN_SUCCESS) { unsigned long rss; #if TASK_SW_DEBUG if (task_swap_debug) print_pid(intask, local_page_free_avg / AVE_SCALE, vm_page_free_target, ">=", "in"); #endif rss = intask->swap_rss; if (intask->swap_nswap == 1) rss /= 2; /* divide by 2 if never out */ local_page_free_avg -= (rss/short_avg_interval) * AVE_SCALE; } } /* * XXX * Here we have to decide whether to continue swapping * in and/or out before sleeping. The decision should * be made based on the previous action (swapin/out) and * current system parameters, such as paging rates and * demand. * The function, compute_vm_averages, which does these * calculations, depends on being called every second, * so we can't just do the same thing. */ if (++loopcnt < MAX_LOOP) continue; /* * Arrange to be awakened if paging is still heavy or there are * any tasks partially or completely swapped out. (Otherwise, * the wakeup will come from the external trigger(s).) */ timeout = 0; if (start_swapping) timeout = task_swap_cycle_time; else { task_swapper_lock(); if (!queue_empty(&swapped_tasks)) timeout = min_swap_time; task_swapper_unlock(); } assert_wait((event_t)&swapped_tasks, FALSE); if (timeout) { if (task_swap_debug) printf("task_swapper: set timeout of %d\n", timeout); thread_set_timeout(timeout*hz); } if (task_swap_debug) printf("task_swapper: blocking\n"); thread_block((void (*)(void)) 0); if (timeout) { reset_timeout_check(¤t_thread()->timer); } /* reset locals */ loopcnt = 0; local_page_free_avg = vm_page_free_avg; } } }