/*
 * Called by vm_page_free to hint that a new page is available.
 */
void
vm_page_zero_idle_wakeup(void)
{

	if (idlezero_enable && vm_page_zero_check())
		wakeup(&zero_state);
}
/*
 * Called by vm_page_free to hint that a new page is available.
 */
void
vm_page_zero_idle_wakeup(void)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (wakeup_needed && vm_page_zero_check()) {
		wakeup_needed = FALSE;
		wakeup(&zero_state);
	}
}
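Both versions of the wakeup hook gate on vm_page_zero_check(), which is not shown in this listing. The following is a minimal sketch of that predicate in the style of the FreeBSD vm_zeroidle.c of this era: it tries to keep roughly half of the free pages pre-zeroed, with hysteresis between a low and a high water mark so the idle thread does not flutter on and off near steady state. The ZIDLE_LO/ZIDLE_HI ratios, the zero_state flag, and the vm_page_zero_count counter are assumptions; exact names and thresholds vary by revision.

/*
 * Sketch of the predicate the code above relies on.  The threshold
 * macros and counter names are assumptions based on the FreeBSD
 * vm_zeroidle.c of this period.
 */
#define ZIDLE_LO(v)	((v) * 2 / 3)
#define ZIDLE_HI(v)	((v) * 4 / 5)

static int
vm_page_zero_check(void)
{

	if (!idlezero_enable)
		return (0);
	/*
	 * Once zero_state is set, stay idle until the pool of
	 * pre-zeroed pages drains below the low-water mark.
	 */
	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
		return (0);
	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
		return (0);
	return (1);
}

Note that the address of zero_state doubles as the sleep/wakeup channel passed to wakeup() above and to msleep()/tsleep() in the vm_pagezero loops below.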
static void
vm_pagezero(void __unused *arg)
{
	struct rtprio rtp;
	struct thread *td;
	int pages, pri;

	td = curthread;
	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	pages = 0;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);
	idlezero_enable = idlezero_enable_default;

	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
#ifndef PREEMPTION
			if (pages > idlezero_maxrun || sched_runnable()) {
				mtx_lock_spin(&sched_lock);
				mi_switch(SW_VOL, NULL);
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
#endif
		} else {
			vm_page_lock_queues();
			wakeup_needed = TRUE;
			msleep(&zero_state, &vm_page_queue_mtx,
			    PDROP | pri, "pgzero", hz * 300);
			pages = 0;
		}
	}
}
static void
vm_pagezero(void)
{
	struct thread *td;
	struct proc *p;
	struct rtprio rtp;
	int pages = 0;
	int pri;

	td = curthread;
	p = td->td_proc;
	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);

	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
			if (pages > idlezero_maxrun || sched_runnable()) {
				mtx_lock_spin(&sched_lock);
				td->td_proc->p_stats->p_ru.ru_nvcsw++;
				mi_switch();
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
		} else {
			tsleep(&zero_state, pri, "pgzero", hz * 300);
			pages = 0;
		}
	}
}
static void
vm_pagezero(void __unused *arg)
{

	idlezero_enable = idlezero_enable_default;

	mtx_lock(&vm_page_queue_free_mtx);
	for (;;) {
		if (vm_page_zero_check()) {
			vm_page_zero_idle();
#ifndef PREEMPTION
			if (sched_runnable()) {
				thread_lock(curthread);
				mi_switch(SW_VOL | SWT_IDLE, NULL);
				thread_unlock(curthread);
			}
#endif
		} else {
			wakeup_needed = TRUE;
			msleep(&zero_state, &vm_page_queue_free_mtx, 0,
			    "pgzero", hz * 300);
		}
	}
}
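All three FreeBSD loops above do their per-iteration work through vm_page_zero_idle(), which is also not shown. The following sketch is modeled on the early-5.x routine: it pulls one not-yet-zeroed page off a free queue, zeroes it with the free-queue lock dropped, and requeues it flagged PG_ZERO. The queue helpers (vm_pageq_find, vm_pageq_remove_nowakeup, vm_pageq_enqueue), pmap_zero_page_idle, and the PQ_PRIME2/PQ_L2_MASK rover constants are assumptions based on that revision; note that the last variant above instead holds vm_page_queue_free_mtx across the call and discards the return value.

/*
 * Sketch of the per-iteration worker, modeled on the FreeBSD 5-era
 * routine; the queue helpers and rover constants are assumptions.
 */
static int
vm_page_zero_idle(void)
{
	static int free_rover;
	vm_page_t m;

	mtx_lock_spin(&vm_page_queue_free_mtx);
	zero_state = 0;
	m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
	if (m != NULL && (m->flags & PG_ZERO) == 0) {
		/* Take the page off the free queue; zero it unlocked. */
		vm_pageq_remove_nowakeup(m);
		mtx_unlock_spin(&vm_page_queue_free_mtx);
		pmap_zero_page_idle(m);
		mtx_lock_spin(&vm_page_queue_free_mtx);
		m->flags |= PG_ZERO;
		vm_pageq_enqueue(PQ_FREE + m->pc, m);
		++vm_page_zero_count;
		if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
			zero_state = 1;
	}
	/* Walk the free queues with a prime stride to spread the work. */
	free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
	mtx_unlock_spin(&vm_page_queue_free_mtx);
	return (1);
}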
/*
 * MPSAFE thread
 */
static void
vm_pagezero(void *arg)
{
	vm_page_t m = NULL;
	struct lwbuf *lwb = NULL;
	struct lwbuf lwb_cache;
	enum zeroidle_state state = STATE_IDLE;
	char *pg = NULL;
	int npages = 0;
	int sleep_time;
	int i = 0;
	int cpu = (int)(intptr_t)arg;
	int zero_state = 0;

	/*
	 * Adjust thread parameters before entering our loop.  The thread
	 * is started with the MP lock held and with normal kernel thread
	 * priority.
	 *
	 * Also put us on the last cpu for now.
	 *
	 * For now leave the MP lock held, the VM routines cannot be called
	 * with it released until tokenization is finished.
	 */
	lwkt_setpri_self(TDPRI_IDLE_WORK);
	lwkt_setcpu_self(globaldata_find(cpu));
	sleep_time = DEFAULT_SLEEP_TIME;

	/*
	 * Loop forever
	 */
	for (;;) {
		int zero_count;

		switch (state) {
		case STATE_IDLE:
			/*
			 * Wait for work.
			 */
			tsleep(&zero_state, 0, "pgzero", sleep_time);
			if (vm_page_zero_check(&zero_count, &zero_state))
				npages = idlezero_rate / 10;
			sleep_time = vm_page_zero_time(zero_count);
			if (npages)
				state = STATE_GET_PAGE;	/* Fallthrough */
			break;
		case STATE_GET_PAGE:
			/*
			 * Acquire page to zero
			 */
			if (--npages == 0) {
				state = STATE_IDLE;
			} else {
				m = vm_page_free_fromq_fast();
				if (m == NULL) {
					state = STATE_IDLE;
				} else {
					state = STATE_ZERO_PAGE;
					lwb = lwbuf_alloc(m, &lwb_cache);
					pg = (char *)lwbuf_kva(lwb);
					i = 0;
				}
			}
			break;
		case STATE_ZERO_PAGE:
			/*
			 * Zero-out the page
			 */
			while (i < PAGE_SIZE) {
				if (idlezero_nocache == 1)
					bzeront(&pg[i], IDLEZERO_RUN);
				else
					bzero(&pg[i], IDLEZERO_RUN);
				i += IDLEZERO_RUN;
				lwkt_yield();
			}
			state = STATE_RELEASE_PAGE;
			break;
		case STATE_RELEASE_PAGE:
			lwbuf_free(lwb);
			vm_page_flag_set(m, PG_ZERO);
			vm_page_free_toq(m);
			state = STATE_GET_PAGE;
			++idlezero_count;	/* non-locked, SMP race ok */
			break;
		}
		lwkt_yield();
	}
}
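The idlezero_nocache path in the DragonFly version calls bzeront, a non-temporal bzero, so that zeroing pages nobody will touch soon does not evict useful cache lines; the lwkt_yield() between IDLEZERO_RUN-sized chunks keeps the idle thread preemptible. The real routine is hand-written assembly; the following standalone C sketch only illustrates the streaming-store idea with SSE2 intrinsics, and assumes a 16-byte-aligned buffer whose length is a multiple of 16.

#include <stddef.h>
#include <emmintrin.h>	/* SSE2: _mm_stream_si128 */

/*
 * Illustrative non-temporal zeroing loop.  Streaming stores bypass
 * the cache hierarchy, so pre-zeroing idle pages does not displace
 * lines the rest of the system is actively using.  Assumes p is
 * 16-byte aligned and len is a multiple of 16.
 */
static void
bzero_nontemporal(void *p, size_t len)
{
	__m128i zero = _mm_setzero_si128();
	char *cp = p;
	size_t i;

	for (i = 0; i < len; i += 16)
		_mm_stream_si128((__m128i *)(cp + i), zero);
	_mm_sfence();		/* order the streaming stores */
}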