/*
 * This routine is called from the hardclock() (basically a FASTint/IPI) on
 * each cpu in the system.  sc->curticks is this cpu's notion of the timebase.
 * It IS NOT NECESSARILY SYNCHRONIZED WITH 'ticks'!  sc->softticks is where
 * the callwheel is currently indexed.
 *
 * WARNING!  The MP lock is not necessarily held on call, nor can it be
 * safely obtained.
 *
 * sc->softticks is adjusted by either this routine or our helper thread
 * depending on whether the helper thread is running or not.
 */
void
hardclock_softtick(globaldata_t gd)
{
        softclock_pcpu_t sc;

        sc = &softclock_pcpu_ary[gd->gd_cpuid];
        ++sc->curticks;
        if (sc->isrunning)
                return;
        if (sc->softticks == sc->curticks) {
                /*
                 * in sync, only wakeup the thread if there is something to
                 * do.
                 */
                if (TAILQ_FIRST(&sc->callwheel[sc->softticks & callwheelmask])) {
                        sc->isrunning = 1;
                        lwkt_schedule(&sc->thread);
                } else {
                        ++sc->softticks;
                }
        } else {
                /*
                 * out of sync, wakeup the thread unconditionally so it can
                 * catch up.
                 */
                sc->isrunning = 1;
                lwkt_schedule(&sc->thread);
        }
}
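The indexing expression sc->softticks & callwheelmask works because the callwheel size is kept at a power of two, so the mask turns an ever-increasing tick count into a bucket index with a single AND. A minimal sketch of that setup follows; the helper name and example size are illustrative assumptions, not code from the source above.

/*
 * Sketch only: how a power-of-two callwheel makes the masking above
 * valid.  The variable names mirror the fragment; the setup routine
 * and the example size are illustrative, not the kernel's exact code.
 */
static int callwheelsize;       /* rounded up to a power of 2 */
static int callwheelmask;       /* callwheelsize - 1 */

static void
callwheel_sketch_init(int ncallout)
{
        callwheelsize = 1;
        while (callwheelsize < ncallout)
                callwheelsize <<= 1;
        callwheelmask = callwheelsize - 1;      /* e.g. 256 -> mask 0xff */
}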
static void
sched_ithd_intern(struct intr_info *info)
{
        ++info->i_count;
        if (info->i_state != ISTATE_NOTHREAD) {
                if (info->i_reclist == NULL) {
                        report_stray_interrupt(info, "sched_ithd");
                } else {
#ifdef SMP
                        if (info->i_thread.td_gd == mycpu) {
                                if (info->i_running == 0) {
                                        info->i_running = 1;
                                        if (info->i_state != ISTATE_LIVELOCKED)
                                                lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
                                }
                        } else {
                                lwkt_send_ipiq(info->i_thread.td_gd,
                                               sched_ithd_remote, info);
                        }
#else
                        if (info->i_running == 0) {
                                info->i_running = 1;
                                if (info->i_state != ISTATE_LIVELOCKED)
                                        lwkt_schedule(&info->i_thread); /* MIGHT PREEMPT */
                        }
#endif
                }
        } else {
                report_stray_interrupt(info, "sched_ithd");
        }
}
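The cross-cpu branch above forwards the request to the owning cpu with lwkt_send_ipiq(). The sched_ithd_remote() handler is not shown here; plausibly it just re-enters the same scheduling path once it runs on the target cpu, roughly as sketched below (an assumption, not a quote of the kernel source).

/*
 * Sketch: IPI callback run on the cpu that owns the interrupt thread.
 * It re-runs the local scheduling path there, so i_running and the
 * livelock state are only ever tested on the owning cpu.
 */
static void
sched_ithd_remote(void *arg)
{
        sched_ithd_intern(arg);
}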
int
kthread_create_cpu(void (*func)(void *), void *arg, struct thread **tdp,
                   int cpu, const char *fmt, ...)
{
        thread_t td;
        __va_list ap;

        td = lwkt_alloc_thread(NULL, LWKT_THREAD_STACK, cpu, TDF_VERBOSE);
        if (tdp)
                *tdp = td;
        cpu_set_thread_handler(td, kthread_exit, func, arg);

        /*
         * Set up arg0 for 'ps' etc
         */
        __va_start(ap, fmt);
        kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
        __va_end(ap);
        td->td_ucred = crhold(proc0.p_ucred);

        /*
         * Schedule the thread to run
         */
        lwkt_schedule(td);
        return 0;
}
/*
 * Same as kthread_create() but you can specify a custom stack size.
 */
int
kthread_create_stk(void (*func)(void *), void *arg, struct thread **tdp,
                   int stksize, const char *fmt, ...)
{
        thread_t td;
        __va_list ap;

        td = lwkt_alloc_thread(NULL, stksize, -1, TDF_VERBOSE);
        if (tdp)
                *tdp = td;
        cpu_set_thread_handler(td, kthread_exit, func, arg);

        __va_start(ap, fmt);
        kvsnprintf(td->td_comm, sizeof(td->td_comm), fmt, ap);
        __va_end(ap);

        lwkt_schedule(td);
        return 0;
}
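For context, a driver could use these helpers along the following lines to start a worker thread pinned to a particular cpu. Everything in this sketch (the softc layout, the worker function, the "mydevd%d" name) is hypothetical and only illustrates the calling convention shown above.

/* Placeholder softc and worker; not from the source above. */
struct mydev_softc {
        struct thread   *worker_td;
        /* ... device state ... */
};

static void mydev_worker(void *arg);

/*
 * Hypothetical caller: create one worker thread for a device and pin
 * it to the given cpu.
 */
static int
mydev_start_worker(struct mydev_softc *sc, int cpu, int unit)
{
        int error;

        error = kthread_create_cpu(mydev_worker, sc, &sc->worker_td,
                                   cpu, "mydevd%d", unit);
        if (error)
                kprintf("mydev%d: unable to create worker thread\n", unit);
        return error;
}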
static void
lwkt_idleloop(void *dummy)
{
        globaldata_t gd = mycpu;

        DBPRINTF(("idlestart cpu %d pri %d (should be < 32) mpcount %d (should be 0)\n",
            gd->gd_cpuid, curthread->td_pri, curthread->td_mpcount));

        gd->gd_pid = getpid();

        for (;;) {
                /*
                 * If only our 'main' thread is left, schedule it.
                 */
                if (gd->gd_num_threads == gd->gd_sys_threads) {
                        int i;
                        globaldata_t tgd;

                        for (i = 0; i < ncpus; ++i) {
                                tgd = globaldata_find(i);
                                if (tgd->gd_num_threads != tgd->gd_sys_threads)
                                        break;
                        }
                        if (i == ncpus && (main_td.td_flags & TDF_RUNQ) == 0)
                                lwkt_schedule(&main_td);
                }

                /*
                 * Wait for an interrupt, aka wait for a signal or an upcall
                 * to occur, then switch away.
                 */
                crit_enter();
                if (gd->gd_runqmask || (curthread->td_flags & TDF_IDLE_NOHLT)) {
                        curthread->td_flags &= ~TDF_IDLE_NOHLT;
                } else {
                        printf("cpu %d halting\n", gd->gd_cpuid);
                        cpu_halt();
                        printf("cpu %d resuming\n", gd->gd_cpuid);
                }
                crit_exit();
                lwkt_switch();
        }
}
/*
 * Schedule the target thread.  If the message flags contain MSGF_NORESCHED
 * we tell the scheduler not to reschedule if td is at a higher priority.
 *
 * This routine is called even if the thread is already scheduled.
 */
static __inline void
_lwkt_schedule_msg(thread_t td, int flags)
{
        lwkt_schedule(td);
}
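Callers of this helper are typically thread-backed message ports: the putport path queues the message and then wakes the owning thread through it. The sketch below only illustrates that shape; the parameter names, the queueing step, and the return value are placeholders rather than the kernel's actual port code.

/*
 * Illustrative putmsg-style path: queue a message for the thread that
 * owns the port, then wake that thread.  'owner' and the queueing
 * step are placeholders; only the wakeup call mirrors the helper above.
 */
static int
example_putmsg(thread_t owner, lwkt_msg_t msg)
{
        /* ... enqueue msg on the owner's port (omitted) ... */
        _lwkt_schedule_msg(owner, msg->ms_flags);
        return EASYNC;
}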
void
acpi_task_thread_schedule(void)
{
        lwkt_schedule(acpi_task_td);
}