/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * The thread is named after the PFS ("<id>-<pfsname>"), with the
 * replication index appended when repidx is non-negative.  The new
 * thread begins executing func(thr) immediately (no TDF_NOSTART).
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	lockinit(&thr->lk, "h2thr", 0, 0);
	thr->pmp = pmp;
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	}
}
/*
 * Initialize the ACPI helper thread.
 *
 * Sets up the reply-only message port used to auto-free completed
 * ACPI task messages, then allocates and creates the task thread.
 * TDF_NOSTART leaves the thread unscheduled after creation.
 *
 * Always returns 0.
 */
int
acpi_task_thread_init(void)
{
	lwkt_initport_replyonly(&acpi_afree_rport, acpi_autofree_reply);

	/* Thread structure is supplied pre-zeroed rather than stack-based */
	acpi_task_td = kmalloc(sizeof(struct thread), M_DEVBUF,
			       M_INTWAIT | M_ZERO);
	lwkt_create(acpi_task_thread, NULL, NULL, acpi_task_td,
		    TDF_NOSTART, 0, "acpi_task");
	return (0);
}
int main(int ac, char **av) { thread_t td; thread_t td1; thread_t td2; uthread_init(); td = curthread; printf("mainthread %p crit_count %d nest %d mp_lock %d\n", td, td->td_pri / TDPRI_CRIT, td->td_mpcount, mp_lock ); lwkt_create(thread1, NULL, &td1, NULL, 0, -1, "thread1"); lwkt_create(thread2, NULL, &td2, NULL, 0, -1, "thread2"); printf("thread #1 %p #2 %p\n", td1, td2); printf("switching away from main (should come back before exit)\n"); lwkt_switch(); printf("Switched back to main, main Exiting\n"); exit(1); }
/*
 * Set up flusher state for a HAMMER mount: one master thread plus
 * HAMMER_MAX_FLUSHERS slave threads, each slave parked on the
 * ready list before its thread is created.
 */
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t slave;
	int slaveno;

	hmp->flusher.signal = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");

	for (slaveno = 0; slaveno < HAMMER_MAX_FLUSHERS; ++slaveno) {
		slave = kmalloc(sizeof(*slave), hmp->m_misc,
				M_WAITOK | M_ZERO);
		slave->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, slave, entry);
		lwkt_create(hammer_flusher_slave_thread, slave,
			    &slave->td, NULL, 0, -1, "hammer-S%d", slaveno);
	}
}
static void swi_softclock_setup(void *arg) { int cpu; int i; int target; /* * Figure out how large a callwheel we need. It must be a power of 2. * * ncallout is primarily based on available memory, don't explode * the allocations if the system has a lot of cpus. */ target = ncallout / ncpus + 16; callwheelsize = 1; while (callwheelsize < target) callwheelsize <<= 1; callwheelmask = callwheelsize - 1; /* * Initialize per-cpu data structures. */ for (cpu = 0; cpu < ncpus; ++cpu) { softclock_pcpu_t sc; sc = &softclock_pcpu_ary[cpu]; sc->callwheel = kmalloc(sizeof(*sc->callwheel) * callwheelsize, M_CALLOUT, M_WAITOK|M_ZERO); for (i = 0; i < callwheelsize; ++i) TAILQ_INIT(&sc->callwheel[i]); /* * Mark the softclock handler as being an interrupt thread * even though it really isn't, but do not allow it to * preempt other threads (do not assign td_preemptable). * * Kernel code now assumes that callouts do not preempt * the cpu they were scheduled on. */ lwkt_create(softclock_handler, sc, NULL, &sc->thread, TDF_NOSTART | TDF_INTTHREAD, cpu, "softclock %d", cpu); } }
static void kcollect_thread_init(void) { thread_t td = NULL; /* * Autosize sample retention (10 second interval) */ if ((int)kcollect_samples < 0) { if (kmem_lim_size() < 1024) kcollect_samples = 1024; else kcollect_samples = 8192; } if (kcollect_samples) { kcollect_ary = kmalloc(kcollect_samples * sizeof(kcollect_t), M_KCOLLECT, M_WAITOK | M_ZERO); lwkt_create(kcollect_thread, NULL, &td, NULL, 0, 0, "kcollect"); } }
/*
 * Register an interrupt handler for vector 'intr' on 'cpuid'.
 *
 * Builds an intrec describing the handler and appends it to the
 * vector's handler chain, creating the per-cpu emergency polling
 * thread and the per-vector interrupt thread on first use.  The
 * caller is temporarily migrated to the target cpu for the setup
 * (int_moveto_destcpu / int_moveto_origcpu bracket the work).
 *
 * intr_flags: INTR_MPSAFE, INTR_CLOCK (fast), INTR_NOENTROPY.
 * 'serializer' may be NULL; it is stored in the record as-is.
 *
 * Returns an opaque handle (the intrec) for later deregistration.
 */
void *
register_int(int intr, inthand2_t *handler, void *arg, const char *name,
	     struct lwkt_serialize *serializer, int intr_flags, int cpuid)
{
	struct intr_info *info;
	struct intrec **list;
	intrec_t rec;
	int orig_cpuid;

	KKASSERT(cpuid >= 0 && cpuid < ncpus);

	if (intr < 0 || intr >= MAX_INTS)
		panic("register_int: bad intr %d", intr);
	if (name == NULL)
		name = "???";
	info = &intr_info_ary[cpuid][intr];

	/*
	 * Construct an interrupt handler record
	 */
	rec = kmalloc(sizeof(struct intrec), M_DEVBUF, M_INTWAIT);
	rec->name = kmalloc(strlen(name) + 1, M_DEVBUF, M_INTWAIT);
	strcpy(rec->name, name);
	rec->info = info;
	rec->handler = handler;
	rec->argument = arg;
	rec->intr = intr;
	rec->intr_flags = intr_flags;
	rec->next = NULL;
	rec->serializer = serializer;

	/* Migrate to the target cpu for the remainder of the setup */
	int_moveto_destcpu(&orig_cpuid, cpuid);

	/*
	 * Create an emergency polling thread and set up a systimer to wake
	 * it up.  (td_kstack == NULL indicates the thread has not been
	 * created yet.)
	 */
	if (emergency_intr_thread[cpuid].td_kstack == NULL) {
		lwkt_create(ithread_emergency, NULL, NULL,
			    &emergency_intr_thread[cpuid],
			    TDF_NOSTART | TDF_INTTHREAD, cpuid,
			    "ithreadE %d", cpuid);
		systimer_init_periodic_nq(&emergency_intr_timer[cpuid],
					  emergency_intr_timer_callback,
					  &emergency_intr_thread[cpuid],
					  (emergency_intr_enable ?
					   emergency_intr_freq : 1));
	}

	/*
	 * Create an interrupt thread if necessary, leave it in an unscheduled
	 * state.  Software interrupts get a lower thread priority than
	 * hardware interrupts.
	 */
	if (info->i_state == ISTATE_NOTHREAD) {
		info->i_state = ISTATE_NORMAL;
		lwkt_create(ithread_handler, (void *)(intptr_t)intr, NULL,
			    &info->i_thread, TDF_NOSTART | TDF_INTTHREAD,
			    cpuid, "ithread%d %d", intr, cpuid);
		if (intr >= FIRST_SOFTINT)
			lwkt_setpri(&info->i_thread, TDPRI_SOFT_NORM);
		else
			lwkt_setpri(&info->i_thread, TDPRI_INT_MED);
		info->i_thread.td_preemptable = lwkt_preempt;
	}

	list = &info->i_reclist;

	/*
	 * Keep track of how many fast and slow interrupts we have.
	 * Set i_mplock_required if any handler in the chain requires
	 * the MP lock to operate.
	 */
	if ((intr_flags & INTR_MPSAFE) == 0)
		info->i_mplock_required = 1;
	if (intr_flags & INTR_CLOCK)
		++info->i_fast;
	else
		++info->i_slow;

	/*
	 * Enable random number generation keying off of this interrupt.
	 */
	if ((intr_flags & INTR_NOENTROPY) == 0 &&
	    info->i_random.sc_enabled == 0) {
		info->i_random.sc_enabled = 1;
		info->i_random.sc_intr = intr;
	}

	/*
	 * Add the record to the interrupt list.  Appended at the tail
	 * inside a critical section so the chain is never seen in an
	 * inconsistent state by an interrupt on this cpu.
	 */
	crit_enter();
	while (*list != NULL)
		list = &(*list)->next;
	*list = rec;
	crit_exit();

	/*
	 * Update max_installed_hard_intr to make the emergency intr poll
	 * a bit more efficient.
	 */
	if (intr < FIRST_SOFTINT) {
		if (max_installed_hard_intr[cpuid] <= intr)
			max_installed_hard_intr[cpuid] = intr + 1;
	}
	if (intr >= FIRST_SOFTINT)
		swi_info_ary[intr - FIRST_SOFTINT] = info;

	/*
	 * Setup the machine level interrupt vector.  Only done for hard
	 * interrupts, and only when this is the first handler installed
	 * on the vector.
	 */
	if (intr < FIRST_SOFTINT && info->i_slow + info->i_fast == 1)
		machintr_intr_setup(intr, intr_flags);

	/* Return the caller to its original cpu */
	int_moveto_origcpu(orig_cpuid, cpuid);

	return(rec);
}