/* * dmio_cleaner_init: * * Create cleaner thread. */ static int dmio_cleaner_init(void) { return workqueue_create(&dmio_cleaner, "dmioclean", dmio_usrreq_fini1, NULL, PWAIT, IPL_SOFTCLOCK, 0); }
void cxgb_make_task(void *context) { struct cxgb_task *w = (struct cxgb_task *)context; // we can only use workqueue_create() once the system is up and running workqueue_create(&w->wq, w->name, w->func, w->context, PRIBIO, IPL_NET, 0); // printf("======>> create workqueue for %s %p\n", w->name, w->wq); }
/*
 * physio_init:
 *
 *	Create the MP-safe "physiod" workqueue used to complete
 *	physio requests via physio_done.  Must only be called once.
 */
static int
physio_init(void)
{

	KASSERT(physio_workqueue == NULL);

	return workqueue_create(&physio_workqueue, "physiod",
	    physio_done, NULL, PRI_BIO, IPL_BIO, WQ_MPSAFE);
}
/* * Reinitialize inode hash table. */ void nfs_node_init(void) { pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl", &pool_allocator_nointr, IPL_NONE); pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl", &pool_allocator_nointr, IPL_NONE); if (workqueue_create(&nfs_sillyworkq, "nfssilly", nfs_sillyworker, NULL, PRI_NONE, IPL_NONE, 0) != 0) { panic("nfs_node_init"); } }
void kbd_init() { assert(!did_init); preemption_disable(); g_kbd_irq_thread = kernel_thread_create(THREAD_PRIORITY_MAX, kbd_irq_thread_proc, NULL, "kbd-irq"); kbd_queue = workqueue_create(g_kbd_irq_thread); g_kbd_test_thread = kernel_thread_create(THREAD_PRIORITY_MAX, test_thread, NULL, "kbd-test-thread"); if (!g_kbd_irq_thread || !kbd_queue || !g_kbd_test_thread) fatal("out of memory"); list_init(&g_kbd_waiters, offsetof(KbdKeyMessage, kbd_waiters)); spinlock_init(&buffer_lock, "Keyboard buffer lock."); buffer_vc = virtual_console_get_current(); thread_start(g_kbd_irq_thread); thread_start(g_kbd_test_thread); did_init = true; preemption_enable(); }
void emdtv_ir_attach(struct emdtv_softc *sc) { struct ir_attach_args ia; usb_endpoint_descriptor_t *ed; usbd_status status; int err; ed = usbd_interface2endpoint_descriptor(sc->sc_iface, 0); if (ed == NULL) return; status = usbd_open_pipe_intr(sc->sc_iface, ed->bEndpointAddress, USBD_EXCLUSIVE_USE, &sc->sc_intr_pipe, sc, &sc->sc_intr_buf, 1, emdtv_ir_intr, USBD_DEFAULT_INTERVAL); if (status != USBD_NORMAL_COMPLETION) { aprint_error_dev(sc->sc_dev, "couldn't open intr pipe: %s\n", usbd_errstr(status)); return; } mutex_init(&sc->sc_ir_mutex, MUTEX_DEFAULT, IPL_VM); err = workqueue_create(&sc->sc_ir_wq, "emdtvir", emdtv_ir_worker, sc, PRI_NONE, IPL_VM, 0); if (err) aprint_error_dev(sc->sc_dev, "couldn't create workqueue: %d\n", err); ia.ia_type = IR_TYPE_CIR; ia.ia_methods = &emdtv_ir_methods; ia.ia_handle = sc; sc->sc_cirdev = config_found_ia(sc->sc_dev, "irbus", &ia, ir_print); }
struct workqueue_struct * alloc_ordered_workqueue(const char *name, int linux_flags) { struct workqueue_struct *wq; int flags = WQ_MPSAFE; int error; KASSERT(linux_flags == 0); wq = kmem_alloc(sizeof(*wq), KM_SLEEP); error = workqueue_create(&wq->wq_workqueue, name, &linux_worker, wq, PRI_NONE, IPL_VM, flags); if (error) { kmem_free(wq, sizeof(*wq)); return NULL; } mutex_init(&wq->wq_lock, MUTEX_DEFAULT, IPL_VM); cv_init(&wq->wq_cv, name); TAILQ_INIT(&wq->wq_delayed); wq->wq_current_work = NULL; return wq; }
void tm_init_multitasking(void) { printk(KERN_DEBUG, "[sched]: Starting multitasking system...\n"); sysgate_page = mm_physical_allocate(PAGE_SIZE, true); mm_physical_memcpy((void *)sysgate_page, (void *)signal_return_injector, MEMMAP_SYSGATE_ADDRESS_SIZE, PHYS_MEMCPY_MODE_DEST); process_table = hash_create(0, 0, 128); process_list = linkedlist_create(0, LINKEDLIST_MUTEX); mutex_create(&process_refs_lock, 0); mutex_create(&thread_refs_lock, 0); thread_table = hash_create(0, 0, 128); struct thread *thread = kmalloc(sizeof(struct thread)); struct process *proc = kernel_process = kmalloc(sizeof(struct process)); proc->refs = 2; thread->refs = 1; hash_insert(process_table, &proc->pid, sizeof(proc->pid), &proc->hash_elem, proc); hash_insert(thread_table, &thread->tid, sizeof(thread->tid), &thread->hash_elem, thread); linkedlist_insert(process_list, &proc->listnode, proc); valloc_create(&proc->mmf_valloc, MEMMAP_MMAP_BEGIN, MEMMAP_MMAP_END, PAGE_SIZE, 0); linkedlist_create(&proc->threadlist, 0); mutex_create(&proc->map_lock, 0); mutex_create(&proc->stacks_lock, 0); mutex_create(&proc->fdlock, 0); hash_create(&proc->files, HASH_LOCKLESS, 64); proc->magic = PROCESS_MAGIC; blocklist_create(&proc->waitlist, 0, "process-waitlist"); mutex_create(&proc->fdlock, 0); memcpy(&proc->vmm_context, &kernel_context, sizeof(kernel_context)); thread->process = proc; /* we have to do this early, so that the vmm system can use the lock... 
*/ thread->state = THREADSTATE_RUNNING; thread->magic = THREAD_MAGIC; workqueue_create(&thread->resume_work, 0); thread->kernel_stack = (addr_t)&initial_kernel_stack; spinlock_create(&thread->status_lock); primary_cpu->active_queue = tqueue_create(0, 0); primary_cpu->idle_thread = thread; primary_cpu->numtasks=1; ticker_create(&primary_cpu->ticker, 0); workqueue_create(&primary_cpu->work, 0); tm_thread_add_to_process(thread, proc); tm_thread_add_to_cpu(thread, primary_cpu); atomic_fetch_add_explicit(&running_processes, 1, memory_order_relaxed); atomic_fetch_add_explicit(&running_threads, 1, memory_order_relaxed); set_ksf(KSF_THREADING); *(struct thread **)(thread->kernel_stack) = thread; primary_cpu->flags |= CPU_RUNNING; #if CONFIG_MODULES loader_add_kernel_symbol(tm_thread_delay_sleep); loader_add_kernel_symbol(tm_thread_delay); loader_add_kernel_symbol(tm_timing_get_microseconds); loader_add_kernel_symbol(tm_thread_set_state); loader_add_kernel_symbol(tm_thread_exit); loader_add_kernel_symbol(tm_thread_poke); loader_add_kernel_symbol(tm_thread_block); loader_add_kernel_symbol(tm_thread_got_signal); loader_add_kernel_symbol(tm_thread_unblock); loader_add_kernel_symbol(tm_blocklist_wakeall); loader_add_kernel_symbol(kthread_create); loader_add_kernel_symbol(kthread_wait); loader_add_kernel_symbol(kthread_join); loader_add_kernel_symbol(kthread_kill); loader_add_kernel_symbol(tm_schedule); loader_add_kernel_symbol(arch_tm_get_current_thread); #endif }