void kmain(s64 magic, s64 info)
{
    //vga_clear(COLOR_BLACK);
    idt_init();
    isr_init();
    serial_init();
    set_debug_traps();
    BREAKPOINT();
    cpuid_print();
    multiboot(magic, info);
    kmem_map();
    page_init();
    kmalloc_init();
    //vesa_init();
    root_init();
    pci_init();
    vm_init();
    syscall_init();
    timer_init();
    kbd_init();
    //mouse_init();
    console_init();

    create_kthread(NULL, idle_thread, THREAD_PRI_LOW, NULL, NULL);
    create_kthread(NULL, init_thread, THREAD_PRI_NORMAL, NULL, NULL);

    thread_schedule();
}
void init_proc()
{
    list_init(&ready);
    list_init(&block);
    list_init(&free);
    list_add_after(&ready, &idle.list);

    int i = 0;
    for (; i < KERNEL_PCB_MAX; i++) {
        list_add_after(&free, &(PCB_pool[i].list));
    }

    PCB_of_thread_A = create_kthread(A, &ready);
    PCB_of_thread_B = create_kthread(B, &block);
    PCB_of_thread_C = create_kthread(C, &block);
    PCB_of_thread_D = create_kthread(D, &block);

    /*
    for (i = 0; i < 7; i++) {
        create_kthread(print_ch, 'a' + i);
    }
    */
}
/*
 * Functionality for pthread_create()
 */
int mypthread_create(mypthread_t *thread, const mypthread_attr_t *attr,
                     void *(*start_routine)(void *), void *arg)
{
    if (mypthread_count == 0) {
        // mypthread_create was called for the first time:
        // initialize the locks used by the entire thread library.
        init_locks();

        // Create an entry for the calling thread (assumed to be main()).
        main_thread.th_id = ++mypthread_th_gen;
        mypthread_count++;
        main_thread.ctx = (ucontext_t *) malloc(sizeof(ucontext_t));
        main_thread.ctx->uc_stack.ss_sp = (char *) malloc(sizeof(char) * 4096);
        main_thread.ctx->uc_stack.ss_size = 4096;
        main_thread.state = PS_RUNNING;

        // Insert it into the kthread list.
        mykthread_t *k_th = (mykthread_t *) malloc(sizeof(mykthread_t));
        k_th->kth_id = gettid();
        k_th->th = &main_thread;

        sem_wait(&kthread_sem);
        // Assume there is only one kthread initially.
        current_kthread_count = 1;
        mykthread_add(k_th);
        sem_post(&kthread_sem);
    }

    // Create a node for the new thread.
    ucontext_t *context = (ucontext_t *) malloc(sizeof(ucontext_t));
    thread->ctx = context;
    getcontext(thread->ctx);
    thread->ctx->uc_stack.ss_sp = (char *) malloc(sizeof(char) * 4096);
    thread->ctx->uc_stack.ss_size = 4096;
    thread->state = PS_ACTIVE;
    makecontext(thread->ctx, (void (*)()) start_routine, 1, arg);

    sem_wait(&mypthread_sem);
    thread->th_id = ++mypthread_th_gen;
    mypthread_count++;
    mypthread_enqueue(thread);
    sem_post(&mypthread_sem);

    // Create a new kthread according to the pthread library policy.
    bool create_thread_flag;
    if (mypthread_policy == KLMATCHCORES || mypthread_policy == KLMATCHHYPER) {
        sem_wait(&kthread_sem);
        create_thread_flag = false;
        if (current_kthread_count < max_kthread_count) {
            current_kthread_count++;
            create_thread_flag = true;
        }
        sem_post(&kthread_sem);
        if (create_thread_flag) {
            create_kthread();
        }
    } else if (mypthread_policy == KLALWAYS) {
        sem_wait(&kthread_sem);
        current_kthread_count++;
        sem_post(&kthread_sem);
        create_kthread();
    }
    return 0;
}
void test_setup(void)
{
    init_sem(&full, 0);
    init_sem(&empty, NBUF);
    init_sem(&mutex_r, 1);
    init_sem(&mutex_w, 1);

    int i;
    for (i = 0; i < NR_PROD; i++) {
        wakeup(create_kthread(test_producer));
    }
    for (i = 0; i < NR_CONS; i++) {
        wakeup(create_kthread(test_consumer));
    }
}
void init_getty(void)
{
    int i;
    for (i = 0; i < NR_TTY - 1; i++) {
        add2wake(create_kthread(getty));
    }
}
int kthreadd(void *unused)
{
    struct task_struct *tsk = current;

    /* Setup a clean context for our children to inherit. */
    set_task_comm(tsk, "kthreadd");
    ignore_signals(tsk);
    set_cpus_allowed_ptr(tsk, cpu_all_mask);

    current->flags |= PF_NOFREEZE;

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (list_empty(&kthread_create_list))
            schedule();
        __set_current_state(TASK_RUNNING);

        spin_lock(&kthread_create_lock);
        while (!list_empty(&kthread_create_list)) {
            struct kthread_create_info *create;

            create = list_entry(kthread_create_list.next,
                                struct kthread_create_info, list);
            list_del_init(&create->list);
            spin_unlock(&kthread_create_lock);

            create_kthread(create);

            spin_lock(&kthread_create_lock);
        }
        spin_unlock(&kthread_create_lock);
    }

    return 0;
}
void init_pm()
{
    PCB *p = create_kthread(pm_thread);
    PM = p->pid;
    hal_register("pm", PM, 0);
    wakeup(p);
}
void init_ide(void)
{
    cache_init();
    add_irq_handle(14, ide_intr);
    add_irq_handle(0, time_intr);

    PCB *p = create_kthread(ide_driver_thread, 0, NULL);
    IDE = p->pid;
    hal_register("hda", IDE, 0);
    wakeup(p);
}
static pid_t _do_fork(PCB *pcb)
{
    PCB *new_pcb = create_kthread(NULL);
    //Sem *old_sem, *new_sem;
    //Msg *old_msg, *new_msg;
    //struct ListHead *ptr;
    /* The guide says that all semaphores and the message queue are empty
     * for a user process, so sem_list, msg_list and msg_free need no copy. */
    //state: block, OK
    new_pcb->intr_counter = pcb->intr_counter;
    //pid
    new_pcb->parent = pcb->pid;
    //TODO: CR3
    if (copy_vm_space(new_pcb, pcb) != 0)
        return -1;  // error
    new_pcb->counter = pcb->counter;
    new_pcb->ppcb = pcb;
    //msg_pool: no need to copy

    // Copy the kernel stack; this completes copying the context.
    memcpy(new_pcb->kstack, pcb->kstack, sizeof(pcb->kstack));
    new_pcb->tf = new_pcb->kstack + ((char *)(pcb->tf) - pcb->kstack); // same relative offset

    // Fix up the saved ebp of each trap frame so the frame chain points
    // into the new kernel stack instead of the parent's.
    struct TrapFrame *otf = (struct TrapFrame *) pcb->tf;
    struct TrapFrame *ntf = (struct TrapFrame *) new_pcb->tf;
    uint32_t *nebp, *oebp, offset;
    oebp = &otf->ebp;
    nebp = &ntf->ebp;
    while (*oebp > KOFFSET) { /* walk frames until we reach the user-space stack */
        offset = *oebp - (uint32_t) pcb->kstack;
        *nebp = (uint32_t)(new_pcb->kstack) + offset;
        oebp = (uint32_t *) *oebp;
        nebp = (uint32_t *) *nebp;
    }
    return new_pcb->pid;
}
void kmain(struct multiboot_info *mbt)
{
    vga_init();
    gdt_install();
    idt_install();
    isr_install();
    irq_install();
    syscalls_install();

    puts_c(__kernel_name " kernel v" __kernel_version_str "\n\n",
           COLOR_LIGHT_BLUE, COLOR_DEFAULT_BG);

    uint64_t mem;
    get_multiboot_info(mbt, &mem);

    extern uint32_t _kernel_memory_end[];
    kprintf("End of kernel's memory: 0x%x\n",
            (uint64_t) (uint32_t) _kernel_memory_end);
    kprintf("Memory:\n%l B\n%l KB\n%l MB\n%l GB\n",
            mem, mem / 1024, mem / 1024 / 1024, mem / 1024 / 1024 / 1024);

    init_paging();

    map_page(0xFD7FF000, 0x60000, 3);
    int *p = (int *) 0xFD7FF000;
    *p = 12;
    kprintf("*(0x%x) = %i\n", (uint64_t) (uint32_t) p, *p);

    map_page(0x10000000, 0x60000, 3);
    int *p2 = (int *) 0x10000000;
    kprintf("*(0x%x) = %i\n", (uint64_t) (uint32_t) p2, *p2);

    print_next_available_page();

    uint32_t ap = allocate_page(203);
    map_page(ap, 0x60000, 3);
    int *p3 = (int *) ap;
    kprintf("*(0x%x) = %i\n", (uint64_t) ap, *p3);

    print_next_available_page();

    ap = allocate_page(203);
    kprintf("ap = 0x%x\n", (uint32_t) ap);

    struct kthread thread;
    create_kthread(thread_test, &thread);
    start_kthread(&thread);
    kprintf("Returned from thread.\n");

    _asm_print_test();
    return;
}
//#define ENTRY 0x8048074
PCB *create_process(uint8_t *buf)
{
    struct ELFHeader *elf = (struct ELFHeader *) buf;
    struct ProgramHeader *ph, *eph;
    uint32_t va, eva, pa, len, sublen, tmpva, offset;
    uint8_t *kst;
    int ret;

    PCB *pcb = create_kthread((void *) NULL);
    ph = (struct ProgramHeader *)((char *) elf + elf->phoff);
    eph = ph + elf->phnum;
    assert(pcb != NULL);
    printk("in create process pid=%d\n", pcb->pid);

    for (; ph < eph; ph++) {
        if (ph->type != PT_LOAD)
            continue;
        va = ph->vaddr;   // in boot/main.c this is paddr; it may be a bug
        len = ph->memsz;
        eva = va + len;
        printk("segment start = %x\n", va);
        printk("segment end = %x\n", eva);

        // Request new pages for [ph->vaddr, ph->vaddr + ph->memsz).
        printk("before alloc pages...va=%x\n", va);
        ret = alloc_pages(pcb, va, len);
        printk("after alloc pages\n");
        assert(ret == 0);   // must succeed
        printk("after alloc pages and assertion\n");

        // Translate the segment's virtual addresses to physical addresses
        // and copy the file contents page by page.
        offset = ph->off;
        for (; va < eva;) {
            pa = pcb_va_to_pa(pcb, va);
            printk("physical address = %x\n", pa);
            kst = (uint8_t *) pa_to_va(pa);   // kernel virtual address
            printk("virtual address = %x\n", kst);
            tmpva = va;
            va = to_next_page(va) < eva ? to_next_page(va) : eva;   // loop ends when va == eva
            sublen = va - tmpva;
            memcpy(kst, (char *) elf + offset, sublen);
            offset += sublen;
        }
    }

    printk("before alloc stack\n");
    ret = alloc_user_stack(pcb);
    assert(ret == 0);
    printk("after alloc user stack\n");

    struct TrapFrame *tf = (struct TrapFrame *)(pcb->tf);
    tf->eip = elf->entry;   // ENTRY
    // Set up the user segment selectors and the user stack.
    tf->cs = SELECTOR_USER(SEG_USER_CODE);
    tf->ds = SELECTOR_USER(SEG_USER_DATA);
    tf->irq = 1000;
    tf->ss = SELECTOR_USER(SEG_USER_DATA);
    tf->esp = USER_STACK_END;
    // Should I set tf->ebp?
    return pcb;
}
void init_tty_echo(void)
{
    int i;
    for (i = 0; i < NR_TTY; i++) {
        create_kthread(echo);
    }
}
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
                                   void *data,
                                   const char namefmt[],
                                   ...)
{
    struct kthread_create_info create;

    create.threadfn = threadfn;
    create.data = data;
    init_completion(&create.done);

    spin_lock(&kthread_create_lock);
    list_add_tail(&create.list, &kthread_create_list);
    spin_unlock(&kthread_create_lock);

    wake_up_process(kthreadd_task);
    wait_for_completion(&create.done);

    if (!IS_ERR(create.result)) {
        struct sched_param param = { .sched_priority = 0 };
        va_list args;

        va_start(args, namefmt);
        vsnprintf(create.result->comm, sizeof(create.result->comm),
                  namefmt, args);
        va_end(args);
        /*
         * root may have changed our (kthreadd's) priority or CPU mask.
         * The kernel thread should not inherit these properties.
         */
        sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
        set_cpus_allowed_ptr(create.result, cpu_all_mask);
    }
    return create.result;
}
EXPORT_SYMBOL(kthread_create);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
    struct kthread *kthread;
    int ret;

    trace_sched_kthread_stop(k);
    get_task_struct(k);

    kthread = to_kthread(k);
    barrier(); /* it might have exited */
    if (k->vfork_done != NULL) {
        kthread->should_stop = 1;
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
    }
    ret = k->exit_code;

    put_task_struct(k);
    trace_sched_kthread_stop_ret(ret);

    return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
    struct task_struct *tsk = current;

    /* Setup a clean context for our children to inherit. */
    set_task_comm(tsk, "kthreadd");
    ignore_signals(tsk);
    set_cpus_allowed_ptr(tsk, cpu_all_mask);
    set_mems_allowed(node_states[N_HIGH_MEMORY]);

    current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (list_empty(&kthread_create_list))
            schedule();
        __set_current_state(TASK_RUNNING);

        spin_lock(&kthread_create_lock);
        while (!list_empty(&kthread_create_list)) {
            struct kthread_create_info *create;

            create = list_entry(kthread_create_list.next,
                                struct kthread_create_info, list);
            list_del_init(&create->list);
            spin_unlock(&kthread_create_lock);

            create_kthread(create);

            spin_lock(&kthread_create_lock);
        }
        spin_unlock(&kthread_create_lock);
    }

    return 0;
}

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker. The started kthread will process work_list until
 * it is stopped with kthread_stop(). A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time. A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
    struct kthread_worker *worker = worker_ptr;
    struct kthread_work *work;

    WARN_ON(worker->task);
    worker->task = current;
repeat:
    set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

    if (kthread_should_stop()) {
        __set_current_state(TASK_RUNNING);
        spin_lock_irq(&worker->lock);
        worker->task = NULL;
        spin_unlock_irq(&worker->lock);
        return 0;
    }

    work = NULL;
    spin_lock_irq(&worker->lock);
    if (!list_empty(&worker->work_list)) {
        work = list_first_entry(&worker->work_list,
                                struct kthread_work, node);
        list_del_init(&work->node);
    }
    worker->current_work = work;
    spin_unlock_irq(&worker->lock);

    if (work) {
        __set_current_state(TASK_RUNNING);
        work->func(work);
    } else if (!freezing(current))
        schedule();

    try_to_freeze();
    goto repeat;
}
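/*
 * A minimal usage sketch of the lifecycle documented above: kthread_create()
 * returns a stopped thread, wake_up_process() starts it, and threadfn()
 * returns once kthread_stop() has made kthread_should_stop() true.  The
 * names worker_fn, worker_task, start_worker and stop_worker are
 * illustrative assumptions, not taken from the snippets in this collection.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker_task;   /* hypothetical example thread */

static int worker_fn(void *data)
{
    /* Run until kthread_stop() is called on this task. */
    while (!kthread_should_stop()) {
        /* ... periodic work using @data would go here ... */
        msleep(100);
    }
    return 0;   /* this value becomes the return value of kthread_stop() */
}

static int start_worker(void)
{
    worker_task = kthread_create(worker_fn, NULL, "worker/%d", 0);
    if (IS_ERR(worker_task))
        return PTR_ERR(worker_task);
    wake_up_process(worker_task);   /* the thread is created stopped */
    return 0;
}

static void stop_worker(void)
{
    kthread_stop(worker_task);   /* wakes the thread and waits for it to exit */
}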
/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
                                   void *data,
                                   const char namefmt[],
                                   ...)
{
    struct kthread_create_info create;

    create.threadfn = threadfn;
    create.data = data;
    init_completion(&create.done);

    spin_lock(&kthread_create_lock);
    list_add_tail(&create.list, &kthread_create_list);
    spin_unlock(&kthread_create_lock);

    wake_up_process(kthreadd_task);
    wait_for_completion(&create.done);

    if (!IS_ERR(create.result)) {
        struct sched_param param = { .sched_priority = 0 };
        va_list args;

        va_start(args, namefmt);
        vsnprintf(create.result->comm, sizeof(create.result->comm),
                  namefmt, args);
        va_end(args);

        int policy = SCHED_NORMAL;
#ifdef CONFIG_TIVO
        int i;
        int bFound = 0;

        for (i = 0; i < sizeof(s_tvKthreadInfoTable) / sizeof(TvKthreadInfo); i++) {
            if (!strcmp(s_tvKthreadInfoTable[i].name, create.result->comm)) {
                if (s_tvKthreadInfoTable[i].policy != -1) {
                    policy = s_tvKthreadInfoTable[i].policy;
                    param.sched_priority = s_tvKthreadInfoTable[i].rt_priority;
                }
                bFound = 1;
                break;
            }
        }
        if (!bFound) {
            printk("--- Unknown kthread %s is launched?\n", create.result->comm);
        }
#endif
        /*
         * root may have changed our (kthreadd's) priority or CPU mask.
         * The kernel thread should not inherit these properties, and should
         * use specific RT priorities for some threads.
         */
        sched_setscheduler_nocheck(create.result, policy, &param);
        set_user_nice(create.result, KTHREAD_NICE_LEVEL);
        set_cpus_allowed_ptr(create.result, cpu_all_mask);
    }
    return create.result;
}
EXPORT_SYMBOL(kthread_create);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @k: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *k, unsigned int cpu)
{
    /* Must have done schedule() in kthread() before we set_task_cpu */
    if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
        WARN_ON(1);
        return;
    }
    set_task_cpu(k, cpu);
    k->cpus_allowed = cpumask_of_cpu(cpu);
    k->rt.nr_cpus_allowed = 1;
    k->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
    struct kthread *kthread;
    int ret;

    trace_sched_kthread_stop(k);
    get_task_struct(k);

    kthread = to_kthread(k);
    barrier(); /* it might have exited */
    if (k->vfork_done != NULL) {
        kthread->should_stop = 1;
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
    }
    ret = k->exit_code;

    put_task_struct(k);
    trace_sched_kthread_stop_ret(ret);

    return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
    struct task_struct *tsk = current;

    /* Setup a clean context for our children to inherit. */
    set_task_comm(tsk, "kthreadd");
    ignore_signals(tsk);
    set_user_nice(tsk, KTHREAD_NICE_LEVEL);
    set_cpus_allowed_ptr(tsk, cpu_all_mask);
    set_mems_allowed(node_possible_map);

    current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (list_empty(&kthread_create_list))
            schedule();
        __set_current_state(TASK_RUNNING);

        spin_lock(&kthread_create_lock);
        while (!list_empty(&kthread_create_list)) {
            struct kthread_create_info *create;

            create = list_entry(kthread_create_list.next,
                                struct kthread_create_info, list);
            list_del_init(&create->list);
            spin_unlock(&kthread_create_lock);

            create_kthread(create);

            spin_lock(&kthread_create_lock);
        }
        spin_unlock(&kthread_create_lock);
    }

    return 0;
}
void init_proc()
{
    list_init(&ready);
    list_init(&block);
    list_init(&free);

    int i = 0;
    for (; i < PCB_NUM; i++)
        list_add_before(&free, &proc_pool[i].list);

    init_driver();
    init_fm();

    //PCB *ptest = create_kthread(drivertest, 0, NULL);
    //wakeup(ptest);

    /**
    pa = create_kthread(A, 0, NULL);
    pb = create_kthread(B, 0, NULL);
    pc = create_kthread(C, 0, NULL);
    pd = create_kthread(D, 0, NULL);
    pe = create_kthread(E, 0, NULL);
    **/

    timertest = create_kthread(timer_test, 0, NULL);
    fmtest = create_kthread(fm_test, 0, NULL);
    //(fmtest->pool_mutex).pid = fmtest->pid;

    /**
    wakeup(pa);
    wakeup(pb);
    wakeup(pc);
    wakeup(pd);
    wakeup(pe);
    **/
    wakeup(timertest);
    wakeup(fmtest);

    /**
    pa = create_kthread(print_ch, 'a', &pb);
    printk("pa : %d ,pa->tf : %x\n", pa->pid, pa->tf);
    list_add_before(&ready, &(pa->list));
    printk("ready:\n");
    ListHead *p1;
    list_foreach(p1, &ready)
        printk("id : %d ,tf : %x\n",
               ((PCB *)(list_entry(p1, PCB, list)))->pid,
               ((PCB *)(list_entry(p1, PCB, list)))->tf);
    printk("\n-----------\n");

    pb = create_kthread(print_ch, 'b', &pc);
    printk("pb : %d ,pb->tf : %x\n", pb->pid, pb->tf);
    list_add_before(&block, &(pb->list));

    pc = create_kthread(print_ch, 'c', &pd);
    printk("pc : %d ,pc->tf : %x\n", pc->pid, pc->tf);
    list_add_before(&block, &(pc->list));

    pd = create_kthread(print_ch, 'd', &pa);
    printk("pd : %d ,pd->tf : %x\n", pd->pid, pd->tf);
    list_add_before(&block, &(pd->list));

    printk("block:\n");
    ListHead *p2;
    list_foreach(p2, &block)
        printk("id : %d ,tf : %x\n",
               ((PCB *)(list_entry(p2, PCB, list)))->pid,
               ((PCB *)(list_entry(p2, PCB, list)))->tf);
    printk("\n---------\n");
    **/

    //test_setup();
}
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data,
                                           int node,
                                           const char namefmt[],
                                           ...)
{
    struct kthread_create_info create;

    create.threadfn = threadfn;
    create.data = data;
    create.node = node;
    init_completion(&create.done);

    spin_lock(&kthread_create_lock);
    list_add_tail(&create.list, &kthread_create_list);
    spin_unlock(&kthread_create_lock);

    wake_up_process(kthreadd_task);
    wait_for_completion(&create.done);

    if (!IS_ERR(create.result)) {
        static const struct sched_param param = { .sched_priority = 0 };
        va_list args;

        va_start(args, namefmt);
        vsnprintf(create.result->comm, sizeof(create.result->comm),
                  namefmt, args);
        va_end(args);

        sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
        set_cpus_allowed_ptr(create.result, cpu_all_mask);
    }
    return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

void kthread_bind(struct task_struct *p, unsigned int cpu)
{
    if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
        WARN_ON(1);
        return;
    }

    do_set_cpus_allowed(p, cpumask_of(cpu));
    p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

int kthread_stop(struct task_struct *k)
{
    struct kthread *kthread;
    int ret;

    trace_sched_kthread_stop(k);
    get_task_struct(k);

    kthread = to_kthread(k);
    barrier();
    if (k->vfork_done != NULL) {
        kthread->should_stop = 1;
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
    }
    ret = k->exit_code;

    put_task_struct(k);
    trace_sched_kthread_stop_ret(ret);

    return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
    struct task_struct *tsk = current;

    set_task_comm(tsk, "kthreadd");
    ignore_signals(tsk);
    set_cpus_allowed_ptr(tsk, cpu_all_mask);
    set_mems_allowed(node_states[N_HIGH_MEMORY]);

    current->flags |= PF_NOFREEZE;

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (list_empty(&kthread_create_list))
            schedule();
        __set_current_state(TASK_RUNNING);

        spin_lock(&kthread_create_lock);
        while (!list_empty(&kthread_create_list)) {
            struct kthread_create_info *create;

            create = list_entry(kthread_create_list.next,
                                struct kthread_create_info, list);
            list_del_init(&create->list);
            spin_unlock(&kthread_create_lock);

            create_kthread(create);

            spin_lock(&kthread_create_lock);
        }
        spin_unlock(&kthread_create_lock);
    }

    return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
    spin_lock_init(&worker->lock);
    lockdep_set_class_and_name(&worker->lock, key, name);
    INIT_LIST_HEAD(&worker->work_list);
    worker->task = NULL;
}
    kmem_cache_free(ide_bio_cache, bio);
}

void bio_submit(struct bio *bio)
{
    mutex_lock(&ide_bio_queue_mutex);
    list_add_tail(&bio->link, &ide_bio_queue);
    condition_notify(&ide_bio_queue_condition);
    mutex_unlock(&ide_bio_queue_mutex);
}

void bio_wait(struct bio *bio)
{
    mutex_lock(&bio->mutex);
    while (bio->status == BIO_NONE)
        condition_wait(&bio->mutex, &bio->cond);
    mutex_unlock(&bio->mutex);
}

void setup_ide(void)
{
    DBG_ASSERT((ide_bio_cache = KMEM_CACHE(struct bio)) != 0);
    DBG_ASSERT(create_kthread(&process_bio_queue, 0) >= 0);

#ifdef CONFIG_IDE_TEST
    void ide_test(void);
    ide_test();
#endif /* CONFIG_IDE_TEST */
}
void init_tty(void)
{
    add_irq_handle(1, send_keymsg);   // on IRQ 1, call send_keymsg
    init_console();
    TTY = create_kthread(ttyd)->pid;
}
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data,
                                           int node,
                                           const char namefmt[],
                                           ...)
{
    struct kthread_create_info create;

    create.threadfn = threadfn;
    create.data = data;
    create.node = node;
    init_completion(&create.done);

    spin_lock(&kthread_create_lock);
    list_add_tail(&create.list, &kthread_create_list);
    spin_unlock(&kthread_create_lock);

    wake_up_process(kthreadd_task);
    wait_for_completion(&create.done);

    if (!IS_ERR(create.result)) {
        static const struct sched_param param = { .sched_priority = 0 };
        va_list args;

        va_start(args, namefmt);
        vsnprintf(create.result->comm, sizeof(create.result->comm),
                  namefmt, args);
        va_end(args);
        /*
         * root may have changed our (kthreadd's) priority or CPU mask.
         * The kernel thread should not inherit these properties.
         */
        sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
        set_cpus_allowed_ptr(create.result, cpu_all_mask);
    }
    return create.result;
}
EXPORT_SYMBOL(kthread_create_on_node);

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
    /* Must have done schedule() in kthread() before we set_task_cpu */
    if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
        WARN_ON(1);
        return;
    }

    /* It's safe because the task is inactive. */
    do_set_cpus_allowed(p, cpumask_of(cpu));
    p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
    struct kthread *kthread;
    int ret;

    trace_sched_kthread_stop(k);
    get_task_struct(k);

    kthread = to_kthread(k);
    barrier(); /* it might have exited */
    if (k->vfork_done != NULL) {
        kthread->should_stop = 1;
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
    }
    ret = k->exit_code;

    put_task_struct(k);
    trace_sched_kthread_stop_ret(ret);

    return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
    struct task_struct *tsk = current;

    /* Setup a clean context for our children to inherit. */
    set_task_comm(tsk, "kthreadd");
    ignore_signals(tsk);
    set_cpus_allowed_ptr(tsk, cpu_all_mask);
    set_mems_allowed(node_states[N_HIGH_MEMORY]);

    current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (list_empty(&kthread_create_list))
            schedule();
        __set_current_state(TASK_RUNNING);

        spin_lock(&kthread_create_lock);
        while (!list_empty(&kthread_create_list)) {
            struct kthread_create_info *create;

            create = list_entry(kthread_create_list.next,
                                struct kthread_create_info, list);
            list_del_init(&create->list);
            spin_unlock(&kthread_create_lock);

            create_kthread(create);

            spin_lock(&kthread_create_lock);
        }
        spin_unlock(&kthread_create_lock);
    }

    return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
    spin_lock_init(&worker->lock);
    lockdep_set_class_and_name(&worker->lock, key, name);
    INIT_LIST_HEAD(&worker->work_list);
    worker->task = NULL;
}
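/*
 * A minimal sketch of the kthread_worker pattern documented with
 * kthread_worker_fn() earlier in this collection: one kthread runs
 * kthread_worker_fn() and executes queued kthread_works.  It assumes the
 * companion helpers init_kthread_worker(), init_kthread_work() and
 * queue_kthread_work() from the same kthread.h of this kernel era; the
 * names my_worker, my_work and my_work_fn are illustrative only.
 */
#include <linux/kthread.h>
#include <linux/err.h>

static struct kthread_worker my_worker;
static struct kthread_work my_work;

static void my_work_fn(struct kthread_work *work)
{
    /* ... one unit of deferred work, executed in the worker thread ... */
}

static struct task_struct *start_my_worker(void)
{
    struct task_struct *task;

    init_kthread_worker(&my_worker);          /* wraps __init_kthread_worker() */
    init_kthread_work(&my_work, my_work_fn);

    /* Attach exactly one kthread to the worker, as documented above. */
    task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
    if (!IS_ERR(task))
        queue_kthread_work(&my_worker, &my_work);
    return task;   /* later: kthread_stop(task) stops and detaches the worker */
}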
/**
 * kthread_create_ve - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread. The thread will be stopped: use wake_up_process() to start
 * it. See also kthread_run(), kthread_create_on_cpu().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create_ve(struct ve_struct *ve,
                                      int (*threadfn)(void *data),
                                      void *data,
                                      const char namefmt[],
                                      ...)
{
    struct kthread_create_info create;
    struct ve_struct *old_ve;

    old_ve = set_exec_env(ve);

    create.threadfn = threadfn;
    create.data = data;
    init_completion(&create.done);

    spin_lock(&kthread_create_lock);
    list_add_tail(&create.list, &kthread_create_list);
    spin_unlock(&kthread_create_lock);

    wake_up_process(kthreadd_task);
    wait_for_completion(&create.done);

    if (!IS_ERR(create.result)) {
        struct sched_param param = { .sched_priority = 0 };
        va_list args;

        va_start(args, namefmt);
        vsnprintf(create.result->comm, sizeof(create.result->comm),
                  namefmt, args);
        va_end(args);
        /*
         * root may have changed our (kthreadd's) priority or CPU mask.
         * The kernel thread should not inherit these properties.
         */
        sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
        set_cpus_allowed_ptr(create.result, cpu_all_mask);
    }

    set_exec_env(old_ve);
    return create.result;
}
EXPORT_SYMBOL(kthread_create_ve);

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
    struct kthread *kthread;
    int ret;

    trace_sched_kthread_stop(k);
    get_task_struct(k);

    kthread = to_kthread(k);
    barrier(); /* it might have exited */
    if (k->vfork_done != NULL) {
        kthread->should_stop = 1;
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
    }
    ret = k->exit_code;

    put_task_struct(k);
    trace_sched_kthread_stop_ret(ret);

    return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *data)
{
    struct task_struct *tsk = current;
    struct kthreadd_create_info *kcreate;
    struct kthread self;
    int rc;

    self.should_stop = 0;

    kcreate = (struct kthreadd_create_info *) data;
    if (kcreate) {
        daemonize("kthreadd/%d", get_exec_env()->veid);
        kcreate->result = current;
        set_fs(KERNEL_DS);
        init_completion(&self.exited);
        current->vfork_done = &self.exited;
    } else
        set_task_comm(tsk, "kthreadd");

    /* Setup a clean context for our children to inherit. */
    ignore_signals(tsk);
    set_cpus_allowed_ptr(tsk, cpu_all_mask);
    set_mems_allowed(node_states[N_HIGH_MEMORY]);

    current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

    if (kcreate)
        complete(&kcreate->done);

    for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (list_empty(&kthread_create_list)) {
            if (self.should_stop)
                break;
            else
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        spin_lock(&kthread_create_lock);
        while (!list_empty(&kthread_create_list)) {
            struct kthread_create_info *create;

            create = list_entry(kthread_create_list.next,
                                struct kthread_create_info, list);
            list_del_init(&create->list);
            spin_unlock(&kthread_create_lock);

            create_kthread(create);

            spin_lock(&kthread_create_lock);
        }
        spin_unlock(&kthread_create_lock);
    }

    do {
        clear_thread_flag(TIF_SIGPENDING);
        rc = sys_wait4(-1, NULL, __WALL, NULL);
    } while (rc != -ECHILD);

    do_exit(0);
}

int kthreadd_create()
{
    struct kthreadd_create_info create;
    int ret;
    struct ve_struct *ve = get_exec_env();

    BUG_ON(ve->_kthreadd_task);

    INIT_LIST_HEAD(&ve->_kthread_create_list);
    init_completion(&create.done);

    ret = kernel_thread(kthreadd, (void *) &create, CLONE_FS);
    if (ret < 0) {
        return ret;
    }
    wait_for_completion(&create.done);

    ve->_kthreadd_task = create.result;
    return 0;
}
EXPORT_SYMBOL(kthreadd_create);

void kthreadd_stop(struct ve_struct *ve)
{
    struct kthread *kthread;
    int ret;
    struct task_struct *k;

    if (!ve->_kthreadd_task)
        return;

    k = ve->_kthreadd_task;
    trace_sched_kthread_stop(k);
    get_task_struct(k);

    BUG_ON(!k->vfork_done);
    kthread = container_of(k->vfork_done, struct kthread, exited);
    kthread->should_stop = 1;

    wake_up_process(k);
    wait_for_completion(&kthread->exited);
    ret = k->exit_code;

    put_task_struct(k);
    trace_sched_kthread_stop_ret(ret);
}
EXPORT_SYMBOL(kthreadd_stop);