/**
 * Create a new execution context and return a pointer to it.
 */
task_t* task_spawn(void* fct_ptr, void* arguments, void* return_val)
{
    task_t* new_task;
    task_t* current_task = task_current();

    /* ALLOCATE_TASK is assumed to allocate the task itself, its ucontext
     * and its stack (uc_stack). */
    ALLOCATE_TASK(new_task);
    pthread_mutex_init(&new_task->lock, NULL);
    pthread_mutex_init(&new_task->running_lock, NULL);

    if (inside_main()) {
        debug("Spawn called from main.");
    } else {
        int parent_id = (current_task->parent) ? current_task->parent->id : 0;
        if (parent_id < 0)
            exit(1);
        debug("Spawn called from a worker %d, parent %d",
              current_task->id, parent_id);
    }

    /* Mark the result slot as "not ready" so task_sync() can poll it. */
    *((int*)return_val) = -1;

    new_task->arguments = (void*) arguments;
    new_task->function  = (function_t) fct_ptr;
    new_task->result    = return_val;
    new_task->status    = STARTED;
    new_task->parent    = current_task;

    if (!inside_main())
        task_inc_children_count(current_task);

    /* getcontext() must run before uc_link is set, otherwise it would
     * overwrite the link with the caller's current context. */
    getcontext(new_task->context);
    new_task->context->uc_link = &go_home;

    if (new_task->status == STARTED) {
        makecontext(new_task->context, (void (*)(void)) sched_wrapper_function, 0);
        sched_add_task(new_task);
    }
    return new_task;
}
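/*
 * A minimal usage sketch combining task_spawn() with task_sync() (defined
 * further below). The worker signature and the way sched_wrapper_function
 * forwards `arguments` and `result` to it are assumptions, since that
 * wrapper is not shown here.
 */
void add_one(void* arg, void* result)
{
    *(int*)result = *(int*)arg + 1;
}

void spawn_example(void)
{
    int arg = 41;
    int result;                               /* task_spawn sets it to -1 */
    task_t* t = task_spawn((void*)add_one, &arg, &result);
    task_sync(&t, 1);                         /* waits, then destroys t */
    /* result now holds 42 */
}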
void task_yield()
{
    disable_irq();

    struct task_t *current_task = task_current();
    struct list_node_t *next_task_it;
    struct task_t *next_task;

    /* Pick the next runnable task in round-robin order. */
    if (task_is_active(current_task))
        next_task_it = current_task_it->next;
    else
        next_task_it = list_first(active_tasks);

    next_task = (struct task_t *)next_task_it->data;
    task_print("task_yield:", current_task, next_task);

    if (current_task == next_task)
        return;
    if (next_task == 0)
        print_buf("ERROR: next_task=0\n");

    current_task_it = next_task_it;
    task_switch(&current_task->sp, next_task->sp);
}
static void enqueue_pjob_rtws(struct rq *rq, struct task_struct *p, int flags)
{
    struct sched_rtws_entity *rtws_se = &p->rtws;

    __enqueue_pjob_rtws(&rq->rtws, rtws_se);

    /* A pjob that is not currently running, is not alone on this rq,
     * and was not queued at the head is also published as stealable. */
    if (!task_current(rq, p) && rq->rtws.nr_running > 1 &&
        !(flags & ENQUEUE_HEAD))
        enqueue_stealable_pjob_rtws(&rq->rtws, rtws_se);
}
void channel_transfer_to_output(struct channel_t *ch)
{
    struct task_t *current_task = task_current();
    struct list_node_t *output_task_it = list_first(&ch->output_tasks);
    struct task_t *output_task = (struct task_t *)list_get(output_task_it);

    /* Hand the current task's message over to the first waiting output
     * task and make that task runnable again. */
    msg_move(current_task->msg, output_task->msg);
    channel_activate_task(output_task);
}
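/*
 * A plausible sender built on channel_transfer_to_output(). Everything
 * beyond the functions shown in this section is an assumption:
 * list_empty(), list_append(), task_deactivate() and the input_tasks
 * field are hypothetical names for illustration only.
 */
void channel_send(struct channel_t *ch, struct msg_t *msg)
{
    struct task_t *self = task_current();
    self->msg = msg;

    if (!list_empty(&ch->output_tasks)) {
        /* A consumer is already waiting: hand the message over now. */
        channel_transfer_to_output(ch);
    } else {
        /* No consumer yet: park on the channel and yield until one arrives. */
        list_append(&ch->input_tasks, self);   /* hypothetical */
        task_deactivate(self);                 /* hypothetical */
        task_yield();
    }
}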
task_struct *next_task(/*uint tick*/)
{
    if (quit)
        return (task_struct *)def_task;

    task_struct *current = task_current();
    if (current == def_task) return &task0;
    //if (tick % 1000) return NULL;
    if (current == &task0)   return &task1;
    if (current == &task1)   return &task0;

    panic("Invalid task");
    return null;
}
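/*
 * The commented-out tick check above hints at a throttled variant: the
 * scheduler callback takes the timer tick and returns NULL to keep the
 * current task running. A sketch of that variant; the "NULL means no
 * switch" convention is an assumption about the caller.
 */
task_struct *next_task_throttled(uint tick)
{
    if (quit) return (task_struct *)def_task;

    task_struct *current = task_current();
    if (current == def_task) return &task0;
    if (tick % 1000) return NULL;       /* not on a switch boundary yet */

    return (current == &task0) ? &task1 : &task0;
}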
static int internal_new_fd(int fd, int enable)
{
    struct poll_note *note;
    void *oldp = NULL;

    note = cs_malloc(sizeof(struct poll_note));
    memset(note, 0, sizeof(struct poll_note));
    note->reader = enable ? (void*)task_current() : NULL;

    if (cs_poll_add(g_poll_fd, fd, POLL_EV_READ, (void *)note) == -1) {
        cs_free(note);
        return -1;
    }
    hashmap_upsert(g_fd_map, (void*)&fd, sizeof(int), (void*)note, &oldp);
    return 0;
}
static struct task_struct *pick_next_stealable_pjob_rtws(struct rtws_rq *rtws_rq)
{
    struct sched_rtws_entity *rtws_se;
    struct task_struct *p;
    struct rq *rq = rq_of_rtws_rq(rtws_rq);

    if (!has_stealable_pjobs(rtws_rq))
        return NULL;

    rtws_se = rb_entry(rtws_rq->leftmost_stealable_pjob,
                       struct sched_rtws_entity, stealable_pjob_node);
    BUG_ON(!rtws_se);

    p = task_of_rtws_se(rtws_se);

    /* A pjob is only stealable if it still belongs to this rq, is not
     * currently running, is on a runqueue, and is an rtws task. */
    if (rq->cpu != task_cpu(p) || task_current(rq, p) ||
        !p->se.on_rq || !rtws_task(p)) {
        printk(KERN_INFO "RTWS :: cannot steal this task\n");
        return NULL;
    }

    return p;
}
int task_sync(task_t** execution_context, int count)
{
    int i;
    int someone_not_done = 1;

    while (someone_not_done) {
        someone_not_done = 0;
        for (i = 0; i < count; i++) {
            if (execution_context[i]->status != COMPLETED) {
                someone_not_done = 1;
            }
        }

        if (someone_not_done) {
            if (!inside_main()) {
                /* A worker cooperatively yields so its children can run. */
                debug("%d - Yielding...", sched_get()->id);
                sched_yield_current();
            } else {
                /* The main thread sleeps until a completing task signals
                 * wake_up_main. */
                debug("Main waiting");
                pthread_mutex_lock(&wake_up_main_lock);
                pthread_cond_wait(&wake_up_main, &wake_up_main_lock);
                pthread_mutex_unlock(&wake_up_main_lock);
                debug("Main woken up");
            }
        }
    }

    /* All child tasks have finished; clean up. */
    for (i = 0; i < count; i++) {
        task_destroy(execution_context[i]);
    }

    if (inside_main()) {
        debug("Going outside of sync in main %d", (unsigned int) pthread_self());
    } else {
        debug("Going outside of sync in task %d", task_current()->id);
    }
    return 0;
}
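/*
 * task_sync()'s main-thread branch blocks on wake_up_main, so whichever
 * code path completes a task must signal it. A sketch of that completion
 * hook, reusing the names above; task_complete itself is hypothetical.
 */
static void task_complete(task_t* task)
{
    task->status = COMPLETED;              /* lets task_sync's scan succeed */
    pthread_mutex_lock(&wake_up_main_lock);
    pthread_cond_broadcast(&wake_up_main); /* wake main if it is waiting */
    pthread_mutex_unlock(&wake_up_main_lock);
}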
static int internal_release_fd(int fd)
{
    struct poll_note *notep;
    void *task = (void*)task_current();

    notep = hashmap_get(g_fd_map, &fd, sizeof(int));
    if (!notep) {
        return -1;   /* does not exist */
    }

    if (notep->reader == task) notep->reader = NULL;
    if (notep->writer == task) notep->writer = NULL;

    /* Nobody waits on this fd anymore: drop it from the map and poller. */
    if (notep->reader == NULL && notep->writer == NULL) {
        hashmap_delete(g_fd_map, (void*)&fd, sizeof(int), (void*)&notep);
        cs_free(notep);
        cs_poll_del(g_poll_fd, fd);
        return 0;    /* a normal close */
    }
    return 1;        /* references remain */
}
static int do_wait_fd(int fd, unsigned events)
{
    int needsyscall = 0;
    struct poll_note *note;
    void *current = task_current();

    note = hashmap_get(g_fd_map, &fd, sizeof(int));

    /*
    if ((events & POLL_EV_READ) && note->reader && note->reader != current) {
        ERR("dup reader");
        return -1;
    }
    if ((events & POLL_EV_WRITE) && note->writer && note->writer != current) {
        ERR("dup writer");
        return -1;
    }
    */

    /* Register interest; the poller registration only needs updating
     * (needsyscall) when a previously idle direction gains a waiter. */
    if (events & POLL_EV_READ) {
        needsyscall = needsyscall || (note->reader == NULL);
        note->reader = current;
    }
    if (events & POLL_EV_WRITE) {
        needsyscall = needsyscall || (note->writer == NULL);
        note->writer = current;
    }

    /* Poll for every direction that still has a waiter. */
    if (note->reader) events |= POLL_EV_READ;
    if (note->writer) events |= POLL_EV_WRITE;

    if (needsyscall && cs_poll_mod(g_poll_fd, fd, events, (void*)note) == -1) {
        SYS_ERR("cs_poll_mod failed");
        return -1;
    }

    /* Block this task until the poller wakes it. */
    task_yield(0);
    return 0;
}
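/*
 * Putting internal_new_fd(), do_wait_fd() and internal_release_fd()
 * together: a blocking read for a task, assuming the fd is non-blocking
 * and was registered with internal_new_fd() first. task_read is a
 * hypothetical helper, and the retry-on-EAGAIN convention is an
 * assumption about how the poller wakes waiters.
 */
#include <unistd.h>     /* read */
#include <errno.h>

int task_read(int fd, void *buf, size_t len)
{
    for (;;) {
        ssize_t n = read(fd, buf, len);
        if (n >= 0)
            return (int)n;
        if (errno != EAGAIN && errno != EWOULDBLOCK)
            return -1;
        /* Park this task until the fd becomes readable. */
        if (do_wait_fd(fd, POLL_EV_READ) == -1)
            return -1;
    }
}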
void test_tasks(void)
{
    def_task = task_current();
#if 0
    task_kthread_init(&task0, (void *)do_task0,
            (void *)((ptr_t)task0_stack + TASK_KERNSTACK_SIZE - 0x20));
    task_kthread_init(&task1, (void *)do_task1,
            (void *)((ptr_t)task1_stack + TASK_KERNSTACK_SIZE - 0x20));
#else
    const segment_selector ucs = { .as.word = SEL_USER_CS };
    const segment_selector uds = { .as.word = SEL_USER_DS };

    uint espU0 = ((uint)task0_usr_stack + R3_STACK_SIZE - 0x18);
    uint espK0 = ((uint)task0_stack + TASK_KERNSTACK_SIZE - CONTEXT_SIZE - 0x14);
    uint espU1 = ((ptr_t)task1_usr_stack + R3_STACK_SIZE);
    uint espK1 = ((ptr_t)task1_stack + TASK_KERNSTACK_SIZE - CONTEXT_SIZE - 0x14);

    task_init((task_struct *)&task0, (void *)do_task0,
              (void *)espK0, (void *)espU0, ucs, uds);
    task_init((task_struct *)&task1, (void *)do_task1,
              (void *)espK1, (void *)espU1, ucs, uds);
#endif

    /* allow tasks to update cursor with `outl` */
    task0.tss.eflags |= eflags_iopl(PL_USER);
    task1.tss.eflags |= eflags_iopl(PL_USER);

    quit = false;
    kbd_set_onpress((kbd_event_f)key_press);
    task_set_scheduler(next_task);

    /* wait for first timer tick, when execution will be transferred
     * to do_task0 */
    cpu_halt();

    task_set_scheduler(null);
    kbd_set_onpress(null);
    k_printf("\nBye.\n");
}

/***********************************************************/

void run_userspace(void)
{
    char buf[100];
    snprintf(buf, 100, "Current privilege level = %d\n", i386_current_privlevel());
    size_t nbuf = strlen(buf);

    asm("int $0x80 \n"
        :: "a"(SYS_WRITE), "c"(STDOUT_FILENO), "d"((uint)buf), "b"(nbuf));

    while (1);
}

extern void start_userspace(uint eip3, uint cs3, uint eflags, uint esp3, uint ss3);

task_struct task3;

void test_userspace(void)
{
    /* init task */
    task3.tss.eflags = x86_eflags(); // | eflags_iopl(PL_USER);
    task3.tss.cs = SEL_USER_CS;
    task3.tss.ds = task3.tss.es = task3.tss.fs = task3.tss.gs = SEL_USER_DS;
    task3.tss.ss = SEL_USER_DS;
    task3.tss.esp = (uint)task0_usr_stack + R3_STACK_SIZE - CONTEXT_SIZE - 0x20;
    task3.tss.eip = (uint)run_userspace;
    task3.tss.ss0 = SEL_KERN_DS;
    task3.tss.esp0 = (uint)task0_stack + R0_STACK_SIZE - CONTEXT_SIZE - 0x20;

    /* make a GDT task descriptor */
    segment_descriptor taskdescr;
    segdescr_taskstate_init(taskdescr, (uint)&(task3.tss), PL_USER);
    segdescr_taskstate_busy(taskdescr, 0);

    index_t taskdescr_index = gdt_alloc_entry(taskdescr);

    segment_selector tss_sel;
    tss_sel.as.word = make_selector(taskdescr_index, 0, taskdescr.as.strct.dpl);

    kbd_set_onpress((kbd_event_f)key_press);

    uint efl = 0x00203202;
    test_eflags();
    logmsgf("efl = 0x%x\n", efl);
    logmsgf("tss_sel = 0x%x\n", (uint)tss_sel.as.word);
    logmsgf("tssd = %x %x\n", taskdescr.as.ints[0], taskdescr.as.ints[1]);
    logmsgf("tssd.base = %x\n",
            taskdescr.as.strct.base_l + (taskdescr.as.strct.base_m << 16)
                                      + (taskdescr.as.strct.base_h << 24));

    /* load TR and LDT */
    i386_load_task_reg(tss_sel);
    //asm ("lldt %%ax \n\t"::"a"( SEL_DEF_LDT ));

    /* go userspace */
    start_userspace(
        (uint)run_userspace, task3.tss.cs,
        //(uint)run_userspace, (uint)tss_sel.as.word,
        efl,
        task3.tss.esp, task3.tss.ss);
}
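/*
 * For reference, run_userspace()'s inline asm can be wrapped as a reusable
 * syscall stub. The register convention (eax = number, ecx/edx/ebx =
 * arguments) is taken from the asm above; whether syscall_handler() writes
 * a return value back into the saved eax is an assumption.
 */
static inline int sys_write(int fd, const void *buf, size_t len)
{
    int ret;
    asm volatile("int $0x80"
                 : "=a"(ret)
                 : "0"(SYS_WRITE), "c"(fd), "d"((uint)buf), "b"(len)
                 : "memory");
    return ret;
}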
static uint syscall_max = sizeof(syscall_vectors) / sizeof(*syscall_vectors);

void syscall_handler(regs_t* registers)
{
    /* eax selects the syscall; reject out-of-range numbers. */
    if (registers->eax >= syscall_max) {
        kprintf("[#%d] Unknown syscall %x\n", task_current()->pid, registers->eax);
        return;
    }
    syscall_vectors[registers->eax](registers);
}

void sys_alloc_page(__attribute__((unused)) regs_t* registers)
{
    uint page = alloc_page();
    task_t* t = task_current();
    uint d_ent = 0;
    uint* tbl = 0;
    uint dir;

    /* Scan the page directory above 0x10000000 and keep the last
     * present entry. */
    for (dir = 0x10000000 / 4096 / 1024; dir < 0xffffffff / 4096 / 1024; dir++) {
        if (t->page_directory[dir] & 1)
            d_ent = t->page_directory[dir] & 0xfffff000;
        else
            continue;
    }

    tbl = (uint*)d_ent;
    uint tbl_i;
    for (tbl_i = 0; tbl_i < 1024; tbl_i++) {
        if ((tbl[tbl_i] & 1) == 0)