/*
 * Queue a cooperative thread to run func(arg) and block the boot state
 * machine at (state, seq) until that thread completes.
 *
 * Returns 0 on success, -1 when called from a non-yielding context or
 * when no free thread slot is available.
 */
int thread_run_until(void (*func)(void *), void *arg, boot_state_t state,
		     boot_state_sequence_t seq)
{
	struct thread *current;
	struct thread *t;
	struct block_boot_state *bbs;

	current = current_thread();

	if (!thread_can_yield(current)) {
		/*
		 * Bug fix: the message previously claimed "thread_run()",
		 * pointing debuggers at the wrong function. Use __func__ so
		 * the log names the function that actually failed.
		 */
		printk(BIOS_ERR, "%s() called from non-yielding context!\n", __func__);
		return -1;
	}

	t = get_free_thread();
	if (t == NULL) {
		printk(BIOS_ERR, "%s() No more threads!\n", __func__);
		return -1;
	}

	/* Per-thread scratch space holds the boot-state block descriptor. */
	bbs = thread_alloc_space(t, sizeof(*bbs));
	bbs->state = state;
	bbs->seq = seq;

	prepare_thread(t, func, arg, call_wrapper_block_state, bbs);
	schedule(t);

	return 0;
}
struct parasite_ctl *compel_prepare_noctx(int pid) { struct parasite_ctl *ctl = NULL; /* * Control block early setup. */ ctl = xzalloc(sizeof(*ctl)); if (!ctl) { pr_err("Parasite control block allocation failed (pid: %d)\n", pid); goto err; } ctl->tsock = -1; ctl->ictx.log_fd = -1; if (prepare_thread(pid, &ctl->orig)) goto err; ctl->rpid = pid; BUILD_BUG_ON(PARASITE_START_AREA_MIN < BUILTIN_SYSCALL_SIZE + MEMFD_FNAME_SZ); return ctl; err: xfree(ctl); return NULL; }
/*
 * Queue a cooperative thread to run func(arg) while the current thread
 * continues. Returns 0 on success, -1 when called from a non-yielding
 * context or when no free thread slot is available.
 */
int thread_run(void (*func)(void *), void *arg)
{
	struct thread *t;

	/* Only a yielding context may spawn cooperative threads. */
	if (!thread_can_yield(current_thread())) {
		printk(BIOS_ERR, "thread_run() called from non-yielding context!\n");
		return -1;
	}

	t = get_free_thread();
	if (!t) {
		printk(BIOS_ERR, "thread_run() No more threads!\n");
		return -1;
	}

	prepare_thread(t, func, arg, call_wrapper_block_current, NULL);
	schedule(t);

	return 0;
}
/* Set up the idle thread and switch the current thread to cooperative mode. */
static void idle_thread_init(void)
{
	struct thread *idle;

	idle = get_free_thread();
	if (idle == NULL)
		die("No threads available for idle thread!\n");

	/* The idle thread runs once all other threads have yielded. */
	prepare_thread(idle, idle_thread, NULL, call_wrapper, NULL);
	push_runnable(idle);

	/* Mark the currently executing thread to cooperate. */
	thread_cooperate();
}
/*
 * Allocate a per-thread control structure for thread @pid under @ctl.
 * Returns NULL if allocation or thread preparation fails.
 */
struct parasite_thread_ctl *compel_prepare_thread(struct parasite_ctl *ctl, int pid)
{
	struct parasite_thread_ctl *tctl;

	tctl = xmalloc(sizeof(*tctl));
	if (!tctl)
		return NULL;

	/* Seize the thread; release the control structure on failure. */
	if (prepare_thread(pid, &tctl->th)) {
		xfree(tctl);
		return NULL;
	}

	tctl->tid = pid;
	tctl->ctl = ctl;

	return tctl;
}
/** @brief Creates a new thread to run func(arg).
 *
 * Steps:
 * 1. Allocate a stack and a thread item (containing thread information)
 *    for the new thread.
 * 2. Invoke the thread fork system call.
 * 3. Run func(arg) in the child thread.
 *
 * @param func the function to be run by the child thread.
 * @param arg the argument for func.
 * @return the child thread's tid on success, and ERROR (a negative
 *         number) on failure.
 */
int thr_create(void *(*func)(void *), void * arg)
{
	thread_t *new_thread;
	int ret;

	/*
	 * Allocate a stack and a thread item.
	 */
	new_thread = prepare_thread(func, arg);
	if(new_thread == NULL)
		return ERROR;

	/*
	 * Invoke the thread fork system call. Like fork(), it returns 0 in
	 * the child and the child's tid (> 0) in the parent.
	 */
	/* Child thread */
	if ((ret = thread_fork(GET_STACK(new_thread), (void *)new_thread)) == 0){
		/* Run the child thread.
		 * NOTE(review): do_thread() presumably never returns — if it
		 * did, the child would fall through to the rollback path
		 * below. TODO confirm. */
		do_thread();
	}
	/* Parent thread */
	else if(ret > 0){
		/*
		 * Thread start code: record the tid, schedule the child, and
		 * mark it RUNNING under the thread's mutex.
		 */
		new_thread->tid = ret;

		/* Make the child thread running */
		make_thread_running(ret, new_thread);

		mutex_lock(&new_thread->thr_mutex);
		new_thread->status = RUNNING;
		mutex_unlock(&new_thread->thr_mutex);

		/* Return child thread tid */
		return ret;
	}

	/*
	 * Error happened when forking the new thread (ret < 0); recover.
	 */
	/* Recycle stack and thread item allocated by prepare_thread(). */
	prepare_thread_rollback(new_thread);

	return ERROR;
}
int dispatch_syscall(struct kernel_dispatch_info *disp_info) { int resched = 0; struct thread *t = NULL; struct kernel_dispatch_info *dup_disp_info = NULL; // Do the actual dispatch switch (disp_info->syscall.num) { // Internal case SYSCALL_KPUTS: dup_disp_info = prepare_thread(disp_info); t = create_thread(kernel_proc, (ulong)&kputs_worker_thread, (ulong)dup_disp_info, -1, 0, 0); assert(t); break; case SYSCALL_YIELD: break; // IO Ports case SYSCALL_IO_IN: io_in_worker(disp_info); break; case SYSCALL_IO_OUT: io_out_worker(disp_info); break; // IPC case SYSCALL_REG_MSG_HANDLER: //kprintf("syscall reg msg handler\n"); reg_msg_handler_worker(disp_info); break; case SYSCALL_UNREG_MSG_HANDLER: unreg_msg_handler_worker(disp_info); break; case SYSCALL_SEND: send_worker(disp_info); break; case SYSCALL_REPLY: reply_worker(disp_info); break; case SYSCALL_RECV: dup_disp_info = prepare_thread(disp_info); t = create_thread(kernel_proc, (ulong)&recv_worker_thread, (ulong)dup_disp_info, -1, 0, 0); assert(t); break; case SYSCALL_REQUEST: // kprintf("syscall request\n"); dup_disp_info = prepare_thread(disp_info); t = create_thread(kernel_proc, (ulong)&request_worker_thread, (ulong)dup_disp_info, -1, 0, 0); // kprintf("syscall done\n"); assert(t); break; case SYSCALL_RESPOND: //kprintf("syscall respond\n"); dup_disp_info = prepare_thread(disp_info); t = create_thread(kernel_proc, (ulong)&respond_worker_thread, (ulong)dup_disp_info, -1, 0, 0); assert(t); break; // KAPI case SYSCALL_REG_KAPI_SERVER: // kprintf("syscall reg kapi server\n"); reg_kapi_server_worker(disp_info); break; case SYSCALL_UNREG_KAPI_SERVER: unreg_kapi_server_worker(disp_info); break; // Invalid syscall default: break; } // Take care of reschedule flag if (t) { dup_disp_info->worker = t; run_thread(t); resched = 1; } return resched; }