void
rust_task::new_stack(size_t requested_sz) {
    LOG(this, mem, "creating new stack for task %" PRIxPTR, this);
    if (stk) {
        ::check_stack_canary(stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = thread->min_stack_size;

    // Try to reuse an existing stack segment
    while (stk != NULL && stk->next != NULL) {
        size_t next_sz = user_stack_size(stk->next);
        if (min_sz <= next_sz && requested_sz <= next_sz) {
            LOG(this, mem, "reusing existing stack");
            stk = stk->next;
            return;
        } else {
            LOG(this, mem, "existing stack is not big enough");
            stk_seg *new_next = stk->next->next;
            free_stack(stk->next);
            stk->next = new_next;
            if (new_next) {
                new_next->prev = stk;
            }
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (stk != NULL) {
        current_sz = user_stack_size(stk);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stack_size(min_sz,
                                             current_sz, requested_sz);

    if (total_stack_sz + rust_stk_sz > thread->env->max_stack_size) {
        LOG_ERR(this, task, "task %" PRIxPTR " ran out of stack", this);
        fail();
    }

    size_t sz = rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *new_stk = create_stack(&local_region, sz);
    LOGPTR(thread, "new stk", (uintptr_t)new_stk);
    new_stk->task = this;
    new_stk->next = NULL;
    new_stk->prev = stk;
    if (stk) {
        stk->next = new_stk;
    }
    LOGPTR(thread, "stk end", new_stk->end);
    stk = new_stk;
    total_stack_sz += user_stack_size(new_stk);
}
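// For orientation: a sketch of the stk_seg header these functions traffic
// in, reconstructed from the fields referenced in the snippets collected
// here. This is an assumption, not the verbatim runtime definition (which
// lives in rust_stack.h and changed across versions -- e.g. the older
// allocator below uses `limit` where the newer one uses `end`).
struct stk_seg {
    stk_seg *prev;            // doubly-linked chain of segments
    stk_seg *next;
    uintptr_t end;            // address one past the last byte of data[]
    unsigned int valgrind_id; // token from VALGRIND_STACK_REGISTER
    rust_task *task;          // owning task
    uintptr_t canary;         // guard word verified by check_stack_canary()
    uint8_t data[];           // the stack memory itself; grows downward
};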
rust_sched_loop::rust_sched_loop(rust_scheduler *sched, int id, bool killed) :
    _log(this),
    id(id),
    should_exit(false),
    cached_c_stack(NULL),
    extra_c_stack(NULL),
    cached_big_stack(NULL),
    extra_big_stack(NULL),
    dead_task(NULL),
    killed(killed),
    pump_signal(NULL),
    kernel(sched->kernel),
    sched(sched),
    log_lvl(log_debug),
    min_stack_size(kernel->env->min_stack_size),
    local_region(false, kernel->env->detailed_leaks,
                 kernel->env->poison_on_free),
    // FIXME #2891: calculate a per-scheduler name.
    name("main")
{
    LOGPTR(this, "new dom", (uintptr_t)this);
    rng_init(&rng, kernel->env->rust_seed, NULL, 0);

    if (!tls_initialized)
        init_tls();
}
rust_scheduler::rust_scheduler(rust_kernel *kernel,
                               rust_message_queue *message_queue,
                               rust_srv *srv,
                               const char *name) :
    interrupt_flag(0),
    _log(srv, this),
    log_lvl(log_note),
    srv(srv),
    name(name),
    newborn_tasks(this, "newborn"),
    running_tasks(this, "running"),
    blocked_tasks(this, "blocked"),
    dead_tasks(this, "dead"),
    cache(this),
    root_task(NULL),
    curr_task(NULL),
    rval(0),
    kernel(kernel),
    message_queue(message_queue)
{
    LOGPTR(this, "new dom", (uintptr_t)this);
    isaac_init(this, &rctx);
#ifndef __WIN32__
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 1024 * 1024);
    pthread_attr_setdetachstate(&attr, true);
#endif
    root_task = create_task(NULL, name);
}
rust_scheduler::rust_scheduler(rust_kernel *kernel,
                               rust_srv *srv,
                               int id) :
    ref_count(1),
    interrupt_flag(0),
    _log(srv, this),
    log_lvl(log_debug),
    srv(srv),
    // TODO: calculate a per scheduler name.
    name("main"),
    newborn_tasks(this, "newborn"),
    running_tasks(this, "running"),
    blocked_tasks(this, "blocked"),
    dead_tasks(this, "dead"),
    cache(this),
    kernel(kernel),
    id(id),
    min_stack_size(kernel->env->min_stack_size),
    env(kernel->env)
{
    LOGPTR(this, "new dom", (uintptr_t)this);
    isaac_init(this, &rctx);
#ifndef __WIN32__
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, 1024 * 1024);
    pthread_attr_setdetachstate(&attr, true);
#endif

    if (!tls_initialized)
        init_tls();
}
rust_task::rust_task(rust_scheduler *sched, rust_task_list *state,
                     rust_task *spawner, const char *name) :
    ref_count(1),
    stk(NULL),
    runtime_sp(0),
    rust_sp(0),
    gc_alloc_chain(0),
    sched(sched),
    cache(NULL),
    kernel(sched->kernel),
    name(name),
    state(state),
    cond(NULL),
    cond_name("none"),
    supervisor(spawner),
    list_index(-1),
    rendezvous_ptr(0),
    running_on(-1),
    pinned_on(-1),
    local_region(&sched->srv->local_region),
    _on_wakeup(NULL),
    failed(false),
    propagate_failure(true)
{
    LOGPTR(sched, "new task", (uintptr_t)this);
    DLOG(sched, task, "sizeof(task) = %d (0x%x)",
         sizeof *this, sizeof *this);

    stk = new_stk(sched, this, 0);
    rust_sp = stk->limit;
}
static void
del_stk(rust_task *task, stk_seg *stk)
{
    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);

    LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
    task->free(stk);
}
void
rust_task::start(uintptr_t spawnee_fn,
                 uintptr_t args)
{
    LOGPTR(sched, "from spawnee", spawnee_fn);

    I(sched, stk->data != NULL);

    // Carve the initial spawn_args frame out of the top of the new stack.
    char *sp = (char *)rust_sp;
    sp -= sizeof(spawn_args);

    spawn_args *a = (spawn_args *)sp;

    a->task = this;
    a->a3 = 0;
    a->a4 = args;
    void **f = (void **)&a->f;
    *f = (void *)spawnee_fn;

    // Arrange for the task to enter through task_start_wrapper with the
    // frame above as its argument.
    ctx.call((void *)task_start_wrapper, a, sp);

    yield_timer.reset_us(0);
    transition(&sched->newborn_tasks, &sched->running_tasks);
    sched->lock.signal();
}
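// A sketch of the spawn_args frame that start() carves out of the new
// stack. The field names come straight from the assignments above, but the
// exact types and layout are assumptions; they must agree with what
// task_start_wrapper and the spawn glue expect.
struct spawn_args {
    rust_task *task; // a->task: the task being started
    uintptr_t a3;    // a->a3: unused slot, zeroed by start()
    uintptr_t a4;    // a->a4: opaque pointer to the spawnee's arguments
    uintptr_t f;     // a->f: the spawnee fn, invoked by task_start_wrapper
};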
static stk_seg*
new_stk(rust_scheduler *sched, rust_task *task, size_t minsz)
{
    // Never hand out less than the scheduler's configured minimum.
    size_t min_stk_bytes = get_min_stk_size(sched->min_stack_size);
    if (minsz < min_stk_bytes)
        minsz = min_stk_bytes;
    size_t sz = sizeof(stk_seg) + minsz;
    stk_seg *stk = (stk_seg *)task->malloc(sz, "stack");
    LOGPTR(task->sched, "new stk", (uintptr_t)stk);
    memset(stk, 0, sizeof(stk_seg));
    stk->limit = (uintptr_t) &stk->data[minsz];
    LOGPTR(task->sched, "stk limit", stk->limit);
    stk->valgrind_id =
        VALGRIND_STACK_REGISTER(&stk->data[0],
                                &stk->data[minsz]);
    return stk;
}
// Tasks
rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
                     rust_task *spawner, const char *name,
                     size_t init_stack_sz) :
    ref_count(1),
    id(0),
    notify_enabled(false),
    stk(NULL),
    runtime_sp(0),
    sched(sched_loop->sched),
    sched_loop(sched_loop),
    kernel(sched_loop->kernel),
    name(name),
    list_index(-1),
    rendezvous_ptr(0),
    local_region(&sched_loop->local_region),
    boxed(sched_loop->kernel->env, &local_region),
    unwinding(false),
    propagate_failure(true),
    cc_counter(0),
    total_stack_sz(0),
    task_local_data(NULL),
    task_local_data_cleanup(NULL),
    state(state),
    cond(NULL),
    cond_name("none"),
    event_reject(false),
    event(NULL),
    killed(false),
    reentered_rust_stack(false),
    disallow_kill(0),
    c_stack(NULL),
    next_c_sp(0),
    next_rust_sp(0),
    supervisor(spawner)
{
    LOGPTR(sched_loop, "new task", (uintptr_t)this);
    DLOG(sched_loop, task, "sizeof(task) = %d (0x%x)",
         sizeof *this, sizeof *this);

    new_stack(init_stack_sz);

    if (supervisor) {
        supervisor->ref();
    }
}
// Tasks
rust_task::rust_task(rust_task_thread *thread, rust_task_state state,
                     rust_task *spawner, const char *name,
                     size_t init_stack_sz) :
    ref_count(1),
    id(0),
    notify_enabled(false),
    stk(NULL),
    runtime_sp(0),
    sched(thread->sched),
    thread(thread),
    kernel(thread->kernel),
    name(name),
    list_index(-1),
    rendezvous_ptr(0),
    local_region(&thread->srv->local_region),
    boxed(&local_region),
    unwinding(false),
    propagate_failure(true),
    cc_counter(0),
    total_stack_sz(0),
    state(state),
    cond(NULL),
    cond_name("none"),
    killed(false),
    reentered_rust_stack(false),
    c_stack(NULL),
    next_c_sp(0),
    next_rust_sp(0),
    supervisor(spawner)
{
    LOGPTR(thread, "new task", (uintptr_t)this);
    DLOG(thread, task, "sizeof(task) = %d (0x%x)",
         sizeof *this, sizeof *this);

    new_stack(init_stack_sz);

    if (supervisor) {
        supervisor->ref();
    }
}
// Tasks
rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
                     const char *name, size_t init_stack_sz) :
    ref_count(1),
    id(0),
    stk(NULL),
    runtime_sp(0),
    sched(sched_loop->sched),
    sched_loop(sched_loop),
    kernel(sched_loop->kernel),
    name(name),
    list_index(-1),
    boxed(sched_loop->kernel->env, &local_region),
    local_region(&sched_loop->local_region),
    unwinding(false),
    total_stack_sz(0),
    task_local_data(NULL),
    task_local_data_cleanup(NULL),
    state(state),
    cond(NULL),
    cond_name("none"),
    event_reject(false),
    event(NULL),
    killed(false),
    reentered_rust_stack(false),
    disallow_kill(0),
    disallow_yield(0),
    c_stack(NULL),
    next_c_sp(0),
    next_rust_sp(0),
    big_stack(NULL)
{
    LOGPTR(sched_loop, "new task", (uintptr_t)this);
    DLOG(sched_loop, task, "sizeof(task) = %d (0x%x)",
         sizeof *this, sizeof *this);

    new_stack(init_stack_sz);
}
void
rust_task::new_stack(size_t requested_sz) {
    LOG(this, mem, "creating new stack for task %" PRIxPTR, this);
    if (stk) {
        ::check_stack_canary(stk);
    }

    // The minimum stack size, in bytes, of a Rust stack, excluding red zone
    size_t min_sz = sched_loop->min_stack_size;

    // Try to reuse an existing stack segment
    while (stk != NULL && stk->next != NULL) {
        size_t next_sz = user_stack_size(stk->next);
        if (min_sz <= next_sz && requested_sz <= next_sz) {
            LOG(this, mem, "reusing existing stack");
            stk = stk->next;
            return;
        } else {
            LOG(this, mem, "existing stack is not big enough");
            stk_seg *new_next = stk->next->next;
            free_stack(stk->next);
            stk->next = new_next;
            if (new_next) {
                new_next->prev = stk;
            }
        }
    }

    // The size of the current stack segment, excluding red zone
    size_t current_sz = 0;
    if (stk != NULL) {
        current_sz = user_stack_size(stk);
    }
    // The calculated size of the new stack, excluding red zone
    size_t rust_stk_sz = get_next_stack_size(min_sz,
                                             current_sz, requested_sz);

    size_t max_stack = kernel->env->max_stack_size;
    size_t used_stack = total_stack_sz + rust_stk_sz;

    // Don't allow stacks to grow forever. During unwinding we have to allow
    // for more stack than normal in order to allow destructors room to run,
    // arbitrarily selected as 2x the maximum stack size.
    if (!unwinding && used_stack > max_stack) {
        LOG_ERR(this, task, "task %" PRIxPTR " ran out of stack", this);
        abort();
    } else if (unwinding && used_stack > max_stack * 2) {
        LOG_ERR(this, task,
                "task %" PRIxPTR " ran out of stack during unwinding",
                this);
        abort();
    }

    size_t sz = rust_stk_sz + RED_ZONE_SIZE;
    stk_seg *new_stk = create_stack(&local_region, sz);
    LOGPTR(sched_loop, "new stk", (uintptr_t)new_stk);
    new_stk->task = this;
    new_stk->next = NULL;
    new_stk->prev = stk;
    if (stk) {
        stk->next = new_stk;
    }
    LOGPTR(sched_loop, "stk end", new_stk->end);
    stk = new_stk;
    total_stack_sz += user_stack_size(new_stk);
}
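// For reference, a sketch of the two helpers new_stack() leans on,
// reconstructed from the call sites above rather than copied from the
// runtime: user_stack_size() reports the usable bytes of a segment
// (excluding the red zone), and get_next_stack_size() is assumed to
// implement a geometric growth policy -- double the current segment, but
// never below the minimum or below what the caller asked for.
#include <algorithm> // std::max

static size_t
user_stack_size(stk_seg *stk) {
    // Usable space runs from the start of data[] to the segment end,
    // minus the red zone reserved at the top.
    return (size_t)(stk->end - (uintptr_t)&stk->data[0] - RED_ZONE_SIZE);
}

static size_t
get_next_stack_size(size_t min, size_t current, size_t requested) {
    // Doubling keeps the number of segment allocations logarithmic in the
    // total stack consumed, while still honoring explicit requests.
    size_t sz = std::max(min, current * 2);
    return std::max(sz, requested);
}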
void
rust_task::free_stack(stk_seg *stk) {
    LOGPTR(sched_loop, "freeing stk segment", (uintptr_t)stk);
    total_stack_sz -= user_stack_size(stk);
    destroy_stack(&local_region, stk);
}
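// For symmetry, a sketch of the create_stack()/destroy_stack() pair used
// by new_stack() and free_stack(). This is reconstructed from the call
// sites; add_stack_canary(), register_valgrind_stack() and
// deregister_valgrind_stack() are assumed helper names for the canary
// setup checked by check_stack_canary() and the valgrind bookkeeping seen
// in the older new_stk()/del_stk() pair.
static stk_seg *
create_stack(memory_region *region, size_t sz) {
    size_t total_sz = sizeof(stk_seg) + sz;
    stk_seg *stk = (stk_seg *)region->malloc(total_sz, "stack");
    memset(stk, 0, sizeof(stk_seg));
    stk->end = (uintptr_t)&stk->data[0] + sz; // one past the last byte
    add_stack_canary(stk);                    // guard word against overruns
    register_valgrind_stack(stk);             // let valgrind track the range
    return stk;
}

static void
destroy_stack(memory_region *region, stk_seg *stk) {
    deregister_valgrind_stack(stk);
    region->free(stk);
}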