/*
 * Allocates and initializes a new task descriptor at the given priority,
 * enqueues it as READY, then reschedules. The creating task's return value
 * is set to the new task's id on success, or an error code on failure
 * (out of descriptors / invalid priority — both also trip a kernel panic).
 */
void Scheduler_CreateAndScheduleNewTask(Scheduler * scheduler, KernelState * k_state, int priority, void (*code)( )) {
    int result = ERR_K_DEFAULT;

    if (scheduler->num_tasks >= scheduler->max_tasks) {
        assert(0,"out of tds");
        result = ERR_K_OUT_OF_TD;
    } else if (!Queue_IsValidPriority(priority)) {
        assert(0,"invalid priority");
        result = ERR_K_INVALID_PRIORITY;
    } else {
        /* Descriptor slots are handed out sequentially; next free slot == num_tasks. */
        int tid = scheduler->num_tasks;
        int parent = scheduler->current_task_descriptor->id;
        TD * new_td = &(scheduler->task_descriptors[tid]);

        /* Lowest address this task's stack may reach; must stay above the kernel image. */
        int stack_limit = ((int)get_stack_base(tid) - USER_TASK_STACK_SIZE) + 4;
        assertf(stack_limit > (int)&_EndOfProgram,
                "Attempted to create a new task, but this task's stack space goes down to %x, but the kernel ends at %x. This means we the stack will overwrite the kernel. There are currently %d tasks.\n",
                stack_limit, (int)&_EndOfProgram, scheduler->num_tasks);

        TD_Initialize(new_td, tid, priority, parent, get_stack_base(tid), code);

        scheduler->num_tasks += 1;
        scheduler->inited_td[tid] = 1;
        scheduler->num_ready += 1;
        safely_add_task_to_priority_queue(&scheduler->task_queue, new_td, priority);
        result = new_td->id;
    }

    /* Park the creator (it gets the result as its syscall return value) and pick the next task. */
    Scheduler_SaveCurrentTaskState(scheduler, k_state);
    scheduler->current_task_descriptor->return_value = result;
    Scheduler_ChangeTDState(scheduler, scheduler->current_task_descriptor, READY);
    Scheduler_ScheduleAndSetNextTaskState(scheduler, k_state);
}
/*
 * Marks all GC roots belonging to the given custodian/owner for memory
 * accounting. Walks the GC's thread-info list; entries whose object is a
 * Scheme thread get their registers and (for the current thread) the C
 * variable stack marked. Non-thread entries are treated as places: their
 * GC's memory use is charged directly to the owner's account.
 * NOTE(review): assumes work->thread is either a Scheme_Thread or a
 * Scheme_Place when the type tag is not scheme_thread_type — confirm
 * against how thread_infos is populated.
 */
inline static void mark_threads(NewGC *gc, int owner)
{
  GC_Thread_Info *work;
  /* Indirect mark procedure so marks are redirected into btc accounting. */
  Mark2_Proc thread_mark = gc->mark_table[btc_redirect_thread];

  for(work = gc->thread_infos; work; work = work->next) {
    if (work->owner == owner) {
      if (((Scheme_Object *)work->thread)->type == scheme_thread_type) {
        /* thread */
        if (((Scheme_Thread *)work->thread)->running) {
          thread_mark(work->thread, gc);
          if (work->thread == scheme_current_thread) {
            /* Only the running thread's live C stack is in GC_variable_stack. */
            GC_mark_variable_stack(GC_variable_stack,
                                   0,
                                   get_stack_base(gc),
                                   NULL);
          }
        }
      } else {
        /* place */
#ifdef MZ_USE_PLACES
        /* add in the memory used by the place's GC */
        intptr_t sz;
        Scheme_Place_Object *place_obj = ((Scheme_Place *)work->thread)->place_obj;
        if (place_obj) {
          /* memory_use is written by the place's own GC thread; read under its lock. */
          mzrt_mutex_lock(place_obj->lock);
          sz = place_obj->memory_use;
          mzrt_mutex_unlock(place_obj->lock);
          account_memory(gc, owner, gcBYTES_TO_WORDS(sz), 0);
        }
#endif
      }
    }
  }
}
/*
 * Bootstraps the scheduler with the very first task: the kernel task,
 * running at the lowest priority in descriptor slot 0. Enqueues it,
 * schedules it as the current task, and reports memory status.
 */
void Scheduler_InitAndSetKernelTask(Scheduler * scheduler, KernelState * k_state) {
    const int kernel_task_id = 0;
    const int kernel_task_priority = LOWEST;
    TD * kernel_td = &(scheduler->task_descriptors[kernel_task_id]);

    /* Point current_task_descriptor somewhere valid up front; the real
       assignment happens inside Scheduler_ScheduleAndSetNextTaskState. */
    scheduler->current_task_descriptor = kernel_td;

    /* TODO: add define special case for partent of first task */
    TD_Initialize(kernel_td,
                  kernel_task_id,
                  kernel_task_priority,
                  99,
                  get_stack_base(kernel_task_id),
                  (void *)&KernelTask_Start);

    scheduler->num_tasks++;
    scheduler->num_ready += 1;
    safely_add_task_to_priority_queue(&scheduler->task_queue, kernel_td, kernel_task_priority);

    Scheduler_ScheduleAndSetNextTaskState(scheduler, k_state);
    print_memory_status();
}
/*
 * Marks the roots of every running thread owned by the given owner, for
 * memory accounting. Marking goes through the btc-redirect mark procedure;
 * the current thread additionally gets its C variable stack marked.
 */
inline static void mark_threads(NewGC *gc, int owner)
{
  Mark2_Proc thread_mark = gc->mark_table[btc_redirect_thread];
  GC_Thread_Info *info;

  for (info = gc->thread_infos; info; info = info->next) {
    if (info->owner != owner)
      continue;
    if (!((Scheme_Thread *)info->thread)->running)
      continue;

    thread_mark(info->thread, gc);

    /* Only the currently-executing thread has live state in GC_variable_stack. */
    if (info->thread == scheme_current_thread)
      GC_mark_variable_stack(GC_variable_stack, 0, get_stack_base(gc), NULL);
  }
}
/*
 * Sanity-checks a task's saved stack pointer: it must lie between the
 * task's stack base (empty stack) and base - USER_TASK_STACK_SIZE (full
 * stack). Panics via assertf on underflow or overflow.
 */
void validate_stack_value(TD * td){
    const int base = (int)get_stack_base(td->id);       /* SP when the stack is empty */
    const int limit = base - USER_TASK_STACK_SIZE;      /* lowest legal SP */
    const int sp = (int)td->stack_pointer;

    assertf(
        sp <= base,
        "User task id %d has stack underflow. SP is %x, but shouldn't be more than %x.",
        td->id,
        td->stack_pointer,
        base
    );
    assertf(
        sp >= limit,
        "User task id %d has stack overflow. SP is %x, but shouldn't be less than %x.",
        td->id,
        td->stack_pointer,
        limit
    );
}