/*
 * void task_attach(task_t *, proc_t *)
 *
 * Overview
 *   task_attach() makes a process a member of a task; this happens only as
 *   a consequence of fork() or settaskid().  The process is inserted into
 *   the task's circular doubly-linked member list (p_tasknext/p_taskprev)
 *   and becomes the new head of that list.  A hold is placed on the task
 *   on behalf of the new member.
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   pidlock and p->p_lock must be held on entry.
 */
void
task_attach(task_t *tk, proc_t *p)
{
	ASSERT(tk != NULL);
	ASSERT(p != NULL);
	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&p->p_lock));

	proc_t *head = tk->tk_memb_list;

	if (head == NULL) {
		/* First member: the list is just p, linked to itself. */
		p->p_tasknext = p->p_taskprev = p;
	} else {
		/*
		 * Splice p in between the current head and the current
		 * tail (head's predecessor in the circular list).
		 */
		proc_t *tail = head->p_taskprev;

		p->p_tasknext = head;
		p->p_taskprev = tail;
		tail->p_tasknext = p;
		head->p_taskprev = p;
	}

	/* The newest member becomes the list head. */
	tk->tk_memb_list = p;

	/* One hold per member keeps the task alive while p belongs to it. */
	task_hold(tk);
	p->p_task = tk;
}
/** Kernel initialization thread.
 *
 * kinit takes care of higher level kernel initialization (i.e. thread
 * creation, userspace initialization etc.).  It runs once at boot:
 * it brings up RCU and the global work queue, starts the remaining CPUs
 * (when CONFIG_SMP), spawns the system-load and kernel-console threads,
 * and finally loads and launches the init tasks supplied by the boot
 * loader, wiring the first of them up as ipc_phone_0.
 *
 * @param arg Not used.
 */
void kinit(void *arg)
{
	thread_t *thread;

	/*
	 * Detach kinit as nobody will call thread_join_timeout() on it.
	 */
	thread_detach(THREAD);

	/* Interrupts stay off through SMP bring-up; re-enabled below. */
	interrupts_disable();

	/* Start processing RCU callbacks. RCU is fully functional afterwards. */
	rcu_kinit_init();

	/*
	 * Start processing work queue items. Some may have been queued during boot.
	 */
	workq_global_worker_init();

#ifdef CONFIG_SMP
	if (config.cpu_count > 1) {
		waitq_initialize(&ap_completion_wq);

		/*
		 * Create the kmp thread and wait for its completion.
		 * cpu1 through cpuN-1 will come up consecutively and
		 * not mess together with kcpulb threads.
		 * Just a beautification.
		 */
		thread = thread_create(kmp, NULL, TASK,
		    THREAD_FLAG_UNCOUNTED, "kmp");
		if (thread != NULL) {
			/* kmp must run on the bootstrap CPU. */
			thread_wire(thread, &cpus[0]);
			thread_ready(thread);
		} else
			panic("Unable to create kmp thread.");

		/* panic() above does not return, so thread is non-NULL here. */
		thread_join(thread);
		thread_detach(thread);

		/*
		 * For each CPU, create its load balancing thread.
		 */
		unsigned int i;

		for (i = 0; i < config.cpu_count; i++) {
			thread = thread_create(kcpulb, NULL, TASK,
			    THREAD_FLAG_UNCOUNTED, "kcpulb");
			if (thread != NULL) {
				thread_wire(thread, &cpus[i]);
				thread_ready(thread);
			} else
				log(LF_OTHER, LVL_ERROR,
				    "Unable to create kcpulb thread for cpu%u", i);
		}
	}
#endif /* CONFIG_SMP */

	/*
	 * At this point SMP, if present, is configured.
	 */
	ARCH_OP(post_smp_init);

	/* Start thread computing system load */
	thread = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE,
	    "kload");
	if (thread != NULL)
		thread_ready(thread);
	else
		log(LF_OTHER, LVL_ERROR, "Unable to create kload thread");

#ifdef CONFIG_KCONSOLE
	if (stdin) {
		/*
		 * Create kernel console.
		 */
		thread = thread_create(kconsole_thread, NULL, TASK,
		    THREAD_FLAG_NONE, "kconsole");
		if (thread != NULL)
			thread_ready(thread);
		else
			log(LF_OTHER, LVL_ERROR,
			    "Unable to create kconsole thread");
	}
#endif /* CONFIG_KCONSOLE */

	/*
	 * Store the default stack size in sysinfo so that uspace can create
	 * stack with this default size.
	 */
	sysinfo_set_item_val("default.stack_size", NULL, STACK_SIZE_USER);

	interrupts_enable();

	/*
	 * Create user tasks, load RAM disk images.
	 */
	size_t i;
	program_t programs[CONFIG_INIT_TASKS];
	/*
	 * NOTE(review): the loops below iterate up to init.cnt while
	 * programs[] has CONFIG_INIT_TASKS slots — assumes init.cnt is
	 * bounded by CONFIG_INIT_TASKS; confirm against the boot code
	 * that fills the init structure.
	 */

	/*
	 * First pass: publish each task's argument string via sysinfo so
	 * userspace can retrieve it.
	 */
	// FIXME: do not propagate arguments through sysinfo
	// but pass them directly to the tasks
	for (i = 0; i < init.cnt; i++) {
		const char *arguments = init.tasks[i].arguments;
		/* Skip tasks with no arguments or no name to key them by. */
		if (str_length(arguments) == 0)
			continue;
		if (str_length(init.tasks[i].name) == 0)
			continue;
		size_t arguments_size = str_size(arguments);

		/* Copy is handed to sysinfo; skip this task on OOM. */
		void *arguments_copy = malloc(arguments_size, 0);
		if (arguments_copy == NULL)
			continue;
		memcpy(arguments_copy, arguments, arguments_size);

		char item_name[CONFIG_TASK_NAME_BUFLEN + 15];
		snprintf(item_name, CONFIG_TASK_NAME_BUFLEN + 15,
		    "init_args.%s", init.tasks[i].name);

		sysinfo_set_item_data(item_name, NULL, arguments_copy,
		    arguments_size);
	}

	/*
	 * Second pass: map each init image and create a program from it.
	 */
	for (i = 0; i < init.cnt; i++) {
		/* Images must start on a frame boundary to be mappable. */
		if (init.tasks[i].paddr % FRAME_SIZE) {
			log(LF_OTHER, LVL_ERROR,
			    "init[%zu]: Address is not frame aligned", i);
			programs[i].task = NULL;
			continue;
		}

		/*
		 * Construct task name from the 'init:' prefix and the
		 * name stored in the init structure (if any).
		 */
		char namebuf[TASK_NAME_BUFLEN];
		const char *name = init.tasks[i].name;
		if (name[0] == 0)
			name = "<unknown>";

		STATIC_ASSERT(TASK_NAME_BUFLEN >= INIT_PREFIX_LEN);
		str_cpy(namebuf, TASK_NAME_BUFLEN, INIT_PREFIX);
		str_cpy(namebuf + INIT_PREFIX_LEN,
		    TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name);

		/*
		 * Create virtual memory mappings for init task images.
		 */
		uintptr_t page = km_map(init.tasks[i].paddr,
		    init.tasks[i].size,
		    PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
		ASSERT(page);

		int rc = program_create_from_image((void *) page, namebuf,
		    &programs[i]);

		if (rc == 0) {
			if (programs[i].task != NULL) {
				/*
				 * Set capabilities to init userspace tasks.
				 */
				cap_set(programs[i].task, CAP_CAP | CAP_MEM_MANAGER |
				    CAP_IO_MANAGER | CAP_IRQ_REG);

				if (!ipc_phone_0) {
					ipc_phone_0 = &programs[i].task->answerbox;
					/*
					 * Hold the first task so that the
					 * ipc_phone_0 remains a valid pointer
					 * even if the first task exits for
					 * whatever reason.
					 */
					task_hold(programs[i].task);
				}
			}

			/*
			 * If programs[i].task == NULL then it is
			 * the program loader and it was registered
			 * successfully.
			 */
		} else if (i == init.cnt - 1) {
			/*
			 * Assume the last task is the RAM disk.
			 */
			init_rd((void *) init.tasks[i].paddr, init.tasks[i].size);
		} else
			log(LF_OTHER, LVL_ERROR,
			    "init[%zu]: Init binary load failed "
			    "(error %d, loader status %u)", i, rc,
			    programs[i].loader_status);
	}

	/*
	 * Run user tasks.  Slots that failed above have task == NULL.
	 */
	for (i = 0; i < init.cnt; i++) {
		if (programs[i].task != NULL)
			program_ready(&programs[i]);
	}

#ifdef CONFIG_KCONSOLE
	if (!stdin) {
		/*
		 * No console input: loop forever printing a spinner so it
		 * is visible that the kernel is alive.  This shadows the
		 * outer size_t i intentionally.
		 */
		thread_sleep(10);
		printf("kinit: No stdin\nKernel alive: .");

		unsigned int i = 0;
		while (true) {
			printf("\b%c", alive[i % ALIVE_CHARS]);
			thread_sleep(1);
			i++;
		}
	}
#endif /* CONFIG_KCONSOLE */
}
/*
 * void task_init(void)
 *
 * Overview
 *   task_init() initializes task-related hashes, caches, and the task id
 *   space.  Additionally, task_init() establishes p0 as a member of task0.
 *   Called by main().
 *
 * Return values
 *   None.
 *
 * Caller's context
 *   task_init() must be called prior to MP startup.
 */
void
task_init(void)
{
	proc_t *p = &p0;
	mod_hash_hndl_t hndl;
	rctl_set_t *set;
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	/*
	 * Initialize task_cache and taskid_space.
	 */
	task_cache = kmem_cache_create("task_cache", sizeof (task_t), 0,
	    NULL, NULL, NULL, NULL, NULL, 0);
	taskid_space = id_space_create("taskid_space", 0, MAX_TASKID);

	/*
	 * Initialize task hash table.
	 */
	task_hash = mod_hash_create_idhash("task_hash", task_hash_size,
	    mod_hash_null_valdtor);

	/*
	 * Initialize task-based rctls.
	 */
	rc_task_lwps = rctl_register("task.max-lwps", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_lwps_ops);
	rc_task_nprocs = rctl_register("task.max-processes", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_COUNT, INT_MAX, INT_MAX,
	    &task_procs_ops);
	rc_task_cpu_time = rctl_register("task.max-cpu-time", RCENTITY_TASK,
	    RCTL_GLOBAL_NOACTION | RCTL_GLOBAL_DENY_NEVER |
	    RCTL_GLOBAL_CPU_TIME | RCTL_GLOBAL_INFINITE |
	    RCTL_GLOBAL_UNOBSERVABLE | RCTL_GLOBAL_SECONDS, UINT64_MAX,
	    UINT64_MAX, &task_cpu_time_ops);

	/*
	 * Create task0 and place p0 in it as a member.
	 */
	task0p = kmem_cache_alloc(task_cache, KM_SLEEP);
	bzero(task0p, sizeof (task_t));

	task0p->tk_tkid = id_alloc(taskid_space);
	task0p->tk_usage = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	task0p->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	/* task0 lives in project 0 of the global zone. */
	task0p->tk_proj = project_hold_by_id(0, &zone0,
	    PROJECT_HOLD_INSERT);
	task0p->tk_flags = TASK_NORMAL;
	task0p->tk_nlwps = p->p_lwpcnt;
	task0p->tk_nprocs = 1;
	task0p->tk_zone = global_zone;
	task0p->tk_commit_next = NULL;

	/*
	 * Build task0's rctl set.  Preallocation is done outside the
	 * process lock; rctl_set_init() itself runs under curproc->p_lock.
	 */
	set = rctl_set_create();
	gp = rctl_set_init_prealloc(RCENTITY_TASK);
	mutex_enter(&curproc->p_lock);
	e.rcep_p.task = task0p;
	e.rcep_t = RCENTITY_TASK;
	task0p->tk_rctls = rctl_set_init(RCENTITY_TASK, curproc, &e, set,
	    gp);
	mutex_exit(&curproc->p_lock);
	rctl_prealloc_destroy(gp);

	/*
	 * Insert task0 into the id hash.  The handle is reserved up front
	 * so the insert under task_hash_lock cannot sleep for memory.
	 */
	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(task0p->tk_tkid, GLOBAL_ZONEID) == NULL);
	if (mod_hash_insert_reserve(task_hash,
	    (mod_hash_key_t)(uintptr_t)task0p->tk_tkid,
	    (mod_hash_val_t *)task0p, hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", task0p->tk_tkid,
		    (void *)task0p);
	}
	mutex_exit(&task_hash_lock);

	task0p->tk_memb_list = p;

	task0p->tk_nprocs_kstat = task_kstat_create(task0p, task0p->tk_zone);

	/*
	 * Initialize task pointers for p0, including doubly linked list of task
	 * members.
	 */
	p->p_task = task0p;
	p->p_taskprev = p->p_tasknext = p;
	task_hold(task0p);
}
/*
 * task_t *task_create(projid_t, zone *)
 *
 * Overview
 *   A process constructing a new task calls task_create() to construct and
 *   preinitialize the task for the appropriate destination project.  Only one
 *   task, the primordial task0, is not created with task_create().
 *
 * Return values
 *   A pointer to the newly created, preinitialized task (never NULL; all
 *   allocations use KM_SLEEP).
 *
 * Caller's context
 *   Caller's context should be safe for KM_SLEEP allocations.
 *   The caller should appropriately bump the kpj_ntasks counter on the
 *   project that contains this task.
 */
task_t *
task_create(projid_t projid, zone_t *zone)
{
	task_t *tk = kmem_cache_alloc(task_cache, KM_SLEEP);
	task_t *ancestor_tk;
	taskid_t tkid;
	task_usage_t *tu = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	mod_hash_hndl_t hndl;
	rctl_set_t *set = rctl_set_create();
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	bzero(tk, sizeof (task_t));

	tk->tk_tkid = tkid = id_alloc(taskid_space);
	tk->tk_nlwps = 0;
	tk->tk_nlwps_ctl = INT_MAX;
	tk->tk_nprocs = 0;
	tk->tk_nprocs_ctl = INT_MAX;
	tk->tk_usage = tu;
	tk->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	tk->tk_proj = project_hold_by_id(projid, zone,
	    PROJECT_HOLD_INSERT);
	tk->tk_flags = TASK_NORMAL;
	tk->tk_commit_next = NULL;

	/*
	 * Copy ancestor task's resource controls.  The ancestor is read
	 * (and held) under curproc->p_lock so it cannot change under us.
	 */
	zone_task_hold(zone);
	mutex_enter(&curproc->p_lock);
	ancestor_tk = curproc->p_task;
	task_hold(ancestor_tk);
	tk->tk_zone = zone;
	mutex_exit(&curproc->p_lock);

	/*
	 * Preallocate for the duplicate, then retry until the ancestor's
	 * rctl set is stable enough to be duplicated under rcs_lock.
	 */
	for (;;) {
		gp = rctl_set_dup_prealloc(ancestor_tk->tk_rctls);

		mutex_enter(&ancestor_tk->tk_rctls->rcs_lock);

		if (rctl_set_dup_ready(ancestor_tk->tk_rctls, gp))
			break;

		mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

		rctl_prealloc_destroy(gp);
	}

	/*
	 * At this point, curproc does not have the appropriate linkage
	 * through the task to the project. So, rctl_set_dup should only
	 * copy the rctls, and leave the callbacks for later.
	 */
	e.rcep_p.task = tk;
	e.rcep_t = RCENTITY_TASK;
	tk->tk_rctls = rctl_set_dup(ancestor_tk->tk_rctls, curproc, curproc,
	    &e, set, gp, RCD_DUP);
	mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

	rctl_prealloc_destroy(gp);

	/*
	 * Record the ancestor task's ID for use by extended accounting.
	 */
	tu->tu_anctaskid = ancestor_tk->tk_tkid;
	task_rele(ancestor_tk);

	/*
	 * Put new task structure in the hash table.  The handle is
	 * reserved first so the insert under task_hash_lock cannot sleep.
	 */
	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(tkid, zone->zone_id) == NULL);
	if (mod_hash_insert_reserve(task_hash,
	    (mod_hash_key_t)(uintptr_t)tkid, (mod_hash_val_t *)tk,
	    hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", tkid, (void *)tk);
	}
	mutex_exit(&task_hash_lock);

	tk->tk_nprocs_kstat = task_kstat_create(tk, zone);
	return (tk);
}