void bootloader_app_start(uint32_t app_addr)
{
    // If the application's CRC has been checked and passed, the magic number will be written and we
    // can start the application safely.
    uint32_t err_code = sd_softdevice_disable();
    APP_ERROR_CHECK(err_code);

    interrupts_disable();

#if defined (S210_V3_STACK)
    err_code = sd_softdevice_forward_to_application();
#else
    err_code = sd_softdevice_vector_table_base_set(CODE_REGION_1_START);
#endif
    APP_ERROR_CHECK(err_code);

    bootloader_util_app_start(CODE_REGION_1_START);
}
void __attribute__((optimize("O0"))) panic(char* error, ...)
{
    if(unlikely(!spinlock_get(&lock, 30))) {
        freeze();
    }

    va_list va;
    va_list va_serial;
    va_start(va, error);
    /* vprintf() consumes the va_list, so take a copy before printing a second time. */
    va_copy(va_serial, va);

    interrupts_disable();
    panic_printf("\nKernel Panic: ");
    vprintf(error, va);
    serial_vprintf(error, va_serial);
    panic_printf("\n");
    va_end(va_serial);
    va_end(va);

    panic_printf("Last PIT tick: %d (rate %d, uptime: %d seconds)\n",
        (uint32_t)timer_tick, timer_rate, uptime());

    task_t* task = scheduler_get_current();
    if(task) {
        panic_printf("Running task: %d <%s>", task->pid, task->name);

        /*
        uint32_t task_offset = task->state->eip - task->entry;
        if(task_offset >= 0) {
            panic_printf("+%x at 0x%x", task_offset, task->state->eip);
        }
        */

        panic_printf("\n");
    } else {
        panic_printf("Running task: [No task running]\n");
    }

    panic_printf("Paging context: %s\n\n", vmem_get_name(vmem_currentContext));
    panic_printf("Call trace:\n");

    intptr_t addresses[10];
    int read = walk_stack(addresses, 10);
    for(int i = 0; i < read; i++) {
        panic_printf("#%-6d %s <%#x>\n", i, addr2name(addresses[i]), addresses[i]);
    }

    freeze();
}
void init_rtc (void)
{
    struct tm tim;

    intr_install_handler (8, do_rtc);

    interrupts_disable ();
    /* --------------- */
    /* x    : update in progress bit
     * xxx  : leave alone: 0b010 means 32768Hz
     * xxxx : 32768Hz >> (0bxxxx - 1), defaults to: 0b0110, 1024Hz
     * Defaults are good enough: */
    /* rtc_write (0xa, 0b); */

    /* x   : PIE: periodic interrupt enable
     * xxx : alarm, update-ended interrupt, square wave
     * x   : binary mode (1) or bcd (0)
     * x   : 24 hour format (1) or 12 (0)
     * x   : daylight savings enable */
    rtc_write (0xb, rtc_read (0xb) | 0b1000110);
    interrupts_enable ();
    /* --------------- */

    /* FIXME: It takes some time to take effect... why? */
    msleep (500);

    rtc_get_tm (&tim);
    /* The interrupt handler is already running, this should overwrite it */
    unixtime = mktime (&tim);

    /* TODO: zero pad this... */
    kprintf ("System clock set to: %d-%d-%d %d:%d:%d (unix: %lld)\n",
             tim.tm_year, tim.tm_mon, tim.tm_mday,
             tim.tm_hour, tim.tm_min, tim.tm_sec, unixtime);
    return;
}
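/*
 * For reference, a hedged sketch of the bits behind the 0b1000110 mask
 * written to Status Register B above, following the standard MC146818
 * register layout. These constant names are hypothetical and not defined
 * anywhere else in this code.
 */
#define RTC_REGB_DSE   (1 << 0)  /* daylight savings enable */
#define RTC_REGB_24H   (1 << 1)  /* 24 hour format (1) or 12 (0) */
#define RTC_REGB_BIN   (1 << 2)  /* binary mode (1) or BCD (0) */
#define RTC_REGB_SQWE  (1 << 3)  /* square wave enable */
#define RTC_REGB_UIE   (1 << 4)  /* update-ended interrupt enable */
#define RTC_REGB_AIE   (1 << 5)  /* alarm interrupt enable */
#define RTC_REGB_PIE   (1 << 6)  /* periodic interrupt enable */

/* 0b1000110 == RTC_REGB_PIE | RTC_REGB_BIN | RTC_REGB_24H */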
// Add new task to schedule.
void scheduler_add(task_t *task)
{
    interrupts_disable();

    // No task yet?
    if(currentTask == NULL) {
        currentTask = task;
        task->next = task;
        task->last = task;
    } else {
        task->next = currentTask->next;
        task->last = currentTask;
        currentTask->next = task;
    }

    interrupts_enable();
    log(LOG_INFO, "scheduler: Registered new task with PID %d <%s>\n",
        task->pid, task->name);
}
void *sched_bsp_idle_thread (void *notused)
{
    interrupts_disable();

    clockeventer_subsystem_init();
    pit_timer_init();
    lapic_bsp_pre_init();
    pci_init();
    rtc_init();
    keyboard_init();
    lapic_common_init();

#ifdef CONFIG_ACPICA
    acpica_sub_system_init ();
    pci_scan_devices();
#endif

    clockcounter_subsystem_init();
    timerchain_subsystem_init();
    real_wall_time_init();
    tick_eventer_init();
    lapic_bsp_post_init();

    interrupts_enable();

    thread_create_test();

    lapic_ipi(1, 0, INTR_LAPIC_RESCHEDULE);

    cpu_heart_beat(this_cpu());
}
void nrfx_uart_uninit(nrfx_uart_t const * p_instance)
{
    uart_control_block_t * p_cb = &m_cb[p_instance->drv_inst_idx];

    nrf_uart_disable(p_instance->p_reg);

    if (p_cb->handler)
    {
        interrupts_disable(p_instance);
    }

    pins_to_default(p_instance);

#if NRFX_CHECK(NRFX_PRS_ENABLED)
    nrfx_prs_release(p_instance->p_reg);
#endif

    p_cb->state   = NRFX_DRV_STATE_UNINITIALIZED;
    p_cb->handler = NULL;
    NRFX_LOG_INFO("Instance uninitialized: %d.", p_instance->drv_inst_idx);
}
/*
 * pthread_spin_trylock - try to lock a spin lock object
 *
 * SEE pthread_spin_lock() for more information.
 */
int pthread_spin_trylock (pthread_spinlock_t *lock)
{
    ipl_t flags;

    flags = interrupts_disable();

    if (spinlock_trylock(&lock->lock)) {
        /* Could not take the lock: restore the interrupt level and report busy. */
        interrupts_restore(flags);
        return EBUSY;
    } else {
        /* Lock acquired: keep interrupts disabled and remember the previous level. */
        lock->flags = flags;
        return OK;
    }
}
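/*
 * A minimal usage sketch for the trylock API above. It assumes a
 * pthread_spin_unlock() counterpart that releases the lock and restores the
 * interrupt level saved in lock->flags; do_work_locked() is a hypothetical
 * critical section, not part of the original code.
 */
static void try_do_work(pthread_spinlock_t *lock)
{
    if (pthread_spin_trylock(lock) == OK)
    {
        do_work_locked();           /* lock held, interrupts still disabled */
        pthread_spin_unlock(lock);  /* assumed to restore lock->flags */
    }
    /* On EBUSY the interrupt level has already been restored by trylock. */
}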
void bootloader_abort(bl_end_t end_reason)
{
    uint32_t app_length = m_bl_info_pointers.p_segment_app->length;
    if (m_transaction.transaction_id != 0)
    {
        app_length = m_transaction.length;
    }
    switch (end_reason)
    {
        case BL_END_SUCCESS:
        case BL_END_ERROR_TIMEOUT:
        case BL_END_FWID_VALID:
            if (app_is_valid((uint32_t*) m_bl_info_pointers.p_segment_app->start, app_length))
            {
                interrupts_disable();
#ifdef DEBUG_LEDS
                NRF_GPIO->OUTCLR = (1 << 22);
#endif
                sd_mbr_command_t com = {SD_MBR_COMMAND_INIT_SD, };

                volatile uint32_t err_code = sd_mbr_command(&com);
                APP_ERROR_CHECK(err_code);

                err_code = sd_softdevice_vector_table_base_set(m_bl_info_pointers.p_segment_app->start);
                APP_ERROR_CHECK(err_code);
#ifdef DEBUG_LEDS
                NRF_GPIO->OUTSET = (1 << 21);
                NRF_GPIO->OUTSET = (1 << 22);
#endif
                bootloader_util_app_start(m_bl_info_pointers.p_segment_app->start);
            }
            break;
        case BL_END_ERROR_INVALID_PERSISTENT_STORAGE:
            APP_ERROR_CHECK_BOOL(false);
        default:
            NVIC_SystemReset();
            break;
    }
}
/**@brief Function for preparing the reset, disabling the SoftDevice, and jumping to the bootloader.
 */
static void bootloader_start(void)
{
    m_reset_prepare();

    uint32_t err_code = sd_power_gpregret_set(BOOTLOADER_DFU_START);
    APP_ERROR_CHECK(err_code);

    err_code = sd_softdevice_disable();
    APP_ERROR_CHECK(err_code);

    err_code = sd_softdevice_vector_table_base_set(NRF_UICR->BOOTLOADERADDR);
    APP_ERROR_CHECK(err_code);

    if (m_dm_handle_valid)
    {
        dfu_app_set_peer_data();
    }

    NVIC_ClearPendingIRQ(SWI2_IRQn);
    interrupts_disable();
    bootloader_util_app_start(NRF_UICR->BOOTLOADERADDR);
}
uint32_t dfu_jump_to_bootloader(void)
{
    if (NRF_UICR->BOOTLOADERADDR != 0xFFFFFFFF)
    {
        interrupts_disable();
#ifdef SOFTDEVICE_PRESENT
        sd_power_reset_reason_clr(0x0F000F);
        sd_power_gpregret_set(RBC_MESH_GPREGRET_CODE_FORCED_REBOOT);
        sd_nvic_SystemReset();
#else
        NRF_POWER->RESETREAS = 0x0F000F; /* erase reset-reason to avoid wrongful state-readout on reboot */
        NRF_POWER->GPREGRET = RBC_MESH_GPREGRET_CODE_FORCED_REBOOT;
        NVIC_SystemReset();
        //TODO: Wait for serial commands and flash?
#endif
        return NRF_SUCCESS; /* unreachable */
    }
    else
    {
        /* the UICR->BOOTLOADERADDR isn't set, and we have no way to find the bootloader-address. */
        return NRF_ERROR_FORBIDDEN;
    }
}
/*
 * Return current RTC time. Note that due to waiting for the update cycle to
 * complete, this call may take some time.
 */
static uint64_t rtc_gettimeofday(void)
{
    struct bmk_clock_ymdhms dt;

    interrupts_disable();

    /*
     * If RTC_UIP is down, we have at least 244us to obtain a
     * consistent reading before an update can occur.
     */
    while (rtc_read(RTC_STATUS_A) & RTC_UIP)
        continue;

    dt.dt_sec  = bcdtobin(rtc_read(RTC_SEC));
    dt.dt_min  = bcdtobin(rtc_read(RTC_MIN));
    dt.dt_hour = bcdtobin(rtc_read(RTC_HOUR));
    dt.dt_day  = bcdtobin(rtc_read(RTC_DAY));
    dt.dt_mon  = bcdtobin(rtc_read(RTC_MONTH));
    dt.dt_year = bcdtobin(rtc_read(RTC_YEAR)) + 2000;

    interrupts_enable();

    return clock_ymdhms_to_secs(&dt) * NSEC_PER_SEC;
}
/*
 * Adds length bytes from buffer to the transmit circular buffer associated
 * with the US1 interface. This function relies on the fact that it is the
 * only function to *add* bytes to the buffer. While this function is in
 * progress, bytes may be removed, but not added. This function does not
 * update the size field of the buffer until after all bytes have been added,
 * to minimize the number of times interrupts must be disabled.
 */
us1_error_t us1_send_buffer(uint8_t buffer[], uint16_t length)
{
    us1_error_t ret_val = US1_SUCCESS;
    uint16_t bytes_added = 0;

    if((tx_circular_buffer.size + length) < tx_circular_buffer.capacity)
    {
        while(bytes_added != length)
        {
            cb_add_byte(&tx_circular_buffer, buffer[bytes_added++]);
        }

        // Update the number of bytes in the circular buffer
        interrupts_disable();
        tx_circular_buffer.size += length;
        us1_kickstart_tx();
        interrupts_enable();
    }
    else
    {
        ret_val = US1_NOT_ENOUGH_ROOM;
    }

    return ret_val;
}
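/*
 * A minimal caller sketch for us1_send_buffer() above; the busy-wait retry
 * policy is illustrative only and us1_send_string() is a hypothetical helper.
 */
static void us1_send_string(const char *s, uint16_t length)
{
    /* Retry while the transmit buffer is full; the TX interrupt drains it. */
    while (us1_send_buffer((uint8_t *) s, length) == US1_NOT_ENOUGH_ROOM)
    {
        /* A real caller might yield, sleep, or drop the data instead. */
    }
}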
/**@brief Function for preparing the reset, disabling the SoftDevice, and jumping to the bootloader.
 */
void bootloader_start(void)
{
    m_reset_prepare();

    uint32_t err_code = sd_power_gpregret_set(BOOTLOADER_DFU_START);
    APP_ERROR_CHECK(err_code);

    err_code = sd_softdevice_disable();
    APP_ERROR_CHECK(err_code);

    err_code = sd_softdevice_vector_table_base_set(NRF_UICR->BOOTLOADERADDR);
    APP_ERROR_CHECK(err_code);

    // Commenting out the following block because it brings in unwanted dependencies from bonding.
    // TODO: discuss this with Nordic.
    // if (m_dm_handle_valid)
    // {
    //     dfu_app_set_peer_data();
    // }

    NVIC_ClearPendingIRQ(SWI2_IRQn);
    interrupts_disable();
    bootloader_util_app_start(NRF_UICR->BOOTLOADERADDR);
}
int solo5_poll(uint64_t until_nsecs)
{
    int rc = 0;

    /*
     * cpu_block() as currently implemented will only poll for the maximum time
     * the PIT can be run in "one shot" mode. Loop until either I/O is possible
     * or the desired time has been reached.
     */

    interrupts_disable();

    do {
        if (virtio_net_pkt_poll()) {
            rc = 1;
            break;
        }

        cpu_block(until_nsecs);
    } while (solo5_clock_monotonic() < until_nsecs);

    if (!rc)
        rc = virtio_net_pkt_poll();

    interrupts_enable();

    return rc;
}
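/*
 * A hedged caller sketch for solo5_poll(): wait up to 10 ms for a packet to
 * become readable. solo5_clock_monotonic() is taken from the code above;
 * handle_packet() is a hypothetical callback, not part of the original code.
 */
static void wait_for_packet(void)
{
    uint64_t deadline = solo5_clock_monotonic() + 10ULL * 1000 * 1000; /* 10 ms in ns */

    if (solo5_poll(deadline))
        handle_packet();    /* I/O is possible: a packet can be read without blocking */
}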
/**@brief Function for preparing the reset, disabling SoftDevice, and jumping to the bootloader.
 *
 * @param[in] conn_handle Connection handle for peer requesting to enter DFU mode.
 */
static void bootloader_start(uint16_t conn_handle)
{
    uint32_t err_code;
    uint16_t sys_serv_attr_len = sizeof(m_peer_data.sys_serv_attr);

    err_code = sd_ble_gatts_sys_attr_get(conn_handle,
                                         m_peer_data.sys_serv_attr,
                                         &sys_serv_attr_len,
                                         BLE_GATTS_SYS_ATTR_FLAG_SYS_SRVCS);
    if (err_code != NRF_SUCCESS)
    {
        // Any error at this stage means the system service attributes could not be fetched.
        // This means the service changed indication cannot be sent in DFU mode, but it is
        // still possible to establish a connection.
    }

    m_reset_prepare();

    err_code = sd_power_gpregret_clr(0xFF);
    APP_ERROR_CHECK(err_code);

    err_code = sd_power_gpregret_set(BOOTLOADER_DFU_START);
    APP_ERROR_CHECK(err_code);

    err_code = sd_softdevice_disable();
    APP_ERROR_CHECK(err_code);

    err_code = sd_softdevice_vector_table_base_set(NRF_UICR->NRFFW[0]);
    APP_ERROR_CHECK(err_code);

    dfu_app_peer_data_set(conn_handle);

    NVIC_ClearPendingIRQ(SWI2_IRQn);
    interrupts_disable();
    bootloader_util_app_start(NRF_UICR->NRFFW[0]);
}
/* Freezes the kernel (with no way to unfreeze).
 * Mainly used for debugging during development and in panic(_raw). */
void freeze(void)
{
    interrupts_disable();
    asm volatile("hlt;");
}
/** Allocate frames of physical memory.
 *
 * @param count      Number of continuous frames to allocate.
 * @param flags      Flags for host zone selection and address processing.
 * @param constraint Indication of physical address bits that cannot be
 *                   set in the address of the first allocated frame.
 * @param pzone      Preferred zone.
 *
 * @return Physical address of the allocated frame.
 *
 */
uintptr_t frame_alloc_generic(size_t count, frame_flags_t flags,
    uintptr_t constraint, size_t *pzone)
{
    ASSERT(count > 0);

    size_t hint = pzone ? (*pzone) : 0;
    pfn_t frame_constraint = ADDR2PFN(constraint);

    /*
     * If not told otherwise, we must first reserve the memory.
     */
    if (!(flags & FRAME_NO_RESERVE))
        reserve_force_alloc(count);

loop:
    irq_spinlock_lock(&zones.lock, true);

    /*
     * First, find suitable frame zone.
     */
    size_t znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
        frame_constraint, hint);

    /*
     * If no memory, reclaim some slab memory,
     * if it does not help, reclaim all.
     */
    if ((znum == (size_t) -1) && (!(flags & FRAME_NO_RECLAIM))) {
        irq_spinlock_unlock(&zones.lock, true);
        size_t freed = slab_reclaim(0);
        irq_spinlock_lock(&zones.lock, true);

        if (freed > 0)
            znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
                frame_constraint, hint);

        if (znum == (size_t) -1) {
            irq_spinlock_unlock(&zones.lock, true);
            freed = slab_reclaim(SLAB_RECLAIM_ALL);
            irq_spinlock_lock(&zones.lock, true);

            if (freed > 0)
                znum = find_free_zone(count, FRAME_TO_ZONE_FLAGS(flags),
                    frame_constraint, hint);
        }
    }

    if (znum == (size_t) -1) {
        if (flags & FRAME_ATOMIC) {
            irq_spinlock_unlock(&zones.lock, true);

            if (!(flags & FRAME_NO_RESERVE))
                reserve_free(count);

            return 0;
        }

        size_t avail = frame_total_free_get_internal();

        irq_spinlock_unlock(&zones.lock, true);

        if (!THREAD)
            panic("Cannot wait for %zu frames to become available "
                "(%zu available).", count, avail);

        /*
         * Sleep until some frames are available again.
         */

#ifdef CONFIG_DEBUG
        log(LF_OTHER, LVL_DEBUG,
            "Thread %" PRIu64 " waiting for %zu frames "
            "%zu available.", THREAD->tid, count, avail);
#endif

        /*
         * Since the mem_avail_mtx is an active mutex, we need to
         * disable interrupts to prevent deadlock with TLB shootdown.
         */
        ipl_t ipl = interrupts_disable();
        mutex_lock(&mem_avail_mtx);

        if (mem_avail_req > 0)
            mem_avail_req = min(mem_avail_req, count);
        else
            mem_avail_req = count;

        size_t gen = mem_avail_gen;

        while (gen == mem_avail_gen)
            condvar_wait(&mem_avail_cv, &mem_avail_mtx);

        mutex_unlock(&mem_avail_mtx);
        interrupts_restore(ipl);

#ifdef CONFIG_DEBUG
        log(LF_OTHER, LVL_DEBUG, "Thread %" PRIu64 " woken up.",
            THREAD->tid);
#endif

        goto loop;
    }

    pfn_t pfn = zone_frame_alloc(&zones.info[znum], count,
        frame_constraint) + zones.info[znum].base;

    irq_spinlock_unlock(&zones.lock, true);

    if (pzone)
        *pzone = znum;

    return PFN2ADDR(pfn);
}
/* Sets the interrupts level and returns the previous one. */
enum interrupts_level interrupts_set_level(enum interrupts_level level)
{
    return level == INTERRUPTS_ON ? interrupts_enable() : interrupts_disable();
}
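/*
 * A minimal sketch of the usual save/restore pattern built on top of
 * interrupts_disable()/interrupts_set_level(), mirroring the demo tasks
 * further below; shared_counter is a hypothetical variable used only for
 * illustration.
 */
static int shared_counter;

static void increment_shared_counter(void)
{
    enum interrupts_level old_level = interrupts_disable();
    shared_counter++;                   /* protected from interrupt handlers */
    interrupts_set_level(old_level);    /* restore whatever level the caller had */
}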
void panic(const char* msg)
{
    terminal_printf("KERNEL PANIC: %s", msg);
    interrupts_disable();
    hang();
}
/** Kernel initialization thread.
 *
 * kinit takes care of higher level kernel
 * initialization (i.e. thread creation,
 * userspace initialization etc.).
 *
 * @param arg Not used.
 */
void kinit(void *arg)
{
    thread_t *thread;

    /*
     * Detach kinit as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

    interrupts_disable();

#ifdef CONFIG_SMP
    if (config.cpu_count > 1) {
        waitq_initialize(&ap_completion_wq);

        /*
         * Create the kmp thread and wait for its completion.
         * cpu1 through cpuN-1 will come up consecutively and
         * not mess together with kcpulb threads.
         * Just a beautification.
         */
        thread = thread_create(kmp, NULL, TASK,
            THREAD_FLAG_UNCOUNTED, "kmp");
        if (thread != NULL) {
            thread_wire(thread, &cpus[0]);
            thread_ready(thread);
        } else
            panic("Unable to create kmp thread.");

        thread_join(thread);
        thread_detach(thread);

        /*
         * For each CPU, create its load balancing thread.
         */
        unsigned int i;

        for (i = 0; i < config.cpu_count; i++) {
            thread = thread_create(kcpulb, NULL, TASK,
                THREAD_FLAG_UNCOUNTED, "kcpulb");
            if (thread != NULL) {
                thread_wire(thread, &cpus[i]);
                thread_ready(thread);
            } else
                printf("Unable to create kcpulb thread for cpu%u\n", i);
        }
    }
#endif /* CONFIG_SMP */

    /*
     * At this point SMP, if present, is configured.
     */
    arch_post_smp_init();

    /* Start thread computing system load */
    thread = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE,
        "kload");
    if (thread != NULL)
        thread_ready(thread);
    else
        printf("Unable to create kload thread\n");

#ifdef CONFIG_KCONSOLE
    if (stdin) {
        /*
         * Create kernel console.
         */
        thread = thread_create(kconsole_thread, NULL, TASK,
            THREAD_FLAG_NONE, "kconsole");
        if (thread != NULL)
            thread_ready(thread);
        else
            printf("Unable to create kconsole thread\n");
    }
#endif /* CONFIG_KCONSOLE */

    interrupts_enable();

    /*
     * Create user tasks, load RAM disk images.
     */
    size_t i;
    program_t programs[CONFIG_INIT_TASKS];

    for (i = 0; i < init.cnt; i++) {
        if (init.tasks[i].paddr % FRAME_SIZE) {
            printf("init[%zu]: Address is not frame aligned\n", i);
            programs[i].task = NULL;
            continue;
        }

        /*
         * Construct task name from the 'init:' prefix and the
         * name stored in the init structure (if any).
         */
        char namebuf[TASK_NAME_BUFLEN];

        const char *name = init.tasks[i].name;
        if (name[0] == 0)
            name = "<unknown>";

        ASSERT(TASK_NAME_BUFLEN >= INIT_PREFIX_LEN);
        str_cpy(namebuf, TASK_NAME_BUFLEN, INIT_PREFIX);
        str_cpy(namebuf + INIT_PREFIX_LEN,
            TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name);

        /*
         * Create virtual memory mappings for init task images.
         */
        uintptr_t page = km_map(init.tasks[i].paddr,
            init.tasks[i].size,
            PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
        ASSERT(page);

        int rc = program_create_from_image((void *) page, namebuf,
            &programs[i]);

        if (rc == 0) {
            if (programs[i].task != NULL) {
                /*
                 * Set capabilities to init userspace tasks.
                 */
                cap_set(programs[i].task, CAP_CAP | CAP_MEM_MANAGER |
                    CAP_IO_MANAGER | CAP_IRQ_REG);

                if (!ipc_phone_0)
                    ipc_phone_0 = &programs[i].task->answerbox;
            }

            /*
             * If programs[i].task == NULL then it is
             * the program loader and it was registered
             * successfully.
             */
        } else if (i == init.cnt - 1) {
            /*
             * Assume the last task is the RAM disk.
             */
            init_rd((void *) init.tasks[i].paddr, init.tasks[i].size);
        } else
            printf("init[%zu]: Init binary load failed "
                "(error %d, loader status %u)\n",
                i, rc, programs[i].loader_status);
    }

    /*
     * Run user tasks.
     */
    for (i = 0; i < init.cnt; i++) {
        if (programs[i].task != NULL)
            program_ready(&programs[i]);
    }

#ifdef CONFIG_KCONSOLE
    if (!stdin) {
        thread_sleep(10);
        printf("kinit: No stdin\nKernel alive: .");

        unsigned int i = 0;
        while (true) {
            printf("\b%c", alive[i % ALIVE_CHARS]);
            thread_sleep(1);
            i++;
        }
    }
#endif /* CONFIG_KCONSOLE */
}
#include <stdio.h>

#include "timer.h"
#include "synch.h"
#include "timer_demo.h"
#include "thread.h"

static struct lock lock_task;
static struct lock lock_task_busy;
static struct lock lock_task_nonbusy;

static void task_busy_sleeper(void *param UNUSED)
{
    int *delay = param;

    enum interrupts_level old_level = interrupts_disable();
    printf("\nI'm the busy sleeper and I will fall asleep for %d microseconds\n", *delay);
    interrupts_set_level(old_level);

    timer_msleep(*delay);

    old_level = interrupts_disable();
    printf("\nI'm the busy sleeper and now I wake up after %d microseconds\n", *delay);
    interrupts_set_level(old_level);
}

static void task_nonbusy_sleeper(void *param UNUSED)
{
    int *delay = param;

    enum interrupts_level old_level = interrupts_disable();
    printf("\nI'm the non busy sleeper and I will fall asleep for %d microseconds\n", *delay);
    interrupts_set_level(old_level);
/** Kernel initialization thread.
 *
 * kinit takes care of higher level kernel
 * initialization (i.e. thread creation,
 * userspace initialization etc.).
 *
 * @param arg Not used.
 */
void kinit(void *arg)
{
    thread_t *thread;

    /*
     * Detach kinit as nobody will call thread_join_timeout() on it.
     */
    thread_detach(THREAD);

    interrupts_disable();

    /* Start processing RCU callbacks. RCU is fully functional afterwards. */
    rcu_kinit_init();

    /*
     * Start processing work queue items. Some may have been queued during boot.
     */
    workq_global_worker_init();

#ifdef CONFIG_SMP
    if (config.cpu_count > 1) {
        waitq_initialize(&ap_completion_wq);

        /*
         * Create the kmp thread and wait for its completion.
         * cpu1 through cpuN-1 will come up consecutively and
         * not mess together with kcpulb threads.
         * Just a beautification.
         */
        thread = thread_create(kmp, NULL, TASK,
            THREAD_FLAG_UNCOUNTED, "kmp");
        if (thread != NULL) {
            thread_wire(thread, &cpus[0]);
            thread_ready(thread);
        } else
            panic("Unable to create kmp thread.");

        thread_join(thread);
        thread_detach(thread);

        /*
         * For each CPU, create its load balancing thread.
         */
        unsigned int i;

        for (i = 0; i < config.cpu_count; i++) {
            thread = thread_create(kcpulb, NULL, TASK,
                THREAD_FLAG_UNCOUNTED, "kcpulb");
            if (thread != NULL) {
                thread_wire(thread, &cpus[i]);
                thread_ready(thread);
            } else
                log(LF_OTHER, LVL_ERROR,
                    "Unable to create kcpulb thread for cpu%u", i);
        }
    }
#endif /* CONFIG_SMP */

    /*
     * At this point SMP, if present, is configured.
     */
    ARCH_OP(post_smp_init);

    /* Start thread computing system load */
    thread = thread_create(kload, NULL, TASK, THREAD_FLAG_NONE,
        "kload");
    if (thread != NULL)
        thread_ready(thread);
    else
        log(LF_OTHER, LVL_ERROR, "Unable to create kload thread");

#ifdef CONFIG_KCONSOLE
    if (stdin) {
        /*
         * Create kernel console.
         */
        thread = thread_create(kconsole_thread, NULL, TASK,
            THREAD_FLAG_NONE, "kconsole");
        if (thread != NULL)
            thread_ready(thread);
        else
            log(LF_OTHER, LVL_ERROR,
                "Unable to create kconsole thread");
    }
#endif /* CONFIG_KCONSOLE */

    /*
     * Store the default stack size in sysinfo so that uspace can create
     * stack with this default size.
     */
    sysinfo_set_item_val("default.stack_size", NULL, STACK_SIZE_USER);

    interrupts_enable();

    /*
     * Create user tasks, load RAM disk images.
     */
    size_t i;
    program_t programs[CONFIG_INIT_TASKS];

    // FIXME: do not propagate arguments through sysinfo
    // but pass them directly to the tasks
    for (i = 0; i < init.cnt; i++) {
        const char *arguments = init.tasks[i].arguments;
        if (str_length(arguments) == 0)
            continue;
        if (str_length(init.tasks[i].name) == 0)
            continue;
        size_t arguments_size = str_size(arguments);

        void *arguments_copy = malloc(arguments_size, 0);
        if (arguments_copy == NULL)
            continue;
        memcpy(arguments_copy, arguments, arguments_size);

        char item_name[CONFIG_TASK_NAME_BUFLEN + 15];
        snprintf(item_name, CONFIG_TASK_NAME_BUFLEN + 15,
            "init_args.%s", init.tasks[i].name);

        sysinfo_set_item_data(item_name, NULL, arguments_copy, arguments_size);
    }

    for (i = 0; i < init.cnt; i++) {
        if (init.tasks[i].paddr % FRAME_SIZE) {
            log(LF_OTHER, LVL_ERROR,
                "init[%zu]: Address is not frame aligned", i);
            programs[i].task = NULL;
            continue;
        }

        /*
         * Construct task name from the 'init:' prefix and the
         * name stored in the init structure (if any).
         */
        char namebuf[TASK_NAME_BUFLEN];

        const char *name = init.tasks[i].name;
        if (name[0] == 0)
            name = "<unknown>";

        STATIC_ASSERT(TASK_NAME_BUFLEN >= INIT_PREFIX_LEN);
        str_cpy(namebuf, TASK_NAME_BUFLEN, INIT_PREFIX);
        str_cpy(namebuf + INIT_PREFIX_LEN,
            TASK_NAME_BUFLEN - INIT_PREFIX_LEN, name);

        /*
         * Create virtual memory mappings for init task images.
         */
        uintptr_t page = km_map(init.tasks[i].paddr,
            init.tasks[i].size,
            PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
        ASSERT(page);

        int rc = program_create_from_image((void *) page, namebuf,
            &programs[i]);

        if (rc == 0) {
            if (programs[i].task != NULL) {
                /*
                 * Set capabilities to init userspace tasks.
                 */
                cap_set(programs[i].task, CAP_CAP | CAP_MEM_MANAGER |
                    CAP_IO_MANAGER | CAP_IRQ_REG);

                if (!ipc_phone_0) {
                    ipc_phone_0 = &programs[i].task->answerbox;

                    /*
                     * Hold the first task so that the
                     * ipc_phone_0 remains a valid pointer
                     * even if the first task exits for
                     * whatever reason.
                     */
                    task_hold(programs[i].task);
                }
            }

            /*
             * If programs[i].task == NULL then it is
             * the program loader and it was registered
             * successfully.
             */
        } else if (i == init.cnt - 1) {
            /*
             * Assume the last task is the RAM disk.
             */
            init_rd((void *) init.tasks[i].paddr, init.tasks[i].size);
        } else
            log(LF_OTHER, LVL_ERROR,
                "init[%zu]: Init binary load failed "
                "(error %d, loader status %u)",
                i, rc, programs[i].loader_status);
    }

    /*
     * Run user tasks.
     */
    for (i = 0; i < init.cnt; i++) {
        if (programs[i].task != NULL)
            program_ready(&programs[i]);
    }

#ifdef CONFIG_KCONSOLE
    if (!stdin) {
        thread_sleep(10);
        printf("kinit: No stdin\nKernel alive: .");

        unsigned int i = 0;
        while (true) {
            printf("\b%c", alive[i % ALIVE_CHARS]);
            thread_sleep(1);
            i++;
        }
    }
#endif /* CONFIG_KCONSOLE */
}