static status_t vmi_init_private(
    vmi_instance_t *vmi,
    uint32_t flags,
    uint64_t id,
    const char *name,
    vmi_config_t config)
{
    uint32_t access_mode = flags & 0x0000FFFF;
    uint32_t init_mode = flags & 0x00FF0000;
    uint32_t config_mode = flags & 0xFF000000;
    status_t status = VMI_FAILURE;

    /* allocate memory for instance structure */
    *vmi = (vmi_instance_t) safe_malloc(sizeof(struct vmi_instance));
    memset(*vmi, 0, sizeof(struct vmi_instance));

    /* initialize instance struct to default values */
    dbprint(VMI_DEBUG_CORE, "LibVMI Version 0.11.0\n"); //TODO change this with each release

    /* save the flags and init mode */
    (*vmi)->flags = flags;
    (*vmi)->init_mode = init_mode;
    (*vmi)->config_mode = config_mode;

    /* the config hash table is set up later based on mode */
    (*vmi)->config = NULL;

    /* set page mode to unknown */
    (*vmi)->page_mode = VMI_PM_UNKNOWN;

    /* setup the caches */
    pid_cache_init(*vmi);
    sym_cache_init(*vmi);
    rva_cache_init(*vmi);
    v2p_cache_init(*vmi);

    if ( init_mode & VMI_INIT_SHM_SNAPSHOT ) {
#if ENABLE_SHM_SNAPSHOT == 1
        v2m_cache_init(*vmi);
#else
        errprint("LibVMI wasn't compiled with SHM support!\n");
        status = VMI_FAILURE;
        goto error_exit;
#endif
    }

    /* connecting to xen, kvm, file, etc */
    if (VMI_FAILURE == set_driver_type(*vmi, access_mode, id, name)) {
        goto error_exit;
    }

    /* driver-specific initialization */
    if (VMI_FAILURE == driver_init(*vmi)) {
        goto error_exit;
    }
    dbprint(VMI_DEBUG_CORE, "--completed driver init.\n");

    /* resolve the id and name */
    if (VMI_FAILURE == set_id_and_name(*vmi, access_mode, id, name)) {
        goto error_exit;
    }

    /* init vmi for specific file/domain through the driver */
    if (VMI_FAILURE == driver_init_vmi(*vmi)) {
        goto error_exit;
    }

    /* setup the page offset size */
    if (VMI_FAILURE == init_page_offset(*vmi)) {
        goto error_exit;
    }

    /* get the memory size */
    if (driver_get_memsize(*vmi, &(*vmi)->allocated_ram_size,
                           &(*vmi)->max_physical_address) == VMI_FAILURE) {
        errprint("Failed to get memory size.\n");
        goto error_exit;
    }
    dbprint(VMI_DEBUG_CORE, "**set allocated_ram_size = %"PRIx64", "
            "max_physical_address = 0x%"PRIx64"\n",
            (*vmi)->allocated_ram_size,
            (*vmi)->max_physical_address);

    // for file mode we need os-specific heuristics to deduce the architecture
    // for live mode, having arch_interface set even in VMI_PARTIAL mode
    // allows use of dtb-based translation methods.
    if (VMI_FILE != (*vmi)->mode) {
        if (VMI_FAILURE == arch_init(*vmi)) {
            if (init_mode & VMI_INIT_COMPLETE) {
                dbprint(VMI_DEBUG_CORE,
                        "--failed to determine architecture of live vm and INIT_COMPLETE.\n");
                goto error_exit;
            } else {
                dbprint(VMI_DEBUG_CORE,
                        "--failed to determine architecture of live vm and INIT_PARTIAL, continuing.\n");
            }
        } else {
            dbprint(VMI_DEBUG_CORE, "--successfully completed architecture init.\n");
        }
    }

    /* we check VMI_INIT_COMPLETE first as VMI_INIT_PARTIAL is not exclusive */
    if (init_mode & VMI_INIT_COMPLETE) {
        switch ((*vmi)->config_mode) {
            case VMI_CONFIG_STRING:
                /* read and parse the config string */
                if (VMI_FAILURE == read_config_string(*vmi, (char*)config)) {
                    goto error_exit;
                }
                break;
            case VMI_CONFIG_GLOBAL_FILE_ENTRY:
                /* read and parse the config file */
                if (VMI_FAILURE == read_config_file_entry(*vmi)) {
                    goto error_exit;
                }
                break;
            case VMI_CONFIG_GHASHTABLE:
                /* read and parse the ghashtable */
                if (!config) {
                    goto error_exit;
                }
                (*vmi)->config = (GHashTable*)config;
                break;
            case VMI_CONFIG_NONE:
            default:
                /* VMI_INIT_COMPLETE requires configuration; falling back to
                 * VMI_CONFIG_GLOBAL_FILE_ENTRY is unsafe here as the config
                 * pointer is probably NULL. */
                goto error_exit;
        }

        if (VMI_FAILURE == set_os_type_from_config(*vmi)) {
            dbprint(VMI_DEBUG_CORE, "--failed to determine os type from config\n");
            goto error_exit;
        }

        /* setup OS specific stuff */
        switch ( (*vmi)->os_type ) {
#ifdef ENABLE_LINUX
            case VMI_OS_LINUX:
                if (VMI_FAILURE == linux_init(*vmi)) {
                    goto error_exit;
                }
                break;
#endif
#ifdef ENABLE_WINDOWS
            case VMI_OS_WINDOWS:
                if (VMI_FAILURE == windows_init(*vmi)) {
                    goto error_exit;
                }
                break;
#endif
            default:
                goto error_exit;
        }

        status = VMI_SUCCESS;
    } else if (init_mode & VMI_INIT_PARTIAL) {
        status = VMI_SUCCESS;
    } else {
        errprint("Need to specify either VMI_INIT_PARTIAL or VMI_INIT_COMPLETE.\n");
        goto error_exit;
    }

    if (init_mode & VMI_INIT_EVENTS) {
        /* Enable event handlers */
        events_init(*vmi);
    }

error_exit:
    return status;
}
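/*
 * Usage sketch (an illustration, not part of the original file): how a
 * caller might pack the flags word that vmi_init_private() decodes above.
 * The low 16 bits select the access mode, bits 16-23 the init mode, and
 * the top 8 bits the config mode. The domain name "windows7-sp1" is a
 * made-up example.
 */
static status_t example_init(vmi_instance_t *vmi)
{
    uint32_t flags = VMI_AUTO                       /* access mode (low 16 bits) */
                   | VMI_INIT_COMPLETE              /* init mode (bits 16-23) */
                   | VMI_CONFIG_GLOBAL_FILE_ENTRY;  /* config mode (top 8 bits) */

    /* config may be NULL here since the global config file is parsed instead */
    return vmi_init_private(vmi, flags, 0, "windows7-sp1", NULL);
}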
void main(void)
{
    /* Start the ball rolling. */
    struct boot_image *ip;          /* boot image pointer */
    register struct proc *rp;       /* process pointer */
    register struct priv *sp;       /* privilege structure pointer */
    register int i, j;
    int hdrindex;                   /* index to array of a.out headers */
    phys_clicks text_base;
    vir_clicks text_clicks, data_clicks, st_clicks;
    reg_t ktsb;                     /* kernel task stack base */
    struct exec *e_hdr = 0;         /* for a copy of an a.out header */

    /* Global value to test segment sanity. */
    magictest = MAGICTEST;

    /* Clear the process table. Announce each slot as empty and set up mappings
     * for proc_addr() and proc_nr() macros. Do the same for the table with
     * privilege structures for the system processes.
     */
    for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
        rp->p_rts_flags = RTS_SLOT_FREE;    /* initialize free slot */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
        rp->p_magic = PMAGIC;
#endif
        rp->p_nr = i;                       /* proc number from ptr */
        rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
    }
    for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
        sp->s_proc_nr = ENDPT_NONE;         /* initialize as free */
        sp->s_id = i;                       /* priv structure index */
        ppriv_addr[i] = sp;                 /* priv ptr from number */
    }

    /* Set up proc table entries for processes in boot image. The stacks of the
     * kernel tasks are initialized to an array in data space. The stacks
     * of the servers have been added to the data segment by the monitor, so
     * the stack pointer is set to the end of the data segment. All the
     * processes are in low memory on the 8086. On the 386 only the kernel
     * is in low memory, the rest is loaded in extended memory.
     */

    /* Task stacks. */
    ktsb = (reg_t) t_stack;

    for (i = 0; i < NR_BOOT_PROCS; ++i) {
        int schedulable_proc, proc_nr;
        int ipc_to_m, kcalls;

        ip = &image[i];                     /* process' attributes */
        rp = proc_addr(ip->proc_nr);        /* get process pointer */
        ip->endpoint = rp->p_endpoint;      /* ipc endpoint */
        rp->p_max_priority = ip->priority;  /* max scheduling priority */
        rp->p_priority = ip->priority;      /* current priority */
        rp->p_quantum_size = ip->quantum;   /* quantum size in ticks */
        rp->p_ticks_left = ip->quantum;     /* current credit */
        strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */

        /* See if this process is immediately schedulable.
         * In that case, set its privileges now and allow it to run.
         * Only kernel tasks and the root system process get to run immediately.
         * All the other system processes are inhibited from running by the
         * RTS_NO_PRIV flag. They can only be scheduled once the root system
         * process has set their privileges.
         */
        proc_nr = proc_nr(rp);
        schedulable_proc = (iskerneln(proc_nr) || isrootsysn(proc_nr));
        if (schedulable_proc) {
            /* Assign privilege structure. Force a static privilege id. */
            (void) get_priv(rp, static_priv_id(proc_nr));

            /* Privileges for kernel tasks. */
            if (iskerneln(proc_nr)) {
                /* Privilege flags. */
                priv(rp)->s_flags = (proc_nr == IDLE ? IDL_F : TSK_F);
                /* Allowed traps. */
                priv(rp)->s_trap_mask = (proc_nr == CLOCK || proc_nr == SYSTEM ?
                                         CSK_T : TSK_T);
                ipc_to_m = TSK_M;           /* allowed targets */
                kcalls = TSK_KC;            /* allowed kernel calls */
            } else if (isrootsysn(proc_nr)) {
                /* Privileges for the root system process. */
                priv(rp)->s_flags = RSYS_F;     /* privilege flags */
                priv(rp)->s_trap_mask = RSYS_T; /* allowed traps */
                ipc_to_m = RSYS_M;              /* allowed targets */
                kcalls = RSYS_KC;               /* allowed kernel calls */
            }

            /* Fill in target mask. */
            for (j = 0; j < NR_SYS_PROCS; j++) {
                if (ipc_to_m & (1 << j))
                    set_sendto_bit(rp, j);
                else
                    unset_sendto_bit(rp, j);
            }

            /* Fill in kernel call mask. */
            for (j = 0; j < CALL_MASK_SIZE; j++) {
                priv(rp)->s_k_call_mask[j] = (kcalls == NO_C ? 0 : (~0));
            }
        } else {
            /* Don't let the process run for now. */
            RTS_SET(rp, RTS_NO_PRIV);
        }

        if (iskerneln(proc_nr)) {           /* part of the kernel? */
            if (ip->stksize > 0) {          /* HARDWARE stack size is 0 */
                rp->p_priv->s_stack_guard = (reg_t *) ktsb;
                *rp->p_priv->s_stack_guard = STACK_GUARD;
            }
            ktsb += ip->stksize;            /* point to high end of stack */
            rp->p_reg.sp = ktsb;            /* this task's initial stack ptr */
            hdrindex = 0;                   /* all use the first a.out header */
        } else {
            hdrindex = 1 + i-NR_TASKS;      /* system/user processes */
        }

        /* Architecture-specific way to find out aout header of this
         * boot process.
         */
        e_hdr = arch_get_aout_header(hdrindex);

        /* Convert addresses to clicks and build process memory map */
        text_base = e_hdr->a_syms >> CLICK_SHIFT;
        st_clicks = (e_hdr->a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
        data_clicks = (e_hdr->a_text + e_hdr->a_data + e_hdr->a_bss +
                       CLICK_SIZE-1) >> CLICK_SHIFT;
        text_clicks = 0;

        rp->p_memmap[T].mem_phys = text_base;
        rp->p_memmap[T].mem_len = text_clicks;
        rp->p_memmap[D].mem_phys = text_base + text_clicks;
        rp->p_memmap[D].mem_len = data_clicks;
        rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
        rp->p_memmap[S].mem_vir = st_clicks;
        rp->p_memmap[S].mem_len = 0;

        /* Patch (override) the non-kernel process' entry points in the image
         * table. The image table is located in kernel/kernel_syms.c. The
         * kernel processes like IDLE, SYSTEM, CLOCK, HARDWARE are not changed
         * because they are part of the kernel and their entry points are set
         * at compilation time. In case of IDLE or HARDWARE the entry point
         * can be ignored because they never run (set RTS_PROC_STOP).
         */
        if (!iskerneln(proc_nr(rp)))
            ip->initial_pc = (task_t*)e_hdr->a_entry;

        /* Set initial register values. The processor status word for tasks
         * is different from that of other processes because tasks can
         * access I/O; this is not allowed to less-privileged processes.
         */
        rp->p_reg.pc = (reg_t) ip->initial_pc;
        rp->p_reg.psw = (iskerneln(proc_nr)) ? INIT_TASK_PSW : INIT_PSW;

        /* Initialize the server stack pointer. Take it down one word
         * to give crtso.s something to use as "argc", "argv" and "envp".
         */
        if (isusern(proc_nr)) {             /* user-space process? */
            rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
                            rp->p_memmap[S].mem_len) << CLICK_SHIFT;
            rp->p_reg.sp -= 3*sizeof(reg_t);
        }

        /* scheduling functions depend on proc_ptr pointing somewhere. */
        if (!proc_ptr)
            proc_ptr = rp;

        /* If this process has its own page table, VM will set the
         * PT up and manage it. VM will signal the kernel when it has
         * done this; until then, don't let it run.
         */
        if (ip->flags & PROC_FULLVM)
            RTS_SET(rp, RTS_VMINHIBIT);

        /* The IDLE & HARDWARE tasks are never put on a run queue as they
         * are never ready to run.
         */
        if (rp->p_nr == HARDWARE)
            RTS_SET(rp, RTS_PROC_STOP);
        if (rp->p_nr == IDLE)
            RTS_SET(rp, RTS_PROC_STOP);
        RTS_UNSET(rp, RTS_SLOT_FREE);       /* remove RTS_SLOT_FREE and schedule */

        alloc_segments(rp);
    } /* for */

    /* Architecture-dependent initialization. */
    arch_init();

#ifdef CONFIG_DEBUG_KERNEL_STATS_PROFILE
    sprofiling = 0;                 /* we're not profiling until instructed to */
#endif
    cprof_procs_no = 0;             /* init nr of hash table slots used */

#ifdef CONFIG_IDLE_TSC
    idle_tsc = cvu64(0);
#endif

    vm_running = 0;
    krandom.random_sources = RANDOM_SOURCES;
    krandom.random_elements = RANDOM_ELEMENTS;

    /* Nucleos is now ready. All boot image processes are on the ready queue.
     * Return to the assembly code to start running the current process.
     */
    bill_ptr = proc_addr(IDLE);     /* it has to point somewhere */
    announce();                     /* print Nucleos startup banner */

    /*
     * enable timer interrupts and clock task on the boot CPU
     */
    if (boot_cpu_init_timer(system_hz)) {
        kernel_panic("FATAL : failed to initialize timer interrupts, "
                     "cannot continue without any clock source!", NO_NUM);
    }

    /* Warnings for sanity checks that take time. These warnings are printed
     * so it's a clear warning no full release should be done with them
     * enabled.
     */
#ifdef CONFIG_DEBUG_KERNEL_SCHED_CHECK
    FIXME("CONFIG_DEBUG_KERNEL_SCHED_CHECK enabled");
#endif
#ifdef CONFIG_DEBUG_KERNEL_VMASSERT
    FIXME("CONFIG_DEBUG_KERNEL_VMASSERT enabled");
#endif
#ifdef CONFIG_DEBUG_PROC_CHECK
    FIXME("PROC check enabled");
#endif

    restart();
}
/* Tries to determine the page mode based on the kpgd found via heuristics */
static status_t find_page_mode(
    vmi_instance_t vmi)
{
    status_t ret = VMI_FAILURE;
    windows_instance_t windows = vmi->os_data;
    uint32_t mask = ~0;

    if (!windows) {
        errprint("Windows functions not initialized in %s\n", __FUNCTION__);
        return VMI_FAILURE;
    }

    if (!windows->ntoskrnl || !windows->ntoskrnl_va) {
        errprint("Windows kernel virtual and physical address required for determining page mode\n");
        return VMI_FAILURE;
    }

    if (!vmi->kpgd) {
        errprint("Windows kernel directory table base not set, can't determine page mode\n");
        return VMI_FAILURE;
    }

    dbprint(VMI_DEBUG_MISC, "--trying VMI_PM_LEGACY\n");
    vmi->page_mode = VMI_PM_LEGACY;

    /* As the size of vmi->kpgd is 64-bit, we mask it to be 32-bit here */
    if (VMI_SUCCESS == arch_init(vmi)) {
        if (windows->ntoskrnl ==
                vmi_pagetable_lookup(vmi, (vmi->kpgd & mask), windows->ntoskrnl_va)) {
            vmi->kpgd &= mask;
            goto found_pm;
        }
    }

    dbprint(VMI_DEBUG_MISC, "--trying VMI_PM_PAE\n");
    vmi->page_mode = VMI_PM_PAE;

    /* As the size of vmi->kpgd is 64-bit, we mask it to be only 32-bit here */
    if (VMI_SUCCESS == arch_init(vmi)) {
        if (windows->ntoskrnl ==
                vmi_pagetable_lookup(vmi, (vmi->kpgd & mask), windows->ntoskrnl_va)) {
            vmi->kpgd &= mask;
            goto found_pm;
        }
    }

    dbprint(VMI_DEBUG_MISC, "--trying VMI_PM_IA32E\n");
    vmi->page_mode = VMI_PM_IA32E;

    if (VMI_SUCCESS == arch_init(vmi)) {
        if (windows->ntoskrnl ==
                vmi_pagetable_lookup(vmi, vmi->kpgd, windows->ntoskrnl_va)) {
            goto found_pm;
        }
    }

    goto done;

found_pm:
    ret = VMI_SUCCESS;

done:
    return ret;
}
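/*
 * Worked example of the invariant the probe above relies on (addresses are
 * illustrative): suppose the kernel image was found at physical address
 * 0x02600000 and its virtual base is 0xfffff80002600000. Only under the
 * correct paging mode does a walk of the candidate page tables reproduce
 * the known physical address:
 *
 *   vmi_pagetable_lookup(vmi, vmi->kpgd, 0xfffff80002600000) == 0x02600000
 *
 * The 32-bit modes additionally truncate the DTB with (vmi->kpgd & mask),
 * since a legacy/PAE CR3 is at most 32 bits wide.
 */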
int main(int argc, char *argv[])
{
    int r;
    struct timespec start, end;
    struct fio_lfsr *fl;
    int verify = 0;
    unsigned int spin = 0;
    uint64_t seed = 0;
    uint64_t numbers;
    uint64_t v_size;
    uint64_t i;
    void *v = NULL, *v_start;
    double total, mean;

    arch_init(argv);

    /* Read arguments */
    switch (argc) {
        case 5: if (strncmp(argv[4], "verify", 7) == 0)
                    verify = 1;
                /* fall through */
        case 4: spin = atoi(argv[3]);
                /* fall through */
        case 3: seed = atol(argv[2]);
                /* fall through */
        case 2: numbers = strtol(argv[1], NULL, 16);
                break;
        default: usage();
                 return 1;
    }

    /* Initialize LFSR */
    fl = malloc(sizeof(struct fio_lfsr));
    if (!fl) {
        perror("malloc");
        return 1;
    }

    r = lfsr_init(fl, numbers, seed, spin);
    if (r) {
        printf("Initialization failed.\n");
        return r;
    }

    /* Print specs */
    printf("LFSR specs\n");
    printf("==========================\n");
    printf("Size is %u\n", 64 - __builtin_clzl(fl->cached_bit));
    printf("Max val is %lu\n", (unsigned long) fl->max_val);
    printf("XOR-mask is 0x%lX\n", (unsigned long) fl->xormask);
    printf("Seed is %lu\n", (unsigned long) fl->last_val);
    printf("Spin is %u\n", fl->spin);
    printf("Cycle length is %lu\n", (unsigned long) fl->cycle_length);

    /* Create verification table */
    if (verify) {
        v_size = numbers * sizeof(uint8_t);
        v = malloc(v_size);
        if (!v) {               /* added check: bail out if the table allocation fails */
            perror("malloc");
            free(fl);
            return 1;
        }
        memset(v, 0, v_size);
        printf("\nVerification table is %lf KiB\n", (double)(v_size) / 1024);
    }
    v_start = v;

    /*
     * Iterate over a tight loop until we have produced all the requested
     * numbers. Verifying the results should introduce some small yet not
     * negligible overhead.
     */
    fprintf(stderr, "\nTest initiated... ");
    fio_gettime(&start, NULL);
    while (!lfsr_next(fl, &i)) {
        if (verify)
            *(uint8_t *)(v + i) += 1;
    }
    fio_gettime(&end, NULL);
    fprintf(stderr, "finished.\n");

    /* Check if all expected numbers within range have been calculated */
    r = 0;
    if (verify) {
        fprintf(stderr, "Verifying results... ");
        for (i = 0; i < numbers; i++) {
            if (*(uint8_t *)(v + i) != 1) {
                fprintf(stderr, "failed (%lu = %d).\n",
                        (unsigned long) i, *(uint8_t *)(v + i));
                r = 1;
                break;
            }
        }
        if (!r)
            fprintf(stderr, "OK!\n");
    }

    /* Calculate elapsed time and mean time per number */
    total = utime_since(&start, &end);
    mean = total / fl->num_vals;

    printf("\nTime results ");
    if (verify)
        printf("(slower due to verification)");
    printf("\n==============================\n");
    printf("Elapsed: %lf s\n", total / pow(10, 6));
    printf("Mean: %lf us\n", mean);

    free(v_start);
    free(fl);
    return r;
}
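/*
 * Note on the verification pass above (background, not from the original
 * file): lfsr_init() arranges the generator so that one full cycle emits
 * every value in [0, numbers) exactly once. That is why each slot of the
 * table must end up holding exactly 1; a 0 means a value was skipped and
 * anything higher means a repeat.
 */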
void __noreturn master_init(void)
{
    __attribute__ ((aligned(16))) uint8_t bootstrap_pool[BOOTSTRAP_POOL_SIZE];

    jump_handlers_apply();
    kputs("KERN: We are in high address.\n");

    arch_init();

    /*
     * Page allocator requires arbitrary size allocation to allocate
     * struct pages, while arbitrary size allocation depends on
     * page allocator to actually give out memory.
     *
     * We break such circular dependency by
     * (1) bootstrapping a small virtual memory allocator which works on
     *     the stack.
     * (2) initializing a page allocator which works on the bootstrap
     *     allocator obtained in (1).
     * (3) initializing a real virtual memory allocator which depends
     *     on (2).
     * (4) making the page allocator depend on (3) instead.
     *
     * TODO: move the following piece of code to kern/mm
     */
    simple_allocator_bootstrap(bootstrap_pool, BOOTSTRAP_POOL_SIZE);
    kputs("KERN: Simple allocator bootstrapping.\n");
    page_allocator_init();
    kputs("KERN: Page allocator initialized.\n");
    add_memory_pages();
    kputs("KERN: Pages added.\n");
    kprintf("KERN: Free memory: 0x%p\n", (size_t)get_free_memory());

    struct simple_allocator old;
    get_simple_allocator(&old);
    simple_allocator_init();
    kputs("KERN: Simple allocator initialized.\n");
    page_allocator_move(&old);
    kputs("KERN: Page allocator moved.\n");

    trap_init();
    kputs("KERN: Traps initialized.\n");

    /* temporary test */
    extern void trap_test(void);
    trap_test();
    kputs("KERN: Traps test passed.\n");

    /* do early initcalls, one by one */
    do_early_initcalls();

    mm_init();
    kputs("KERN: Memory management component initialized.\n");

    extern void mm_test(void);
    mm_test();

    /* allocate per-cpu context and kworker */
    // proc_init();

    /* do initcalls, one by one */
    do_initcalls();

    /* temporary tests */
    struct allocator_cache cache = {
        .size = 1024,
        .align = 1024,
        .flags = 0,
        .create_obj = NULL,
        .destroy_obj = NULL
    };
    cache_create(&cache);
    void *a, *b, *c;
    a = cache_alloc(&cache);
    kprintf("DEBUG: a = 0x%08x\n", a);
    b = cache_alloc(&cache);
    kprintf("DEBUG: b = 0x%08x\n", b);
    c = cache_alloc(&cache);
    kprintf("DEBUG: c = 0x%08x\n", c);
    cache_free(&cache, a);
    cache_free(&cache, b);
    cache_free(&cache, c);
    a = cache_alloc(&cache);
    kprintf("DEBUG: a = 0x%08x\n", a);
    cache_free(&cache, a);
    int ret = cache_destroy(&cache);
    kprintf("DEBUG: cache_destroy returned %d.\n", ret);
    cache_create(&cache);
    a = cache_alloc(&cache);
    kprintf("DEBUG: a = 0x%08x\n", a);

    /* startup smp */

    /*
     * do initcalls, one by one.
     * They may fork or sleep or reschedule.
     * In case any initcalls issue a fork, there MUST be EXACTLY one return
     * from each initcall.
     */

    /* initialize or cleanup namespace */

    panic("Test done, all is well.\n");
}

void __noreturn slave_init(void)
{
    panic("Unimplemented routine called.");
}
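/*
 * Minimal sketch (an assumption about the design, not the actual kern/mm
 * code) of the bump-pointer bootstrap allocator described in step (1)
 * above: it hands out memory from the caller-supplied pool and never
 * frees, which is all the page allocator needs until step (4) rewires it.
 */
struct bootstrap_allocator {
    uint8_t *base;      /* start of the on-stack pool */
    size_t size;        /* total pool size */
    size_t off;         /* current bump offset */
};

static void *bootstrap_alloc(struct bootstrap_allocator *ba, size_t len)
{
    size_t aligned = (ba->off + 15) & ~(size_t)15;  /* keep 16-byte alignment */

    if (aligned + len > ba->size)
        return NULL;                                /* pool exhausted */
    ba->off = aligned + len;
    return ba->base + aligned;
}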
/*===========================================================================*
 *                                  cstart                                   *
 *===========================================================================*/
void cstart()
{
    /* Perform system initializations prior to calling main(). Most settings
     * are determined with help of the environment strings passed by MINIX'
     * loader.
     */
    register char *value;               /* value in key=value pair */
    int h;

    /* low-level initialization */
    prot_init();

    /* determine verbosity */
    if ((value = env_get(VERBOSEBOOTVARNAME)))
        verboseboot = atoi(value);

    /* Get clock tick frequency. */
    value = env_get("hz");
    if (value)
        system_hz = atoi(value);
    if (!value || system_hz < 2 || system_hz > 50000) /* sanity check */
        system_hz = DEFAULT_HZ;

    DEBUGEXTRA(("cstart\n"));

    /* Record miscellaneous information for user-space servers. */
    kinfo.nr_procs = NR_PROCS;
    kinfo.nr_tasks = NR_TASKS;
    strlcpy(kinfo.release, OS_RELEASE, sizeof(kinfo.release));
    strlcpy(kinfo.version, OS_VERSION, sizeof(kinfo.version));

    /* Load average data initialization. */
    kloadinfo.proc_last_slot = 0;
    for (h = 0; h < _LOAD_HISTORY; h++)
        kloadinfo.proc_load_history[h] = 0;

#ifdef USE_APIC
    value = env_get("no_apic");
    if (value)
        config_no_apic = atoi(value);
    else
        config_no_apic = 1;
    value = env_get("apic_timer_x");
    if (value)
        config_apic_timer_x = atoi(value);
    else
        config_apic_timer_x = 1;
#endif

#ifdef USE_WATCHDOG
    value = env_get("watchdog");
    if (value)
        watchdog_enabled = atoi(value);
#endif

#ifdef CONFIG_SMP
    if (config_no_apic)
        config_no_smp = 1;
    value = env_get("no_smp");
    if (value)
        config_no_smp = atoi(value);
    else
        config_no_smp = 0;
#endif

    DEBUGEXTRA(("intr_init(0)\n"));
    intr_init(0);

    arch_init();
}
static status_t linux_filemode_init(vmi_instance_t vmi)
{
    status_t rc;
    addr_t swapper_pg_dir = 0, init_level4_pgt = 0;
    addr_t boundary = 0, phys_start = 0, virt_start = 0;

    switch (vmi->page_mode) {
        case VMI_PM_AARCH64:
        case VMI_PM_IA32E:
            linux_symbol_to_address(vmi, "phys_startup_64", NULL, &phys_start);
            linux_symbol_to_address(vmi, "startup_64", NULL, &virt_start);
            break;
        case VMI_PM_AARCH32:
        case VMI_PM_LEGACY:
        case VMI_PM_PAE:
            linux_symbol_to_address(vmi, "phys_startup_32", NULL, &phys_start);
            linux_symbol_to_address(vmi, "startup_32", NULL, &virt_start);
            break;
        case VMI_PM_UNKNOWN:
            linux_symbol_to_address(vmi, "phys_startup_64", NULL, &phys_start);
            linux_symbol_to_address(vmi, "startup_64", NULL, &virt_start);
            if (phys_start && virt_start)
                break;
            phys_start = virt_start = 0;
            linux_symbol_to_address(vmi, "phys_startup_32", NULL, &phys_start);
            linux_symbol_to_address(vmi, "startup_32", NULL, &virt_start);
            break;
    }

    virt_start = canonical_addr(virt_start);

    if (phys_start && virt_start && phys_start < virt_start) {
        boundary = virt_start - phys_start;
        dbprint(VMI_DEBUG_MISC, "--got kernel boundary (0x%.16"PRIx64").\n", boundary);
    }

    rc = linux_symbol_to_address(vmi, "swapper_pg_dir", NULL, &swapper_pg_dir);

    if (VMI_SUCCESS == rc) {
        dbprint(VMI_DEBUG_MISC, "--got vaddr for swapper_pg_dir (0x%.16"PRIx64").\n",
                swapper_pg_dir);

        swapper_pg_dir = canonical_addr(swapper_pg_dir);

        /* We don't know if VMI_PM_LEGACY, VMI_PM_PAE or VMI_PM_AARCH32 yet
         * so we do some heuristics below. */
        if (boundary) {
            rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary,
                                           phys_start, virt_start);
            if (VMI_SUCCESS == rc)
                return rc;
        }

        /*
         * So we have a swapper but don't know the physical page of it.
         * We will make some educated guesses now.
         */
        boundary = 0xC0000000;
        dbprint(VMI_DEBUG_MISC, "--trying boundary 0x%.16"PRIx64".\n", boundary);
        rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary,
                                       swapper_pg_dir - boundary, swapper_pg_dir);
        if (VMI_SUCCESS == rc) {
            return rc;
        }

        boundary = 0x80000000;
        dbprint(VMI_DEBUG_MISC, "--trying boundary 0x%.16"PRIx64".\n", boundary);
        rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary,
                                       swapper_pg_dir - boundary, swapper_pg_dir);
        if (VMI_SUCCESS == rc) {
            return rc;
        }

        boundary = 0x40000000;
        dbprint(VMI_DEBUG_MISC, "--trying boundary 0x%.16"PRIx64".\n", boundary);
        rc = linux_filemode_32bit_init(vmi, swapper_pg_dir, boundary,
                                       swapper_pg_dir - boundary, swapper_pg_dir);
        if (VMI_SUCCESS == rc) {
            return rc;
        }

        return VMI_FAILURE;
    }

    rc = linux_symbol_to_address(vmi, "init_level4_pgt", NULL, &init_level4_pgt);

    if (rc == VMI_SUCCESS) {
        dbprint(VMI_DEBUG_MISC, "--got vaddr for init_level4_pgt (0x%.16"PRIx64").\n",
                init_level4_pgt);

        init_level4_pgt = canonical_addr(init_level4_pgt);

        if (boundary) {
            vmi->page_mode = VMI_PM_IA32E;
            if (VMI_SUCCESS == arch_init(vmi)) {
                if (phys_start == vmi_pagetable_lookup(vmi, init_level4_pgt - boundary,
                                                       virt_start)) {
                    vmi->kpgd = init_level4_pgt - boundary;
                    return VMI_SUCCESS;
                }
            }
        }
    }

    return VMI_FAILURE;
}
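/*
 * Worked example of the boundary heuristic above (illustrative numbers):
 * a 32-bit kernel linked at virt_start = 0xc1000000 and loaded at
 * phys_start = 0x01000000 yields boundary = 0xc0000000, so any kernel
 * virtual address translates to physical by simple subtraction:
 *
 *   kpgd = swapper_pg_dir - boundary
 *        = 0xc1a32000 - 0xc0000000 = 0x01a32000
 *
 * The 0xC0000000 / 0x80000000 / 0x40000000 fallbacks correspond to the
 * common 3G/1G, 2G/2G and 1G/3G user/kernel split configurations.
 */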
/*
 * INITIAL C ENTRY POINT.
 */
void start_kernel(start_info_t *si)
{
    static char hello[] = "Bootstrapping...\n";

    (void)HYPERVISOR_console_io(CONSOLEIO_write, strlen(hello), hello);

    setup_xen_features();
    pvh_early_init();
    arch_init(si);
    trap_init();

    /* print out some useful information */
    printk("Xen Minimal OS!\n");
    printk("  start_info: %p(VA)\n", si);
    printk("    nr_pages: 0x%lx\n", si->nr_pages);
    printk("  shared_inf: 0x%08lx(MA)\n", si->shared_info);
    printk("     pt_base: %p(VA)\n", (void *)si->pt_base);
    printk("nr_pt_frames: 0x%lx\n", si->nr_pt_frames);
    printk("    mfn_list: %p(VA)\n", (void *)si->mfn_list);
    printk("   mod_start: 0x%lx(VA)\n", si->mod_start);
    printk("     mod_len: %lu\n", si->mod_len);
    printk("       flags: 0x%x\n", (unsigned int)si->flags);
    printk("    cmd_line: %s\n",
           si->cmd_line ? (const char *)si->cmd_line : "NULL");

    /* Set up events. */
    init_events();

    /* ENABLE EVENT DELIVERY. This is disabled at start of day. */
    __sti();

    arch_print_info();

    /* Init memory management. */
    init_mm();

    /* Init time and timers. */
    init_time();

    /* Init the console driver. */
    init_console();

    /* Init grant tables */
    init_gnttab();

    /* Init scheduler. */
    init_sched();

    /* Init XenBus */
    init_xenbus();

#ifdef CONFIG_XENBUS
    /* Init shutdown thread */
    init_shutdown(si);
#endif

    /* Call (possibly overridden) app_main() */
    app_main(&start_info);

    /* Everything initialised, start idle thread */
    run_idle_thread();
}
void kernel_init(multiboot_info_t *mboot_info)
{
    extern char __start_bss[], __stop_bss[];

    memset(__start_bss, 0, __stop_bss - __start_bss);
    /* mboot_info is a physical address. while some arches currently have the
     * lower memory mapped, everyone should have it mapped at kernbase by now.
     * also, it might be in 'free' memory, so once we start dynamically using
     * memory, we may clobber it. */
    multiboot_kaddr = (struct multiboot_info*)((physaddr_t)mboot_info
                                               + KERNBASE);
    extract_multiboot_cmdline(multiboot_kaddr);

    cons_init();
    print_cpuinfo();

    printk("Boot Command Line: '%s'\n", boot_cmdline);

    exception_table_init();
    cache_init();                   // Determine system's cache properties
    pmem_init(multiboot_kaddr);
    kmem_cache_init();              // Sets up slab allocator
    kmalloc_init();
    hashtable_init();
    radix_init();
    cache_color_alloc_init();       // Inits data structs
    colored_page_alloc_init();      // Allocates colors for agnostic processes
    acpiinit();
    topology_init();
    kthread_init();                 /* might need to tweak when this happens */
    vmr_init();
    file_init();
    page_check();
    idt_init();
    kernel_msg_init();
    timer_init();
    vfs_init();
    devfs_init();
    train_timing();
    kb_buf_init(&cons_buf);
    arch_init();
    block_init();
    enable_irq();
    run_linker_funcs();
    /* reset/init devtab after linker funcs 3 and 4. these run NIC and medium
     * pre-inits, which need to happen before devether. */
    devtabreset();
    devtabinit();

#ifdef CONFIG_EXT2FS
    mount_fs(&ext2_fs_type, "/dev/ramdisk", "/mnt", 0);
#endif /* CONFIG_EXT2FS */
#ifdef CONFIG_ETH_AUDIO
    eth_audio_init();
#endif /* CONFIG_ETH_AUDIO */
    get_coreboot_info(&sysinfo);
    booting = 0;

#ifdef CONFIG_RUN_INIT_SCRIPT
    if (run_init_script()) {
        printk("Configured to run init script, but no script specified!\n");
        manager();
    }
#else
    manager();
#endif
}
int main(int argc, char *argv[], char *envp[])
{
#ifdef LISP_FEATURE_WIN32
    /* Exception handling support structure. Evil Win32 hack. */
    struct lisp_exception_frame exception_frame;
#endif

    /* the name of the core file we're to execute. Note that this is
     * a malloc'ed string which should be freed eventually. */
    char *core = 0;
    char **sbcl_argv = 0;
    os_vm_offset_t embedded_core_offset = 0;
    char *runtime_path = 0;

    /* other command line options */
    boolean noinform = 0;
    boolean end_runtime_options = 0;
    boolean disable_lossage_handler_p = 0;

    lispobj initial_function;
    const char *sbcl_home = getenv("SBCL_HOME");

    interrupt_init();
    block_blockable_signals(0, 0);

    setlocale(LC_ALL, "");

    runtime_options = NULL;

    /* Check early to see if this executable has an embedded core,
     * which also populates runtime_options if the core has runtime
     * options */
    runtime_path = os_get_runtime_executable_path();
    if (runtime_path) {
        os_vm_offset_t offset = search_for_embedded_core(runtime_path);
        if (offset != -1) {
            embedded_core_offset = offset;
            core = runtime_path;
        } else {
            free(runtime_path);
        }
    }

    /* Parse our part of the command line (aka "runtime options"),
     * stripping out those options that we handle. */
    if (runtime_options != NULL) {
        dynamic_space_size = runtime_options->dynamic_space_size;
        thread_control_stack_size = runtime_options->thread_control_stack_size;
        sbcl_argv = argv;
    } else {
        int argi = 1;

        runtime_options = successful_malloc(sizeof(struct runtime_options));

        while (argi < argc) {
            char *arg = argv[argi];
            if (0 == strcmp(arg, "--script")) {
                /* This is both a runtime and a toplevel option. As a
                 * runtime option, it is equivalent to --noinform.
                 * This exits, and does not increment argi, so that
                 * TOPLEVEL-INIT sees the option. */
                noinform = 1;
                end_runtime_options = 1;
                disable_lossage_handler_p = 1;
                lose_on_corruption_p = 1;
                break;
            } else if (0 == strcmp(arg, "--noinform")) {
                noinform = 1;
                ++argi;
            } else if (0 == strcmp(arg, "--core")) {
                if (core) {
                    lose("more than one core file specified\n");
                } else {
                    ++argi;
                    if (argi >= argc) {
                        lose("missing filename for --core argument\n");
                    }
                    core = copied_string(argv[argi]);
                    ++argi;
                }
            } else if (0 == strcmp(arg, "--help")) {
                /* I think this is the (or a) usual convention: upon
                 * seeing "--help" we immediately print our help
                 * string and exit, ignoring everything else. */
                print_help();
                exit(0);
            } else if (0 == strcmp(arg, "--version")) {
                /* As in "--help" case, I think this is expected. */
                print_version();
                exit(0);
            } else if (0 == strcmp(arg, "--dynamic-space-size")) {
                ++argi;
                if (argi >= argc)
                    lose("missing argument for --dynamic-space-size");
                errno = 0;
                dynamic_space_size = strtol(argv[argi++], 0, 0) << 20;
                if (errno)
                    lose("argument to --dynamic-space-size is not a number");
# ifdef MAX_DYNAMIC_SPACE_END
                if (!((DYNAMIC_SPACE_START < DYNAMIC_SPACE_START+dynamic_space_size) &&
                      (DYNAMIC_SPACE_START+dynamic_space_size <= MAX_DYNAMIC_SPACE_END)))
                    lose("specified --dynamic-space-size too large");
# endif
            } else if (0 == strcmp(arg, "--control-stack-size")) {
                ++argi;
                if (argi >= argc)
                    lose("missing argument for --control-stack-size");
                errno = 0;
                thread_control_stack_size = strtol(argv[argi++], 0, 0) << 20;
                if (errno)
                    lose("argument to --control-stack-size is not a number");
            } else if (0 == strcmp(arg, "--debug-environment")) {
                int n = 0;
                printf("; Commandline arguments:\n");
                while (n < argc) {
                    printf("; %2d: \"%s\"\n", n, argv[n]);
                    ++n;
                }
                n = 0;
                printf(";\n; Environment:\n");
                while (ENVIRON[n]) {
                    printf("; %2d: \"%s\"\n", n, ENVIRON[n]);
                    ++n;
                }
                ++argi;
            } else if (0 == strcmp(arg, "--disable-ldb")) {
                disable_lossage_handler_p = 1;
                ++argi;
            } else if (0 == strcmp(arg, "--lose-on-corruption")) {
                lose_on_corruption_p = 1;
                ++argi;
            } else if (0 == strcmp(arg, "--end-runtime-options")) {
                end_runtime_options = 1;
                ++argi;
                break;
            } else {
                /* This option was unrecognized as a runtime option,
                 * so it must be a toplevel option or a user option,
                 * so we must be past the end of the runtime option
                 * section. */
                break;
            }
        }

        /* This is where we strip out those options that we handle. We
         * also take this opportunity to make sure that we don't find
         * an out-of-place "--end-runtime-options" option. */
        {
            char *argi0 = argv[argi];
            int argj = 1;
            /* (argc - argi) for the arguments, one for the binary,
             * and one for the terminating NULL. */
            sbcl_argv = successful_malloc((2 + argc - argi) * sizeof(char *));
            sbcl_argv[0] = argv[0];
            while (argi < argc) {
                char *arg = argv[argi++];
                /* If we encounter --end-runtime-options for the first
                 * time after the point where we had to give up on
                 * runtime options, then the point where we had to
                 * give up on runtime options must've been a user
                 * error. */
                if (!end_runtime_options &&
                    0 == strcmp(arg, "--end-runtime-options")) {
                    lose("bad runtime option \"%s\"\n", argi0);
                }
                sbcl_argv[argj++] = arg;
            }
            sbcl_argv[argj] = 0;
        }
    }

    /* Align down to multiple of page_table page size, and to the appropriate
     * stack alignment. */
    dynamic_space_size &= ~(PAGE_BYTES-1);
    thread_control_stack_size &= ~(CONTROL_STACK_ALIGNMENT_BYTES-1);

    /* Preserve the runtime options for possible future core saving */
    runtime_options->dynamic_space_size = dynamic_space_size;
    runtime_options->thread_control_stack_size = thread_control_stack_size;

    /* KLUDGE: os_vm_page_size is set by os_init(), and on some
     * systems (e.g. Alpha) arch_init() needs os_vm_page_size, so
     * it must follow os_init(). -- WHN 2000-01-26 */
    os_init(argv, envp);
    arch_init();
    gc_init();
    validate();

    /* If no core file was specified, look for one. */
    if (!core) {
        core = search_for_core();
    }

    /* Make sure that SBCL_HOME is set and not the empty string,
     * unless loading an embedded core. */
    if (!(sbcl_home && *sbcl_home) && embedded_core_offset == 0) {
        char *envstring, *copied_core, *dir;
        char *stem = "SBCL_HOME=";
        copied_core = copied_string(core);
        dir = dirname(copied_core);
        envstring = (char *) calloc(strlen(stem) + strlen(dir) + 1,
                                    sizeof(char));
        sprintf(envstring, "%s%s", stem, dir);
        putenv(envstring);
        free(copied_core);
    }

    if (!noinform && embedded_core_offset == 0) {
        print_banner();
        fflush(stdout);
    }

#if defined(SVR4) || defined(__linux__)
    tzset();
#endif

    define_var("nil", NIL, 1);
    define_var("t", T, 1);

    if (!disable_lossage_handler_p)
        enable_lossage_handler();

    globals_init();

    initial_function = load_core_file(core, embedded_core_offset);
    if (initial_function == NIL) {
        lose("couldn't find initial function\n");
    }
#ifdef LISP_FEATURE_HPUX
    /* -1 = CLOSURE_FUN_OFFSET, 23 = SIMPLE_FUN_CODE_OFFSET, we are
     * not in LANGUAGE_ASSEMBLY so we can't reach them. */
    return_from_lisp_stub = (void *)
        ((char *)*((unsigned long *)((char *)initial_function + -1)) + 23);
#endif

    gc_initialize_pointers();

    arch_install_interrupt_handlers();
#ifndef LISP_FEATURE_WIN32
    os_install_interrupt_handlers();
#else
/*     wos_install_interrupt_handlers(handler); */
    wos_install_interrupt_handlers(&exception_frame);
#endif

    /* Pass core filename and the processed argv into Lisp. They'll
     * need to be processed further there, to do locale conversion. */
    core_string = core;
    posix_argv = sbcl_argv;

    FSHOW((stderr, "/funcalling initial_function=0x%lx\n",
           (unsigned long)initial_function));
#ifdef LISP_FEATURE_WIN32
    fprintf(stderr, "\n\
This is experimental prerelease support for the Windows platform: use\n\
at your own risk. \"Your Kitten of Death awaits!\"\n");
    fflush(stdout);
    fflush(stderr);
#endif
    create_initial_thread(initial_function);
    lose("CATS. CATS ARE NICE.\n");
    return 0;
}
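/*
 * Example invocation exercising the option parsing above (an illustration;
 * the core file name is made up). Everything before --end-runtime-options
 * is consumed here; the rest is passed through to Lisp via posix_argv.
 * Note that --dynamic-space-size takes megabytes (see the << 20 above).
 *
 *   sbcl --core sbcl.core --dynamic-space-size 512 \
 *        --end-runtime-options --eval '(quit)'
 */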
/**
 * Local main function which calls the application main.
 * app_main will be the entry point to the application and
 * must return.
 **/
static void local_main(void * arg)
{
    arch_init();
    app_main(arg);
}
/*===========================================================================*
 *                                   main                                    *
 *===========================================================================*/
PUBLIC void main()
{
    /* Start the ball rolling. */
    struct boot_image *ip;          /* boot image pointer */
    register struct proc *rp;       /* process pointer */
    register struct priv *sp;       /* privilege structure pointer */
    register int i, j, s;
    int hdrindex;                   /* index to array of a.out headers */
    phys_clicks text_base;
    vir_clicks text_clicks, data_clicks, st_clicks;
    reg_t ktsb;                     /* kernel task stack base */
    struct exec e_hdr;              /* for a copy of an a.out header */

    /* Architecture-dependent initialization. */
    arch_init();

    /* Global value to test segment sanity. */
    magictest = MAGICTEST;

    /* Clear the process table. Announce each slot as empty and set up mappings
     * for proc_addr() and proc_nr() macros. Do the same for the table with
     * privilege structures for the system processes.
     */
    for (rp = BEG_PROC_ADDR, i = -NR_TASKS; rp < END_PROC_ADDR; ++rp, ++i) {
        rp->p_rts_flags = SLOT_FREE;        /* initialize free slot */
#if DEBUG_SCHED_CHECK
        rp->p_magic = PMAGIC;
#endif
        rp->p_nr = i;                       /* proc number from ptr */
        rp->p_endpoint = _ENDPOINT(0, rp->p_nr); /* generation no. 0 */
    }
    for (sp = BEG_PRIV_ADDR, i = 0; sp < END_PRIV_ADDR; ++sp, ++i) {
        sp->s_proc_nr = NONE;               /* initialize as free */
        sp->s_id = i;                       /* priv structure index */
        ppriv_addr[i] = sp;                 /* priv ptr from number */
    }

    /* Set up proc table entries for processes in boot image. The stacks of the
     * kernel tasks are initialized to an array in data space. The stacks
     * of the servers have been added to the data segment by the monitor, so
     * the stack pointer is set to the end of the data segment. All the
     * processes are in low memory on the 8086. On the 386 only the kernel
     * is in low memory, the rest is loaded in extended memory.
     */

    /* Task stacks. */
    ktsb = (reg_t) t_stack;

    for (i = 0; i < NR_BOOT_PROCS; ++i) {
        int ci;
        bitchunk_t fv;

        ip = &image[i];                     /* process' attributes */
        rp = proc_addr(ip->proc_nr);        /* get process pointer */
        ip->endpoint = rp->p_endpoint;      /* ipc endpoint */
        rp->p_max_priority = ip->priority;  /* max scheduling priority */
        rp->p_priority = ip->priority;      /* current priority */
        rp->p_quantum_size = ip->quantum;   /* quantum size in ticks */
        rp->p_ticks_left = ip->quantum;     /* current credit */
        strncpy(rp->p_name, ip->proc_name, P_NAME_LEN); /* set process name */
        (void) get_priv(rp, (ip->flags & SYS_PROC));    /* assign structure */
        priv(rp)->s_flags = ip->flags;      /* process flags */
        priv(rp)->s_trap_mask = ip->trap_mask; /* allowed traps */

        /* Warn about violations of the boot image table order consistency. */
        if (priv_id(rp) != s_nr_to_id(ip->proc_nr) && (ip->flags & SYS_PROC))
            kprintf("Warning: boot image table has wrong process order\n");

        /* Initialize call mask bitmap from unordered set.
         * A single SYS_ALL_CALLS is a special case - it
         * means all calls are allowed.
         */
        if (ip->nr_k_calls == 1 && ip->k_calls[0] == SYS_ALL_CALLS)
            fv = ~0;                        /* fill call mask */
        else
            fv = 0;                         /* clear call mask */

        for (ci = 0; ci < CALL_MASK_SIZE; ci++) /* fill or clear call mask */
            priv(rp)->s_k_call_mask[ci] = fv;
        if (!fv)                            /* not all full? enter calls bit by bit */
            for (ci = 0; ci < ip->nr_k_calls; ci++)
                SET_BIT(priv(rp)->s_k_call_mask, ip->k_calls[ci]-KERNEL_CALL);

        for (j = 0; j < NR_SYS_PROCS && j < BITCHUNK_BITS; j++)
            if (ip->ipc_to & (1 << j))
                set_sendto_bit(rp, j);      /* restrict targets */

        if (iskerneln(proc_nr(rp))) {       /* part of the kernel? */
            if (ip->stksize > 0) {          /* HARDWARE stack size is 0 */
                rp->p_priv->s_stack_guard = (reg_t *) ktsb;
                *rp->p_priv->s_stack_guard = STACK_GUARD;
            }
            ktsb += ip->stksize;            /* point to high end of stack */
            rp->p_reg.sp = ktsb;            /* this task's initial stack ptr */
            hdrindex = 0;                   /* all use the first a.out header */
        } else {
            hdrindex = 1 + i-NR_TASKS;      /* servers, drivers, INIT */
        }

        /* Architecture-specific way to find out aout header of this
         * boot process.
         */
        arch_get_aout_headers(hdrindex, &e_hdr);

        /* Convert addresses to clicks and build process memory map */
        text_base = e_hdr.a_syms >> CLICK_SHIFT;
        text_clicks = (e_hdr.a_text + CLICK_SIZE-1) >> CLICK_SHIFT;
        data_clicks = (e_hdr.a_data+e_hdr.a_bss + CLICK_SIZE-1) >> CLICK_SHIFT;
        st_clicks = (e_hdr.a_total + CLICK_SIZE-1) >> CLICK_SHIFT;
        if (!(e_hdr.a_flags & A_SEP)) {
            data_clicks = (e_hdr.a_text+e_hdr.a_data+e_hdr.a_bss +
                           CLICK_SIZE-1) >> CLICK_SHIFT;
            text_clicks = 0;                /* common I&D */
        }
        rp->p_memmap[T].mem_phys = text_base;
        rp->p_memmap[T].mem_len = text_clicks;
        rp->p_memmap[D].mem_phys = text_base + text_clicks;
        rp->p_memmap[D].mem_len = data_clicks;
        rp->p_memmap[S].mem_phys = text_base + text_clicks + st_clicks;
        rp->p_memmap[S].mem_vir = st_clicks;
        rp->p_memmap[S].mem_len = 0;

        /* Set initial register values. The processor status word for tasks
         * is different from that of other processes because tasks can
         * access I/O; this is not allowed to less-privileged processes.
         */
        rp->p_reg.pc = (reg_t) ip->initial_pc;
        rp->p_reg.psw = (iskernelp(rp)) ? INIT_TASK_PSW : INIT_PSW;

        /* Initialize the server stack pointer. Take it down one word
         * to give crtso.s something to use as "argc".
         */
        if (isusern(proc_nr(rp))) {         /* user-space process? */
            rp->p_reg.sp = (rp->p_memmap[S].mem_vir +
                            rp->p_memmap[S].mem_len) << CLICK_SHIFT;
            rp->p_reg.sp -= sizeof(reg_t);
        }

        /* scheduling functions depend on proc_ptr pointing somewhere. */
        if (!proc_ptr)
            proc_ptr = rp;

        /* If this process has its own page table, VM will set the
         * PT up and manage it. VM will signal the kernel when it has
         * done this; until then, don't let it run.
         */
        if (priv(rp)->s_flags & PROC_FULLVM)
            RTS_SET(rp, VMINHIBIT);

        /* Set ready. The HARDWARE task is never ready. */
        if (rp->p_nr == HARDWARE)
            RTS_SET(rp, PROC_STOP);
        RTS_UNSET(rp, SLOT_FREE);           /* remove SLOT_FREE and schedule */

        alloc_segments(rp);
    }