/*
 * arch_init: architecture-specific kernel initialization, run once at boot.
 * Sets up the per-CPU kernel stacks, the TSS (uniprocessor only), the debug
 * serial port, ACPI and the APIC (falling back to the legacy PIC), and then
 * reserves the BIOS memory ranges so they are never handed out as free RAM.
 */
void arch_init(void)
{
	/* The kernel stack area was laid out by the linker; K_STACK_SIZE
	 * alignment is required so get_k_stack_top() arithmetic works.
	 */
	k_stacks = (void*) &k_stacks_start;
	assert(!((vir_bytes) k_stacks % K_STACK_SIZE));

#ifndef CONFIG_SMP
	/*
	 * use stack 0 and cpu id 0 on a single processor machine, SMP
	 * configuration does this in smp_init() for all cpus at once
	 */
	tss_init(0, get_k_stack_top(0));
#endif

#if !CONFIG_OXPCIE
	/* Initialize the (legacy) serial console for kernel output. */
	ser_init();
#endif

#ifdef USE_ACPI
	acpi_init();
#endif

#if defined(USE_APIC) && !defined(CONFIG_SMP)
	/* On a uniprocessor build the APIC is optional: it may be disabled
	 * on the boot command line, or simply absent from the hardware.
	 * Either way we fall back to the legacy PIC.
	 */
	if (config_no_apic) {
		BOOT_VERBOSE(printf("APIC disabled, using legacy PIC\n"));
	}
	else if (!apic_single_cpu_init()) {
		BOOT_VERBOSE(printf("APIC not present, using legacy PIC\n"));
	}
#endif

	/* Reserve some BIOS ranges */
	cut_memmap(&kinfo, BIOS_MEM_BEGIN, BIOS_MEM_END);
	cut_memmap(&kinfo, BASE_MEM_TOP, UPPER_MEM_END);
}
/*
 * arch_boot_proc: architecture-specific setup for a boot-image process.
 * For most processes this is a no-op; the one special case is VM, whose
 * ELF image is loaded directly into the bootstrap pagetable here so that
 * it can run before any paging service exists.
 *
 * ip: boot-image table entry (name, endpoint) for the process.
 * rp: the kernel process slot being set up.
 */
void arch_boot_proc(struct boot_image *ip, struct proc *rp)
{
	multiboot_module_t *mod;

	/* Kernel tasks have negative process numbers and no boot module. */
	if(rp->p_nr < 0) return;

	mod = bootmod(rp->p_nr);

	/* Important special case: we put VM in the bootstrap pagetable
	 * so it can run.
	 */
	if(rp->p_nr == VM_PROC_NR) {
		struct exec_info execi;

		memset(&execi, 0, sizeof(execi));

		/* exec parameters */
		execi.stack_high = kinfo.user_sp;
		execi.stack_size = 32 * 1024;	/* not too crazy as it must be preallocated */
		execi.proc_e = ip->endpoint;
		/* The module is accessed via its physical address; paging is
		 * not active yet (phys mem direct).
		 */
		execi.hdr = (char *) mod->mod_start;
		execi.hdr_len = mod->mod_end - mod->mod_start;
		strcpy(execi.progname, ip->proc_name);
		execi.frame_len = 0;

		/* callbacks for use in the kernel: plain memcpy/memset and the
		 * bootstrap page allocator, since no VM service exists yet
		 */
		execi.copymem = libexec_copy_memcpy;
		execi.clearmem = libexec_clear_memset;
		execi.allocmem_prealloc = libexec_pg_alloc;
		execi.allocmem_ondemand = libexec_pg_alloc;
		execi.clearproc = NULL;

		/* parse VM ELF binary and alloc/map it into bootstrap pagetable */
		libexec_load_elf(&execi);

		/* Initialize the server stack pointer. Take it down three words
		 * to give startup code something to use as "argc", "argv" and "envp".
		 */
		arch_proc_init(rp, execi.pc, kinfo.user_sp - 3*4, ip->proc_name);

		/* Free VM blob that was just copied into existence. */
		cut_memmap(&kinfo, mod->mod_start, mod->mod_end);

		/* Remember them: alloc_for_vm is presumably accumulated by the
		 * page-allocation callbacks above — NOTE(review): defined
		 * elsewhere in this file, verify it is reset before this load.
		 */
		kinfo.vm_allocated_bytes = alloc_for_vm;
	}
}
/*
 * alloc_lowest: hand out the lowest-addressed physical range of at least
 * 'len' bytes found in the boot memory map, rounding the request up to a
 * whole number of pages and carving the range out of the map so it cannot
 * be allocated twice. Panics (via assert) if no region is large enough.
 */
phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
{
#define EMPTY 0xffffffff
	phys_bytes best = EMPTY;	/* lowest suitable base found so far */
	int i;

	assert(len > 0);
	len = roundup(len, ARM_PAGE_SIZE);
	assert(kernel_may_alloc);

	/* Scan every map entry; remember the lowest base address among the
	 * regions big enough to satisfy the (page-rounded) request.
	 */
	for (i = 0; i < cbi->mmap_size; i++) {
		if (cbi->memmap[i].len >= len && cbi->memmap[i].addr < best)
			best = cbi->memmap[i].addr;
	}

	/* A boot-time allocation failure is unrecoverable. */
	assert(best != EMPTY);

	/* Remove the chosen range from the free map before returning it. */
	cut_memmap(cbi, best, len);

	return best;
}
/*
 * get_parameters: distill the multiboot information handed over by the
 * bootloader into the kernel's kinfo structure: kernel placement, boot
 * command-line parameters, the module list, and the physical memory map.
 * Finally, the kernel and all boot modules are cut out of the free memory
 * map (after checking that none of them overlap).
 *
 * ebx: multiboot info pointer from the bootloader — NOTE(review): appears
 *      unused since the direct memcpy was replaced by setup_mbi(); verify.
 * cbi: the boot-info structure being filled in (normally &kinfo).
 */
void get_parameters(u32_t ebx, kinfo_t *cbi)
{
	multiboot_memory_map_t *mmap;
	multiboot_info_t *mbi = &cbi->mbi;
	int var_i,value_i, m, k;
	char *p;
	/* Linker-provided symbols marking the kernel's physical/virtual
	 * placement; their *addresses* encode the values.
	 */
	extern char _kern_phys_base, _kern_vir_base, _kern_size,
		_kern_unpaged_start, _kern_unpaged_end;
	phys_bytes kernbase = (phys_bytes) &_kern_phys_base,
		kernsize = (phys_bytes) &_kern_size;
#define BUF 1024
	static char cmdline[BUF];

	/* get our own copy of the multiboot info struct and module list */
	//memcpy((void *) mbi, (void *) ebx, sizeof(*mbi));
	setup_mbi(mbi);

	/* Set various bits of info for the higher-level kernel. */
	cbi->mem_high_phys = 0;
	cbi->user_sp = (vir_bytes) &_kern_vir_base;
	cbi->vir_kern_start = (vir_bytes) &_kern_vir_base;
	cbi->bootstrap_start = (vir_bytes) &_kern_unpaged_start;
	cbi->bootstrap_len = (vir_bytes) &_kern_unpaged_end -
		cbi->bootstrap_start;
	cbi->kmess = &kmess;

	/* set some configurable defaults */
	cbi->do_serial_debug = 1;
	cbi->serial_debug_baud = 115200;

	/* parse boot command line */
	if (mbi->flags&MULTIBOOT_INFO_CMDLINE) {
		static char var[BUF];
		static char value[BUF];

		/* Override values with cmdline argument */
		/* NOTE(review): copies a fixed BUF bytes regardless of the
		 * actual command-line length — may read past the string;
		 * confirm the bootloader guarantees a readable BUF window.
		 */
		memcpy(cmdline, (void *) mbi->cmdline, BUF);
		p = cmdline;
		/* Tokenize "name=value" pairs separated by spaces; entries
		 * without '=' are skipped.
		 */
		while (*p) {
			var_i = 0;
			value_i = 0;
			while (*p == ' ') p++;
			if (!*p) break;
			while (*p && *p != '=' && *p != ' ' && var_i < BUF - 1)
				var[var_i++] = *p++ ;
			var[var_i] = 0;
			if (*p++ != '=') continue; /* skip if not name=value */
			while (*p && *p != ' ' && value_i < BUF - 1)
				value[value_i++] = *p++ ;
			value[value_i] = 0;

			mb_set_param(cbi->param_buf, var, value, cbi);
		}
	}

	/* let higher levels know what we are booting on */
	mb_set_param(cbi->param_buf, ARCHVARNAME, "earm", cbi);

	/* round user stack down to leave a gap to catch kernel
	 * stack overflow; and to distinguish kernel and user addresses
	 * at a glance (0xf.. vs 0xe..)
	 */
	cbi->user_sp &= 0xF0000000;
	cbi->user_end = cbi->user_sp;

	/* kernel bytes without bootstrap code/data that is currently
	 * still needed but will be freed after bootstrapping.
	 */
	kinfo.kernel_allocated_bytes = (phys_bytes) &_kern_size;
	kinfo.kernel_allocated_bytes -= cbi->bootstrap_len;

	assert(!(cbi->bootstrap_start % ARM_PAGE_SIZE));
	cbi->bootstrap_len = rounddown(cbi->bootstrap_len, ARM_PAGE_SIZE);
	assert(mbi->flags & MULTIBOOT_INFO_MODS);
	assert(mbi->mods_count < MULTIBOOT_MAX_MODS);
	assert(mbi->mods_count > 0);
	/* Take a private copy of the module list; the original lives in
	 * bootloader memory that may be reclaimed.
	 */
	memcpy(&cbi->module_list, (void *) mbi->mods_addr,
		mbi->mods_count * sizeof(multiboot_module_t));

	memset(cbi->memmap, 0, sizeof(cbi->memmap));
	/* mem_map has a variable layout */
	if(mbi->flags & MULTIBOOT_INFO_MEM_MAP) {
		cbi->mmap_size = 0;
		/* Each entry is 'size' bytes long, where 'size' does not
		 * include the size field itself — hence the stride below.
		 */
		for (mmap = (multiboot_memory_map_t *) mbi->mmap_addr;
			(unsigned long) mmap < mbi->mmap_addr + mbi->mmap_length;
			mmap = (multiboot_memory_map_t *)
				((unsigned long) mmap + mmap->size + sizeof(mmap->size))) {
			if(mmap->type != MULTIBOOT_MEMORY_AVAILABLE) continue;
			add_memmap(cbi, mmap->addr, mmap->len);
		}
	} else {
		/* No full map: fall back to the basic lower/upper memory
		 * sizes (in KB) from the multiboot header.
		 */
		assert(mbi->flags & MULTIBOOT_INFO_MEMORY);
		add_memmap(cbi, 0, mbi->mem_lower_unused*1024);
		add_memmap(cbi, 0x100000, mbi->mem_upper_unused*1024);
	}

	/* Sanity check: the kernel nor any of the modules may overlap
	 * with each other. Pretend the kernel is an extra module for a
	 * second.
	 */
	k = mbi->mods_count;
	assert(k < MULTIBOOT_MAX_MODS);
	cbi->module_list[k].mod_start = kernbase;
	cbi->module_list[k].mod_end = kernbase + kernsize;
	cbi->mods_with_kernel = mbi->mods_count+1;
	cbi->kern_mod = k;

	for(m = 0; m < cbi->mods_with_kernel; m++) {
#if 0
		printf("checking overlap of module %08lx-%08lx\n",
		  cbi->module_list[m].mod_start, cbi->module_list[m].mod_end);
#endif
		if(overlaps(cbi->module_list, cbi->mods_with_kernel, m))
			panic("overlapping boot modules/kernel");
		/* We cut out the bits of memory that we know are
		 * occupied by the kernel and boot modules.
		 */
		cut_memmap(cbi,
			cbi->module_list[m].mod_start,
			cbi->module_list[m].mod_end);
	}
}