/*
 * Initialize the kernel debugger by initializing the master symbol
 * table.  Note that if initializing the master symbol table fails,
 * no other symbol tables can be loaded.
 */
void
ddb_init(int symsize, void *vss, void *vse)
{
#ifdef _KERNEL
	/* In-kernel case: hand the symbols to ksyms(9), which reports errors. */
	ksyms_addsyms_elf(symsize, vss, vse);	/* Will complain if necessary */
#else	/* _KERNEL */
	/* Userland/crash(8)-style case: use the ELF symbol-format backend. */
	db_symformat = &db_symformat_elf;
	if ((*db_symformat->sym_init)(symsize, vss, vse, TBLNAME) != true)
		/* Fix: terminate the diagnostic with a newline so the
		 * console line is not left dangling. */
		printf("sym_init failed\n");
#endif	/* _KERNEL */
}
/*
 * Console initialization: called early on from main,
 */
void
consinit(void)
{
	/*
	 * Pick the console: when sysconsole is non-zero attach the
	 * on-board framebuffer plus workstation console, otherwise
	 * fall back to the serial console on unit 0.
	 */
	if (sysconsole != 0) {
		omfb_cnattach();
		ws_cnattach();
	} else {
		syscnattach(0);
	}

#if NKSYMS || defined(DDB) || defined(MODULAR)
	/*
	 * Register the kernel symbol table; a non-zero "size" is only
	 * claimed when the bootloader actually provided esym.
	 */
	ksyms_addsyms_elf(esym != NULL ? 1 : 0, (void *)&end, esym);
#endif
#ifdef DDB
	/* Drop straight into the kernel debugger when asked to at boot. */
	if (boothowto & RB_KDB)
		cpu_Debugger();
#endif
}
/*
 * Console initialization: called early on from main,
 * before vm init or startup.  Do enough configuration
 * to choose and initialize a console.
 */
void
consinit(void)
{
	/* Bring up the console first so panic/printf output is visible. */
	cninit();

#if NKSYMS || defined(DDB) || defined(MODULAR)
	{
		extern char end[];
		extern int *esym;
		/*
		 * The symbol blob sits between 'end' and 'esym'; its
		 * usable size excludes the leading ELF file header.
		 */
		int symtab_size =
		    (int)esym - (int)&end - sizeof(Elf32_Ehdr);

		ksyms_addsyms_elf(symtab_size, (void *)&end, esym);
	}
#endif
#ifdef DDB
	/* Enter the kernel debugger immediately when RB_KDB was given. */
	if (boothowto & RB_KDB)
		Debugger();
#endif
}
/*
 * locore.s code calls bootstrap() just before calling main().
 *
 * What we try to do is as follows:
 * - Initialize PROM and the console
 * - Read in part of information provided by a bootloader and find out
 *   kernel load and end addresses
 * - Initialize ksyms
 * - Find out number of active CPUs
 * - Finalize the bootstrap by calling pmap_bootstrap()
 *
 * We will try to run out of the prom until we get out of pmap_bootstrap().
 */
void
bootstrap(void *o0, void *bootargs, void *bootsize, void *o3, void *ofw)
{
	void *bi;
	long bmagic;
	char buf[32];
#if NKSYMS || defined(DDB) || defined(MODULAR)
	struct btinfo_symtab *bi_sym;
#endif
	struct btinfo_count *bi_count;
	struct btinfo_kernend *bi_kend;
	struct btinfo_tlb *bi_tlb;
	struct btinfo_boothowto *bi_howto;

	extern void *romtba;
	extern void* get_romtba(void);
	extern void OF_val2sym32(void *);
	extern void OF_sym2val32(void *);
	extern struct consdev consdev_prom;

	/* Save OpenFirmware entry point */
	romp = ofw;
	romtba = get_romtba();

	prom_init();
	console_instance = promops.po_stdout;
	console_node = OF_instance_to_package(promops.po_stdout);

	/* Initialize the PROM console so printf will not panic */
	cn_tab = &consdev_prom;
	(*cn_tab->cn_init)(cn_tab);

	DPRINTF(ACDB_BOOTARGS, ("sparc64_init(%p, %p, %p, %p, %p)\n", o0,
	    bootargs, bootsize, o3, ofw));

	/*
	 * Extract bootinfo pointer.  The bootargs layout differs only in
	 * word width between 32-bit and 64-bit boot loaders; word [3] is
	 * the bootinfo pointer and word [0] is the magic.
	 */
	if ((long)bootsize >= (4 * sizeof(uint64_t))) {
		/* Loaded by 64-bit bootloader */
		bi = (void*)(u_long)(((uint64_t*)bootargs)[3]);
		bmagic = (long)(((uint64_t*)bootargs)[0]);
	} else if ((long)bootsize >= (4 * sizeof(uint32_t))) {
		/* Loaded by 32-bit bootloader */
		bi = (void*)(u_long)(((uint32_t*)bootargs)[3]);
		bmagic = (long)(((uint32_t*)bootargs)[0]);
	} else {
		printf("Bad bootinfo size.\n");
		/*
		 * NOTE: this label lives inside the else branch but is
		 * also reached by the bad-magic goto below; both error
		 * paths end in panic() and never return.
		 */
die_old_boot_loader:
		printf("This kernel requires NetBSD boot loader version 1.9 "
		    "or newer\n");
		panic("sparc64_init.");
	}

	DPRINTF(ACDB_BOOTARGS,
	    ("sparc64_init: bmagic=%lx, bi=%p\n", bmagic, bi));

	/* Read in the information provided by NetBSD boot loader */
	if (SPARC_MACHINE_OPENFIRMWARE != bmagic) {
		printf("No bootinfo information.\n");
		goto die_old_boot_loader;
	}

	bootinfo = (void*)(u_long)((uint64_t*)bi)[1];
	LOOKUP_BOOTINFO(bi_kend, BTINFO_KERNEND);

	if (bi_kend->addr == (vaddr_t)0) {
		panic("Kernel end address is not found in bootinfo.\n");
	}

#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* Register the loader-provided symbol table with ksyms(9). */
	LOOKUP_BOOTINFO(bi_sym, BTINFO_SYMTAB);
	ksyms_addsyms_elf(bi_sym->nsym, (int *)(u_long)bi_sym->ssym,
	    (int *)(u_long)bi_sym->esym);
#ifdef DDB
#ifdef __arch64__
	/* This can only be installed on an 64-bit system cause
	   otherwise our stack is screwed */
	OF_set_symbol_lookup(OF_sym2val, OF_val2sym);
#else
	OF_set_symbol_lookup(OF_sym2val32, OF_val2sym32);
#endif
#endif
#endif
	/* Detect sun4us / sun4v variants from the root node's
	 * "compatible" property; other machines keep the default type. */
	if (OF_getprop(findroot(), "compatible", buf, sizeof(buf)) > 0) {
		if (strcmp(buf, "sun4us") == 0)
			setcputyp(CPU_SUN4US);
		else if (strcmp(buf, "sun4v") == 0)
			setcputyp(CPU_SUN4V);
	}

	bi_howto = lookup_bootinfo(BTINFO_BOOTHOWTO);
	if (bi_howto)
		boothowto = bi_howto->boothowto;

	/*
	 * TLB slot counts: ITLB slot count defaults to dtlb_slots - 1
	 * when the loader supplies no separate BTINFO_ITLB_SLOTS record.
	 */
	LOOKUP_BOOTINFO(bi_count, BTINFO_DTLB_SLOTS);
	kernel_dtlb_slots = bi_count->count;
	kernel_itlb_slots = kernel_dtlb_slots-1;
	bi_count = lookup_bootinfo(BTINFO_ITLB_SLOTS);
	if (bi_count)
		kernel_itlb_slots = bi_count->count;
	LOOKUP_BOOTINFO(bi_tlb, BTINFO_DTLB);
	kernel_tlbs = &bi_tlb->tlb[0];

	get_ncpus();
	/* Finalize bootstrap; we run out of the PROM until this returns. */
	pmap_bootstrap(KERNBASE, bi_kend->addr);
}
/*
 * Do all the stuff that locore normally does before calling main().
 *
 * memsize32: physical memory size passed from the firmware/bootloader.
 * bim:       bootinfo magic (BOOTINFO_MAGIC when a bootinfo block exists).
 * bip32:     32-bit pointer to the bootinfo block.
 */
void
mach_init(int32_t memsize32, u_int bim, int32_t bip32)
{
	intptr_t memsize = (int32_t)memsize32;
	char *kernend;
	char *bip = (char *)(intptr_t)(int32_t)bip32;
	u_long first, last;
	extern char edata[], end[];
	const char *bi_msg;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	char *ssym = 0;
	struct btinfo_symtab *bi_syms;
#endif
	struct btinfo_howto *bi_howto;

	/*
	 * Clear the BSS segment (if needed).
	 * An ELF header at 'end' means the bootloader appended a symbol
	 * table (and already cleared BSS for us).
	 */
	if (memcmp(((Elf_Ehdr *)end)->e_ident, ELFMAG, SELFMAG) == 0 &&
	    ((Elf_Ehdr *)end)->e_ident[EI_CLASS] == ELFCLASS) {
		esym = end;
#if NKSYMS || defined(DDB) || defined(MODULAR)
		/* e_entry holds the appended symbol blob's size here. */
		esym += ((Elf_Ehdr *)end)->e_entry;
#endif
		kernend = (char *)mips_round_page(esym);
		/*
		 * We don't have to clear BSS here
		 * since our bootloader already does it.
		 */
#if 0
		memset(edata, 0, end - edata);
#endif
	} else {
		kernend = (void *)mips_round_page(end);
		/*
		 * No symbol table, so assume we are loaded by
		 * the firmware directly with "bfd" command.
		 * The firmware loader doesn't clear BSS of
		 * a loaded kernel, so do it here.
		 */
		memset(edata, 0, kernend - edata);
	}

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init(NULL, false);

	/*
	 * Check for valid bootinfo passed from bootstrap.
	 * On failure, bi_msg carries a diagnostic that is printed later,
	 * once the console is up; 'bim' is reused to carry the offending
	 * value for that message.
	 */
	if (bim == BOOTINFO_MAGIC) {
		struct btinfo_magic *bi_magic;

		bootinfo = bip;
		bi_magic = lookup_bootinfo(BTINFO_MAGIC);
		if (bi_magic == NULL) {
			bi_msg = "missing bootinfo structure";
			bim = (uintptr_t)bip;
		} else if (bi_magic->magic != BOOTINFO_MAGIC) {
			bi_msg = "invalid bootinfo structure";
			bim = bi_magic->magic;
		} else
			bi_msg = NULL;
	} else {
		bi_msg = "invalid bootinfo (standalone boot?)";
	}

#if NKSYMS || defined(DDB) || defined(MODULAR)
	bi_syms = lookup_bootinfo(BTINFO_SYMTAB);

	/* Load symbol table if present */
	if (bi_syms != NULL) {
		ssym = (void *)(intptr_t)bi_syms->ssym;
		esym = (void *)(intptr_t)bi_syms->esym;
		kernend = (void *)mips_round_page(esym);
	}
#endif

	bi_howto = lookup_bootinfo(BTINFO_HOWTO);
	if (bi_howto != NULL)
		boothowto = bi_howto->bi_howto;

	/* Identify the Cobalt model and derive the CPU clock from it. */
	cobalt_id = read_board_id();
	if (cobalt_id >= COBALT_MODELS || cobalt_model[cobalt_id] == NULL)
		cpu_setmodel("Cobalt unknown model (board ID %u)",
		    cobalt_id);
	else
		cpu_setmodel("%s", cobalt_model[cobalt_id]);

	switch (cobalt_id) {
	case COBALT_ID_QUBE2700:
	case COBALT_ID_RAQ:
		cpuspeed = 150; /* MHz */
		break;
	case COBALT_ID_QUBE2:
	case COBALT_ID_RAQ2:
		cpuspeed = 250; /* MHz */
		break;
	default:
		/* assume the fastest, so that delay(9) works */
		cpuspeed = 250;
		break;
	}
	curcpu()->ci_cpu_freq = cpuspeed * 1000 * 1000;
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + (1000000 / 2)) / 1000000);
	/* all models have Rm5200, which is CPU_MIPS_DOUBLE_COUNT */
	curcpu()->ci_cycles_per_hz /= 2;
	curcpu()->ci_divisor_delay /= 2;

	physmem = btoc(memsize - MIPS_KSEG0_START);

	consinit();

	KASSERT(&lwp0 == curlwp);
	/* Now that the console exists, report any earlier bootinfo problem. */
	if (bi_msg != NULL)
		printf("%s: magic=%#x bip=%p\n", bi_msg, bim, bip);

	uvm_setpagesize();

	/*
	 * The boot command is passed in the top 512 bytes,
	 * so don't clobber that.
	 */
	mem_clusters[0].start = 0;
	mem_clusters[0].size = ctob(physmem) - 512;
	mem_cluster_cnt = 1;

	/* Snapshot the boot string, then wipe its in-memory copy. */
	memcpy(bootstring, (char *)(memsize - 512), 512);
	memset((char *)(memsize - 512), 0, 512);
	bootstring[511] = '\0';

	decode_bootstring();

#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* init symbols if present */
	if ((bi_syms != NULL) && (esym != NULL))
		ksyms_addsyms_elf(esym - ssym, ssym, esym);
#endif
	KASSERT(&lwp0 == curlwp);
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef KGDB
	if (boothowto & RB_KDB)
		kgdb_connect(0);
#endif

	/*
	 * Load the rest of the available pages into the VM system.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	pmap_bootstrap();

	/*
	 * Allocate space for proc0's USPACE.
	 */
	mips_init_lwp0_uarea();
}
/*
 * Do all the stuff that locore normally does before calling main().
 * Process arguments passed to us by the prom monitor.
 * Return the first page address following the system.
 *
 * The x_* parameters come from the PROM but may be overridden by
 * bootinfo records supplied by a native bootloader.
 */
void
mach_init(int x_boothowto, int x_bootdev, int x_bootname, int x_maxmem)
{
	u_long first, last;
	char *kernend;
	struct btinfo_magic *bi_magic;
	struct btinfo_bootarg *bi_arg;
	struct btinfo_systype *bi_systype;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	struct btinfo_symtab *bi_sym;
	int nsym = 0;
	char *ssym, *esym;

	ssym = esym = NULL;	/* XXX: gcc */
#endif
	bi_arg = NULL;

	bootinfo = (void *)BOOTINFO_ADDR;	/* XXX */
	bi_magic = lookup_bootinfo(BTINFO_MAGIC);
	if (bi_magic && bi_magic->magic == BOOTINFO_MAGIC) {
		/* Native loader: let bootinfo override the PROM arguments. */
		bi_arg = lookup_bootinfo(BTINFO_BOOTARG);
		if (bi_arg) {
			x_boothowto = bi_arg->howto;
			x_bootdev = bi_arg->bootdev;
			x_maxmem = bi_arg->maxmem;
		}
#if NKSYMS || defined(DDB) || defined(MODULAR)
		bi_sym = lookup_bootinfo(BTINFO_SYMTAB);
		if (bi_sym) {
			nsym = bi_sym->nsym;
			ssym = (void *)bi_sym->ssym;
			esym = (void *)bi_sym->esym;
		}
#endif
		bi_systype = lookup_bootinfo(BTINFO_SYSTYPE);
		if (bi_systype)
			systype = bi_systype->type;
	} else {
		/*
		 * Running kernel is loaded by non-native loader;
		 * clear the BSS segment here.
		 */
		memset(edata, 0, end - edata);
	}

	if (systype == 0)
		systype = NEWS3400;	/* XXX compatibility for old boot */

#ifdef news5000
	if (systype == NEWS5000) {
		int i;
		char *bootspec = (char *)x_bootdev;

		if (bi_arg == NULL)
			panic("news5000 requires BTINFO_BOOTARG to boot");

		_sip = (void *)bi_arg->sip;
		x_maxmem = _sip->apbsi_memsize;
		x_maxmem -= 0x00100000;	/* reserve 1MB for ROM monitor */
		/*
		 * Parse a "scsi(bus,ctlr-unit,part)"-style boot spec and
		 * pack it into the encoded x_bootdev word.  Any parse
		 * failure just stops early with what was decoded so far.
		 */
		if (strncmp(bootspec, "scsi", 4) == 0) {
			x_bootdev = (5 << 28) | 0;	/* magic, sd */
			bootspec += 4;
			if (*bootspec != '(' /*)*/)
				goto bootspec_end;
			i = strtoul(bootspec + 1, &bootspec, 10);
			x_bootdev |= (i << 24);		/* bus */
			if (*bootspec != ',')
				goto bootspec_end;
			i = strtoul(bootspec + 1, &bootspec, 10);
			x_bootdev |= (i / 10) << 20;	/* controller */
			x_bootdev |= (i % 10) << 16;	/* unit */
			if (*bootspec != ',')
				goto bootspec_end;
			i = strtoul(bootspec + 1, &bootspec, 10);
			x_bootdev |= (i << 8);		/* partition */
		}
 bootspec_end:
		consinit();
	}
#endif

	/*
	 * Save parameters into kernel work area.
	 */
	*(int *)(MIPS_PHYS_TO_KSEG1(MACH_MAXMEMSIZE_ADDR)) = x_maxmem;
	*(int *)(MIPS_PHYS_TO_KSEG1(MACH_BOOTDEV_ADDR)) = x_bootdev;
	*(int *)(MIPS_PHYS_TO_KSEG1(MACH_BOOTSW_ADDR)) = x_boothowto;

	kernend = (char *)mips_round_page(end);
#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* The kernel image ends after the symbol table, when present. */
	if (nsym)
		kernend = (char *)mips_round_page(esym);
#endif

	/*
	 * Set the VM page size.
	 */
	uvm_setpagesize();

	boothowto = x_boothowto;
	bootdev = x_bootdev;
	physmem = btoc(x_maxmem);

	/*
	 * Now that we know how much memory we have, initialize the
	 * mem cluster array.
	 */
	mem_clusters[0].start = 0;	/* XXX is this correct? */
	mem_clusters[0].size = ctob(physmem);
	mem_cluster_cnt = 1;

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init(NULL, false);

	/*
	 * We know the CPU type now.  Initialize our DMA tags (might
	 * need this early).
	 */
	newsmips_bus_dma_init();

#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (nsym)
		ksyms_addsyms_elf(esym - ssym, ssym, esym);
#endif

#ifdef KADB
	boothowto |= RB_KDB;
#endif

	/*
	 * Check to see if a mini-root was loaded into memory. It resides
	 * at the start of the next page just after the end of BSS.
	 */
	if (boothowto & RB_MINIROOT)
		kernend += round_page(mfs_initminiroot(kernend));

	/*
	 * Load the rest of the available pages into the VM system.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	/*
	 * Initialize the virtual memory system.
	 */
	pmap_bootstrap();

	/*
	 * Allocate uarea page for lwp0 and set it.
	 */
	mips_init_lwp0_uarea();

	/*
	 * Determine what model of computer we are running on.
	 */
	switch (systype) {
#ifdef news3400
	case NEWS3400:
		news3400_init();
		strcpy(cpu_model, idrom.id_machine);
		if (strcmp(cpu_model, "news3400") == 0 ||
		    strcmp(cpu_model, "news3200") == 0 ||
		    strcmp(cpu_model, "news3700") == 0) {
			/*
			 * Set up interrupt handling and I/O addresses.
			 */
			hardware_intr = news3400_intr;
			cpuspeed = 10;
		} else {
			printf("kernel not configured for machine %s\n",
			    cpu_model);
		}
		break;
#endif
#ifdef news5000
	case NEWS5000:
		news5000_init();
		strcpy(cpu_model, idrom.id_machine);
		if (strcmp(cpu_model, "news5000") == 0 ||
		    strcmp(cpu_model, "news5900") == 0) {
			/*
			 * Set up interrupt handling and I/O addresses.
			 */
			hardware_intr = news5000_intr;
			cpuspeed = 50;	/* ??? XXX */
		} else {
			printf("kernel not configured for machine %s\n",
			    cpu_model);
		}
		break;
#endif
	default:
		printf("kernel not configured for systype %d\n", systype);
		break;
	}
}
/*
 * Machine-dependent startup for the SHARK: set up the OFW interface,
 * ISA space, console, memory, exception vectors and the FIQ handler.
 * Returns the new SVC-mode stack base for locore to install.
 */
u_int
initarm(void *arg)
{
	ofw_handle_t ofw_handle = arg;
	paddr_t pclean;
	vaddr_t isa_io_virtaddr, isa_mem_virtaddr;
	paddr_t isadmaphysbufs;
	extern char shark_fiq[], shark_fiq_end[];

	/* Don't want to get hit with interrupts 'til we're ready. */
	(void)disable_interrupts(I32_bit | F32_bit);

	set_cpufuncs();

	/* XXX - set these somewhere else? -JJK */
	boothowto = 0;

	/* Init the OFW interface. */
	/* MUST do this before invoking any OFW client services! */
	ofw_init(ofw_handle);

	/* Configure ISA stuff: must be done before consinit */
	ofw_configisa(&isa_io_physaddr, &isa_mem_physaddr);

	/* Map-in ISA I/O and memory space. */
	/* XXX - this should be done in the isa-bus attach routine! -JJK */
	isa_mem_virtaddr = ofw_map(isa_mem_physaddr, L1_S_SIZE, 0);
	isa_io_virtaddr = ofw_map(isa_io_physaddr, L1_S_SIZE, 0);

	/* Set-up the ISA system: must be done before consinit */
	isa_init(isa_io_virtaddr, isa_mem_virtaddr);

	/* Initialize the console (which will call into OFW). */
	/* This will allow us to see panic messages and other printf output. */
	consinit();

	/* Get boot info and process it. */
	ofw_getbootinfo(&boot_file, &boot_args);
	process_kernel_args();

	ofw_configisadma(&isadmaphysbufs);
#if (NISADMA > 0)
	isa_dma_init();
#endif

	/*
	 * allocate a cache clean space
	 * (double-sized mapping, per the 0x4000 * 2 below — presumably
	 * for the SA-1 cache-clean trick; confirm against sa11x0 docs)
	 */
	if ((pclean = ofw_getcleaninfo()) != -1) {
		sa1_cache_clean_addr = ofw_map(pclean, 0x4000 * 2,
		    L2_B | L2_C);
		sa1_cache_clean_size = 0x4000;
	}

	/* Configure memory. */
	ofw_configmem();

	/*
	 * Set-up stacks.
	 * The kernel stack for SVC mode will be updated on return
	 * from this routine.
	 */
	set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE, undstack.pv_va + PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + PAGE_SIZE);

	/* Set-up exception handlers. */

	/*
	 * Take control of selected vectors from OFW.
	 * We take: undefined, swi, pre-fetch abort, data abort, addrexc,
	 * irq, fiq
	 * OFW retains: reset
	 */
	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL & ~ARM_VEC_RESET);

	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address =
	    (u_int)undefinedinstruction_bounce;	/* why is this needed? -JJK */

	/* Initialise the undefined instruction handlers. */
	undefined_init();

	/* Now for the SHARK-specific part of the FIQ set-up */
	shark_fiqhandler.fh_func = shark_fiq;
	shark_fiqhandler.fh_size = shark_fiq_end - shark_fiq;
	shark_fiqhandler.fh_flags = 0;
	shark_fiqhandler.fh_regs = &shark_fiqregs;

	shark_fiqregs.fr_r8 = isa_io_virtaddr;
	shark_fiqregs.fr_r9 = 0;	/* no routine right now */
	shark_fiqregs.fr_r10 = 0;	/* no arg right now */
	shark_fiqregs.fr_r11 = 0;	/* scratch */
	shark_fiqregs.fr_r12 = 0;	/* scratch */
	shark_fiqregs.fr_r13 = 0;	/* must set a stack when r9 is set! */

	if (fiq_claim(&shark_fiqhandler))
		panic("Cannot claim FIQ vector.");

#if NKSYMS || defined(DDB) || defined(MODULAR)
#ifndef __ELF__
	/* a.out kernels only: symbol count comes from the exec header. */
	{
		struct exec *kernexec = (struct exec *)KERNEL_TEXT_BASE;
		extern int end;
		extern char *esym;

		ksyms_addsyms_elf(kernexec->a_syms, &end, esym);
	}
#endif /* __ELF__ */
#endif /* NKSYMS || defined(DDB) || defined(MODULAR) */

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* Return the new stackbase. */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
/*
 * locore.s code calls bootstrap() just before calling main(), after double
 * mapping the kernel to high memory and setting up the trap base register.
 * We must finish mapping the kernel properly and glean any bootstrap info.
 */
void
bootstrap(void)
{
	extern uint8_t u0[];
	extern struct consdev consdev_prom;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	struct btinfo_symtab *bi_sym;
#else
	extern int end[];
#endif
	struct btinfo_boothowto *bi_howto;

	/* PROM console first, so early printf/panic output is visible. */
	cn_tab = &consdev_prom;
	prom_init();

	/* Find the number of CPUs as early as possible */
	sparc_ncpus = find_cpus();
	uvm_lwp_setuarea(&lwp0, (vaddr_t)u0);
	cpuinfo.master = 1;
	getcpuinfo(&cpuinfo, 0);
	curlwp = &lwp0;

#if defined(SUN4M) || defined(SUN4D)
	/* Switch to sparc v8 multiply/divide functions on v8 machines */
	if (cpu_arch == 8) {
		extern void sparc_v8_muldiv(void);
		sparc_v8_muldiv();
	}
#endif /* SUN4M || SUN4D */

#if !NKSYMS && !defined(DDB) && !defined(MODULAR)
	/*
	 * We want to reuse the memory where the symbols were stored
	 * by the loader. Relocate the bootinfo array which is loaded
	 * above the symbols (we assume) to the start of BSS. Then
	 * adjust kernel_top accordingly.
	 */
	bootinfo_relocate((void *)ALIGN((u_int)end));
#endif

	pmap_bootstrap(cpuinfo.mmu_ncontext,
		       cpuinfo.mmu_nregion,
		       cpuinfo.mmu_nsegment);

#if !defined(MSGBUFSIZE) || MSGBUFSIZE == 8192
	/*
	 * Now that the kernel map has been set up, we can enable
	 * the message buffer at the first physical page in the
	 * memory bank where we were loaded. There are 8192
	 * bytes available for the buffer at this location (see the
	 * comment in locore.s at the top of the .text segment).
	 */
	initmsgbuf((void *)KERNBASE, 8192);
#endif

#if defined(SUN4M)
	/*
	 * sun4m bootstrap is complex and is totally different for "normal" 4m
	 * and for microSPARC-IIep - so it's split into separate functions.
	 */
	if (CPU_ISSUN4M) {
#if !defined(MSIIEP)
		bootstrap4m();
#else
		bootstrapIIep();
#endif
	}
#endif /* SUN4M */

#if defined(SUN4) || defined(SUN4C)
	if (CPU_ISSUN4 || CPU_ISSUN4C) {
		/* Map Interrupt Enable Register */
		pmap_kenter_pa(INTRREG_VA,
		    INT_ENABLE_REG_PHYSADR | PMAP_NC | PMAP_OBIO,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
		pmap_update(pmap_kernel());
		/* Disable all interrupts */
		*((unsigned char *)INTRREG_VA) = 0;
	}
#endif /* SUN4 || SUN4C */

#if NKSYMS || defined(DDB) || defined(MODULAR)
	if ((bi_sym = lookup_bootinfo(BTINFO_SYMTAB)) != NULL) {
		if (bi_sym->ssym < KERNBASE) {
			/* Assume low-loading boot loader */
			bi_sym->ssym += KERNBASE;
			bi_sym->esym += KERNBASE;
		}
		ksyms_addsyms_elf(bi_sym->nsym, (void*)bi_sym->ssym,
		    (void*)bi_sym->esym);
	}
#endif

	/* Honor any RB_* flags the boot loader recorded in bootinfo. */
	if ((bi_howto = lookup_bootinfo(BTINFO_BOOTHOWTO)) != NULL) {
		boothowto = bi_howto->boothowto;
	}
}
/*
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 *
 * Returns the new SVC-mode stack pointer for locore.
 * NOTE(review): 'arg' is unused in the visible body (the board values
 * are hard-coded below).
 */
u_int
initarm(void *arg)
{
	int loop;
	int loop1;
	u_int kerneldatasize, symbolsize;
	vaddr_t l1pagetable;
	vaddr_t freemempos;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	Elf_Shdr *sh;
#endif

	cpu_reset_address = ixp12x0_reset;

	/*
	 * Since we map v0xf0000000 == p0x90000000, it's possible for
	 * us to initialize the console now.
	 */
	consinit();

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (IXM1200) booting ...\n");
#endif

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("CPU not recognized!");

	/* XXX overwrite bootconfig to hardcoded values */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages = 0x10000000 / PAGE_SIZE; /* SDRAM 256MB */
	bootconfig.dramblocks = 1;

	kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;
	symbolsize = 0;
#ifdef PMAP_DEBUG
	pmap_debug(-1);
#endif
#if NKSYMS || defined(DDB) || defined(MODULAR)
	/*
	 * If an ELF image (with section headers) sits at 'end', find the
	 * extent of the appended symbol data by taking the largest
	 * section end offset.
	 */
	if (!memcmp(&end, "\177ELF", 4)) {
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for (; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

#ifdef VERBOSE_INIT_ARM
	printf("kernsize=0x%x\n", kerneldatasize);
#endif
	kerneldatasize += symbolsize;
	/* Round the kernel footprint up with extra slack pages. */
	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1))
	    + PAGE_SIZE * 8;

	/*
	 * Set up the variables that define the availablilty of physcial
	 * memory
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start
	    + (bootconfig.dram[0].pages * PAGE_SIZE);
	physical_freestart = physical_start
	    + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_freeend = physical_end;
	physmem = (physical_end - physical_start) / PAGE_SIZE;

	freemempos = 0xc0000000;

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("CP15 Register1 = 0x%08x\n", cpu_get_control());
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	    physical_freestart, free_pages, free_pages);
	printf("physical_start = 0x%08lx, physical_end = 0x%08lx\n",
	    physical_start, physical_end);
#endif

	/*
	 * Define a macro to simplify memory allocation.
	 * NOTE: both macros advance 'freemempos' as a side effect and
	 * zero the allocated pages.
	 */
#define	valloc_pages(var, np)			\
	alloc_pages((var).pv_pa, (np));		\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define	alloc_pages(var, np)			\
	(var) = freemempos;			\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));	\
	freemempos += (np) * PAGE_SIZE;

	/*
	 * Carve out the L1 table (on its first 16KB-aligned opportunity)
	 * and NUM_KERNEL_PTS L2 tables from the free memory pool.
	 */
	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE)
		    & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

#ifdef DIAGNOSTIC
	/*
	 * This should never be able to happen but better confirm that.
	 */
	if (!kernel_l1pt.pv_pa ||
	    (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");
#endif

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

#ifdef CPU_IXP12X0
	/*
	 * XXX totally stuffed hack to work round problems introduced
	 * in recent versions of the pmap code. Due to the calls used there
	 * we cannot allocate virtual memory during bootstrap.
	 *
	 * Keep allocating single pages until one lands on a
	 * CACHE_CLEAN_SIZE boundary, then grab the rest of the region.
	 */
	for (;;) {
		alloc_pages(ixp12x0_cc_base, 1);
		if (!(ixp12x0_cc_base & (CPU_IXP12X0_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	{
		vaddr_t dummy;
		alloc_pages(dummy,
		    CPU_IXP12X0_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);
	}
	ixp12x0_cache_clean_addr = ixp12x0_cc_base;
	ixp12x0_cache_clean_size = CPU_IXP12X0_CACHE_CLEAN_SIZE / 2;
#endif /* CPU_IXP12X0 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);

	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);

	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

	pmap_link_l2pt(l1pagetable, IXP12X0_IO_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO]);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

#if XXX
	/* Now we fill in the L2 pagetable for the kernel code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;

		logical = 0x00200000;	/* offset of kernel in RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}
#else
	{
		/* Single flat mapping of the whole kernel image. */
		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, KERNEL_TEXT_BASE,
		    kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}
#endif

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* The page tables themselves get page-table attributes. */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#ifdef VERBOSE_INIT_ARM
	printf("systempage (vector page): p0x%08lx v0x%08lx\n",
	    systempage.pv_pa, vector_page);
#endif

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, ixm1200_devmap);

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

	/*
	 * Map the Dcache Flush page.
	 * Hw Ref Manual 3.2.4.5 Software Dcache Flush
	 */
	pmap_map_chunk(l1pagetable, ixp12x0_cache_clean_addr, 0xe0000000,
	    CPU_IXP12X0_CACHE_CLEAN_SIZE, VM_PROT_READ, PTE_CACHE);

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/* Switch tables */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved here from cpu_startup() as data_abort_handler() references
	 * this during init
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross reloations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif /* PMAP_DEBUG */

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialisation of the vetcors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef VERBOSE_INIT_ARM
	printf("\ndata_abort_handler_address = %08x\n",
	    data_abort_handler_address);
	printf("prefetch_abort_handler_address = %08x\n",
	    prefetch_abort_handler_address);
	printf("undefined_handler_address = %08x\n",
	    undefined_handler_address);
#endif

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	ixp12x0_intr_init();
#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	    physical_freestart, free_pages, free_pages);
	printf("freemempos=%08lx\n", freemempos);
	printf("switching to new L1 page table @%#lx... \n",
	    kernel_l1pt.pv_pa);
#endif

	consinit();
#ifdef VERBOSE_INIT_ARM
	printf("consinit \n");
#endif

	ixdp_ixp12x0_cc_setup();

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* Register the symbols that were found appended after 'end'. */
	ksyms_addsyms_elf(symbolsize, ((int *)&end),
	    ((char *)&end) + symbolsize);
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
/*
 * cpu_startup: allocate memory for variable-sized tables,
 * initialize CPU, and do autoconfiguration.
 *
 * This is called early in init_main.c:main(), after the
 * kernel memory allocator is ready for use, but before
 * the creation of processes 1,2, and mountroot, etc.
 */
void
cpu_startup(void)
{
	void *v;
	vaddr_t minaddr, maxaddr;
	char pbuf[9];

	/*
	 * Initialize message buffer (for kernel printf).
	 * This is put in physical pages four through seven
	 * so it will always be in the same place after a
	 * reboot. (physical pages 0-3 are reserved by the PROM
	 * for its vector table and other stuff.)
	 * Its mapping was prepared in pmap_bootstrap().
	 * Also, offset some to avoid PROM scribbles.
	 */
	v = (void *) (PAGE_SIZE * 4);
	msgbufaddr = (void *)((char *)v + MSGBUFOFF);
	initmsgbuf(msgbufaddr, MSGBUFSIZE);

#if NKSYMS || defined(DDB) || defined(MODULAR)
	{
		/* Symbol table bounds were stashed by the bootstrap code. */
		extern int nsym;
		extern char *ssym, *esym;

		ksyms_addsyms_elf(nsym, ssym, esym);
	}
#endif /* DDB */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	identifycpu();
	fputype = FPU_NONE;
#ifdef FPU_EMULATE
	printf("fpu: emulator\n");
#else
	printf("fpu: no math support\n");
#endif

	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	/*
	 * XXX fredette - we force a small number of buffers
	 * to help me debug this on my low-memory machine.
	 * this should go away at some point, allowing the
	 * normal automatic buffer-sizing to happen.
	 */
	bufpages = 37;

	/*
	 * Get scratch page for dumpsys().
	 */
	if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE,0,
	    UVM_KMF_WIRED)) == 0)
		panic("startup: alloc dumppage");

	minaddr = 0;
	/*
	 * Allocate a submap for physio
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, false, NULL);

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

	/*
	 * Allocate a virtual page (for use by /dev/mem)
	 * This page is handed to pmap_enter() therefore
	 * it has to be in the normal kernel VA range.
	 */
	vmmap = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);

	/*
	 * Allocate DMA map for devices on the bus.
	 */
	dvmamap = extent_create("dvmamap",
	    DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
	    0, 0, EX_NOWAIT);
	if (dvmamap == NULL)
		panic("unable to allocate DVMA map");

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();
}
/*
 * Machine-dependent bootstrap entry, called (with the MMU still off —
 * hpcboot loads us that way) before main().
 * It is responsible for setting up everything that must be
 * in place when main is called.
 * This includes:
 *	Initializing the physical console so characters can be printed.
 *	Setting up page tables for the kernel.
 *
 * Returns the new SVC-mode stack pointer for locore to switch to.
 */
u_int
init_sa11x0(int argc, char **argv, struct bootinfo *bi)
{
	u_int kerneldatasize, symbolsize;
	u_int l1pagetable;
	vaddr_t freemempos;
	vsize_t pt_size;
	int loop;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	Elf_Shdr *sh;
#endif

#ifdef DEBUG_BEFOREMMU
	/*
	 * At this point, we cannot call real consinit().
	 * Just call a faked up version of consinit(), which does the thing
	 * with MMU disabled.
	 */
	fakecninit();
#endif

	/*
	 * XXX for now, overwrite bootconfig to hardcoded values.
	 * XXX kill bootconfig and directly call uvm_physload
	 */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages = DRAM_PAGES;
	bootconfig.dramblocks = 1;

	kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;
	symbolsize = 0;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (!memcmp(&end, "\177ELF", 4)) {
		/*
		 * An ELF symbol image was appended right after the kernel;
		 * walk its section headers to find the total image size.
		 */
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for (; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

	printf("kernsize=0x%x\n", kerneldatasize);
	kerneldatasize += symbolsize;
	/* Round up to a 16KB (PAGE_SIZE * 4) boundary, plus 32KB of slop. */
	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) +
	    PAGE_SIZE * 8;

	/*
	 * hpcboot has loaded me with MMU disabled.
	 * So create kernel page tables and enable MMU.
	 */

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_freestart = physical_start
	    + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
	    + bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE;
	physical_freeend = physical_end;

	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
		physmem += bootconfig.dram[loop].pages;

	/* XXX handle UMA framebuffer memory */

	/* Use the first 256kB to allocate things */
	freemempos = KERNEL_BASE;
	memset((void *)KERNEL_BASE, 0, KERNEL_TEXT_BASE - KERNEL_BASE);

	/*
	 * Right. We have the bottom meg of memory mapped to 0x00000000
	 * so we can get at it. The kernel will occupy the start of it.
	 * After the kernel/args we allocate some of the fixed page tables
	 * we need to get the system going.
	 * We allocate one page directory and NUM_KERNEL_PTS page tables
	 * and store the physical addresses in the kernel_pt_table array.
	 * Must remember that neither the page L1 or L2 page tables are the
	 * same size as a page !
	 *
	 * Ok, the next bit of physical allocate may look complex but it is
	 * simple really. I have done it like this so that no memory gets
	 * wasted during the allocate of various pages and tables that are
	 * all different sizes.
	 * The start address will be page aligned.
	 * We allocate the kernel page directory on the first free 16KB
	 * boundary we find.
	 * We allocate the kernel page tables on the first 1KB boundary we
	 * find. We allocate at least 9 PT's (12 currently). This means
	 * that in the process we KNOW that we will encounter at least one
	 * 16KB boundary.
	 *
	 * Eventually if the top end of the memory gets used for process L1
	 * page tables the kernel L1 page table may be moved up there.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;
#define	alloc_pages(var, np)				\
	(var) = freemempos;				\
	freemempos += (np) * PAGE_SIZE;

	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		alloc_pages(kernel_pt_table[loop].pv_pa,
		    L2_TABLE_SIZE / PAGE_SIZE);
		kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa;
	}

	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa ||
	    (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	pt_size = round_page(freemempos) - physical_start;

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * XXX Actually, we only need virtual space and don't need
	 * XXX physical memory for sa110_cc_base and sa11x0_idle_mem.
	 */
	/*
	 * XXX totally stuffed hack to work round problems introduced
	 * in recent versions of the pmap code. Due to the calls used there
	 * we cannot allocate virtual memory during bootstrap.
	 */
	for (;;) {
		/* Burn pages until we hit a cache-clean-size boundary. */
		alloc_pages(sa1_cc_base, 1);
		if (!(sa1_cc_base & (CPU_SA110_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	alloc_pages(sa1_cache_clean_addr,
	    CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);

	sa1_cache_clean_addr = sa1_cc_base;
	sa1_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;

	alloc_pages(sa11x0_idle_mem, 1);

	/*
	 * Ok, we have allocated physical pages for the primary kernel
	 * page tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table\n");
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
#define SAIPIO_BASE		0xd0000000	/* XXX XXX */
	pmap_link_l2pt(l1pagetable, SAIPIO_BASE,
	    &kernel_pt_table[KERNEL_PT_IO]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel code/data */

	/*
	 * XXX there is no ELF header to find RO region.
	 * XXX What should we do?
	 */
#if 0
	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
		logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    physical_start, kernexec->a_text,
		    VM_PROT_READ, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable,
		    KERNEL_TEXT_BASE + logical, physical_start + logical,
		    kerneldatasize - kernexec->a_text,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	} else
#endif
		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    KERNEL_TEXT_BASE - KERNEL_BASE + physical_start,
		    kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map page tables */
	pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start, pt_size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map a page for entering idle mode */
	pmap_map_entry(l1pagetable, sa11x0_idle_mem, sa11x0_idle_mem,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, sa11x0_devmap);

	pmap_map_chunk(l1pagetable, sa1_cache_clean_addr, 0xe0000000,
	    CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);

	/*
	 * Now we have the real page tables in place so we can switch
	 * to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif /* PMAP_DEBUG */

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialization of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef DEBUG
	printf("%08x %08x %08x\n", data_abort_handler_address,
	    prefetch_abort_handler_address, undefined_handler_address);
#endif

	/* Initialize the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined\n");
#endif
	undefined_init();

	/* Set the page table address. */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table @%#lx...\n",
	    kernel_l1pt.pv_pa);
#endif
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef BOOT_DUMP
	dumppages((char *)0xc0000000, 16 * PAGE_SIZE);
	dumppages((char *)0xb0100000, 64); /* XXX */
#endif
	/* Enable MMU, I-cache, D-cache, write buffer. */
	cpufunc_control(0x337f, 0x107d);

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("freemempos=%08lx\n", freemempos);
	printf("MMU enabled. control=%08x\n", cpu_get_control());
#endif

	/* Load memory into UVM. */
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	for (loop = 0; loop < bootconfig.dramblocks; loop++) {
		paddr_t dblk_start = (paddr_t)bootconfig.dram[loop].address;
		paddr_t dblk_end = dblk_start
		    + (bootconfig.dram[loop].pages * PAGE_SIZE);

		/* Clip each DRAM block to the free region computed above. */
		if (dblk_start < physical_freestart)
			dblk_start = physical_freestart;
		if (dblk_end > physical_freeend)
			dblk_end = physical_freeend;

		uvm_page_physload(atop(dblk_start), atop(dblk_end),
		    atop(dblk_start), atop(dblk_end), VM_FREELIST_DEFAULT);
	}

	/* Boot strap pmap telling it where the kernel page table is */
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef BOOT_DUMP
	dumppages((char *)kernel_l1pt.pv_va, 16);
#endif

#ifdef DDB
	db_machine_init();
#endif
#if NKSYMS || defined(DDB) || defined(MODULAR)
	ksyms_addsyms_elf(symbolsize, ((int *)&end),
	    ((char *)&end) + symbolsize);
#endif

	printf("kernsize=0x%x", kerneldatasize);
	printf(" (including 0x%x symbols)\n", symbolsize);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif /* DDB */

	/* We return the new stack pointer address */
	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}