/*
 * Initial entry point on startup. This gets called before main()
 * is entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes:
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel.
 *   Relocating the kernel to the bottom of physical memory.
 */
u_int
initarm(void *arg)
{
	int loop;
	int loop1;
	u_int kerneldatasize, symbolsize;
	vaddr_t l1pagetable;
	vaddr_t freemempos;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	Elf_Shdr *sh;
#endif

	cpu_reset_address = ixp12x0_reset;

	/*
	 * Since we map v0xf0000000 == p0x90000000, it's possible for
	 * us to initialize the console now.
	 */
	consinit();

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (IXM1200) booting ...\n");
#endif

	/*
	 * Heads up ... Set up the CPU / MMU / TLB functions.
	 */
	if (set_cpufuncs())
		panic("CPU not recognized!");

	/* XXX overwrite bootconfig to hardcoded values */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages = 0x10000000 / PAGE_SIZE;	/* SDRAM 256MB */
	bootconfig.dramblocks = 1;

	kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;
	symbolsize = 0;

#ifdef PMAP_DEBUG
	pmap_debug(-1);
#endif

#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (!memcmp(&end, "\177ELF", 4)) {
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for (; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

#ifdef VERBOSE_INIT_ARM
	printf("kernsize=0x%x\n", kerneldatasize);
#endif
	kerneldatasize += symbolsize;
	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) +
	    PAGE_SIZE * 8;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start +
	    (bootconfig.dram[0].pages * PAGE_SIZE);
	physical_freestart = physical_start +
	    (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_freeend = physical_end;
	physmem = (physical_end - physical_start) / PAGE_SIZE;

	freemempos = 0xc0000000;

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("CP15 Register1 = 0x%08x\n", cpu_get_control());
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	    physical_freestart, free_pages, free_pages);
	printf("physical_start = 0x%08lx, physical_end = 0x%08lx\n",
	    physical_start, physical_end);
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)						\
	alloc_pages((var).pv_pa, (np));					\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define	alloc_pages(var, np)						\
	(var) = freemempos;						\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));			\
	freemempos += (np) * PAGE_SIZE;

	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) &
		    (L1_TABLE_SIZE - 1)) == 0 && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}
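	/*
	 * A rough illustration of the loop above, assuming 4kB pages
	 * and a 16kB L1 table: L2 tables are carved off one at a time
	 * until a 16kB-aligned slot comes up, at which point the L1 is
	 * allocated there.  The loop runs NUM_KERNEL_PTS + 1 times so
	 * that the one oversized L1 allocation still leaves room for
	 * NUM_KERNEL_PTS L2 tables.
	 */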
#ifdef DIAGNOSTIC
	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa ||
	    (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0)
		panic("initarm: Failed to align the kernel page directory");
#endif

	/*
	 * Allocate a page for the system page mapped to V0x00000000.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

#ifdef CPU_IXP12X0
	/*
	 * XXX totally stuffed hack to work round problems introduced
	 * in recent versions of the pmap code. Due to the calls used there
	 * we cannot allocate virtual memory during bootstrap.
	 */
	for (;;) {
		alloc_pages(ixp12x0_cc_base, 1);
		if (!(ixp12x0_cc_base & (CPU_IXP12X0_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	{
		vaddr_t dummy;
		alloc_pages(dummy,
		    CPU_IXP12X0_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);
	}
	ixp12x0_cache_clean_addr = ixp12x0_cc_base;
	ixp12x0_cache_clean_size = CPU_IXP12X0_CACHE_CLEAN_SIZE / 2;
#endif /* CPU_IXP12X0 */
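	/*
	 * How the hack above works, roughly: single pages are burned
	 * until the allocation cursor lands on a
	 * CPU_IXP12X0_CACHE_CLEAN_SIZE boundary, then the rest of the
	 * region is taken in one piece.  Only half of it is advertised
	 * in ixp12x0_cache_clean_size, presumably so the cache-clean
	 * code can alternate between the two halves.
	 */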
#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

	pmap_link_l2pt(l1pagetable, IXP12X0_IO_VBASE,
	    &kernel_pt_table[KERNEL_PT_IO]);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

#if XXX
	/* Now we fill in the L2 pagetable for the kernel code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t)etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t)_end - KERNEL_TEXT_BASE;
		u_int logical;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;

		logical = 0x00200000;	/* offset of kernel in RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}
#else
	{
		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE, KERNEL_TEXT_BASE,
		    kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}
#endif

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#ifdef VERBOSE_INIT_ARM
	printf("systempage (vector page): p0x%08lx v0x%08lx\n",
	    systempage.pv_pa, vector_page);
#endif

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, ixm1200_devmap);

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

	/*
	 * Map the Dcache Flush page.
	 * Hw Ref Manual 3.2.4.5 Software Dcache Flush
	 */
	pmap_map_chunk(l1pagetable, ixp12x0_cache_clean_addr, 0xe0000000,
	    CPU_IXP12X0_CACHE_CLEAN_SIZE, VM_PROT_READ, PTE_CACHE);

	/*
	 * Now we have the real page tables in place so we can switch
	 * to them.  Once this is done we will be running with the REAL
	 * kernel page tables.
	 */

	/* Switch tables */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Moved here from cpu_startup() as data_abort_handler() references
	 * this during init.
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
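	/*
	 * Note on ARM_VECTORS_HIGH: with the V bit set in the CP15
	 * control register the CPU fetches its exception vectors from
	 * 0xffff0000 rather than 0x00000000, which is why the system
	 * page was linked near the top of the address space above
	 * (a general ARM note, not board-specific).
	 */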
	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif /* PMAP_DEBUG */

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef VERBOSE_INIT_ARM
	printf("\ndata_abort_handler_address = %08x\n",
	    data_abort_handler_address);
	printf("prefetch_abort_handler_address = %08x\n",
	    prefetch_abort_handler_address);
	printf("undefined_handler_address = %08x\n",
	    undefined_handler_address);
#endif

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	ixp12x0_intr_init();
#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	    physical_freestart, free_pages, free_pages);
	printf("freemempos=%08lx\n", freemempos);
	printf("switching to new L1 page table @%#lx... \n",
	    kernel_l1pt.pv_pa);
#endif

	consinit();
#ifdef VERBOSE_INIT_ARM
	printf("consinit \n");
#endif

	ixdp_ixp12x0_cc_setup();

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#if NKSYMS || defined(DDB) || defined(MODULAR)
	ksyms_addsyms_elf(symbolsize, ((int *)&end),
	    ((char *)&end) + symbolsize);
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
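/*
 * A sketch of how the returned value is consumed, assuming the usual
 * arm32 start-up glue (illustrative, not copied from this port's
 * locore):
 *
 *	bl	initarm		@ returns the new SVC stack pointer in r0
 *	mov	sp, r0
 *	bl	main
 *
 * so lwp0 starts life on the kernelstack allocated above.
 */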
/*
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes:
 *   Initializing the physical console so characters can be printed.
 *   Setting up page tables for the kernel.
 */
u_int
init_sa11x0(int argc, char **argv, struct bootinfo *bi)
{
	u_int kerneldatasize, symbolsize;
	u_int l1pagetable;
	vaddr_t freemempos;
	vsize_t pt_size;
	int loop;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	Elf_Shdr *sh;
#endif

#ifdef DEBUG_BEFOREMMU
	/*
	 * At this point, we cannot call real consinit().
	 * Just call a faked up version of consinit(), which does the thing
	 * with MMU disabled.
	 */
	fakecninit();
#endif

	/*
	 * XXX for now, overwrite bootconfig to hardcoded values.
	 * XXX kill bootconfig and directly call uvm_physload
	 */
	bootconfig.dram[0].address = 0xc0000000;
	bootconfig.dram[0].pages = DRAM_PAGES;
	bootconfig.dramblocks = 1;

	kerneldatasize = (uint32_t)&end - (uint32_t)KERNEL_TEXT_BASE;
	symbolsize = 0;

#if NKSYMS || defined(DDB) || defined(MODULAR)
	if (!memcmp(&end, "\177ELF", 4)) {
		sh = (Elf_Shdr *)((char *)&end + ((Elf_Ehdr *)&end)->e_shoff);
		loop = ((Elf_Ehdr *)&end)->e_shnum;
		for (; loop; loop--, sh++)
			if (sh->sh_offset > 0 &&
			    (sh->sh_offset + sh->sh_size) > symbolsize)
				symbolsize = sh->sh_offset + sh->sh_size;
	}
#endif

	printf("kernsize=0x%x\n", kerneldatasize);
	kerneldatasize += symbolsize;
	kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) +
	    PAGE_SIZE * 8;
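	/*
	 * Worked example of the rounding above, assuming 4kB pages:
	 * PAGE_SIZE * 4 - 1 is 0x3fff and PAGE_SIZE * 8 is 0x8000, so
	 * kerneldatasize = 0x123456 becomes (0x123455 & ~0x3fff) +
	 * 0x8000 = 0x128000, i.e. a 16kB-aligned size with between
	 * 16kB and 32kB of slack added.
	 */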
	/*
	 * hpcboot has loaded me with MMU disabled.
	 * So create kernel page tables and enable MMU.
	 */

	/*
	 * Set up the variables that define the availability of
	 * physical memory.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_freestart = physical_start +
	    (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
	physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address +
	    bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE;
	physical_freeend = physical_end;

	for (loop = 0; loop < bootconfig.dramblocks; ++loop)
		physmem += bootconfig.dram[loop].pages;

	/* XXX handle UMA framebuffer memory */

	/* Use the first 256kB to allocate things */
	freemempos = KERNEL_BASE;
	memset((void *)KERNEL_BASE, 0, KERNEL_TEXT_BASE - KERNEL_BASE);

	/*
	 * Right. We have the bottom meg of memory mapped to 0x00000000
	 * so we can get at it. The kernel will occupy the start of it.
	 * After the kernel/args we allocate some of the fixed page tables
	 * we need to get the system going.
	 * We allocate one page directory and NUM_KERNEL_PTS page tables
	 * and store the physical addresses in the kernel_pt_table array.
	 * Remember that neither the L1 page table nor the L2 page tables
	 * are the same size as a page!
	 *
	 * OK, the next bit of physical allocation may look complex but
	 * it is simple really. I have done it like this so that no memory
	 * gets wasted during the allocation of various pages and tables
	 * that are all different sizes.
	 * The start address will be page aligned.
	 * We allocate the kernel page directory on the first free 16KB
	 * boundary we find.
	 * We allocate the kernel page tables on the first 1KB boundary we
	 * find. We allocate at least 9 PT's (12 currently). This means
	 * that in the process we KNOW that we will encounter at least one
	 * 16KB boundary.
	 *
	 * Eventually if the top end of the memory gets used for process L1
	 * page tables the kernel L1 page table may be moved up there.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)						\
	alloc_pages((var).pv_pa, (np));					\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define	alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += (np) * PAGE_SIZE;
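	/*
	 * Note that, unlike the initarm() variant above, this
	 * alloc_pages() does not zero what it hands out: the whole
	 * scratch region below KERNEL_TEXT_BASE was cleared by the
	 * memset() a few lines up, so freshly allocated page tables
	 * are already zero-filled.
	 */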
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		alloc_pages(kernel_pt_table[loop].pv_pa,
		    L2_TABLE_SIZE / PAGE_SIZE);
		kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa;
	}

	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa ||
	    (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	valloc_pages(systempage, 1);

	pt_size = round_page(freemempos) - physical_start;

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * XXX Actually, we only need virtual space and don't need
	 * XXX physical memory for sa110_cc_base and sa11x0_idle_mem.
	 */
	/*
	 * XXX totally stuffed hack to work round problems introduced
	 * in recent versions of the pmap code. Due to the calls used there
	 * we cannot allocate virtual memory during bootstrap.
	 */
	for (;;) {
		alloc_pages(sa1_cc_base, 1);
		if (!(sa1_cc_base & (CPU_SA110_CACHE_CLEAN_SIZE - 1)))
			break;
	}
	alloc_pages(sa1_cache_clean_addr,
	    CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);
	sa1_cache_clean_addr = sa1_cc_base;
	sa1_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;

	alloc_pages(sa11x0_idle_mem, 1);

	/*
	 * Ok, we have allocated physical pages for the primary kernel
	 * page tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table\n");
#endif

	/*
	 * Now we start construction of the L1 page table.
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 page tables into the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
#define	SAIPIO_BASE	0xd0000000	/* XXX XXX */
	pmap_link_l2pt(l1pagetable, SAIPIO_BASE,
	    &kernel_pt_table[KERNEL_PT_IO]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel code/data */

	/*
	 * XXX there is no ELF header to find RO region.
	 * XXX What should we do?
	 */
#if 0
	if (N_GETMAGIC(kernexec[0]) == ZMAGIC) {
		logical = pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    physical_start, kernexec->a_text,
		    VM_PROT_READ, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable,
		    KERNEL_TEXT_BASE + logical, physical_start + logical,
		    kerneldatasize - kernexec->a_text,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	} else
#endif
		pmap_map_chunk(l1pagetable, KERNEL_TEXT_BASE,
		    KERNEL_TEXT_BASE - KERNEL_BASE + physical_start,
		    kerneldatasize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map page tables */
	pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start, pt_size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map a page for entering idle mode */
	pmap_map_entry(l1pagetable, sa11x0_idle_mem, sa11x0_idle_mem,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, sa11x0_devmap);

	pmap_map_chunk(l1pagetable, sa1_cache_clean_addr, 0xe0000000,
	    CPU_SA110_CACHE_CLEAN_SIZE, VM_PROT_READ|VM_PROT_WRITE,
	    PTE_CACHE);

	/*
	 * Now we have the real page tables in place so we can switch
	 * to them.  Once this is done we will be running with the REAL
	 * kernel page tables.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

#ifdef PMAP_DEBUG
	if (pmap_debug_level >= 0)
		printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
		    kernelstack.pv_pa);
#endif /* PMAP_DEBUG */

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler. Until then we will use a handler that just panics but
	 * tells us why.
	 * Initialization of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
#ifdef DEBUG
	printf("%08x %08x %08x\n", data_abort_handler_address,
	    prefetch_abort_handler_address, undefined_handler_address);
#endif

	/* Initialize the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined\n");
#endif
	undefined_init();

	/* Set the page table address. */
#ifdef VERBOSE_INIT_ARM
	printf("switching to new L1 page table @%#lx...\n",
	    kernel_l1pt.pv_pa);
#endif
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef BOOT_DUMP
	dumppages((char *)0xc0000000, 16 * PAGE_SIZE);
	dumppages((char *)0xb0100000, 64);	/* XXX */
#endif
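	/*
	 * A rough decode of the cpufunc_control(0x337f, 0x107d) call
	 * below, assuming the ARMv4 CP15 control register layout:
	 * within the 0x337f mask it sets M (bit 0, MMU), C (bit 2,
	 * D-cache), W (bit 3, write buffer) and I (bit 12, I-cache),
	 * plus the should-be-one bits 4-6, and leaves V (bit 13)
	 * clear so the vectors stay at ARM_VECTORS_LOW.
	 */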
	/* Enable MMU, I-cache, D-cache, write buffer. */
	cpufunc_control(0x337f, 0x107d);

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	consinit();
#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("freemempos=%08lx\n", freemempos);
	printf("MMU enabled. control=%08x\n", cpu_get_control());
#endif

	/* Load memory into UVM. */
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	for (loop = 0; loop < bootconfig.dramblocks; loop++) {
		paddr_t dblk_start = (paddr_t)bootconfig.dram[loop].address;
		paddr_t dblk_end = dblk_start +
		    (bootconfig.dram[loop].pages * PAGE_SIZE);

		if (dblk_start < physical_freestart)
			dblk_start = physical_freestart;
		if (dblk_end > physical_freeend)
			dblk_end = physical_freeend;

		uvm_page_physload(atop(dblk_start), atop(dblk_end),
		    atop(dblk_start), atop(dblk_end), VM_FREELIST_DEFAULT);
	}

	/* Boot strap pmap telling it where the kernel page table is */
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef BOOT_DUMP
	dumppages((char *)kernel_l1pt.pv_va, 16);
#endif

#ifdef DDB
	db_machine_init();
#endif
#if NKSYMS || defined(DDB) || defined(MODULAR)
	ksyms_addsyms_elf(symbolsize, ((int *)&end),
	    ((char *)&end) + symbolsize);
#endif

	printf("kernsize=0x%x", kerneldatasize);
	printf(" (including 0x%x symbols)\n", symbolsize);

#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif /* DDB */

	/* We return the new stack pointer address */
	return (kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}