/*
 * Reset the emulated machine to a clean power-on-like state and reload the
 * guest program.  Ordering matters: the loader image and TLB must be set up
 * before load_prog(), and DRAM is (re)initialised last.
 */
void restart() {
	/* Perform some initialization to restart a program */
	memcpy(hwa_to_va(LOADER_START), loader, loader_len);	/* copy the boot loader into guest memory */
	tlb_init();

	/* General initialization */
	cpu.eip = LOADER_START;		/* execution resumes at the loader entry point */
	cpu.ebp = 0;
	cpu.esp = 0x8000000;		/* initial stack top */
	cpu.eflags = 0x2;		/* bit 1 of EFLAGS is architecturally always 1 */
	cpu.eax = 0;
	cpu.ecx = 0;
	cpu.edx = 0;
	cpu.cr0.paging = 0;		/* paging off */
	cpu.cr0.protect_enable = 0;	/* real mode (PE = 0) */
	cpu.INTR = 0;			/* no pending external interrupt */
	FLAG_CHG(IF, 0);		/* interrupts disabled until the guest enables them */

	/* Segment initialization */
	cpu.gdtr.limit = 0;
	cpu.gdtr.base = 0;

	/* NOTE(review): cr[0] presumably aliases the cr0 bit-field view above
	 * (raw vs. structured access to the same register); this re-clears the
	 * whole register after the field writes — confirm the aliasing. */
	cpu.cr[0] = 0; // Set PE to 0

	load_prog();
	// trigger = TRIGGER_INIT;
	init_dram();
}
void vmm_init (struct virtual_memory_manager *vmm, FILE * backing_store, FILE * vmm_log, FILE * tlb_log, FILE * pm_log) { // Initialise la mémoire physique. pm_init (&vmm->pm, backing_store, pm_log); tlb_init (&vmm->tlb, tlb_log); // Initialise les compteurs. vmm->page_fault_count = 0; vmm->page_found_count = 0; vmm->tlb_hit_count = 0; vmm->tlb_miss_count = 0; // Initialise le fichier de journal. vmm->log = vmm_log; vmm ->backstore = backing_store ; // Initialise la table de page. for (int i=0; i < NUM_PAGES; i++) { vmm->page_table[i].flags = 0x0; vmm->page_table[i].frame_number = -1; } }
/*
 * Entry point for a secondary (slave) CPU: signal the master, install the
 * trap handler, initialise the TLB, print a debug greeting, and then park
 * this CPU in an idle loop forever.
 */
void slave_main(void)
{
	/* Let the master CPU know this CPU has come up. */
	send(MSIM_MASTER_ID, 0, 4);

	set_trap_handler();
	tlb_init();

	kprintf("--DEBUG-- Hello world from CPU #%d!\n", cpuid());

	/* Nothing else to do on this CPU — spin forever. */
	while (1)
		;
}
/*
 * C runtime and kernel bootstrap.  Copies the initial values of .data from
 * the load image (located right after .rodata — presumably the ROM copy of
 * .data; confirm against the linker script), zeroes .bss, then brings up the
 * machine, trap handling, TLB and process table before entering main().
 */
void init(void)
{
	/* Copy .data initialisers from their load address into the runtime
	 * .data region. */
	memcpy(__data_begin__, __rodata_end__, __data_end__ - __data_begin__);
	/* .bss must start zeroed per the C runtime contract. */
	memset(__bss_begin__, 0, __bss_end__ - __bss_begin__);
	mach_init();
	set_trap_handler();
	tlb_init();
	pcb_init();
	main();
}
/*
 * Architecture-specific early setup, called from the generic kernel start-up
 * path.  Forces address-space randomization off, publishes the saved boot
 * command line to generic code, then initialises caches, TLB, boot-time
 * memory, paging and platform resources — in that order.
 */
void __init setup_arch(char **cmdline_p)
{
	randomize_va_space = 0;		/* force ASLR off on this architecture */
	*cmdline_p = command_line;	/* hand the boot command line to generic code */
	cpu_cache_init();
	tlb_init();
	bootmem_init();
	paging_init();
	resource_init();
}
/*
 * Platform bootstrap entry.  Clears .bss, records the kernel environment
 * (argc/argv/envp), then brings up the console UART, per-CPU data, CPU,
 * TLB and interrupt handling before bootstrapping the physical-memory
 * allocator (with the detected RAM size) and the threading subsystem.
 */
void platform_init(int argc, char **argv, char **envp, unsigned memsize)
{
  /* clear BSS section */
  bzero(__bss, __ebss - __bss);

  setup_kenv(argc, argv, envp);
  uart_init();			/* console up first so later stages can print */
  pcpu_init();
  cpu_init();
  tlb_init();
  intr_init();
  pm_bootstrap(memsize);	/* physical-memory allocator gets the RAM size */
  thread_bootstrap();

  kprintf("[startup] Switching to 'kernel-main' thread...\n");
}
/*
 * Put the emulated IA-32 CPU into its architectural reset state:
 * EDX carries the family/model/stepping signature, EFLAGS has only the
 * always-set reserved bit, CR0 starts with caching disabled, descriptor
 * tables get the 0xffff reset limits, all segment registers are reset,
 * and execution starts at the reset vector F000:FFF0 (base 0xFFFF0000).
 */
void
ia32_initreg(void)
{
	int i;

	CPU_STATSAVE.cpu_inst_default.seg_base = (UINT32)-1;

	/* Reset signature in EDX. */
	CPU_EDX = (CPU_FAMILY << 8) | (CPU_MODEL << 4) | CPU_STEPPING;
	CPU_EFLAG = 2;				/* bit 1 of EFLAGS is always 1 */
	CPU_CR0 = CPU_CR0_CD | CPU_CR0_NW;	/* caches disabled at reset */

#if defined(USE_FPU)
	/* Hardware FPU available: no emulation trap, extension type set. */
	CPU_CR0 &= ~CPU_CR0_EM;
	CPU_CR0 |= CPU_CR0_ET;
#else
	/* No FPU: trap x87 instructions for software emulation. */
	CPU_CR0 |= CPU_CR0_EM | CPU_CR0_NE;
	CPU_CR0 &= ~(CPU_CR0_MP | CPU_CR0_ET);
#endif

	CPU_MXCSR = 0x1f80;	/* SSE control/status reset value (all exceptions masked) */

	/* Descriptor-table registers at their architectural reset values. */
	CPU_GDTR_BASE = 0x0;
	CPU_GDTR_LIMIT = 0xffff;
	CPU_IDTR_BASE = 0x0;
	CPU_IDTR_LIMIT = 0xffff;
	CPU_LDTR_BASE = 0x0;
	CPU_LDTR_LIMIT = 0xffff;
	CPU_TR_BASE = 0x0;
	CPU_TR_LIMIT = 0xffff;

	CPU_STATSAVE.cpu_regs.dr[6] = 0xffff1ff0;	/* DR6 reset value */

	/* Reset every segment register's cached descriptor. */
	for (i = 0; i < CPU_SEGREG_NUM; ++i) {
		segdesc_init(i, 0, &CPU_STAT_SREG(i));
	}

	/* Reset vector: CS = F000 with base FFFF0000, IP = FFF0. */
	LOAD_SEGREG(CPU_CS_INDEX, 0xf000);
	CPU_STAT_CS_BASE = 0xffff0000;
	CPU_EIP = 0xfff0;

	CPU_ADRSMASK = 0x000fffff;	/* address wrap at 1MB (A20 masked) */

	tlb_init();
#if defined(USE_FPU)
	fpu_init();
#endif
}
/*
 * Put the emulated IA-32 CPU into its architectural reset state (variant
 * with family-dependent debug-register reset values).  EDX carries the
 * family/model/stepping signature, EFLAGS has only the always-set reserved
 * bit, descriptor-table limits get the 0xffff reset values, and execution
 * starts at the reset vector F000:FFF0.
 */
void
ia32_initreg(void)
{
	int i;

	CPU_STATSAVE.cpu_inst_default.seg_base = (UINT32)-1;

	/* Reset signature in EDX. */
	CPU_EDX = (CPU_FAMILY << 8) | (CPU_MODEL << 4) | CPU_STEPPING;
	CPU_EFLAG = 2;		/* bit 1 of EFLAGS is always 1 */
	CPU_CR0 = CPU_CR0_CD | CPU_CR0_NW | CPU_CR0_ET;

#if defined(USE_FPU)
	/* NOTE(review): with USE_FPU defined this SETS EM (FPU-emulation trap)
	 * and NE, while the no-FPU branch only sets ET.  EM=1 normally means
	 * "no hardware FPU" — verify these two branches are not swapped. */
	CPU_CR0 |= CPU_CR0_EM | CPU_CR0_NE;
	CPU_CR0 &= ~CPU_CR0_MP;
#else
	CPU_CR0 |= CPU_CR0_ET;
#endif

	CPU_MXCSR = 0x1f80;	/* SSE control/status reset value (all exceptions masked) */

	CPU_GDTR_LIMIT = 0xffff;
	CPU_IDTR_LIMIT = 0xffff;

	/* DR6/DR7 reset values differ by CPU family. */
#if CPU_FAMILY == 4
	CPU_STATSAVE.cpu_regs.dr[6] = 0xffff1ff0;
#elif CPU_FAMILY >= 5
	CPU_STATSAVE.cpu_regs.dr[6] = 0xffff0ff0;
	CPU_STATSAVE.cpu_regs.dr[7] = 0x00000400;
#endif

	/* Reset every segment register's cached descriptor. */
	for (i = 0; i < CPU_SEGREG_NUM; ++i) {
		CPU_STAT_SREG_INIT(i);
	}
	CPU_LDTR_LIMIT = 0xffff;
	CPU_TR_LIMIT = 0xffff;

	/* Reset vector: CS = F000, IP = FFF0. */
	CPU_SET_SEGREG(CPU_CS_INDEX, 0xf000);
	CPU_EIP = 0xfff0;

	CPU_ADRSMASK = 0x000fffff;	/* address wrap at 1MB (A20 masked) */

	tlb_init();
#if defined(USE_FPU)
	fpu_init();
#endif
}
int main(int (*openfirm)(void *)) { char bootpath[64]; struct devsw **dp; phandle_t chosenh; /* * Tell the OpenFirmware functions where they find the ofw gate. */ OF_init(openfirm); archsw.arch_getdev = ofw_getdev; archsw.arch_copyin = sparc64_copyin; archsw.arch_copyout = ofw_copyout; archsw.arch_readin = sparc64_readin; archsw.arch_autoload = sparc64_autoload; init_heap(); setheap((void *)heapva, (void *)(heapva + HEAPSZ)); /* * Probe for a console. */ cons_probe(); tlb_init(); bcache_init(32, 512); /* * Initialize devices. */ for (dp = devsw; *dp != 0; dp++) { if ((*dp)->dv_init != 0) (*dp)->dv_init(); } /* * Set up the current device. */ chosenh = OF_finddevice("/chosen"); OF_getprop(chosenh, "bootpath", bootpath, sizeof(bootpath)); /* * Sun compatible bootable CD-ROMs have a disk label placed * before the cd9660 data, with the actual filesystem being * in the first partition, while the other partitions contain * pseudo disk labels with embedded boot blocks for different * architectures, which may be followed by UFS filesystems. * The firmware will set the boot path to the partition it * boots from ('f' in the sun4u case), but we want the kernel * to be loaded from the cd9660 fs ('a'), so the boot path * needs to be altered. */ if (bootpath[strlen(bootpath) - 2] == ':' && bootpath[strlen(bootpath) - 1] == 'f') { bootpath[strlen(bootpath) - 1] = 'a'; printf("Boot path set to %s\n", bootpath); } env_setenv("currdev", EV_VOLATILE, bootpath, ofw_setcurrdev, env_nounset); env_setenv("loaddev", EV_VOLATILE, bootpath, env_noset, env_nounset); printf("\n"); printf("%s, Revision %s\n", bootprog_name, bootprog_rev); printf("(%s, %s)\n", bootprog_maker, bootprog_date); printf("bootpath=\"%s\"\n", bootpath); /* Give control to the machine independent loader code. */ interact(); return 1; }
/*
 * The kernel is already mapped with linear mapping at kseg_c so there's no
 * need to map it with a page table. However, head.S also temporarily mapped it
 * at kseg_4 thus the ksegs are set up again. Also clear the TLB and do various
 * other paging stuff.
 */
void __init cris_mmu_init(void)
{
	unsigned long mmu_config;
	unsigned long mmu_kbase_hi;
	unsigned long mmu_kbase_lo;
	unsigned short mmu_page_id;

	/*
	 * Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
	{
		/* Tell the hardware where to find this CPU's pgd pointer, for
		 * both supplement register banks 1 and 2. */
		pgd_t **pgd;
		pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
		SUPP_BANK_SEL(1);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
		SUPP_BANK_SEL(2);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}
#endif

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/* Enable exceptions and initialize the kernel segments.
	 * "linear" segments bypass the page tables; "page" segments are
	 * translated through them. */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on)     |
		       REG_STATE(mmu, rw_mm_cfg, acc, on)    |
		       REG_STATE(mmu, rw_mm_cfg, ex, on)     |
		       REG_STATE(mmu, rw_mm_cfg, inv, on)    |
		       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAX_VCS_SIM
		       REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
#else
		       REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
		       REG_STATE(mmu, rw_mm_cfg, seg_9, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_8, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_7, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_6, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_5, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_4, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_3, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_2, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_1, page) |
		       REG_STATE(mmu, rw_mm_cfg, seg_0, page));

	/* Physical base (top nibble) for each linear kseg, high half. */
	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAX_VCS_SIM
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

	/* Low half: all page-mapped, so bases are zero. */
	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);	/* kernel page id 0 */

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	SPEC_REG_WR(SPEC_REG_PID, 0);

	/*
	 * The MMU has been enabled ever since head.S but just to make it
	 * totally obvious enable it here as well.
	 */
	SUPP_BANK_SEL(BANK_GC);
	SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}
/*
 * Bring the whole simulator up: memory first, then the OS/process module,
 * and finally the TLB — each subsystem's own init routine does the work.
 */
void
sim_init(void)
{
	mem_init();
	proc_init();
	tlb_init();
}
/*
 * Set up paging and the MMU (CRIS).  Clears the kernel page directory,
 * points the per-CPU current pgd at init_mm's, initialises the TLB, then
 * programs the hardware kseg mapping registers (two layouts, selected by
 * CONFIG_CRIS_LOW_MAP), enables MMU exceptions and the MMU itself, and
 * finally sets up the zero page and the zone layout for the page allocator.
 */
void __init paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");

	/* Clear out the kernel's page directory. */
	for(i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* Make sure the current pgd points at something sane, even though it
	 * is probably not used until the next switch_mm. */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

	tlb_init();

#ifdef CONFIG_CRIS_LOW_MAP
	/* "Low map" layout — see the other kseg programming below for the
	 * regular layout; "seg" entries are direct-mapped, "page" entries go
	 * through the page tables. */
#define CACHED_BOOTROM (KSEG_F | 0x08000000UL)
	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg ) |
			IO_STATE(R_MMU_KSEG, seg_e, page ) |
			IO_STATE(R_MMU_KSEG, seg_d, page ) |
			IO_STATE(R_MMU_KSEG, seg_c, page ) |
			IO_STATE(R_MMU_KSEG, seg_b, seg ) |
#ifdef CONFIG_JULIETTE
			IO_STATE(R_MMU_KSEG, seg_a, seg ) |
#else
			IO_STATE(R_MMU_KSEG, seg_a, page ) |
#endif
			IO_STATE(R_MMU_KSEG, seg_9, seg ) |
			IO_STATE(R_MMU_KSEG, seg_8, seg ) |
			IO_STATE(R_MMU_KSEG, seg_7, page ) |
			IO_STATE(R_MMU_KSEG, seg_6, seg ) |
			IO_STATE(R_MMU_KSEG, seg_5, seg ) |
			IO_STATE(R_MMU_KSEG, seg_4, page ) |
			IO_STATE(R_MMU_KSEG, seg_3, page ) |
			IO_STATE(R_MMU_KSEG, seg_2, page ) |
			IO_STATE(R_MMU_KSEG, seg_1, page ) |
			IO_STATE(R_MMU_KSEG, seg_0, page ) );

	/* Physical bases for the direct-mapped segments. */
	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
#ifdef CONFIG_JULIETTE
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
#else
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
#endif
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );

	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#else
	/* Regular (non-LOW_MAP) kseg layout. */
#define CACHED_BOOTROM (KSEG_A | 0x08000000UL)
	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg ) |
			IO_STATE(R_MMU_KSEG, seg_e, seg ) |
			IO_STATE(R_MMU_KSEG, seg_d, page ) |
			IO_STATE(R_MMU_KSEG, seg_c, seg ) |
			IO_STATE(R_MMU_KSEG, seg_b, seg ) |
			IO_STATE(R_MMU_KSEG, seg_a, seg ) |
			IO_STATE(R_MMU_KSEG, seg_9, page ) |
			IO_STATE(R_MMU_KSEG, seg_8, page ) |
			IO_STATE(R_MMU_KSEG, seg_7, page ) |
			IO_STATE(R_MMU_KSEG, seg_6, page ) |
			IO_STATE(R_MMU_KSEG, seg_5, page ) |
			IO_STATE(R_MMU_KSEG, seg_4, page ) |
			IO_STATE(R_MMU_KSEG, seg_3, page ) |
			IO_STATE(R_MMU_KSEG, seg_2, page ) |
			IO_STATE(R_MMU_KSEG, seg_1, page ) |
			IO_STATE(R_MMU_KSEG, seg_0, page ) );

	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );

	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#endif

	*R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );	/* kernel page id 0 */

	/* Enable invalid/access/write-exceed exceptions, then the MMU. */
	*R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
			IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
			IO_STATE(R_MMU_CTRL, we_excp, enable ) );
	*R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);

	/* Allocate and zero the shared zero page. */
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* All memory goes in zone 0; the remaining zones are empty. */
	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
}
/*
 * Set up paging and the MMU (Etrax-100/CRIS, older kernel variant).  Clears
 * the kernel page directory, points current_pgd at init_mm's, initialises
 * the TLB, programs the hardware kseg mapping registers (layout depends on
 * CONFIG_CRIS_LOW_MAP, i.e. the buggy LX v1 vs. the corrected LX v2), enables
 * MMU exceptions and the MMU, and sets up the zero page and memory zones.
 */
void __init paging_init(void)
{
	int i;
	unsigned long zones_size[MAX_NR_ZONES];

	printk("Setting up paging and the MMU.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */
	for(i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 *  switch_mm) */
	current_pgd = init_mm.pgd;

	/* initialise the TLB (tlb.c) */
	tlb_init();

	/* see README.mm for details on the KSEG setup */

#ifdef CONFIG_CRIS_LOW_MAP
	/* Etrax-100 LX version 1 has a bug so that we cannot map anything
	 * across the 0x80000000 boundary, so we need to shrink the user-virtual
	 * area to 0x50000000 instead of 0xb0000000 and map things slightly
	 * different. The unused areas are marked as paged so that we can catch
	 * freak kernel accesses there.
	 *
	 * The ARTPEC chip is mapped at 0xa so we pass that segment straight
	 * through. We cannot vremap it because the vmalloc area is below 0x8
	 * and Juliette needs an uncached area above 0x8.
	 *
	 * Same thing with 0xc and 0x9, which is memory-mapped I/O on some boards.
	 * We map them straight over in LOW_MAP, but use vremap in LX version 2.
	 */

#define CACHED_BOOTROM (KSEG_F | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) | /* bootrom */
			IO_STATE(R_MMU_KSEG, seg_e, page ) |
			IO_STATE(R_MMU_KSEG, seg_d, page ) |
			IO_STATE(R_MMU_KSEG, seg_c, page ) |
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) | /* kernel reg area */
#ifdef CONFIG_JULIETTE
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) | /* ARTPEC etc. */
#else
			IO_STATE(R_MMU_KSEG, seg_a, page ) |
#endif
			IO_STATE(R_MMU_KSEG, seg_9, seg  ) | /* LED's on some boards */
			IO_STATE(R_MMU_KSEG, seg_8, seg  ) | /* CSE0/1, flash and I/O */
			IO_STATE(R_MMU_KSEG, seg_7, page ) | /* kernel vmalloc area */
			IO_STATE(R_MMU_KSEG, seg_6, seg  ) | /* kernel DRAM area */
			IO_STATE(R_MMU_KSEG, seg_5, seg  ) | /* cached flash */
			IO_STATE(R_MMU_KSEG, seg_4, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_3, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_2, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_1, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */

	/* Physical bases for the direct-mapped segments above. */
	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
#ifdef CONFIG_JULIETTE
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) |
#else
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
#endif
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x9 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x8 ) );

	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#else
	/* This code is for the corrected Etrax-100 LX version 2... */

#define CACHED_BOOTROM (KSEG_A | 0x08000000UL)

	*R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) | /* cached flash */
			IO_STATE(R_MMU_KSEG, seg_e, seg  ) | /* uncached flash */
			IO_STATE(R_MMU_KSEG, seg_d, page ) | /* vmalloc area */
			IO_STATE(R_MMU_KSEG, seg_c, seg  ) | /* kernel area */
			IO_STATE(R_MMU_KSEG, seg_b, seg  ) | /* kernel reg area */
			IO_STATE(R_MMU_KSEG, seg_a, seg  ) | /* bootrom */
			IO_STATE(R_MMU_KSEG, seg_9, page ) | /* user area */
			IO_STATE(R_MMU_KSEG, seg_8, page ) |
			IO_STATE(R_MMU_KSEG, seg_7, page ) |
			IO_STATE(R_MMU_KSEG, seg_6, page ) |
			IO_STATE(R_MMU_KSEG, seg_5, page ) |
			IO_STATE(R_MMU_KSEG, seg_4, page ) |
			IO_STATE(R_MMU_KSEG, seg_3, page ) |
			IO_STATE(R_MMU_KSEG, seg_2, page ) |
			IO_STATE(R_MMU_KSEG, seg_1, page ) |
			IO_STATE(R_MMU_KSEG, seg_0, page ) );

	*R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_a, 0x3 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );

	*R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
			    IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
#endif

	*R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );	/* kernel page id 0 */

	/* The MMU has been enabled ever since head.S but just to make
	 * it totally obvious we do it here as well.
	 */
	*R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
			IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
			IO_STATE(R_MMU_CTRL, we_excp, enable ) );
	*R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* All pages are DMA'able in Etrax, so put all in the DMA'able zone */
	zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;

	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

	/* Use free_area_init_node instead of free_area_init, because the former
	 * is designed for systems where the DRAM starts at an address substantially
	 * higher than 0, like us (we start at PAGE_OFFSET). This saves space in the
	 * mem_map page array. */
	free_area_init_node(0, &contig_page_data, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);

	mem_map = contig_page_data.node_mem_map;
}