/* Flush all TLB entries. */ void __flush_tlb_all(void) { int i; int mmu; unsigned long flags; unsigned long mmu_tlb_hi; unsigned long mmu_tlb_sel; /* * Mask with 0xf so similar TLB entries aren't written in the same 4-way * entry group. */ local_irq_save(flags); for (mmu = 1; mmu <= 2; mmu++) { SUPP_BANK_SEL(mmu); /* Select the MMU */ for (i = 0; i < NUM_TLB_ENTRIES; i++) { /* Store invalid entry */ mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i); mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID) | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf)); SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel); SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi); SUPP_REG_WR(RW_MM_TLB_LO, 0); } } local_irq_restore(flags); }
/*
 * Prepare the boot CPU for SMP operation.
 *
 * The per-cpu area now holds current_pgd, so both MMUs must be told the
 * new location of the PGD pointer; then CPU 0 is marked online/present.
 */
void __devinit smp_prepare_boot_cpu(void)
{
	pgd_t **pgd = (pgd_t **)&per_cpu(current_pgd, smp_processor_id());

	/* Update the PGD-pointer location in the instruction MMU... */
	SUPP_BANK_SEL(1);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	/* ...and in the data MMU. */
	SUPP_BANK_SEL(2);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

	/* The boot CPU (cpu 0) is online and physically present. */
	cpu_set(0, cpu_online_map);
	cpu_set(0, phys_cpu_present_map);
}
/*
 * The kernel is already mapped with linear mapping at kseg_c so there's no
 * need to map it with a page table. However, head.S also temporarily mapped it
 * at kseg_4 thus the ksegs are set up again. Also clear the TLB and do various
 * other paging stuff.
 */
void __init cris_mmu_init(void)
{
	unsigned long mmu_config;
	unsigned long mmu_kbase_hi;
	unsigned long mmu_kbase_lo;
	unsigned short mmu_page_id;

	/*
	 * Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
	{
		/* Tell both MMUs where the per-cpu PGD pointer lives
		 * (same sequence as smp_prepare_boot_cpu). */
		pgd_t **pgd;
		pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
		SUPP_BANK_SEL(1);	/* instruction MMU */
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
		SUPP_BANK_SEL(2);	/* data MMU */
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}
#endif

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/*
	 * Enable exceptions and initialize the kernel segments.
	 * Segments e/f and b/c (and a in the VCS simulator) are linearly
	 * mapped; all others go through page tables.
	 */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on)        |
		       REG_STATE(mmu, rw_mm_cfg, acc, on)       |
		       REG_STATE(mmu, rw_mm_cfg, ex, on)        |
		       REG_STATE(mmu, rw_mm_cfg, inv, on)       |
		       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAX_VCS_SIM
		       REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
#else
		       REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
		       REG_STATE(mmu, rw_mm_cfg, seg_9, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_8, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_7, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_6, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_5, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_4, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_3, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_2, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_1, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_0, page));

	/* Physical base (top nibble) for each linearly mapped kseg, 8..f. */
	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAX_VCS_SIM
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

	/* Segments 0..7 are page-mapped, so their bases are all zero. */
	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	/* The kernel runs with page id 0. */
	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Clear the current process id in the PID special register. */
	SPEC_REG_WR(SPEC_REG_PID, 0);

	/*
	 * The MMU has been enabled ever since head.S but just to make it
	 * totally obvious enable it here as well.
	 */
	SUPP_BANK_SEL(BANK_GC);
	SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}
/*
 * Write one hardware breakpoint register (BP bank) for process @pid.
 *
 * @pid:   owner candidate for the breakpoint hardware; the first writer
 *         claims bp_owner and later writers from other pids are rejected.
 * @regno: ptrace register number; (regno - PT_BP) selects the BP register.
 * @data:  value to write.
 *
 * Returns 0 on success, -1 when the write is rejected. A rejected write
 * of the value 0 is reported as success so POKEUSR/SETREGS don't fail
 * unnecessarily on "clearing" an inaccessible register.
 */
static int put_debugreg(long pid, unsigned int regno, long data)
{
	int ret = 0;
	register int old_srs;

#ifdef CONFIG_ETRAX_KGDB
	/* NOTE(review): with KGDB configured the BP registers are presumably
	   reserved for the debugger — every write is ignored, but a zero
	   write is reported as ok (we don't want POKEUSR/SETREGS failing
	   unnecessarily). */
	return (data == 0) ? ret : -1;
#endif

	/* Simple owner management: first pid to write claims the hardware. */
	if (!bp_owner)
		bp_owner = pid;
	else if (bp_owner != pid) {
		/* Another pid owns the BP registers. Ignore the write, but
		   pretend it was ok if value is 0 (we don't want
		   POKEUSR/SETREGS failing unnecessarily). */
		return (data == 0) ? ret : -1;
	}

	/* Remember old SRS so the bank selection can be undone below. */
	SPEC_REG_RD(SPEC_REG_SRS, old_srs);
	/* Switch to BP bank. */
	SUPP_BANK_SEL(BANK_BP);

	/* The register index must be a literal for SUPP_REG_WR, hence the
	   explicit case per register. */
	switch (regno - PT_BP) {
	case 0:
		SUPP_REG_WR(0, data);
		break;
	case 1:
	case 2:
		/* Registers 1 and 2 cannot be written; only a zero write is
		   tolerated (treated as success, no register access). */
		if (data)
			ret = -1;
		break;
	case 3:
		SUPP_REG_WR(3, data);
		break;
	case 4:
		SUPP_REG_WR(4, data);
		break;
	case 5:
		SUPP_REG_WR(5, data);
		break;
	case 6:
		SUPP_REG_WR(6, data);
		break;
	case 7:
		SUPP_REG_WR(7, data);
		break;
	case 8:
		SUPP_REG_WR(8, data);
		break;
	case 9:
		SUPP_REG_WR(9, data);
		break;
	case 10:
		SUPP_REG_WR(10, data);
		break;
	case 11:
		SUPP_REG_WR(11, data);
		break;
	case 12:
		SUPP_REG_WR(12, data);
		break;
	case 13:
		SUPP_REG_WR(13, data);
		break;
	case 14:
		SUPP_REG_WR(14, data);
		break;
	default:
		/* Out-of-range BP register number. */
		ret = -1;
		break;
	}

	/* Restore SRS. */
	SPEC_REG_WR(SPEC_REG_SRS, old_srs);
	/* Just for show. */
	NOP();
	NOP();
	NOP();

	return ret;
}