/* Flush all TLB entries. */ void __flush_tlb_all(void) { int i; int mmu; unsigned long flags; unsigned long mmu_tlb_hi; unsigned long mmu_tlb_sel; /* * Mask with 0xf so similar TLB entries aren't written in the same 4-way * entry group. */ local_irq_save(flags); for (mmu = 1; mmu <= 2; mmu++) { SUPP_BANK_SEL(mmu); /* Select the MMU */ for (i = 0; i < NUM_TLB_ENTRIES; i++) { /* Store invalid entry */ mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i); mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID) | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf)); SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel); SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi); SUPP_REG_WR(RW_MM_TLB_LO, 0); } } local_irq_restore(flags); }
/*
 * Read one breakpoint-bank support register (for ptrace-style register
 * access).
 *
 * pid:   caller's id; only the current breakpoint owner (bp_owner) is
 *        allowed to read — anyone else just gets 0.
 * regno: user register offset; regno - PT_BP selects the register
 *        (0..14) within the BP support bank.
 *
 * Returns the register value, or 0 for registers 1, 2 and out-of-range
 * numbers.
 */
static long get_debugreg(long pid, unsigned int regno)
{
	register int old_srs;
	register long data;

	/* Refuse reads from anyone but the breakpoint owner. */
	if (pid != bp_owner) {
		return 0;
	}

	/* Remember old SRS. */
	SPEC_REG_RD(SPEC_REG_SRS, old_srs);
	/* Switch to BP bank. */
	SUPP_BANK_SEL(BANK_BP);

	/*
	 * One case per register number — presumably SUPP_REG_RD needs a
	 * compile-time constant operand, so a computed index won't do.
	 * TODO confirm against the macro definition.
	 */
	switch (regno - PT_BP) {
	case 0:
		SUPP_REG_RD(0, data);
		break;
	case 1:
	case 2:
		/* error return value? */
		data = 0;
		break;
	case 3:
		SUPP_REG_RD(3, data);
		break;
	case 4:
		SUPP_REG_RD(4, data);
		break;
	case 5:
		SUPP_REG_RD(5, data);
		break;
	case 6:
		SUPP_REG_RD(6, data);
		break;
	case 7:
		SUPP_REG_RD(7, data);
		break;
	case 8:
		SUPP_REG_RD(8, data);
		break;
	case 9:
		SUPP_REG_RD(9, data);
		break;
	case 10:
		SUPP_REG_RD(10, data);
		break;
	case 11:
		SUPP_REG_RD(11, data);
		break;
	case 12:
		SUPP_REG_RD(12, data);
		break;
	case 13:
		SUPP_REG_RD(13, data);
		break;
	case 14:
		SUPP_REG_RD(14, data);
		break;
	default:
		/* error return value? */
		data = 0;
	}

	/* Restore SRS. */
	SPEC_REG_WR(SPEC_REG_SRS, old_srs);

	/* Just for show. */
	NOP();
	NOP();
	NOP();

	return data;
}
void __devinit smp_prepare_boot_cpu(void)
{
	int bank;
	pgd_t **pgd;

	/* PGD pointer has moved after per_cpu initialization so
	 * update the MMU.
	 */
	pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());

	/* Tell both MMU support banks where the pgd lives now. */
	for (bank = 1; bank <= 2; bank++) {
		SUPP_BANK_SEL(bank);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}

	/* Mark the boot CPU (cpu 0) online and present. */
	cpu_set(0, cpu_online_map);
	cpu_set(0, phys_cpu_present_map);
}
/* Invalidate a single page. */ void __flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { int i; int mmu; unsigned long page_id; unsigned long flags; unsigned long tlb_hi; unsigned long mmu_tlb_hi; page_id = vma->vm_mm->context.page_id; if (page_id == NO_CONTEXT) return; addr &= PAGE_MASK; /* * Invalidate those TLB entries that match both the mm context and the * requested virtual address. */ local_irq_save(flags); for (mmu = 1; mmu <= 2; mmu++) { SUPP_BANK_SEL(mmu); for (i = 0; i < NUM_TLB_ENTRIES; i++) { UPDATE_TLB_SEL_IDX(i); SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi); /* Check if page_id and address matches */ if (((tlb_hi & 0xff) == page_id) && ((tlb_hi & PAGE_MASK) == addr)) { mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID) | addr; UPDATE_TLB_HILO(mmu_tlb_hi, 0); } } } local_irq_restore(flags); }
/* Flush an entire user address space. */ void __flush_tlb_mm(struct mm_struct *mm) { int i; int mmu; unsigned long flags; unsigned long page_id; unsigned long tlb_hi; unsigned long mmu_tlb_hi; page_id = mm->context.page_id; if (page_id == NO_CONTEXT) return; /* Mark the TLB entries that match the page_id as invalid. */ local_irq_save(flags); for (mmu = 1; mmu <= 2; mmu++) { SUPP_BANK_SEL(mmu); for (i = 0; i < NUM_TLB_ENTRIES; i++) { UPDATE_TLB_SEL_IDX(i); /* Get the page_id */ SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi); /* Check if the page_id match. */ if ((tlb_hi & 0xff) == page_id) { mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID) | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf)); UPDATE_TLB_HILO(mmu_tlb_hi, 0); } } } local_irq_restore(flags); }
/*
 * The kernel is already mapped with linear mapping at kseg_c so there's no
 * need to map it with a page table. However, head.S also temporarily mapped it
 * at kseg_4 thus the ksegs are set up again. Also clear the TLB and do various
 * other paging stuff.
 */
void __init cris_mmu_init(void)
{
	unsigned long mmu_config;
	unsigned long mmu_kbase_hi;
	unsigned long mmu_kbase_lo;
	unsigned short mmu_page_id;

	/*
	 * Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
	{
		/* Tell both MMU support banks where the per-cpu pgd lives. */
		pgd_t **pgd;
		pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
		SUPP_BANK_SEL(1);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
		SUPP_BANK_SEL(2);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}
#endif

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/*
	 * Enable exceptions and initialize the kernel segments.
	 * Each seg_x field chooses linear or page-table mapping for that
	 * 256MB segment; seg_a is linear only in the VCS simulator build.
	 */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on)        |
		       REG_STATE(mmu, rw_mm_cfg, acc, on)       |
		       REG_STATE(mmu, rw_mm_cfg, ex, on)        |
		       REG_STATE(mmu, rw_mm_cfg, inv, on)       |
		       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAX_VCS_SIM
		       REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
#else
		       REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
		       REG_STATE(mmu, rw_mm_cfg, seg_9, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_8, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_7, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_6, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_5, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_4, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_3, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_2, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_1, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_0, page));

	/* Physical base for each linearly-mapped upper segment. */
	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAX_VCS_SIM
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

	/* Lower segments are page-mapped, so their bases stay zero. */
	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	/* Kernel runs with page id 0. */
	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	SPEC_REG_WR(SPEC_REG_PID, 0);

	/*
	 * The MMU has been enabled ever since head.S but just to make it
	 * totally obvious enable it here as well.
	 */
	SUPP_BANK_SEL(BANK_GC);
	SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}
/*
 * Write one breakpoint-bank support register (for ptrace-style register
 * access).
 *
 * pid:   caller's id; the first writer becomes bp_owner, later writers
 *        with a different pid are refused.
 * regno: user register offset; regno - PT_BP selects the register
 *        (0..14) within the BP support bank.
 * data:  value to write.
 *
 * Returns 0 on success, -1 on refused/invalid writes (except that a
 * refused write of 0 is reported as success, see below).
 */
static int put_debugreg(long pid, unsigned int regno, long data)
{
	int ret = 0;
	register int old_srs;

#ifdef CONFIG_ETRAX_KGDB
	/* Ignore write, but pretend it was ok if value is 0
	   (we don't want POKEUSR/SETREGS failing unnessecarily). */
	/* NOTE: with KGDB configured this early return makes the rest of
	   the function unreachable — KGDB owns the BP registers. */
	return (data == 0) ? ret : -1;
#endif

	/* Simple owner management. */
	if (!bp_owner)
		bp_owner = pid;
	else if (bp_owner != pid) {
		/* Ignore write, but pretend it was ok if value is 0
		   (we don't want POKEUSR/SETREGS failing unnessecarily). */
		return (data == 0) ? ret : -1;
	}

	/* Remember old SRS. */
	SPEC_REG_RD(SPEC_REG_SRS, old_srs);
	/* Switch to BP bank. */
	SUPP_BANK_SEL(BANK_BP);

	/*
	 * One case per register number — presumably SUPP_REG_WR needs a
	 * compile-time constant operand, so a computed index won't do.
	 * TODO confirm against the macro definition.
	 */
	switch (regno - PT_BP) {
	case 0:
		SUPP_REG_WR(0, data);
		break;
	case 1:
	case 2:
		if (data)
			ret = -1;
		break;
	case 3:
		SUPP_REG_WR(3, data);
		break;
	case 4:
		SUPP_REG_WR(4, data);
		break;
	case 5:
		SUPP_REG_WR(5, data);
		break;
	case 6:
		SUPP_REG_WR(6, data);
		break;
	case 7:
		SUPP_REG_WR(7, data);
		break;
	case 8:
		SUPP_REG_WR(8, data);
		break;
	case 9:
		SUPP_REG_WR(9, data);
		break;
	case 10:
		SUPP_REG_WR(10, data);
		break;
	case 11:
		SUPP_REG_WR(11, data);
		break;
	case 12:
		SUPP_REG_WR(12, data);
		break;
	case 13:
		SUPP_REG_WR(13, data);
		break;
	case 14:
		SUPP_REG_WR(14, data);
		break;
	default:
		ret = -1;
		break;
	}

	/* Restore SRS. */
	SPEC_REG_WR(SPEC_REG_SRS, old_srs);

	/* Just for show. */
	NOP();
	NOP();
	NOP();

	return ret;
}
/*
 * Dump CPU registers, MMU fault causes, a stack trace and the code bytes
 * around the faulting instruction — used when reporting kernel faults.
 */
void show_registers(struct pt_regs *regs)
{
	/*
	 * It's possible to use either the USP register or current->thread.usp.
	 * USP might not correspond to the current process for all cases this
	 * function is called, and current->thread.usp isn't up to date for the
	 * current process. Experience shows that using USP is the way to go.
	 */
	unsigned long usp = rdusp();
	unsigned long d_mmu_cause;
	unsigned long i_mmu_cause;

	printk("CPU: %d\n", smp_processor_id());

	printk("ERP: %08lx SRP: %08lx CCS: %08lx USP: %08lx MOF: %08lx\n",
	       regs->erp, regs->srp, regs->ccs, usp, regs->mof);

	printk(" r0: %08lx  r1: %08lx   r2: %08lx  r3: %08lx\n",
	       regs->r0, regs->r1, regs->r2, regs->r3);

	printk(" r4: %08lx  r5: %08lx   r6: %08lx  r7: %08lx\n",
	       regs->r4, regs->r5, regs->r6, regs->r7);

	printk(" r8: %08lx  r9: %08lx  r10: %08lx r11: %08lx\n",
	       regs->r8, regs->r9, regs->r10, regs->r11);

	printk("r12: %08lx r13: %08lx oR10: %08lx acr: %08lx\n",
	       regs->r12, regs->r13, regs->orig_r10, regs->acr);

	printk(" sp: %08lx\n", (unsigned long)regs);

	/* Read the fault cause registers from both MMUs. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_RD(RW_MM_CAUSE, i_mmu_cause);

	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_RD(RW_MM_CAUSE, d_mmu_cause);

	printk("       Data MMU Cause: %08lx\n", d_mmu_cause);
	printk("Instruction MMU Cause: %08lx\n", i_mmu_cause);

	printk("Process %s (pid: %d, stackpage=%08lx)\n",
	       current->comm, current->pid, (unsigned long)current);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		int i;

		show_stack(NULL, (unsigned long *)usp);

		/*
		 * If the previous stack-dump wasn't a kernel one, dump the
		 * kernel stack now.
		 */
		if (usp != 0)
			show_stack(NULL, NULL);

		printk("\nCode: ");

		if (regs->erp < PAGE_OFFSET)
			goto bad_value;

		/*
		 * Quite often the value at regs->erp doesn't point to the
		 * interesting instruction, which often is the previous
		 * instruction. So dump at an offset large enough that the
		 * instruction decoding should be in sync at the interesting
		 * point, but small enough to fit on a row. The regs->erp
		 * location is pointed out in a ksymoops-friendly way by
		 * wrapping the byte for that address in parenthesises.
		 *
		 * Note: the bad_value label sits inside this loop; the goto
		 * above jumps into the loop body just to print the message
		 * and break out.
		 */
		for (i = -12; i < 12; i++) {
			unsigned char c;

			if (__get_user(c, &((unsigned char *)regs->erp)[i])) {
bad_value:
				printk(" Bad IP value.");
				break;
			}

			if (i == 0)
				printk("(%02x) ", c);
			else
				printk("%02x ", c);
		}
		printk("\n");
	}
}