/*
 * Called in schedule() just before actually doing the switch_to.
 *
 * Lazy context switch: nothing is done when prev == next.  Otherwise a MMU
 * context (page_id) is guaranteed for the incoming mm, the per-cpu pgd
 * shadow used by the fault handlers is updated, and the hardware PID
 * register is loaded with the new page_id (OR:ed with the task's TLS value
 * when a thread_info is available).
 */
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	if (prev != next) {
		int cpu = smp_processor_id();

		/* Make sure there is a MMU context. */
		spin_lock(&mmu_context_lock);
		get_mmu_context(next);
		cpu_set(cpu, next->cpu_vm_mask);
		spin_unlock(&mmu_context_lock);

		/*
		 * Remember the pgd for the fault handlers.  Keep a separate
		 * copy of it because current and active_mm might be invalid
		 * at points where there's still a need to dereference the
		 * pgd.
		 */
		per_cpu(current_pgd, cpu) = next->pgd;

		/* Switch context in the MMU. */
		if (tsk && task_thread_info(tsk)) {
			/*
			 * The TLS value is carried in the PID register
			 * together with the page_id; include it when the
			 * task has a thread_info.
			 */
			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id |
				    task_thread_info(tsk)->tls);
		} else {
			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
		}
	}
}
/*
 * Read one hardware breakpoint register.
 *
 * @pid:   pid of the caller; must match bp_owner (the single process that
 *         currently owns the breakpoint hardware) or 0 is returned.
 * @regno: user-visible register number; the BP-bank register index is
 *         regno - PT_BP.
 *
 * The support-register bank is temporarily switched to BANK_BP around the
 * read and the previous SRS value is restored afterwards.  Registers 1 and
 * 2 (and out-of-range indices) read back as 0.
 */
static long get_debugreg(long pid, unsigned int regno)
{
	register int old_srs;
	register long data;

	/* Only the owner of the breakpoint unit may read its registers. */
	if (pid != bp_owner) {
		return 0;
	}

	/* Remember old SRS. */
	SPEC_REG_RD(SPEC_REG_SRS, old_srs);

	/* Switch to BP bank. */
	SUPP_BANK_SEL(BANK_BP);

	switch (regno - PT_BP) {
	case 0:
		SUPP_REG_RD(0, data);
		break;
	case 1:
	case 2:
		/*
		 * BP registers 1 and 2 are not readable here.
		 * error return value?
		 */
		data = 0;
		break;
	case 3:
		SUPP_REG_RD(3, data);
		break;
	case 4:
		SUPP_REG_RD(4, data);
		break;
	case 5:
		SUPP_REG_RD(5, data);
		break;
	case 6:
		SUPP_REG_RD(6, data);
		break;
	case 7:
		SUPP_REG_RD(7, data);
		break;
	case 8:
		SUPP_REG_RD(8, data);
		break;
	case 9:
		SUPP_REG_RD(9, data);
		break;
	case 10:
		SUPP_REG_RD(10, data);
		break;
	case 11:
		SUPP_REG_RD(11, data);
		break;
	case 12:
		SUPP_REG_RD(12, data);
		break;
	case 13:
		SUPP_REG_RD(13, data);
		break;
	case 14:
		SUPP_REG_RD(14, data);
		break;
	default:
		/* error return value? */
		data = 0;
	}

	/* Restore SRS. */
	SPEC_REG_WR(SPEC_REG_SRS, old_srs);

	/* Just for show. */
	NOP();
	NOP();
	NOP();

	return data;
}
/*
 * The kernel is already mapped with linear mapping at kseg_c so there's no
 * need to map it with a page table.  However, head.S also temporarily mapped
 * it at kseg_4 thus the ksegs are set up again.  Also clear the TLB and do
 * various other paging stuff.
 */
void __init cris_mmu_init(void)
{
	unsigned long mmu_config;
	unsigned long mmu_kbase_hi;
	unsigned long mmu_kbase_lo;
	unsigned short mmu_page_id;

	/*
	 * Make sure the current pgd table points to something sane, even if
	 * it is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
	{
		pgd_t **pgd;

		/*
		 * Give the MMU the address of the per-cpu pgd pointer so TLB
		 * refills can find the page tables.  Banks 1 and 2 are the
		 * instruction and data MMU support-register banks.
		 */
		pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
		SUPP_BANK_SEL(1);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
		SUPP_BANK_SEL(2);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}
#endif

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/*
	 * Enable exceptions and initialize the kernel segments.
	 * Segments e, c and b (and f) are linearly mapped kernel segments;
	 * the low segments are page-table mapped user space.  Segment a is
	 * linear only in the VCS simulator configuration.
	 */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on)        |
		       REG_STATE(mmu, rw_mm_cfg, acc, on)       |
		       REG_STATE(mmu, rw_mm_cfg, ex, on)        |
		       REG_STATE(mmu, rw_mm_cfg, inv, on)       |
		       REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_d, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |
		       REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAX_VCS_SIM
		       REG_STATE(mmu, rw_mm_cfg, seg_a, page)   |
#else
		       REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
		       REG_STATE(mmu, rw_mm_cfg, seg_9, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_8, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_7, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_6, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_5, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_4, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_3, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_2, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_1, page)   |
		       REG_STATE(mmu, rw_mm_cfg, seg_0, page));

	/* Physical base addresses for the linearly mapped high segments. */
	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAX_VCS_SIM
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

	/* The low segments are page mapped, so no linear base is needed. */
	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
			 REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	/* The kernel runs with page id 0. */
	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	SPEC_REG_WR(SPEC_REG_PID, 0);

	/*
	 * The MMU has been enabled ever since head.S but just to make it
	 * totally obvious enable it here as well.
	 */
	SUPP_BANK_SEL(BANK_GC);
	SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}
/*
 * Write one hardware breakpoint register.
 *
 * @pid:   pid of the caller; the first writer becomes bp_owner, after which
 *         only that pid may write.
 * @regno: user-visible register number; the BP-bank register index is
 *         regno - PT_BP.
 * @data:  value to write.
 *
 * Returns 0 on success, -1 on a rejected write.  A rejected write of the
 * value 0 is reported as success so that blanket POKEUSR/SETREGS requests
 * don't fail.  When CONFIG_ETRAX_KGDB is set, the breakpoint hardware
 * belongs to the kernel debugger and all writes are ignored (note the
 * unconditional return makes the rest of the function dead code in that
 * configuration).
 *
 * The support-register bank is temporarily switched to BANK_BP around the
 * write and the previous SRS value is restored afterwards.
 */
static int put_debugreg(long pid, unsigned int regno, long data)
{
	int ret = 0;
	register int old_srs;

#ifdef CONFIG_ETRAX_KGDB
	/* Ignore write, but pretend it was ok if value is 0 (we don't want
	   POKEUSR/SETREGS failing unnecessarily). */
	return (data == 0) ? ret : -1;
#endif

	/* Simple owner management. */
	if (!bp_owner)
		bp_owner = pid;
	else if (bp_owner != pid) {
		/* Ignore write, but pretend it was ok if value is 0 (we
		   don't want POKEUSR/SETREGS failing unnecessarily). */
		return (data == 0) ? ret : -1;
	}

	/* Remember old SRS. */
	SPEC_REG_RD(SPEC_REG_SRS, old_srs);

	/* Switch to BP bank. */
	SUPP_BANK_SEL(BANK_BP);

	switch (regno - PT_BP) {
	case 0:
		SUPP_REG_WR(0, data);
		break;
	case 1:
	case 2:
		/*
		 * BP registers 1 and 2 are not writable; only a zero write
		 * is tolerated (see function comment).
		 */
		if (data)
			ret = -1;
		break;
	case 3:
		SUPP_REG_WR(3, data);
		break;
	case 4:
		SUPP_REG_WR(4, data);
		break;
	case 5:
		SUPP_REG_WR(5, data);
		break;
	case 6:
		SUPP_REG_WR(6, data);
		break;
	case 7:
		SUPP_REG_WR(7, data);
		break;
	case 8:
		SUPP_REG_WR(8, data);
		break;
	case 9:
		SUPP_REG_WR(9, data);
		break;
	case 10:
		SUPP_REG_WR(10, data);
		break;
	case 11:
		SUPP_REG_WR(11, data);
		break;
	case 12:
		SUPP_REG_WR(12, data);
		break;
	case 13:
		SUPP_REG_WR(13, data);
		break;
	case 14:
		SUPP_REG_WR(14, data);
		break;
	default:
		ret = -1;
		break;
	}

	/* Restore SRS. */
	SPEC_REG_WR(SPEC_REG_SRS, old_srs);

	/* Just for show. */
	NOP();
	NOP();
	NOP();

	return ret;
}