static int tx4939_proc_show_cp0(char *sysbuf, char **start, off_t off,
				int count, int *eof, void *data)
{
	int len = 0;

	len += sprintf(sysbuf + len, "INDEX   :0x%08x\n", read_c0_index());
	len += sprintf(sysbuf + len, "ENTRYLO0:0x%08lx\n", read_c0_entrylo0());
	len += sprintf(sysbuf + len, "ENTRYLO1:0x%08lx\n", read_c0_entrylo1());
	len += sprintf(sysbuf + len, "CONTEXT :0x%08lx\n", read_c0_context());
	len += sprintf(sysbuf + len, "PAGEMASK:0x%08x\n", read_c0_pagemask());
	len += sprintf(sysbuf + len, "WIRED   :0x%08x\n", read_c0_wired());
	len += sprintf(sysbuf + len, "COUNT   :0x%08x\n", read_c0_count());
	len += sprintf(sysbuf + len, "ENTRYHI :0x%08lx\n", read_c0_entryhi());
	len += sprintf(sysbuf + len, "COMPARE :0x%08x\n", read_c0_compare());
	len += sprintf(sysbuf + len, "STATUS  :0x%08x\n", read_c0_status());
	len += sprintf(sysbuf + len, "CAUSE   :0x%08x\n", read_c0_cause());
	len += sprintf(sysbuf + len, "PRId    :0x%08x\n", read_c0_prid());
	len += sprintf(sysbuf + len, "CONFIG  :0x%08x\n", read_c0_config());
	len += sprintf(sysbuf + len, "XCONTEXT:0x%08lx\n", read_c0_xcontext());
	len += sprintf(sysbuf + len, "TagLo   :0x%08x\n", read_c0_taglo());
	len += sprintf(sysbuf + len, "TagHi   :0x%08x\n", read_c0_taghi());
	len += sprintf(sysbuf + len, "ErrorEPC:0x%08lx\n", read_c0_errorepc());
	*eof = 1;
	return len;
}
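/*
 * Usage sketch (not from the original source): how a handler with this
 * legacy read_proc signature would typically be registered on pre-3.10
 * kernels via create_proc_read_entry(). The proc entry name "tx4939_cp0"
 * and the init function name are assumptions for illustration only.
 */
static int __init tx4939_proc_cp0_init(void)
{
	if (!create_proc_read_entry("tx4939_cp0", 0, NULL,
				    tx4939_proc_show_cp0, NULL))
		return -ENOMEM;
	return 0;
}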
static void refill_tbl_to(struct km_walk_ctx *ctx, unsigned int asid,
			  int write, int pos)
{
	unsigned long entry, oldl1, oldl2;
	unsigned long G_FLAG;
	int idx;
	int oldpid;

	/*
	 * Sanity check on ASID consistency: the current ASID must equal
	 * the given ASID; kernel processes do not obey this rule.
	 */
	oldpid = read_c0_entryhi();

	/* EntryHi: VPN2 of the target address plus the ASID */
	asid = asid & CPU_PAGE_FALG_ASID_MASK;
	entry = get_vpn2(ctx->current_virtual_address);
	entry |= asid;
	write_c0_entryhi(entry);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	oldl1 = read_c0_entrylo0();
	oldl2 = read_c0_entrylo1();

	/*
	 * Set the G flag if ASID == 0, because the entry comes from the
	 * kernel and is shared by all processes.
	 */
	G_FLAG = (ctx->mem == &kp_get_system()->mem_ctx) ? 1 : 0;

	/* EntryLo0 and EntryLo1 */
	WRITE_LO;

	/*
	 * Write by type: the write is random if the TLB entry was flushed
	 * because the R/W flags changed.
	 */
	mtc0_tlbw_hazard();
	if (unlikely(idx < 0))
		tlb_write_random();
	else {
		if (write == 2) {
			printk("Write is forced index for %x, pos %d, idx %d, asid %d, %x %x.\n",
			       ctx->current_virtual_address, pos, idx, asid,
			       oldl1, oldl2);
		}
		tlb_write_indexed();
	}
	tlbw_use_hazard();

	/*
	 * Sanity check on ASID consistency: the current ASID must equal
	 * the given ASID; kernel processes do not obey this rule.
	 */
	if ((oldpid & 0xff) != (asid & 0xff) && asid != 0 /* kernel asid */)
		printk("Why old = %x, asid = %x. ", oldpid, asid);
}
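/*
 * Assumed definition of get_vpn2() used above (the original excerpt does
 * not show it): mask the virtual address down to the VPN2 field of
 * EntryHi, dropping the page offset and the even/odd page select bit.
 * The ~0x1fff mask matches the EntryHi masking used by dump_tlb() below
 * for 4 KiB pages; this is a sketch of the assumed behaviour, not the
 * original implementation.
 */
static inline unsigned long get_vpn2(unsigned long va)
{
	return va & ~0x1fffUL;
}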
void show_tlb(void)
{
#define ASID_MASK 0xFF
	unsigned long flags;
	unsigned int old_ctx;
	unsigned int entry;
	unsigned int entrylo0, entrylo1, entryhi;
	unsigned int pagemask;

	local_irq_save(flags);

	/* Save old context (ASID) */
	old_ctx = (read_c0_entryhi() & 0xff);

	printk("TLB content:\n");
	entry = 0;
	while (entry < 32) {
		write_c0_index(entry);
		BARRIER;
		tlb_read();
		BARRIER;
		entryhi = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();
		pagemask = read_c0_pagemask();
		printk("%02d: ASID=%02d%s VA=0x%08x ",
		       entry,
		       entryhi & ASID_MASK,
		       (entrylo0 & entrylo1 & 1) ? "(G)" : " ",
		       entryhi & ~ASID_MASK);
		printk("PA0=0x%08x C0=%x %s%s%s\n",
		       (entrylo0 >> 6) << 12,
		       (entrylo0 >> 3) & 7,
		       (entrylo0 & 4) ? "Dirty " : "",
		       (entrylo0 & 2) ? "Valid " : "Invalid ",
		       (entrylo0 & 1) ? "Global" : "");
		printk("\t\t\t PA1=0x%08x C1=%x %s%s%s\n",
		       (entrylo1 >> 6) << 12,
		       (entrylo1 >> 3) & 7,
		       (entrylo1 & 4) ? "Dirty " : "",
		       (entrylo1 & 2) ? "Valid " : "Invalid ",
		       (entrylo1 & 1) ? "Global" : "");
		printk("\t\tpagemask=0x%08x", pagemask);
		printk("\tentryhi=0x%08x\n", entryhi);
		printk("\t\tentrylo0=0x%08x", entrylo0);
		printk("\tentrylo1=0x%08x\n", entrylo1);
		entry++;
	}
	BARRIER;
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
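/*
 * Illustrative helper (an assumption, not part of the original): the
 * physical addresses printed by show_tlb() come from shifting the flag
 * bits (G/V/D/C, bits 0..5) out of EntryLo and scaling the remaining PFN
 * by the 4 KiB page size, which is what (entrylo >> 6) << 12 does above.
 */
static inline unsigned int entrylo_to_pa(unsigned int entrylo)
{
	return (entrylo >> 6) << 12;	/* PFN * 4 KiB page size */
}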
void dump_cp0(char *key)
{
	if (key == NULL)
		key = "";

	print_cp0(key, 0, "INDEX   ", read_c0_index());
	print_cp0(key, 2, "ENTRYLO0", read_c0_entrylo0());
	print_cp0(key, 3, "ENTRYLO1", read_c0_entrylo1());
	print_cp0(key, 4, "CONTEXT ", read_c0_context());
	print_cp0(key, 5, "PAGEMASK", read_c0_pagemask());
	print_cp0(key, 6, "WIRED   ", read_c0_wired());
	//print_cp0(key, 8, "BADVADDR", read_c0_badvaddr());
	print_cp0(key, 9, "COUNT   ", read_c0_count());
	print_cp0(key, 10, "ENTRYHI ", read_c0_entryhi());
	print_cp0(key, 11, "COMPARE ", read_c0_compare());
	print_cp0(key, 12, "STATUS  ", read_c0_status());
	print_cp0(key, 13, "CAUSE   ", read_c0_cause() & 0xffff87ff);
	print_cp0(key, 16, "CONFIG  ", read_c0_config());
}
static void dump_tlb(int first, int last)
{
	unsigned long s_entryhi, entryhi, asid;
	unsigned long long entrylo0, entrylo1, pa;
	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
#ifdef CONFIG_32BIT
	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
	int pwidth = xpa ? 11 : 8;
	int vwidth = 8;
#else
	bool xpa = false;
	int pwidth = 11;
	int vwidth = 11;
#endif

	s_pagemask = read_c0_pagemask();
	s_entryhi = read_c0_entryhi();
	s_index = read_c0_index();
	asid = s_entryhi & 0xff;

	for (i = first; i <= last; i++) {
		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		pagemask = read_c0_pagemask();
		entryhi = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();

		/* EHINV bit marks entire entry as invalid */
		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
			continue;
		/*
		 * Prior to tlbinv, unused entries have a virtual address of
		 * CKSEG0.
		 */
		if ((entryhi & ~0x1ffffUL) == CKSEG0)
			continue;
		/*
		 * ASID takes effect in absence of G (global) bit.
		 * We check both G bits, even though architecturally they
		 * should match one another, because some revisions of the
		 * SB1 core may leave only a single G bit set after a machine
		 * check exception due to duplicate TLB entry.
		 */
		if (!((entrylo0 | entrylo1) & MIPS_ENTRYLO_G) &&
		    (entryhi & 0xff) != asid)
			continue;

		/*
		 * Only print entries in use
		 */
		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));

		c0 = (entrylo0 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
		c1 = (entrylo1 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;

		printk("va=%0*lx asid=%02lx\n",
		       vwidth, (entryhi & ~0x1fffUL), entryhi & 0xff);

		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
		pa = (pa << 6) & PAGE_MASK;
		printk("\t[");
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
		       pwidth, pa, c0,
		       (entrylo0 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_G) ? 1 : 0);

		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
		pa = (pa << 6) & PAGE_MASK;
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
		       pwidth, pa, c1,
		       (entrylo1 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_G) ? 1 : 0);
	}
	printk("\n");

	write_c0_entryhi(s_entryhi);
	write_c0_index(s_index);
	write_c0_pagemask(s_pagemask);
}
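/*
 * Usage sketch (not in the original excerpt): dump every TLB entry on the
 * current CPU. current_cpu_data.tlbsize is the standard MIPS cpu-info
 * field holding the number of TLB entries, so the last valid index is
 * tlbsize - 1.
 */
void dump_tlb_all(void)
{
	dump_tlb(0, current_cpu_data.tlbsize - 1);
}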