Example #1
0
static int
tx4939_proc_show_cp0(char *sysbuf, char **start, off_t off,
		     int count, int *eof, void *data)
{
	int len = 0;
	len += sprintf(sysbuf + len, "INDEX   :0x%08x\n", read_c0_index());
	len += sprintf(sysbuf + len, "ENTRYLO0:0x%08lx\n", read_c0_entrylo0());
	len += sprintf(sysbuf + len, "ENTRYLO1:0x%08lx\n", read_c0_entrylo1());
	len += sprintf(sysbuf + len, "CONTEXT :0x%08lx\n", read_c0_context());
	len += sprintf(sysbuf + len, "PAGEMASK:0x%08x\n", read_c0_pagemask());
	len += sprintf(sysbuf + len, "WIRED   :0x%08x\n", read_c0_wired());
	len += sprintf(sysbuf + len, "COUNT   :0x%08x\n", read_c0_count());
	len += sprintf(sysbuf + len, "ENTRYHI :0x%08lx\n", read_c0_entryhi());
	len += sprintf(sysbuf + len, "COMPARE :0x%08x\n", read_c0_compare());
	len += sprintf(sysbuf + len, "STATUS  :0x%08x\n", read_c0_status());
	len += sprintf(sysbuf + len, "CAUSE   :0x%08x\n", read_c0_cause());
	len += sprintf(sysbuf + len, "PRId    :0x%08x\n", read_c0_prid());
	len += sprintf(sysbuf + len, "CONFIG  :0x%08x\n", read_c0_config());
	len += sprintf(sysbuf + len, "XCONTEXT:0x%08lx\n", read_c0_xcontext());
	len += sprintf(sysbuf + len, "TagLo   :0x%08x\n", read_c0_taglo());
	len += sprintf(sysbuf + len, "TagHi   :0x%08x\n", read_c0_taghi());
	len += sprintf(sysbuf + len, "ErrorEPC:0x%08lx\n", read_c0_errorepc());
	*eof = 1;
	return len;
}
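
This handler has the signature of the legacy procfs read_proc callback used before Linux 3.10. A minimal registration sketch under that assumption; create_proc_read_entry() is the 2.6-era API, and the "tx4939_cp0" entry name is made up for illustration:

/* Sketch: hooking the handler into the legacy procfs read interface.
 * create_proc_read_entry() is the pre-3.10 API; the entry name below
 * is a hypothetical example. */
static int __init tx4939_proc_init(void)
{
	if (!create_proc_read_entry("tx4939_cp0", 0, NULL,
				    tx4939_proc_show_cp0, NULL))
		return -ENOMEM;
	return 0;
}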
Example #2
0
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		unsigned long config6_flags;

		ENTER_CRITICAL(flags);
		disable_pgwalker(config6_flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
#else
				__write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(idx)));
#endif
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		enable_pgwalker(config6_flags);
		EXIT_CRITICAL(flags);
	}
}
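
UNIQUE_ENTRYHI() is what keeps invalidated slots from aliasing: each one is parked at a distinct address in unmapped CKSEG0 space. Older mainline MIPS kernels define it essentially as below (a sketch; the exact header and form vary by version):

/* Park invalidated entry idx at a distinct CKSEG0 address; CKSEG0 is
 * unmapped, so the hardware never matches these entries and no two
 * of them collide. */
#define UNIQUE_ENTRYHI(idx)	(CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))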
Example #3
0
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				unsigned long end)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & ASID_MASK),
		       start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(XKPHYS+idx*0x2000);
				BARRIER;
				tlb_write_indexed();
				BARRIER;
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
Example #4
0
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}
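
The round_down()/round_up() calls replace the manual mask arithmetic of the older variants above. For a power-of-two alignment they behave roughly as sketched here (mainline wraps this in a __round_mask() helper):

/* Power-of-two alignment semantics of the <linux/kernel.h> macros. */
#define round_down(x, y)	((x) & ~((y) - 1))
#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)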
Example #5
0
static void refill_tbl_to(struct km_walk_ctx *ctx, unsigned int asid,
			  int write, int pos)
{
	unsigned long entry, oldl1, oldl2;
	unsigned long G_FLAG;
	int idx;
	int oldpid;

	/* Save the old EntryHi; its ASID is sanity-checked against the given
	   ASID at the end of this function. */
	oldpid = read_c0_entryhi();

	/* Entry HI */	
	asid = asid & CPU_PAGE_FALG_ASID_MASK;
	entry = get_vpn2(ctx->current_virtual_address);
	entry |= asid;
	write_c0_entryhi(entry);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	oldl1 = read_c0_entrylo0();
	oldl2 = read_c0_entrylo1();
	/* Set the G (global) flag for kernel mappings, since they are shared by all processes */
	G_FLAG = (ctx->mem == &kp_get_system()->mem_ctx)? 1 : 0;

	/* Entry Low0 and Low1 */
	WRITE_LO;

	/* Write the entry: use a random slot if the probe missed (the entry may
	   have been flushed while its R/W flags were being changed), otherwise
	   overwrite the matched slot in place. */
	mtc0_tlbw_hazard();
	if (unlikely(idx < 0))
		tlb_write_random();
	else {
		if (write == 2)
			printk("Write is forced index for %lx, pos %d, idx %d, asid %u, %lx %lx.\n",
			       ctx->current_virtual_address, pos, idx, asid,
			       oldl1, oldl2);

		tlb_write_indexed();
	}
	tlbw_use_hazard();

	/* Sanity check on ASID consistency: the current ASID must equal the
	   given ASID; kernel processes do not obey this rule. */
	if ((oldpid & 0xff) != (asid & 0xff) && asid != 0 /* kernel asid */)
		printk("Why old = %x, asid = %x. ", oldpid, asid);
}
Example #6
0
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		EXIT_CRITICAL(flags);
	}
}
Example #7
0
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                           unsigned long end)
{
	int cpu = smp_processor_id();
	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]",
		       (mm->context & ASID_MASK), start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= NTLB_ENTRIES_HALF) {
			int oldpid = (read_c0_entryhi() & ASID_MASK);
			int newpid = (cpu_context(smp_processor_id(), mm)
				      & ASID_MASK);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
Example #8
0
/**
	@brief Flush a memory range from the TLB

	If the memory range is too big, flush all entries with this ASID instead.
*/
void local_flush_tlb_range(unsigned int asid, unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size <= current_cpu_data.tlbsize / 2) {
		int oldpid = read_c0_entryhi();
		int newpid = asid;

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		while (start < end) {
			int idx;

			write_c0_entryhi(start | newpid);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(oldpid);
	} else {
		local_flush_asid(asid);
	}

	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
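
The two size lines count even/odd page pairs, since one MIPS TLB entry maps a pair of pages; that count is what gets compared against half the TLB. A hedged restatement (the helper name is made up):

/* Sketch: the pair count that the flush loop compares against
 * current_cpu_data.tlbsize / 2. Each TLB entry maps an even/odd
 * page pair, so a range of N pages needs about N/2 probes. */
static inline unsigned long tlb_pair_count(unsigned long start,
					   unsigned long end)
{
	unsigned long pages = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	return (pages + 1) >> 1;
}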
Example #9
0
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_context(cpu, mm) & ASID_MASK;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
Example #10
0
void
dump_cp0(char *key)
{
	if (key == NULL)
		key = "";

	print_cp0(key, 0, "INDEX   ", read_c0_index());
	print_cp0(key, 2, "ENTRYLO1", read_c0_entrylo0());
	print_cp0(key, 3, "ENTRYLO2", read_c0_entrylo1());
	print_cp0(key, 4, "CONTEXT ", read_c0_context());
	print_cp0(key, 5, "PAGEMASK", read_c0_pagemask());
	print_cp0(key, 6, "WIRED   ", read_c0_wired());
	//print_cp0(key, 8, "BADVADDR",  read_c0_badvaddr());
	print_cp0(key, 9, "COUNT   ", read_c0_count());
	print_cp0(key, 10, "ENTRYHI ", read_c0_entryhi());
	print_cp0(key, 11, "COMPARE ", read_c0_compare());
	print_cp0(key, 12, "STATUS  ", read_c0_status());
	print_cp0(key, 13, "CAUSE   ", read_c0_cause() & 0xffff87ff);
	print_cp0(key, 16, "CONFIG  ", read_c0_config());
	return;
}
Example #11
0
void
dump_tlb_addr(unsigned long addr)
{
	unsigned long flags, oldpid;
	int index;

	local_irq_save(flags);
	oldpid = read_c0_entryhi() & 0xff;
	write_c0_entryhi((addr & PAGE_MASK) | oldpid);
	tlb_probe();
	index = read_c0_index();
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);

	if (index < 0) {
		printk("No entry for address 0x%08lx in TLB\n", addr);
		return;
	}

	printk("Entry %d maps address 0x%08lx\n", index, addr);
	dump_tlb(index, index);
}
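
A typical caller probes the faulting address from an exception path; a hypothetical call site (the wrapper name is illustrative):

/* Hypothetical call site: dump CP0 state, then the TLB entry (if any)
 * behind a faulting address. */
void debug_tlb_fault(struct pt_regs *regs)
{
	dump_cp0("fault");
	dump_tlb_addr(regs->cp0_badvaddr);
}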
Example #12
0
static void dump_tlb(int first, int last)
{
	unsigned long s_entryhi, entryhi, asid;
	unsigned long long entrylo0, entrylo1, pa;
	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
#ifdef CONFIG_32BIT
	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
	int pwidth = xpa ? 11 : 8;
	int vwidth = 8;
#else
	bool xpa = false;
	int pwidth = 11;
	int vwidth = 11;
#endif

	s_pagemask = read_c0_pagemask();
	s_entryhi = read_c0_entryhi();
	s_index = read_c0_index();
	asid = s_entryhi & 0xff;

	for (i = first; i <= last; i++) {
		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		pagemask = read_c0_pagemask();
		entryhi	 = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();

		/* EHINV bit marks entire entry as invalid */
		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
			continue;
		/*
		 * Prior to tlbinv, unused entries have a virtual address of
		 * CKSEG0.
		 */
		if ((entryhi & ~0x1ffffUL) == CKSEG0)
			continue;
		/*
		 * ASID takes effect in absence of G (global) bit.
		 * We check both G bits, even though architecturally they should
		 * match one another, because some revisions of the SB1 core may
		 * leave only a single G bit set after a machine check exception
		 * due to duplicate TLB entry.
		 */
		if (!((entrylo0 | entrylo1) & MIPS_ENTRYLO_G) &&
		    (entryhi & 0xff) != asid)
			continue;

		/*
		 * Only print entries in use
		 */
		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));

		c0 = (entrylo0 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
		c1 = (entrylo1 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;

		printk("va=%0*lx asid=%02lx\n",
		       vwidth, (entryhi & ~0x1fffUL),
		       entryhi & 0xff);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
		pa = (pa << 6) & PAGE_MASK;
		printk("\t[");
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
		       pwidth, pa, c0,
		       (entrylo0 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_G) ? 1 : 0);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
		pa = (pa << 6) & PAGE_MASK;
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
		       pwidth, pa, c1,
		       (entrylo1 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_G) ? 1 : 0);
	}
	printk("\n");

	write_c0_entryhi(s_entryhi);
	write_c0_index(s_index);
	write_c0_pagemask(s_pagemask);
}
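
msk2str() decodes the PageMask register into a human-readable page size. A sketch of its usual shape in the MIPS dump helpers (the PM_* constants come from the MIPS pgtable headers; larger sizes elided):

/* Sketch: map a PageMask value to a page-size string (abridged). */
static const char *msk2str(unsigned int mask)
{
	switch (mask) {
	case PM_4K:	return "4kb";
	case PM_16K:	return "16kb";
	case PM_64K:	return "64kb";
	case PM_256K:	return "256kb";
	/* larger page sizes elided */
	}
	return "";
}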