Example #1
static void ipu_add_wired_entry(unsigned long pid,
				unsigned long entrylo0, unsigned long entrylo1,
				unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;
	struct task_struct *g, *p;

	/* We will lock a 4MB page size entry to map the 4MB reserved IPU memory */
	wired = read_c0_wired();
	if (wired) return;

	do_each_thread(g, p) {
		if (p->pid == pid)
			g_asid = p->mm->context[0];
	} while_each_thread(g, p);

	local_irq_save(flags);

	entrylo0 = entrylo0 >> 6;   /* PFN */
	entrylo0 |= 0x6 | (0 << 3); /* Write-through cacheable, dirty, valid */

	/* Save the current ASID so it can be restored afterwards */
	old_ctx = read_c0_entryhi() & 0xff;
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	entryhi &= ~0xff;	/* new add, 20070906 */
	entryhi |= g_asid;	/* new add, 20070906 */
//	entryhi |= old_ctx;	/* new add, 20070906 */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#if defined(DEBUG)
	printk("\nold_ctx=%03d\n", old_ctx);

	show_tlb();
#endif
}
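
For reference, the entrylo0 >> 6 / |= 0x6 packing above follows the classic pre-RIXI MIPS32 EntryLo layout (PFN in bits 31:6, cache attribute in bits 5:3, then D, V, G); what cache attribute 0 means ("write-through cacheable" per the comment) is core-specific. A minimal sketch of the packing, with mk_entrylo() as a hypothetical helper name:

/*
 * Hypothetical helper (not from the sources above), assuming the classic
 * pre-RIXI MIPS32 EntryLo layout:
 *
 *   31 ........ 6 | 5..3 | 2 | 1 | 0
 *        PFN      |  C   | D | V | G
 */
static inline unsigned long mk_entrylo(unsigned long paddr, unsigned int cca,
				       int dirty, int valid, int global)
{
	return ((paddr >> 12) << 6)     |	/* PFN: physical frame number */
	       ((cca & 0x7) << 3)       |	/* C: cache coherency attribute */
	       ((dirty ? 1UL : 0) << 2) |	/* D: writable */
	       ((valid ? 1UL : 0) << 1) |	/* V: valid */
	       (global ? 1UL : 0);		/* G: ignore ASID on match */
}

With a page-aligned physical address this reproduces the two lines in ipu_add_wired_entry() above as entrylo0 = mk_entrylo(phys, 0, 1, 1, 0).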
Example #2
File: prom.c  Project: janfj/dd-wrt
/* Initialize the wired register and all tlb entries to
 * known good state.
 */
void __init
early_tlb_init(void)
{
    unsigned long  index;
    struct cpuinfo_mips *c = &current_cpu_data;

    tmp_tlb_ent = c->tlbsize;

    /* printk(KERN_ALERT "%s: tlb size %ld\n", __FUNCTION__, c->tlbsize); */

    /*
     * Initialize the entire TLB to unique virtual addresses,
     * but with the PAGE_VALID bit not set.
     */
    write_c0_pagemask(PM_DEFAULT_MASK);
    write_c0_wired(0);

    write_c0_entrylo0(0);   /* not _PAGE_VALID */
    write_c0_entrylo1(0);

    for (index = 0; index < c->tlbsize; index++) {
        /* Make sure all entries differ. */
        write_c0_entryhi(UNIQUE_ENTRYHI(index+32));
        write_c0_index(index);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
    }

    tlbw_use_hazard();

}
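
The scrubbing above relies on UNIQUE_ENTRYHI() handing every index a distinct VPN2 in unmapped CKSEG0 space, so no two entries can collide or ever match a real lookup. Mainline Linux defines it roughly as follows (an assumption; the exact definition lives in asm/tlb.h and may differ per tree):

/* Roughly the mainline definition (assumption): one VPN2 per index,
 * spaced one even/odd page pair apart, inside unmapped CKSEG0. */
#define UNIQUE_ENTRYHI(idx)	(CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))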
Example #3
static void ipu_add_wired_entry(unsigned long pid,
				unsigned long entrylo0, unsigned long entrylo1,
				unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (p->pid == pid)
			g_asid = p->mm->context[0];
	} while_each_thread(g, p);

	local_irq_save(flags);
	/* Save the current ASID so it can be restored afterwards */
	old_ctx = read_c0_entryhi() & 0xff;
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	entryhi &= ~0xff;	/* new add, 20070906 */
	entryhi |= g_asid;	/* new add, 20070906 */
//	entryhi |= old_ctx;	/* new add, 20070906 */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#if defined(DEBUG)
	printk("\nold_ctx=%03d\n", old_ctx);

	show_tlb();
#endif
}
Example #4
void write_one_tlb(int index, u32 pagemask, u32 hi, u32 low0, u32 low1)
{
	write_c0_entrylo0(low0);
	write_c0_pagemask(pagemask);
	write_c0_entrylo1(low1);
	write_c0_entryhi(hi);
	write_c0_index(index);
	tlb_write_indexed();
}
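
Unlike examples #2, #5, and #7, this variant issues no mtc0_tlbw_hazard()/tlbw_use_hazard() barriers, does not mask interrupts, and leaves c0_entryhi clobbered, so it is only safe in very controlled contexts such as early boot. A guarded variant might look like the sketch below; write_one_tlb_safe() is a hypothetical name, not an existing kernel API.

/* Hypothetical guarded variant: same CP0 sequence, but with interrupts
 * masked, the usual hazard barriers around tlbwi, and EntryHi restored. */
static void write_one_tlb_safe(int index, u32 pagemask, u32 hi,
			       u32 low0, u32 low1)
{
	unsigned long flags;
	unsigned long old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();

	write_c0_index(index);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(hi);
	write_c0_entrylo0(low0);
	write_c0_entrylo1(low1);
	mtc0_tlbw_hazard();	/* let the CP0 writes settle before tlbwi */
	tlb_write_indexed();
	tlbw_use_hazard();	/* let tlbwi settle before dependent accesses */

	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}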
Example #5
void __init
add_tmptlb_entry(unsigned long entrylo0, unsigned long entrylo1,
		 unsigned long entryhi, unsigned long pagemask)
{
	/* Write one TLB entry. */
	--tmp_tlb_ent;
	write_c0_index(tmp_tlb_ent);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
}
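
tmp_tlb_ent here is the same counter that early_tlb_init() in example #2 initializes to the TLB size, so temporary entries are handed out from the top of the TLB downwards. A call might look like the sketch below; the addresses are placeholders and mk_entrylo() is the hypothetical packing helper sketched under example #1.

	/* Hypothetical call: map 32 MB of RAM (two 16 MB pages) at a
	 * placeholder mapped kernel address; G=1 so the ASID is ignored. */
	add_tmptlb_entry(mk_entrylo(0x10000000, 3, 1, 1, 1),	/* even page */
			 mk_entrylo(0x11000000, 3, 1, 1, 1),	/* odd page  */
			 0xc0000000, PM_16M);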
Example #6
void __cpuinit tlb_init(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	//temp_tlb_entry = current_cpu_data.tlbsize - 1;
	printk("TLB ÊýÁ¿%d.\n", current_cpu_data.tlbsize);
        /* From this point on the ARC firmware is dead.  */
	local_flush_tlb_all();
	
	/* Did I tell you that ARC SUCKS?  */

// 	if (ntlb) {
// 		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
// 			int wired = current_cpu_data.tlbsize - ntlb;
// 			write_c0_wired(wired);
// 			write_c0_index(wired-1);
// 			printk("Restricting TLB to %d entries\n", ntlb);
// 		} else
// 			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
// 	}

// 	build_tlb_refill_handler();
}
Example #7
static int add_wired_tlb_entry(uint32_t entrylo0, uint32_t entrylo1,
			       uint32_t entryhi, uint32_t pgsize)
{
	uint32_t tlbindex;

	tlbindex = read_c0_wired();
	if (tlbindex >= get_tlb_size() || tlbindex >= C0_WIRED_MASK) {
		printk(BIOS_ERR, "Ran out of TLB entries\n");
		return -1;
	}
	write_c0_wired(tlbindex + 1);
	write_c0_index(tlbindex);
	write_c0_pagemask(((pgsize / MIN_PAGE_SIZE) - 1) << C0_PAGEMASK_SHIFT);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	return 0;
}
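
A caller typically wires one even/odd page pair per call. The helper below is a hypothetical sketch (not coreboot's API) of how the arguments are usually assembled, assuming the classic EntryLo layout with the PFN in bits 31:6, cache attribute 3 (cacheable) in bits 5:3, and the D/V/G bits set.

/* Hypothetical helper: identity-map the even/odd page pair starting at
 * 'base' with a single wired entry, 'pgsize' bytes per half. */
static int wire_identity_pair(uint32_t base, uint32_t pgsize)
{
	uint32_t lo0 = ((base >> 12) << 6)            | (3 << 3) | 0x7;
	uint32_t lo1 = (((base + pgsize) >> 12) << 6) | (3 << 3) | 0x7;
	uint32_t hi  = base & ~(2 * pgsize - 1);	/* VPN2; ASID 0 */

	return add_wired_tlb_entry(lo0, lo1, hi, pgsize);
}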
Example #8
File: dump_tlb.c  Project: DIGImend/linux
static void dump_tlb(int first, int last)
{
	unsigned long s_entryhi, entryhi, asid;
	unsigned long long entrylo0, entrylo1, pa;
	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
#ifdef CONFIG_32BIT
	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
	int pwidth = xpa ? 11 : 8;
	int vwidth = 8;
#else
	bool xpa = false;
	int pwidth = 11;
	int vwidth = 11;
#endif

	s_pagemask = read_c0_pagemask();
	s_entryhi = read_c0_entryhi();
	s_index = read_c0_index();
	asid = s_entryhi & 0xff;

	for (i = first; i <= last; i++) {
		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		pagemask = read_c0_pagemask();
		entryhi	 = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();

		/* EHINV bit marks entire entry as invalid */
		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
			continue;
		/*
		 * Prior to tlbinv, unused entries have a virtual address of
		 * CKSEG0.
		 */
		if ((entryhi & ~0x1ffffUL) == CKSEG0)
			continue;
		/*
		 * ASID takes effect in absence of G (global) bit.
		 * We check both G bits, even though architecturally they should
		 * match one another, because some revisions of the SB1 core may
		 * leave only a single G bit set after a machine check exception
		 * due to duplicate TLB entry.
		 */
		if (!((entrylo0 | entrylo1) & MIPS_ENTRYLO_G) &&
		    (entryhi & 0xff) != asid)
			continue;

		/*
		 * Only print entries in use
		 */
		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));

		c0 = (entrylo0 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
		c1 = (entrylo1 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;

		printk("va=%0*lx asid=%02lx\n",
		       vwidth, (entryhi & ~0x1fffUL),
		       entryhi & 0xff);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
		pa = (pa << 6) & PAGE_MASK;
		printk("\t[");
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
		       pwidth, pa, c0,
		       (entrylo0 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_G) ? 1 : 0);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
		pa = (pa << 6) & PAGE_MASK;
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
		       pwidth, pa, c1,
		       (entrylo1 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_G) ? 1 : 0);
	}
	printk("\n");

	write_c0_entryhi(s_entryhi);
	write_c0_index(s_index);
	write_c0_pagemask(s_pagemask);
}
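
In the kernel this is normally reached through a whole-TLB wrapper defined next to dump_tlb(), roughly:

/* Whole-TLB dump, roughly as the kernel's dump_tlb_all() does it. */
void dump_tlb_all(void)
{
	dump_tlb(0, current_cpu_data.tlbsize - 1);
}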