Example #1
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
}
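The loop above leans on UNIQUE_ENTRYHI() to park every invalidated slot at a distinct virtual address that can never match a real access. Below is a minimal sketch of such a macro, modelled on the one in the MIPS Linux tree; the exact definition varies by kernel version (newer kernels also set the EHINV bit when the core supports it), so treat it as an assumption rather than the authoritative form.

/* Hedged sketch of UNIQUE_ENTRYHI: place dummy entry `idx` at its own
 * CKSEG0 address so no two entries share a VPN2 and no user address
 * ever hits it.  PAGE_SHIFT + 1 because one EntryHi covers an
 * even/odd page pair. */
#define UNIQUE_ENTRYHI(idx)	(CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))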
Example #2
/* Initialize the wired register and all TLB entries to a
 * known good state.
 */
void __init
early_tlb_init(void)
{
	unsigned long  index;
	struct cpuinfo_mips *c = &current_cpu_data;

	tmp_tlb_ent = c->tlbsize;

	/* printk(KERN_ALERT "%s: tlb size %ld\n", __FUNCTION__, c->tlbsize); */

	/*
	 * Initialize the entire TLB to unique virtual addresses,
	 * but with the PAGE_VALID bit not set.
	 */
	write_c0_wired(0);
	write_c0_pagemask(PM_DEFAULT_MASK);

	write_c0_entrylo0(0);   /* not _PAGE_VALID */
	write_c0_entrylo1(0);

	for (index = 0; index < c->tlbsize; index++) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(index+32));
		write_c0_index(index);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	tlbw_use_hazard();

}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		unsigned long config6_flags;

		ENTER_CRITICAL(flags);
		disable_pgwalker(config6_flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
#else
				__write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(idx)));
#endif
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		enable_pgwalker(config6_flags);
		EXIT_CRITICAL(flags);
	}
}
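The size arithmetic in this variant (and in the later range-flush examples) is easy to misread: one R4000-style TLB entry maps an even/odd page pair, so the probe loop only has to visit half as many entries as there are pages, rounded up. A small worked example follows, assuming 4 KB pages (PAGE_SHIFT == 12); the addresses are made up for illustration.

/* Hypothetical range: start = 0x1000, end = 0x6000 (5 pages).
 * size = (0x6000 - 0x1000 + 0xfff) >> 12  = 5   pages
 * size = (5 + 1) >> 1                     = 3   even/odd pairs
 * So at most 3 probe/invalidate iterations, each advancing `start`
 * by PAGE_SIZE << 1 (one 8 KB pair), which stays well under the
 * tlbsize/2 threshold on any realistic TLB. */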
Example #4
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
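To make the tlbinvf index arithmetic concrete, here is a worked example with illustrative sizes that are not taken from the source:

/* Illustrative numbers only:
 * tlbsizevtlb     = 64                 fully associative VTLB entries
 * tlbsizeftlbsets = 128                FTLB sets (all ways cleared per set)
 * ftlbhighset     = 64 + 128 = 192
 *
 * With no wired entries, one tlbinvf at index 0 invalidates the whole
 * VTLB, then indices 64..191 issue one tlbinvf per FTLB set, clearing
 * every way of that set.  Any wired entry forces the per-entry
 * fallback loop instead. */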
Example #5
void __init
add_tmptlb_entry(unsigned long entrylo0, unsigned long entrylo1,
		 unsigned long entryhi, unsigned long pagemask)
{
	/* Write one TLB entry. */
	--tmp_tlb_ent;
	write_c0_index(tmp_tlb_ent);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
}
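For illustration only, here is a hedged sketch of how add_tmptlb_entry() might be called. The physical address, virtual address and caller name are made-up placeholders, not from the source; the EntryLo encoding assumed is the usual R4000-style layout (PFN << 6 | C << 3 | D << 2 | V << 1 | G), with C = 2 for uncached.

void __init map_fake_device(void)
{
	/* Hypothetical caller: pin an 8 KB uncached window (two 4 KB pages)
	 * at a made-up physical address, using one temporary TLB slot. */
	unsigned long pa  = 0x1f000000;		/* made-up device base */
	unsigned long va  = 0xc0000000;		/* made-up, 8 KB aligned */
	unsigned long lo0 = ((pa >> 12) << 6) | (2 << 3) | (1 << 2) | (1 << 1) | 1;
	unsigned long lo1 = (((pa + 0x1000) >> 12) << 6) | (2 << 3) | (1 << 2) | (1 << 1) | 1;

	add_tmptlb_entry(lo0, lo1, va, PM_DEFAULT_MASK);
}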
Example #6
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}
Example #7
static void refill_tbl_to(struct km_walk_ctx * ctx, unsigned int asid, int write, int pos)
{	
	unsigned long entry, oldl1, oldl2;
	unsigned long G_FLAG;
	int idx;
	int oldpid;

	/* Sanity check on ASID consistency: the current ASID must equal the
	 * given ASID; kernel processes do not obey this rule. */
	oldpid = read_c0_entryhi();

	/* Entry HI */	
	asid = asid & CPU_PAGE_FALG_ASID_MASK;
	entry = get_vpn2(ctx->current_virtual_address);
	entry |= asid;
	write_c0_entryhi(entry);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	oldl1 = read_c0_entrylo0();
	oldl2 = read_c0_entrylo1();
	/* Add the G_FLAG if ASID == 0, because the entry is from the kernel and shared by all processes */
	G_FLAG = (ctx->mem == &kp_get_system()->mem_ctx)? 1 : 0;

	/* Entry Low0 and Low1 */
	WRITE_LO;

	/* Write by type: if the probe missed (e.g. the entry was flushed when its R/W flags changed), fall back to a random write */
	mtc0_tlbw_hazard();
	if (unlikely(idx < 0))
		tlb_write_random();
	else {
		if (write == 2)
			printk("Write is forced index for %x, pos %d, idx %d, asid %d, %lx %lx.\n",
			       ctx->current_virtual_address, pos, idx, asid, oldl1, oldl2);

		tlb_write_indexed();
	}
	tlbw_use_hazard();

	/* Sanity check on ASID consistency: the current ASID must equal the
	 * given ASID; kernel processes do not obey this rule. */
	if ((oldpid & 0xff) != (asid & 0xff) && asid != 0 /* kernel asid */)
		printk("Why old = %x, asid = %x. ", oldpid, asid);
}
Example #8
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		EXIT_CRITICAL(flags);
	}
}
Example #9
/**
	@brief Flush memory range

	If the memory range is too big, we flush all entries with this ASID
*/
void local_flush_tlb_range(unsigned int asid, unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size <= current_cpu_data.tlbsize / 2)
	{
		int oldpid = read_c0_entryhi();
		int newpid = asid;

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		while (start < end) {
			int idx;

			write_c0_entryhi(start | newpid);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(oldpid);
	} 
	else
		local_flush_asid(asid);

	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
Example #10
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo(0);

	for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
		write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
		write_c0_vaddr(entry << PAGE_SHIFT);
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write();
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
Example #11
static int add_wired_tlb_entry(uint32_t entrylo0, uint32_t entrylo1,
			       uint32_t entryhi, uint32_t pgsize)
{
	uint32_t tlbindex;

	tlbindex = read_c0_wired();
	if (tlbindex >= get_tlb_size() || tlbindex >= C0_WIRED_MASK) {
		printk(BIOS_ERR, "Ran out of TLB entries\n");
		return -1;
	}
	write_c0_wired(tlbindex + 1);
	write_c0_index(tlbindex);
	write_c0_pagemask(((pgsize / MIN_PAGE_SIZE) - 1) << C0_PAGEMASK_SHIFT);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	return 0;
}
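A hedged usage sketch for add_wired_tlb_entry() follows. The device address and the helper name are invented for illustration; the EntryLo layout assumed is the standard R4000-style PFN << 6 | C << 3 | D << 2 | V << 1 | G, with C = 2 for uncached.

static void wire_fake_uart(void)
{
	/* Hypothetical mapping: virtual 0xe0000000 -> physical 0x1e000000,
	 * one 4 KB even/odd pair, uncached, dirty, valid, global. */
	uint32_t hi  = 0xe0000000;	/* 8 KB-aligned VPN2 */
	uint32_t lo0 = ((0x1e000000 >> 12) << 6) | (2 << 3) | (1 << 2) | (1 << 1) | 1;
	uint32_t lo1 = ((0x1e001000 >> 12) << 6) | (2 << 3) | (1 << 2) | (1 << 1) | 1;

	if (add_wired_tlb_entry(lo0, lo1, hi, 4096) < 0)
		printk(BIOS_ERR, "no free wired TLB slot for the UART window\n");
}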
void local_flush_tlb_all(void)
{
	unsigned long flags, config6_flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	disable_pgwalker(config6_flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

#if defined(CONFIG_MAPPED_KERNEL)
	if (!entry)
		printk("[%s] flushing entry=%d in MAPPED_KERNEL mode!\n",
		       __FUNCTION__, entry);
#endif
	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
#else
		__write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(entry)));
#endif
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	enable_pgwalker(config6_flags);
	EXIT_CRITICAL(flags);
}
Example #13
/**
	@brief Refill the TLB entry

	@note
		Must be called with interrupts disabled
*/
static void refill_tbl(struct km_walk_ctx * ctx)
{
	unsigned char asid;
	unsigned long entry;
	unsigned long G_FLAG;

	/* Entry HI */
	asid = ctx->mem->asid & CPU_PAGE_FALG_ASID_MASK;
	entry = get_vpn2(ctx->current_virtual_address);
	entry |= asid;
	write_c0_entryhi(entry);

	/* Add the G_FLAG if ASID == 0, because the entry is from the kernel and shared by all processes */
	G_FLAG = (ctx->mem == &kp_get_system()->mem_ctx)? 1 : 0;

	/* Entry Low0 and Low1 */
	WRITE_LO;
	
	/* Write by type */
	mtc0_tlbw_hazard();
	tlb_write_random();
	tlbw_use_hazard();
}
Example #14
/**
 * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
 * @bus:	Pointer to bus information for current CPU.
 *		IS_ERR(bus) is checked, so no need for caller to check.
 *
 * The caller must prevent migration to another CPU, either by disabling
 * pre-emption or by running from a pinned kernel thread.
 *
 * Returns	0 on success, -errno on failure.
 */
static int mips_cdmm_setup(struct mips_cdmm_bus *bus)
{
	unsigned long cdmmbase, flags;
	int ret = 0;

	if (IS_ERR(bus))
		return PTR_ERR(bus);

	local_irq_save(flags);
	/* Don't set up bus a second time unless marked offline */
	if (bus->offline) {
		/* If CDMM region is still set up, nothing to do */
		if (bus->phys == mips_cdmm_cur_base())
			goto out;
		/*
		 * The CDMM region isn't set up as expected, so it needs
		 * reconfiguring, but then we can stop checking it.
		 */
		bus->offline = false;
	} else if (bus->phys > 1) {
		goto out;
	}

	/* If the CDMM region is already configured, inherit that setup */
	if (!bus->phys)
		bus->phys = mips_cdmm_cur_base();
	/* Otherwise, ask platform code for suggestions */
	if (!bus->phys && mips_cdmm_phys_base)
		bus->phys = mips_cdmm_phys_base();
	/* Otherwise, copy what other CPUs have done */
	if (!bus->phys)
		bus->phys = mips_cdmm_default_base;
	/* Otherwise, complain once */
	if (!bus->phys) {
		bus->phys = 1;
		/*
		 * If you hit this, either your bootloader needs to set up the
		 * CDMM on the boot CPU, or else you need to implement
		 * mips_cdmm_phys_base() for your platform (see asm/cdmm.h).
		 */
		pr_err("cdmm%u: Failed to choose a physical base\n",
		       smp_processor_id());
	}
	/* Already complained? */
	if (bus->phys == 1) {
		ret = -ENOMEM;
		goto out;
	}
	/* Record our success for other CPUs to copy */
	mips_cdmm_default_base = bus->phys;

	pr_debug("cdmm%u: Enabling CDMM region at %pa\n",
		 smp_processor_id(), &bus->phys);

	/* Enable CDMM */
	cdmmbase = read_c0_cdmmbase();
	cdmmbase &= (1ul << MIPS_CDMMBASE_ADDR_SHIFT) - 1;
	cdmmbase |= (bus->phys >> MIPS_CDMMBASE_ADDR_START)
			<< MIPS_CDMMBASE_ADDR_SHIFT;
	cdmmbase |= MIPS_CDMMBASE_EN;
	write_c0_cdmmbase(cdmmbase);
	tlbw_use_hazard();

	bus->regs = (void __iomem *)CKSEG1ADDR(bus->phys);
	bus->drbs = 1 + ((cdmmbase & MIPS_CDMMBASE_SIZE) >>
			 MIPS_CDMMBASE_SIZE_SHIFT);
	bus->drbs_reserved = !!(cdmmbase & MIPS_CDMMBASE_CI);

out:
	local_irq_restore(flags);
	return ret;
}
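The kernel-doc above requires the caller to prevent migration to another CPU. Here is a minimal hedged sketch of a caller that satisfies this requirement; the wrapper name is made up for illustration.

/* Hypothetical wrapper: pin to the current CPU while the bus is set up. */
static int example_cdmm_setup_pinned(struct mips_cdmm_bus *bus)
{
	int ret;

	preempt_disable();		/* no migration while touching CDMM state */
	ret = mips_cdmm_setup(bus);
	preempt_enable();

	return ret;
}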