Example No. 1
int arch_vcpu_regs_init(struct vmm_vcpu *vcpu)
{
	vmm_memset(mips_uregs(vcpu), 0, sizeof(arch_regs_t));

	if (!vcpu->is_normal) {
		/* For orphan vcpu */
		mips_uregs(vcpu)->cp0_epc = vcpu->start_pc;
		mips_uregs(vcpu)->regs[SP_IDX] = vcpu->start_sp;
		mips_uregs(vcpu)->regs[S8_IDX] = mips_uregs(vcpu)->regs[SP_IDX];
		mips_uregs(vcpu)->cp0_status = read_c0_status();
		mips_uregs(vcpu)->cp0_entryhi = read_c0_entryhi();
	} else {
		/* For normal vcpu running guests */
		mips_sregs(vcpu)->cp0_regs[CP0_CAUSE_IDX] = 0x400;
		mips_sregs(vcpu)->cp0_regs[CP0_STATUS_IDX] = 0x40004;
		mips_uregs(vcpu)->cp0_status = read_c0_status() | (0x01UL << CP0_STATUS_UM_SHIFT);
		mips_uregs(vcpu)->cp0_entryhi = read_c0_entryhi();
		mips_uregs(vcpu)->cp0_entryhi &= ASID_MASK;
		mips_uregs(vcpu)->cp0_entryhi |= (0x2 << ASID_SHIFT);
		mips_uregs(vcpu)->cp0_epc = vcpu->start_pc;

		/* All guest run from 0 and fault */
		mips_sregs(vcpu)->cp0_regs[CP0_EPC_IDX] = vcpu->start_pc;
		/* Give guest the same CPU cap as we have */
		mips_sregs(vcpu)->cp0_regs[CP0_PRID_IDX] = read_c0_prid();
	}

	return VMM_OK;
}
Example No. 2
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	unsigned long entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(CKSEG0);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < NTLB_ENTRIES) {
		write_c0_index(entry);
		tlb_write_indexed();
		entry++;
	}
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
Example No. 3
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
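A note on the pattern above: these flush loops park each invalidated entry at a distinct "impossible" EntryHi so that no two TLB entries alias (duplicate entries can raise a machine check on some cores). A minimal sketch of the UNIQUE_ENTRYHI() helper used here, assuming Linux-style CKSEG0 and PAGE_SHIFT definitions:

/* Each TLB index gets its own EntryHi in unmapped CKSEG0 space; the
 * shift by (PAGE_SHIFT + 1) accounts for the even/odd double page
 * covered by every entry. */
#define UNIQUE_ENTRYHI(idx)	(CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))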
Example No. 4
static int
tx4939_proc_show_cp0(char *sysbuf, char **start, off_t off,
		     int count, int *eof, void *data)
{
	int len = 0;
	len += sprintf(sysbuf + len, "INDEX   :0x%08x\n", read_c0_index());
	len += sprintf(sysbuf + len, "ENTRYLO0:0x%08lx\n", read_c0_entrylo0());
	len += sprintf(sysbuf + len, "ENTRYLO1:0x%08lx\n", read_c0_entrylo1());
	len += sprintf(sysbuf + len, "CONTEXT :0x%08lx\n", read_c0_context());
	len += sprintf(sysbuf + len, "PAGEMASK:0x%08x\n", read_c0_pagemask());
	len += sprintf(sysbuf + len, "WIRED   :0x%08x\n", read_c0_wired());
	len += sprintf(sysbuf + len, "COUNT   :0x%08x\n", read_c0_count());
	len += sprintf(sysbuf + len, "ENTRYHI :0x%08lx\n", read_c0_entryhi());
	len += sprintf(sysbuf + len, "COMPARE :0x%08x\n", read_c0_compare());
	len += sprintf(sysbuf + len, "STATUS  :0x%08x\n", read_c0_status());
	len += sprintf(sysbuf + len, "CAUSE   :0x%08x\n", read_c0_cause());
	len += sprintf(sysbuf + len, "PRId    :0x%08x\n", read_c0_prid());
	len += sprintf(sysbuf + len, "CONFIG  :0x%08x\n", read_c0_config());
	len += sprintf(sysbuf + len, "XCONTEXT:0x%08lx\n", read_c0_xcontext());
	len += sprintf(sysbuf + len, "TagLo   :0x%08x\n", read_c0_taglo());
	len += sprintf(sysbuf + len, "TagHi   :0x%08x\n", read_c0_taghi());
	len += sprintf(sysbuf + len, "ErrorEPC:0x%08lx\n", read_c0_errorepc());
	*eof = 1;
	return len;
}
Example No. 5
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid;
	signed long idx;

	if (!cpu_context(cpu, vma->vm_mm))
		return;

	newpid = cpu_asid(cpu, vma->vm_mm);
	page &= PAGE_MASK;
	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	write_c0_vaddr(page);
	write_c0_entryhi(newpid);
	tlb_probe();
	idx = read_c0_tlbset();
	if (idx < 0)
		goto finish;

	write_c0_entrylo(0);
	write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
	tlb_write();

finish:
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}
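Example No. 5 (and the range flushes later in this section) rely on the MIPS probe convention: a probe that misses sets bit 31 (the P bit) of the index/set register, so reading it back as a signed value yields a negative number. A small sketch of that check; tlb_probe_hit() is an illustrative name, not from the examples:

/* Sketch: tlb_probe() sets Index bit 31 (P) on a miss, so a signed
 * read of Index is negative when no entry matched EntryHi. */
static inline int tlb_probe_hit(void)
{
	tlb_probe();
	return read_c0_index() >= 0;	/* negative => no match */
}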
Example No. 6
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int pid;

	/*
	 * Handle a debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_vaddr(address);
	write_c0_entryhi(pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	pmdp = pmd_offset(pgdp, address);
	ptep = pte_offset_map(pmdp, address);
	tlb_probe();

	write_c0_entrylo(pte_val(*ptep++) >> 6);
	tlb_write();

	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
Example No. 7
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = (read_c0_entryhi() & ASID_MASK);
	write_c0_entryhi(XKPHYS);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	BARRIER;

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(XKPHYS + entry * 0x2000);
		write_c0_index(entry);
		BARRIER;
		tlb_write_indexed();
		BARRIER;
		entry++;
	}
	BARRIER;
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
Example No. 8
u32 do_general_exception(arch_regs_t *uregs)
{
    u32 cp0_cause = read_c0_cause();
    u32 cp0_status = read_c0_status();
    mips32_entryhi_t ehi;
    u32 victim_asid;
    u32 victim_inst;
    struct vmm_vcpu *c_vcpu;
    u8 delay_slot_exception = IS_BD_SET(cp0_cause);

    ehi._entryhi = read_c0_entryhi();
    victim_asid = ehi._s_entryhi.asid >> ASID_SHIFT;
    c_vcpu = vmm_scheduler_current_vcpu();

    /*
     * When the exception happens in a delay slot, we need to emulate
     * the corresponding branch instruction first. If it is one of the
     * "likely" instructions, we don't need to emulate the faulting
     * instruction, since "likely" instructions don't allow the delay
     * slot to be executed if the branch is not taken.
     */
    if (delay_slot_exception) {
        victim_inst = *((u32 *)(uregs->cp0_epc + 4));

        /*
         * If this function returns zero, the branch instruction was a
         * "likely" instruction and the branch wasn't taken. So don't
         * execute the delay slot, just return. The correct EPC to return
         * to will be programmed under our feet.
         */
        if (!cpu_vcpu_emulate_branch_and_jump_inst(c_vcpu, *((u32 *)uregs->cp0_epc), uregs)) {
            return VMM_OK;
        }
    } else {
        victim_inst = *((u32 *)uregs->cp0_epc);
    }

    switch (EXCEPTION_CAUSE(cp0_cause)) {
    case EXEC_CODE_COPU:
        cpu_vcpu_emulate_cop_inst(c_vcpu, victim_inst, uregs);

        if (!delay_slot_exception)
            uregs->cp0_epc += 4;

        break;

    case EXEC_CODE_TLBL:
        if (CPU_IN_USER_MODE(cp0_status) && is_vmm_asid(ehi._s_entryhi.asid)) {
            ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
            write_c0_entryhi(ehi._entryhi);
            vmm_panic("CPU is in user mode and ASID is pointing to VMM!!\n");
        }
        break;
    }

    return VMM_OK;
}
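The delay-slot handling above hinges on the Cause.BD bit: when an exception is taken in a branch delay slot, EPC points at the branch, not at the instruction that faulted. A hedged sketch of that address computation; CAUSE_BD is an assumed local define for bit 31 of Cause:

#include <stdint.h>

#define CAUSE_BD	(1u << 31)	/* Cause register, Branch Delay bit */

/* Return the address of the instruction that actually faulted. */
static inline uint32_t faulting_pc(uint32_t cp0_epc, uint32_t cp0_cause)
{
	/* With BD set, EPC holds the branch; the faulting instruction
	 * sits in the delay slot at EPC + 4. */
	return (cp0_cause & CAUSE_BD) ? cp0_epc + 4 : cp0_epc;
}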
Example No. 9
static void print_registers(struct pt_regs *regs)
{
	  	printk("Panic status %p, cause %p, epc %p,bd %p, entry hi %x\n", 
	  		regs->cp0_status,
	  		regs->cp0_cause,
	  		regs->cp0_epc,
	  		read_c0_badvaddr(),
	 		read_c0_entryhi());
}
void
dump_tlb(int first, int last)
{
	int	i;
	unsigned int asid;
	unsigned long entryhi, entrylo0;

	asid = read_c0_entryhi() & 0xfc0;

	for (i = first; i <= last; i++) {
		write_c0_index(i<<8);
		__asm__ __volatile__(
			".set\tnoreorder\n\t"
			"tlbr\n\t"
			"nop\n\t"
			".set\treorder");
		entryhi  = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();

		/* Unused entries have a virtual address of KSEG0.  */
		if ((entryhi & 0xffffe000) != 0x80000000
		    && (entryhi & 0xfc0) == asid) {
			/*
			 * Only print entries in use
			 */
			printk("Index: %2d ", i);

			printk("va=%08lx asid=%08lx"
			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
			       (entryhi & 0xffffe000),
			       entryhi & 0xfc0,
			       entrylo0 & PAGE_MASK,
			       (entrylo0 & (1 << 11)) ? 1 : 0,
			       (entrylo0 & (1 << 10)) ? 1 : 0,
			       (entrylo0 & (1 << 9)) ? 1 : 0,
			       (entrylo0 & (1 << 8)) ? 1 : 0);
		}
	}
	printk("\n");

	write_c0_entryhi(asid);
}
Example No. 11
void show_tlb(void)
{
#define ASID_MASK 0xFF

        unsigned long flags;
        unsigned int old_ctx;
	unsigned int entry;
	unsigned int entrylo0, entrylo1, entryhi;
	unsigned int pagemask;

	local_irq_save(flags);

	/* Save old context */
	old_ctx = (read_c0_entryhi() & 0xff);

	printk("TLB content:\n");
	entry = 0;
	while (entry < 32) {
		write_c0_index(entry);
		BARRIER;
		tlb_read();
		BARRIER;
		entryhi = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();
		pagemask = read_c0_pagemask();
		printk("%02d: ASID=%02d%s VA=0x%08x ", entry, entryhi & ASID_MASK, (entrylo0 & entrylo1 & 1) ? "(G)" : "   ", entryhi & ~ASID_MASK);
		printk("PA0=0x%08x C0=%x %s%s%s\n", (entrylo0>>6)<<12, (entrylo0>>3) & 7, (entrylo0 & 4) ? "Dirty " : "", (entrylo0 & 2) ? "Valid " : "Invalid ", (entrylo0 & 1) ? "Global" : "");
		printk("\t\t\t     PA1=0x%08x C1=%x %s%s%s\n", (entrylo1>>6)<<12, (entrylo1>>3) & 7, (entrylo1 & 4) ? "Dirty " : "", (entrylo1 & 2) ? "Valid " : "Invalid ", (entrylo1 & 1) ? "Global" : "");

		printk("\t\tpagemask=0x%08x", pagemask);
		printk("\tentryhi=0x%08x\n", entryhi);
		printk("\t\tentrylo0=0x%08x", entrylo0);
		printk("\tentrylo1=0x%08x\n", entrylo1);

		entry++;
	}
	BARRIER;
	write_c0_entryhi(old_ctx);

	local_irq_restore(flags);
}
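show_tlb() above decodes the MIPS32 EntryLo layout inline; here is the same field breakdown pulled out as a self-contained sketch (the struct and function names are illustrative, not from the example):

#include <stdint.h>

/* MIPS32 EntryLo fields as printed by show_tlb(): PFN in bits 31..6,
 * cache attribute C in bits 5..3, then Dirty, Valid, Global. */
struct entrylo_fields {
	uint32_t pa;		/* physical address of the page */
	uint8_t c, d, v, g;
};

static struct entrylo_fields decode_entrylo(uint32_t lo)
{
	struct entrylo_fields f;

	f.pa = (lo >> 6) << 12;	/* PFN back to a physical address */
	f.c  = (lo >> 3) & 7;	/* cache coherency attribute */
	f.d  = (lo >> 2) & 1;	/* dirty (write-enable) */
	f.v  = (lo >> 1) & 1;	/* valid */
	f.g  = lo & 1;		/* global: ASID ignored on match */
	return f;
}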
Example No. 12
static void dump_tlb(int first, int last)
{
	int	i;
	unsigned int asid;
	unsigned long entryhi, entrylo0;

	asid = read_c0_entryhi() & ASID_MASK;

	for (i = first; i <= last; i++) {
		write_c0_index(i<<8);
		__asm__ __volatile__(
			".set\tnoreorder\n\t"
			"tlbr\n\t"
			"nop\n\t"
			".set\treorder");
		entryhi	 = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();

		/* Unused entries have a virtual address of KSEG0.  */
		if ((entryhi & PAGE_MASK) != KSEG0 &&
		    (entrylo0 & R3K_ENTRYLO_G ||
		     (entryhi & ASID_MASK) == asid)) {
			/*
			 * Only print entries in use
			 */
			printk("Index: %2d ", i);

			printk("va=%08lx asid=%08lx"
			       "  [pa=%06lx n=%d d=%d v=%d g=%d]",
			       entryhi & PAGE_MASK,
			       entryhi & ASID_MASK,
			       entrylo0 & PAGE_MASK,
			       (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
			       (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
			       (entrylo0 & R3K_ENTRYLO_V) ? 1 : 0,
			       (entrylo0 & R3K_ENTRYLO_G) ? 1 : 0);
		}
	}
	printk("\n");

	write_c0_entryhi(asid);
}
Example No. 13
static void ipu_add_wired_entry(unsigned long pid,
				unsigned long entrylo0, unsigned long entrylo1,
				unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;
	struct task_struct *g, *p;

	/* We will lock a 4MB page-size entry to map the 4MB reserved IPU memory */
	wired = read_c0_wired();
	if (wired)
		return;

	do_each_thread(g, p) {
		if (p->pid == pid)
			g_asid = p->mm->context[0];
	} while_each_thread(g, p);

	local_irq_save(flags);

	entrylo0 = entrylo0 >> 6;   /* PFN */
	entrylo0 |= 0x6 | (0 << 3); /* Write-through cacheable, dirty, valid */

	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & 0xff;
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	entryhi &= ~0xff;	/* new add, 20070906 */
	entryhi |= g_asid;	/* new add, 20070906 */
//	entryhi |= old_ctx;	/* new add, 20070906 */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#if defined(DEBUG)
	printk("\nold_ctx=%03d\n", old_ctx);

	show_tlb();
#endif
}
Example No. 14
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;
		unsigned long config6_flags;

		ENTER_CRITICAL(flags);
		disable_pgwalker(config6_flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
#else
				__write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(idx)));
#endif
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		enable_pgwalker(config6_flags);
		EXIT_CRITICAL(flags);
	}
}
Example No. 15
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
Example No. 16
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
				unsigned long end)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & ASID_MASK),
		       start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize / 2) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(XKPHYS+idx*0x2000);
				BARRIER;
				tlb_write_indexed();
				BARRIER;
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
Example No. 17
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();
	unsigned long flags;
	int oldpid, newpid, size;

	if (!cpu_context(cpu, mm))
		return;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	local_irq_save(flags);

	if (size > TFP_TLB_SIZE / 2) {
		drop_mmu_context(mm, cpu);
		goto out_restore;
	}

	oldpid = read_c0_entryhi();
	newpid = cpu_asid(cpu, mm);

	write_c0_entrylo(0);

	start &= PAGE_MASK;
	end += (PAGE_SIZE - 1);
	end &= PAGE_MASK;
	while (start < end) {
		signed long idx;

		write_c0_vaddr(start);
		write_c0_entryhi(start);
		start += PAGE_SIZE;
		tlb_probe();
		idx = read_c0_tlbset();
		if (idx < 0)
			continue;

		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		tlb_write();
	}
	write_c0_entryhi(oldpid);

out_restore:
	local_irq_restore(flags);
}
Example No. 18
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			htw_stop();
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
			htw_start();
		} else {
			drop_mmu_context(mm, cpu);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}
Example No. 19
static void refill_tbl_to(struct km_walk_ctx * ctx, unsigned int asid, int write, int pos)
{	
	unsigned long entry, oldl1, oldl2;
	unsigned long G_FLAG;
	int idx;
	int oldpid;

	/* Sanity check on ASID consistency: the current ASID must equal the
	 * given ASID; kernel processes do not obey this rule. */
	oldpid = read_c0_entryhi();

	/* Entry HI */	
	asid = asid & CPU_PAGE_FALG_ASID_MASK;
	entry = get_vpn2(ctx->current_virtual_address);
	entry |= asid;
	write_c0_entryhi(entry);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	oldl1 = read_c0_entrylo0();
	oldl2 = read_c0_entrylo1();
	/* Add the G_FLAG if ASID == 0, because the entry is from kernel and shared by all process */
	G_FLAG = (ctx->mem == &kp_get_system()->mem_ctx)? 1 : 0;

	/* Entry Low0 and Low1 */
	WRITE_LO;

	/* Write by type: use a random write when the probe missed (the entry
	 * may have been flushed after an R/W flag change) */
	mtc0_tlbw_hazard();
	if (unlikely(idx < 0))
		tlb_write_random();
	else {
		if (write == 2)
			printk("Write is forced index for %lx, pos %d, idx %d, asid %u, %lx %lx.\n",
			       ctx->current_virtual_address, pos, idx, asid, oldl1, oldl2);

		tlb_write_indexed();
	}
	tlbw_use_hazard();

	/* Sanity check on ASID consistency: the current ASID must equal the
	 * given ASID; kernel processes do not obey this rule. */
	if ((oldpid & 0xff) != (asid & 0xff) && asid != 0 /* kernel asid */)
		printk("Why old = %x, asid = %x. ", oldpid, asid);
}
Example No. 20
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		EXIT_CRITICAL(flags);
	}
}
Example No. 21
static void ipu_add_wired_entry(unsigned long pid,
				unsigned long entrylo0, unsigned long entrylo1,
				unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (p->pid == pid)
			g_asid = p->mm->context[0];
	} while_each_thread(g, p);

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & 0xff;
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	entryhi &= ~0xff;	/* new add, 20070906 */
	entryhi |= g_asid;	/* new add, 20070906 */
//	entryhi |= old_ctx;	/* new add, 20070906 */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#if defined(DEBUG)
	printk("\nold_ctx=%03d\n", old_ctx);

	show_tlb();
#endif
}
void handle_tlb_refill(struct trapframe *tf) {
	unsigned long	entryhi=read_c0_entryhi();
	unsigned long	vpn=TLB_VPN(entryhi);
	unsigned long	pfn=pt[ENTRYHI_ASID(entryhi)][vpn];
	if(pfn) {
		unsigned long entrylo=( TLB_PFN(pfn, entryhi) | TLB_COHERENT | TLB_VALID | TLB_DIRTY | TLB_GLOBAL)^TLB_GLOBAL;
		write_c0_entrylo0(entrylo);
		write_c0_entrylo1(TLB_ELO0TO1(entrylo));
		tlbwr();
	} else {
		kprintf("Fatal error, invalied page: %x with ASID= %d, rebooting...\n",vpn, ENTRYHI_ASID(entryhi));
		unsigned long* reg=(unsigned long*)tf;
		write_c0_status((read_c0_status()|ST_KSU)^ST_KSU);
		reg[ORD_STATUS]=(read_c0_status()|ST_KSU)^ST_KSU;
		reg[ORD_EPC]=__reset;
	}
	return;
}
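handle_tlb_refill() above programs both EntryLo registers from a single lookup because each MIPS TLB entry maps an even/odd pair of pages under one VPN2 tag. A minimal sketch of that pairing, assuming the MIPS32 EntryLo layout (PFN in bits 31..6); fill_entrylo_pair() is an illustrative name:

/* Sketch: one TLB entry covers two consecutive pages. EntryLo0 maps
 * the even page of the VPN2 pair, EntryLo1 the odd page. */
static inline void fill_entrylo_pair(unsigned long pfn_even,
				     unsigned long pfn_odd,
				     unsigned long flags)
{
	write_c0_entrylo0((pfn_even << 6) | flags);
	write_c0_entrylo1((pfn_odd << 6) | flags);
}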
PUBLIC
void
Jdb_kern_info_cpu::dump_cp0_regs()
{
  Mword val;

  DUMP_CP0("EBase", read_c0_ebase(), val);
  DUMP_INT("Ebase.CPUNum", (val & 0x3ff));
  DUMP_CP0("EntryHi", read_c0_entryhi(), val);
  DUMP_HEX("EntryHi.ASID", (val & 0xff));
  DUMP_CP0("EPC", read_c0_epc(), val);
  DUMP_CP0("Status", read_c0_status(), val);
  DUMP_CP0("Cause", read_c0_cause(), val);
  DUMP_CP0("PRId", read_c0_prid(), val);
  DUMP_CP0("HWREna", read_c0_hwrena(), val);
  DUMP_CP0("Config", read_c0_config(), val);
  if (val & MIPS_CONF_M) {
    DUMP_CP0("Config1", read_c0_config1(), val);
    if (val & MIPS_CONF_M) {
      DUMP_CP0("Config2", read_c0_config2(), val);
      if (val & MIPS_CONF_M) {
        DUMP_CP0("Config3", read_c0_config3(), val);
        if (val & MIPS_CONF3_ULRI)
          DUMP_CP0("UserLocal", read_c0_userlocal(), val);
      }
    }
  }

  if (cpu_has_vz)
    DUMP_CP0("GuestCtl0", read_c0_guestctl0(), val);
  if (cpu_has_guestctl0ext)
    DUMP_CP0("GuestCtl0Ext", read_c0_guestctl0ext(), val);
  if (cpu_has_vz)
    DUMP_CP0("GTOffset", read_c0_gtoffset(), val);
  if (cpu_has_guestctl1) {
    DUMP_CP0("GuestCtl1", read_c0_guestctl1(), val);
    DUMP_HEX("GuestCtl1.ID", (val & GUESTCTL1_ID));
  }
  if (cpu_has_guestctl2) {
    DUMP_CP0("GuestCtl2", read_c0_guestctl2(), val);
    DUMP_HEX("GuestCtl2.VIP", (val & GUESTCTL2_VIP));
  }
}
Example No. 24
/**
	@brief Flush memory range

	If the memory range is too big, we flush all entries with this ASID
*/
void local_flush_tlb_range(unsigned int asid, unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	if (size <= current_cpu_data.tlbsize / 2)
	{
		int oldpid = read_c0_entryhi();
		int newpid = asid;

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		while (start < end) {
			int idx;

			write_c0_entryhi(start | newpid);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(oldpid);
	} 
	else
		local_flush_asid(asid);

	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
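The size arithmetic shared by all these range flushes is worth spelling out: the byte range is first rounded up to whole pages, then halved (rounding up) because each probe covers a double-page VPN2 slot. A small sketch of the same computation, under the same PAGE_SIZE/PAGE_SHIFT assumptions:

/* Sketch: number of TLB probes a range flush needs, given that each
 * entry spans two pages (PAGE_SIZE << 1 per VPN2 slot). For example,
 * a 5-page range costs 3 probes. */
static unsigned long probes_for_range(unsigned long start, unsigned long end)
{
	unsigned long pages = (end - start + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return (pages + 1) >> 1;	/* round up to double-page slots */
}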
Example No. 25
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                           unsigned long end)
{
	int cpu = smp_processor_id();
	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]",
		       (mm->context & ASID_MASK), start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= NTLB_ENTRIES_HALF) {
			int oldpid = (read_c0_entryhi() & ASID_MASK);
			int newpid = (cpu_context(smp_processor_id(), mm)
				      & ASID_MASK);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
Example No. 26
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_context(cpu, mm) & ASID_MASK;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
Example No. 27
void
dump_cp0(char *key)
{
	if (key == NULL)
		key = "";

	print_cp0(key, 0, "INDEX   ", read_c0_index());
	print_cp0(key, 2, "ENTRYLO0", read_c0_entrylo0());
	print_cp0(key, 3, "ENTRYLO1", read_c0_entrylo1());
	print_cp0(key, 4, "CONTEXT ", read_c0_context());
	print_cp0(key, 5, "PAGEMASK", read_c0_pagemask());
	print_cp0(key, 6, "WIRED   ", read_c0_wired());
	//print_cp0(key, 8, "BADVADDR",  read_c0_badvaddr());
	print_cp0(key, 9, "COUNT   ", read_c0_count());
	print_cp0(key, 10, "ENTRYHI ", read_c0_entryhi());
	print_cp0(key, 11, "COMPARE ", read_c0_compare());
	print_cp0(key, 12, "STATUS  ", read_c0_status());
	print_cp0(key, 13, "CAUSE   ", read_c0_cause() & 0xffff87ff);
	print_cp0(key, 16, "CONFIG  ", read_c0_config());
	return;
}
void
dump_tlb_addr(unsigned long addr)
{
	unsigned long flags, oldpid;
	int index;

	local_irq_save(flags);
	oldpid = read_c0_entryhi() & 0xff;
	write_c0_entryhi((addr & PAGE_MASK) | oldpid);
	tlb_probe();
	index = read_c0_index();
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);

	if (index < 0) {
		printk("No entry for address 0x%08lx in TLB\n", addr);
		return;
	}

	printk("Entry %d maps address 0x%08lx\n", index, addr);
	dump_tlb(index, index);
}
Example No. 29
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo(0);

	for (entry = 0; entry < TFP_TLB_SIZE; entry++) {
		write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT);
		write_c0_vaddr(entry << PAGE_SHIFT);
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		mtc0_tlbw_hazard();
		tlb_write();
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
Example No. 30
void local_flush_tlb_all(void)
{
	unsigned long flags, config6_flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	disable_pgwalker(config6_flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

#if defined(CONFIG_MAPPED_KERNEL)
	if (!entry)
		printk("[%s] flushing entry=%d in MAPPED_KERNEL mode!\n",
		       __FUNCTION__, entry);
#endif
	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
#ifndef CONFIG_NLM_VMIPS
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
#else
		__write_64bit_c0_register($10, 0, (UNIQUE_VMIPS_ENTRYHI(entry)));
#endif
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	enable_pgwalker(config6_flags);
	EXIT_CRITICAL(flags);
}