Example #1
/*
 * Write back and invalidate the D-cache of the page
 */
static void __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However, this function exists to deal with
	 * potential cache aliasing, therefore the optimisation is probably not
	 * possible.
	 */
	local_irq_save(flags);
	jump_to_uncached();

	ways = current_cpu_data.dcache.ways;
	waysize = current_cpu_data.dcache.sets;
	waysize <<= current_cpu_data.dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += current_cpu_data.dcache.linesz) {
			unsigned long data;

			data = ctrl_inl(addr) & (0x1ffffc00 | SH_CACHE_VALID);
			if (data == phys) {
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				ctrl_outl(data, addr);
			}
		}

		addrstart += current_cpu_data.dcache.way_incr;
	} while (--ways);

	back_to_cached();
	local_irq_restore(flags);
}
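
A rough cost sketch (not from the kernel): the loop above reads every line in the data-cache address array, so one page flush walks the entire D-cache. Assuming an illustrative SH7705-class geometry of 4 ways, 256 entries per way, and 16-byte lines, the arithmetic mirrors the waysize computation in the function:

#include <stdio.h>

int main(void)
{
	/* Assumed geometry; the real values come from current_cpu_data. */
	unsigned long ways = 4, sets = 256, entry_shift = 4;
	unsigned long linesz = 1UL << entry_shift;	/* 16 bytes */
	unsigned long waysize = sets << entry_shift;	/* 4096 bytes */

	/* Address-array reads performed by one __flush_dcache_page(). */
	printf("reads per flush: %lu\n", ways * (waysize / linesz));
	return 0;
}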
Example #2
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;
	unsigned long flags;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		/*
		 * Associative purge: (v & 0x000007f0) selects the cache
		 * entry and 0x00000008 is the associative (A) bit, so the
		 * write only evicts a line whose stored tag matches the
		 * physical address.
		 */
		ctrl_outl((v & CACHE_PHYSADDR_MASK),
			  CACHE_OC_ADDRESS_ARRAY | (v & 0x000007f0) | 0x00000008);
	}
	back_to_cached();
	local_irq_restore(flags);
}
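
A hedged usage sketch: a hypothetical caller that fills a buffer and purges it from the D-cache before a device reads it by physical address. dma_buf and prepare_for_device are illustrative names; in a real kernel this path is normally reached through the DMA-mapping API rather than called directly:

static char dma_buf[256];

static void prepare_for_device(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(dma_buf); i++)
		dma_buf[i] = i;

	/* Write back and invalidate every cache line covering dma_buf. */
	__flush_purge_region(dma_buf, sizeof(dma_buf));
}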
Example #3
File: pmb.c  Project: AppEngine/linux-2.6
static int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry, i;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
				      SLAB_PANIC, pmb_cache_ctor);

	jump_to_uncached();

	/*
	 * Ordering is important, P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1 space.
	 */
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1UL << 31) | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}
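
For context, a minimal sketch of what each __set_pmb_entry() call is doing, assuming the usual SH-4A PMB layout in which every slot has an address-array and a data-array register selected by the entry number. PMB_ADDR, PMB_DATA, PMB_E_SHIFT, and PMB_V follow the names used in arch/sh/mm/pmb.c, but the body here is illustrative, not the original:

static void sketch_set_pmb_entry(unsigned long vpn, unsigned long ppn,
				 unsigned long flags, int pos)
{
	unsigned long offs = pos << PMB_E_SHIFT;	/* slot select */

	/* Must run from uncached (P2) space, as pmb_init() arranges. */
	ctrl_outl(vpn | PMB_V, PMB_ADDR | offs);		/* virtual half */
	ctrl_outl(ppn | flags | PMB_V, PMB_DATA | offs);	/* physical half */
}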
static int tlb_seq_show(struct seq_file *file, void *iter)
{
	unsigned int tlb_type = (unsigned int)file->private;
	unsigned long addr1, addr2, data1, data2;
	unsigned long flags;
	unsigned long mmucr;
	unsigned int nentries, entry;
	unsigned int urb;

	mmucr = __raw_readl(MMUCR);
	if ((mmucr & 0x1) == 0) {
		seq_printf(file, "address translation disabled\n");
		return 0;
	}

	if (tlb_type == TLB_TYPE_ITLB) {
		addr1 = MMU_ITLB_ADDRESS_ARRAY;
		addr2 = MMU_ITLB_ADDRESS_ARRAY2;
		data1 = MMU_ITLB_DATA_ARRAY;
		data2 = MMU_ITLB_DATA_ARRAY2;
		nentries = 4;
	} else {
		addr1 = MMU_UTLB_ADDRESS_ARRAY;
		addr2 = MMU_UTLB_ADDRESS_ARRAY2;
		data1 = MMU_UTLB_DATA_ARRAY;
		data2 = MMU_UTLB_DATA_ARRAY2;
		nentries = 64;
	}

	local_irq_save(flags);
	jump_to_uncached();

	urb = (mmucr & MMUCR_URB) >> MMUCR_URB_SHIFT;

	/* Make the "entry >= urb" test fail. */
	if (urb == 0)
		urb = MMUCR_URB_NENTRIES + 1;

	seq_printf(file, "entry:     vpn        ppn     asid  size valid wired\n");

	for (entry = 0; entry < nentries; entry++) {
		unsigned long vpn, ppn, asid, size;
		unsigned long valid;
		unsigned long val;
		const char *sz = "    ?";
		int i;

		val = __raw_readl(addr1 | (entry << MMU_TLB_ENTRY_SHIFT));
		ctrl_barrier();
		vpn = val & 0xfffffc00;
		valid = val & 0x100;

		val = __raw_readl(addr2 | (entry << MMU_TLB_ENTRY_SHIFT));
		ctrl_barrier();
		asid = val & MMU_CONTEXT_ASID_MASK;

		val = __raw_readl(data1 | (entry << MMU_TLB_ENTRY_SHIFT));
		ctrl_barrier();
		ppn = (val & 0x0ffffc00) << 4;

		val = __raw_readl(data2 | (entry << MMU_TLB_ENTRY_SHIFT));
		ctrl_barrier();
		size = (val & 0xf0) >> 4;

		for (i = 0; i < ARRAY_SIZE(tlb_sizes); i++) {
			if (tlb_sizes[i].bits == size)
				break;
		}

		if (i != ARRAY_SIZE(tlb_sizes))
			sz = tlb_sizes[i].size;

		seq_printf(file, "%2d:    0x%08lx 0x%08lx %5lu %s   %s     %s\n",
			   entry, vpn, ppn, asid,
			   sz, valid ? "V" : "-",
			   (urb <= entry) ? "W" : "-");
	}

	back_to_cached();
	local_irq_restore(flags);

	return 0;
}
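
The (unsigned int)file->private cast at the top makes sense once the registration is seen: the TLB type is passed through single_open() as the seq_file private pointer. Below is a sketch of the usual debugfs wiring; names such as tlb_debugfs_fops and tlb_debugfs_init follow the standard seq_file pattern rather than quoting the project's file:

static int tlb_debugfs_open(struct inode *inode, struct file *file)
{
	/* inode->i_private becomes file->private inside tlb_seq_show(). */
	return single_open(file, tlb_seq_show, inode->i_private);
}

static const struct file_operations tlb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= tlb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init tlb_debugfs_init(void)
{
	debugfs_create_file("itlb", S_IRUSR, NULL,
			    (void *)TLB_TYPE_ITLB, &tlb_debugfs_fops);
	debugfs_create_file("utlb", S_IRUSR, NULL,
			    (void *)TLB_TYPE_UTLB, &tlb_debugfs_fops);
	return 0;
}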
int __init __uses_jump_to_uncached detect_cpu_and_cache_system(void)
{
	unsigned long addr0, addr1, data0, data1, data2, data3;

	jump_to_uncached();
	/*
	 * Check whether the entry shadows or not. When shadowed, it's a
	 * 128-entry system; otherwise, it's a 256-entry system.
	 *
	 * With 128 entries each way spans 2KB, so the 4-way address array
	 * wraps every 8KB and offsets 0x1000 and 0x3000 alias to the same
	 * slot; with 256 entries (4KB ways) the two offsets select
	 * distinct slots.
	 */
	addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
	addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);

	/* First, write back & invalidate */
	data0 = ctrl_inl(addr0);
	ctrl_outl(data0 & ~(SH_CACHE_VALID | SH_CACHE_UPDATED), addr0);
	data1 = ctrl_inl(addr1);
	ctrl_outl(data1 & ~(SH_CACHE_VALID | SH_CACHE_UPDATED), addr1);

	/* Next, check if there's shadow or not */
	data0 = ctrl_inl(addr0);
	data0 ^= SH_CACHE_VALID;
	ctrl_outl(data0, addr0);
	data1 = ctrl_inl(addr1);
	data2 = data1 ^ SH_CACHE_VALID;
	ctrl_outl(data2, addr1);
	data3 = ctrl_inl(addr0);

	/* Lastly, invalidate them. */
	ctrl_outl(data0 & ~SH_CACHE_VALID, addr0);
	ctrl_outl(data2 & ~SH_CACHE_VALID, addr1);

	back_to_cached();

	boot_cpu_data.dcache.ways		= 4;
	boot_cpu_data.dcache.entry_shift	= 4;
	boot_cpu_data.dcache.linesz		= L1_CACHE_BYTES;
	boot_cpu_data.dcache.flags		= 0;

	/*
	 * The 7709A/7729 has a 16K cache (256-entry), while the 7702 has
	 * only 2K (direct-mapped). The 7702 is not supported (yet).
	 */
	if (data0 == data1 && data2 == data3) {	/* Shadow */
		boot_cpu_data.dcache.way_incr	= (1 << 11);
		boot_cpu_data.dcache.entry_mask	= 0x7f0;
		boot_cpu_data.dcache.sets	= 128;
		boot_cpu_data.type = CPU_SH7708;

		boot_cpu_data.flags |= CPU_HAS_MMU_PAGE_ASSOC;
	} else {				/* 7709A or 7729  */
		boot_cpu_data.dcache.way_incr	= (1 << 12);
		boot_cpu_data.dcache.entry_mask	= 0xff0;
		boot_cpu_data.dcache.sets	= 256;
		boot_cpu_data.type = CPU_SH7729;

#if defined(CONFIG_CPU_SUBTYPE_SH7706)
		boot_cpu_data.type = CPU_SH7706;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
		boot_cpu_data.type = CPU_SH7710;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7712)
		boot_cpu_data.type = CPU_SH7712;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7720)
		boot_cpu_data.type = CPU_SH7720;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
		boot_cpu_data.type = CPU_SH7705;

#if defined(CONFIG_SH7705_CACHE_32KB)
		boot_cpu_data.dcache.way_incr	= (1 << 13);
		boot_cpu_data.dcache.entry_mask	= 0x1ff0;
		boot_cpu_data.dcache.sets	= 512;
		ctrl_outl(CCR_CACHE_32KB, CCR3);
#else
		ctrl_outl(CCR_CACHE_16KB, CCR3);
#endif
#endif
	}

	/*
	 * SH-3 doesn't have separate caches
	 */
	boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
	boot_cpu_data.icache = boot_cpu_data.dcache;

	return 0;
}
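
The probed geometry is easy to sanity-check: total cache size is ways * sets * line size, and way_incr/entry_mask follow from the way span. A small host-side check using the values assigned above (entry_shift == 4 implies 16-byte lines):

#include <stdio.h>

int main(void)
{
	unsigned long ways = 4, linesz = 16;

	/* Shadowed: 4 * 128 * 16 = 8KB, way span 2KB
	 * (way_incr 1 << 11, entry_mask 0x7f0). */
	printf("128-entry: %lu bytes\n", ways * 128 * linesz);

	/* 7709A/7729: 4 * 256 * 16 = 16KB, way span 4KB
	 * (way_incr 1 << 12, entry_mask 0xff0). */
	printf("256-entry: %lu bytes\n", ways * 256 * linesz);
	return 0;
}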