Example #1
void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_P2();

	cache_wback_all();
	back_to_P1();
	local_irq_restore(flags);
}
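
The jump_to_P2()/back_to_P1() bracket matters here: on SH-3, the cache control registers and address arrays must be manipulated while the CPU fetches instructions uncached. The P1 segment (0x80000000) and the P2 segment (0xa0000000) both map physical memory one-to-one, but only P2 bypasses the cache. Below is a minimal sketch of the segment arithmetic involved, assuming the standard SH-3 fixed address map; the helper name is invented for illustration.

#include <stdio.h>

#define P1SEG 0x80000000UL	/* cached, unmapped */
#define P2SEG 0xa0000000UL	/* uncached, unmapped */

/* Hypothetical helper: translate a P1 (cached) address into its
 * uncached P2 alias by swapping the segment bits. */
static unsigned long p1_to_p2(unsigned long p1addr)
{
	return (p1addr & 0x1fffffffUL) | P2SEG;
}

int main(void)
{
	/* e.g. 0x8c001000 in P1 aliases 0xac001000 in P2 */
	printf("%#lx\n", p1_to_p2(0x8c001000UL));
	return 0;
}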
Example #2
/*
 * Write back and invalidate the D-cache entries for the page
 */
static void __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However, this function exists to deal with
	 * potential cache aliasing, therefore the optimisation is probably not
	 * possible.
	 */
	local_irq_save(flags);
	jump_to_P2();

	ways = cpu_data->dcache.ways;
	waysize = cpu_data->dcache.sets;
	waysize <<= cpu_data->dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += cpu_data->dcache.linesz) {
			unsigned long data;

			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
			if (data == phys) {
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				ctrl_outl(data, addr);
			}
		}

		addrstart += cpu_data->dcache.way_incr;
	} while (--ways);

	back_to_P1();
	local_irq_restore(flags);
}
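
Each probe address in the loop above is simply the array base, plus way * way_incr, plus the entry index shifted left by entry_shift. The short standalone sketch below reproduces that composition with the SH7709A geometry established in Example #7; the 0xf0000000 array base is an assumption taken from the SH-3 memory map.

#include <stdio.h>

/* Geometry from Example #7 (SH7709A): 4 ways, 256 sets,
 * 16-byte lines, one way every 1 << 12 bytes of array space. */
#define OC_ADDRESS_ARRAY 0xf0000000UL	/* assumed array base */
#define WAY_INCR	 (1UL << 12)
#define ENTRY_SHIFT	 4

int main(void)
{
	unsigned long way, entry;

	/* Print the first two address-array slots of each way to show
	 * how __flush_dcache_page() walks them. */
	for (way = 0; way < 4; way++)
		for (entry = 0; entry < 2; entry++)
			printf("way %lu entry %lu -> %#lx\n", way, entry,
			       OC_ADDRESS_ARRAY + way * WAY_INCR +
			       (entry << ENTRY_SHIFT));
	return 0;
}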
Example #3
static void __init
detect_cpu_and_cache_system(void)
{
	unsigned long addr0, addr1, data0, data1, data2, data3;

	jump_to_P2();
	/*
	 * Check whether the entry shadows or not.
	 * When shadowed, it's a 128-entry system;
	 * otherwise, it's a 256-entry system.
	 */
	addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
	addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);

	/* First, write back & invalidate */
	data0  = ctrl_inl(addr0);
	ctrl_outl(data0&~(CACHE_VALID|CACHE_UPDATED), addr0);
	data1  = ctrl_inl(addr1);
	ctrl_outl(data1&~(CACHE_VALID|CACHE_UPDATED), addr1);

	/* Next, check if there's shadow or not */
	data0 = ctrl_inl(addr0);
	data0 ^= CACHE_VALID;
	ctrl_outl(data0, addr0);
	data1 = ctrl_inl(addr1);
	data2 = data1 ^ CACHE_VALID;
	ctrl_outl(data2, addr1);
	data3 = ctrl_inl(addr0);

	/* Lastly, invalidate them. */
	ctrl_outl(data0&~CACHE_VALID, addr0);
	ctrl_outl(data2&~CACHE_VALID, addr1);
	back_to_P1();

	if (data0 == data1 && data2 == data3) {	/* Shadow */
		cache_system_info.way_shift = 11;
		cache_system_info.entry_mask = 0x7f0;
		cache_system_info.num_entries = 128;
		cpu_data->type = CPU_SH7708;
	} else {				/* 7709A or 7729  */
		cache_system_info.way_shift = 12;
		cache_system_info.entry_mask = 0xff0;
		cache_system_info.num_entries = 256;
		cpu_data->type = CPU_SH7729;
	}
}
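
The shadow probe works because the address array decodes only entry_mask plus two way-select bits starting at way_shift. On a 128-entry part (way_shift 11), the offsets 1 << 12 and 3 << 12 differ only in bit 13, which is not decoded, so both writes land on the same physical entry and the second valid-bit toggle shows through the first address, giving data0 == data1 && data2 == data3. A sketch of that decode, with the masks taken from the two branches above:

#include <stdio.h>

/* Only entry_mask plus two way-select bits above it are decoded;
 * any higher address bits shadow. */
static unsigned long decoded(unsigned long off,
			     unsigned long entry_mask, int way_shift)
{
	return off & (entry_mask | (3UL << way_shift));
}

int main(void)
{
	unsigned long a = 1UL << 12, b = 3UL << 12;

	/* 128-entry system (way_shift 11): both probes collide. */
	printf("128-entry: %#lx vs %#lx\n",
	       decoded(a, 0x7f0, 11), decoded(b, 0x7f0, 11));
	/* 256-entry system (way_shift 12): distinct slots. */
	printf("256-entry: %#lx vs %#lx\n",
	       decoded(a, 0xff0, 12), decoded(b, 0xff0, 12));
	return 0;
}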
Example #4
int cache_control(unsigned int cmd)
{
	unsigned long ccr;

	jump_to_P2();
	ccr = inl(CCR);

	if (ccr & CCR_CACHE_ENABLE)
		cache_wback_all();

	if (cmd == CACHE_DISABLE)
		outl(CCR_CACHE_STOP, CCR);
	else
		outl(CCR_CACHE_INIT, CCR);
	back_to_P1();

	return 0;
}
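
Note the ordering: dirty lines are written back while the cache is still enabled, and only then is CCR rewritten, all with the CPU running from P2. Because anything other than CACHE_DISABLE writes CCR_CACHE_INIT, "enable" here presumably means reinitialise from an empty cache rather than resume with the old contents.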
Example #5
static inline void cache_wback_all(void)
{
	unsigned long addr, data, i, j;

	jump_to_P2();
	for (i = 0; i < CACHE_OC_NUM_ENTRIES; i++){
		for (j = 0; j < CACHE_OC_NUM_WAYS; j++) {
			addr = CACHE_OC_ADDRESS_ARRAY | (j << CACHE_OC_WAY_SHIFT)
				| (i << CACHE_OC_ENTRY_SHIFT);
			data = inl(addr);
			if (data & CACHE_UPDATED) {
				data &= ~CACHE_UPDATED;
				outl(data, addr);
			}
		}
	}
	back_to_P1();
}
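
Write-back without invalidation is done purely through the tag flag bits: storing the entry back with U (updated/dirty) cleared but V (valid) still set flushes the line to memory and leaves it resident, whereas Example #2 clears both bits to also invalidate. A tiny sketch of the two flag operations, assuming the usual SH-3 encodings V = 1 and U = 2:

#include <stdio.h>

#define CACHE_VALID   1UL	/* assumed V bit */
#define CACHE_UPDATED 2UL	/* assumed U (dirty) bit */

int main(void)
{
	/* A dirty, valid tag as it might be read from the array. */
	unsigned long data = 0x0c001000UL | CACHE_VALID | CACHE_UPDATED;

	/* cache_wback_all(): write back, keep the line valid. */
	printf("wback:     %#lx\n", data & ~CACHE_UPDATED);
	/* __flush_dcache_page(): write back and invalidate. */
	printf("wback+inv: %#lx\n", data & ~(CACHE_VALID | CACHE_UPDATED));
	return 0;
}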
Example #6
void __init cache_init(void)
{
	unsigned long ccr;

	detect_cpu_and_cache_system();

	jump_to_P2();
	ccr = ctrl_inl(CCR);
	if (ccr & CCR_CACHE_CE)
		/*
		 * XXX: Should check RA here.
		 * If RA is 1, we only need to flush half of the cache.
		 */
		cache_wback_all();

	ctrl_outl(CCR_CACHE_INIT, CCR);
	back_to_P1();
}
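
The XXX comment refers to the RA bit in CCR, which on these SH-3 parts appears to map half of the operand cache as on-chip RAM; with RA set, only the remaining half can hold cache lines, so only that half would need writing back.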
Example #7
int __init detect_cpu_and_cache_system(void)
{
	unsigned long addr0, addr1, data0, data1, data2, data3;

	jump_to_P2();
	/*
	 * Check whether the entry shadows or not.
	 * When shadowed, it's a 128-entry system;
	 * otherwise, it's a 256-entry system.
	 */
	addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12);
	addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12);

	/* First, write back & invalidate */
	data0  = ctrl_inl(addr0);
	ctrl_outl(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0);
	data1  = ctrl_inl(addr1);
	ctrl_outl(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1);

	/* Next, check if there's shadow or not */
	data0 = ctrl_inl(addr0);
	data0 ^= SH_CACHE_VALID;
	ctrl_outl(data0, addr0);
	data1 = ctrl_inl(addr1);
	data2 = data1 ^ SH_CACHE_VALID;
	ctrl_outl(data2, addr1);
	data3 = ctrl_inl(addr0);

	/* Lastly, invalidate them. */
	ctrl_outl(data0&~SH_CACHE_VALID, addr0);
	ctrl_outl(data2&~SH_CACHE_VALID, addr1);

	back_to_P1();

	cpu_data->dcache.ways		= 4;
	cpu_data->dcache.entry_shift	= 4;
	cpu_data->dcache.linesz		= L1_CACHE_BYTES;
	cpu_data->dcache.flags		= 0;

	/*
	 * The 7709A/7729 has a 16K cache (256 entries), while the 7702 has
	 * only 2K (direct-mapped). The 7702 is not supported (yet).
	 */
	if (data0 == data1 && data2 == data3) {	/* Shadow */
		cpu_data->dcache.way_incr	= (1 << 11);
		cpu_data->dcache.entry_mask	= 0x7f0;
		cpu_data->dcache.sets		= 128;
		cpu_data->type = CPU_SH7708;

		cpu_data->flags |= CPU_HAS_MMU_PAGE_ASSOC;
	} else {				/* 7709A or 7729  */
		cpu_data->dcache.way_incr	= (1 << 12);
		cpu_data->dcache.entry_mask	= 0xff0;
		cpu_data->dcache.sets		= 256;
		cpu_data->type = CPU_SH7729;

#if defined(CONFIG_CPU_SUBTYPE_SH7705)
		cpu_data->type = CPU_SH7705;

#if defined(CONFIG_SH7705_CACHE_32KB)
		cpu_data->dcache.way_incr	= (1 << 13);
		cpu_data->dcache.entry_mask	= 0x1ff0;
		cpu_data->dcache.sets		= 512;
		ctrl_outl(CCR_CACHE_32KB, CCR3);
#else
		ctrl_outl(CCR_CACHE_16KB, CCR3);
#endif
#endif
	}

	/*
	 * SH-3 doesn't have separate caches
	 */
	cpu_data->dcache.flags |= SH_CACHE_COMBINED;
	cpu_data->icache = cpu_data->dcache;

	return 0;
}