Example #1
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}
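The mk_pmb_addr()/mk_pmb_data() helpers used above are not part of this example. A minimal sketch of what such accessors could look like, assuming the PMB address and data arrays sit at fixed control-register bases and are indexed by shifting the entry number into the register address (the bases and shift below are placeholder assumptions, not the real values):

/*
 * Hypothetical accessors for the PMB address/data arrays.  The base
 * addresses and the entry shift are assumptions made for illustration.
 */
#define PMB_ADDR_BASE		0xf6100000UL	/* assumed address-array base */
#define PMB_DATA_BASE		0xf7100000UL	/* assumed data-array base */
#define PMB_ENTRY_SHIFT		8		/* assumed entry-number shift */

#define mk_pmb_entry(entry)	((unsigned long)(entry) << PMB_ENTRY_SHIFT)
#define mk_pmb_addr(entry)	(mk_pmb_entry(entry) | PMB_ADDR_BASE)
#define mk_pmb_data(entry)	(mk_pmb_entry(entry) | PMB_DATA_BASE)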
Example #2
void __init shx3_cache_init(void)
{
	unsigned int ccr;

	ccr = __raw_readl(CCR);

	/*
	 * If we've got cache aliases, resolve them in hardware.
	 */
	if (boot_cpu_data.dcache.n_aliases || boot_cpu_data.icache.n_aliases) {
		ccr |= CCR_CACHE_SNM;

		boot_cpu_data.icache.n_aliases = 0;
		boot_cpu_data.dcache.n_aliases = 0;

		pr_info("Enabling hardware synonym avoidance\n");
	}

#ifdef CONFIG_SMP
	/*
	 * Broadcast I-cache block invalidations by default.
	 */
	ccr |= CCR_CACHE_IBE;
#endif

	writel_uncached(ccr, CCR);
}
Example #3
void __init shx3_cache_init(void)
{
	unsigned int ccr;

	ccr = __raw_readl(CCR);

	if (boot_cpu_data.dcache.n_aliases || boot_cpu_data.icache.n_aliases) {
		ccr |= CCR_CACHE_SNM;

		boot_cpu_data.icache.n_aliases = 0;
		boot_cpu_data.dcache.n_aliases = 0;

		pr_info("Enabling hardware synonym avoidance\n");
	}

#ifdef CONFIG_SMP
	ccr |= CCR_CACHE_IBE;
#endif

	writel_uncached(ccr, CCR);
}
Example #4
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}
Example #5
/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}
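pmb_can_merge() is not shown in this example. Going by the comment above (entries that span a contiguous mapping get linked and later coalesced), a plausible sketch of the test, assuming pmb_entry::size holds the mapping length in bytes, might be:

/*
 * Hypothetical merge test: treat two entries as part of one compound
 * mapping when the second continues the first contiguously in both
 * virtual and physical space with identical attribute flags.
 */
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == a->vpn + a->size) &&
	       (b->ppn == a->ppn + a->size) &&
	       (b->flags == a->flags);
}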
Example #6
/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
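	/* Address array slot: virtual page number plus the valid bit */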
	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
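	/* Data array slot: physical page number, attribute flags and the valid bit */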
	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
			mk_pmb_data(pmbe->entry));
}
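All of these examples funnel their register writes through writel_uncached(). Its definition is not shown here; a rough sketch, assuming it simply brackets a raw 32-bit store with the SH jump_to_uncached()/back_to_cached() helpers so the store executes via the uncached mapping, could look like this:

/*
 * Hypothetical sketch of writel_uncached(): perform a raw 32-bit store
 * while temporarily running from the uncached mapping, so the write is
 * not affected by the cached mapping it may be modifying.
 */
#define writel_uncached(v, p)				\
	do {						\
		jump_to_uncached();			\
		__raw_writel((v), (p));			\
		back_to_cached();			\
	} while (0)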