Example #1
char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define IS_USED_RUN(v)		((v) ? "" : "(disabled) ")
#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	if (!is_isa_arcv2())
		return buf;

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	if (ioc_exists)
		n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
				IS_USED_RUN(ioc_enable));

	return buf;
}
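
Note: nearly every example in this listing builds its output with the same idiom, n += scnprintf(buf + n, len - n, ...). The kernel's scnprintf() returns the number of characters actually written into the buffer (at most len - n - 1), unlike snprintf(), which returns the length the output would have had; accumulating an snprintf() return value past a truncation would push buf + n beyond the end of the buffer. Below is a minimal userspace sketch of the idiom; my_scnprintf() is a hypothetical stand-in for the kernel helper.

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's scnprintf(): returns the number
 * of characters actually stored in buf (excluding the NUL), clamped to
 * size - 1, so the accumulator below can never walk past the buffer.
 */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	return (i >= (int)size) ? (int)(size - 1) : i;
}

int main(void)
{
	char buf[64];
	int n = 0, len = sizeof(buf);

	/* same accumulation pattern as arc_cache_mumbojumbo() above */
	n += my_scnprintf(buf + n, len - n, "I-Cache\t\t: %uK\n", 32u);
	n += my_scnprintf(buf + n, len - n, "D-Cache\t\t: %uK\n", 64u);

	printf("%s", buf);
	return 0;
}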
Example #2
/*
 * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
 *   -Prints 3 regs per line, followed by a newline
 *   -To continue callee regs right after scratch regs, the trailing newline
 *    needs special handling (pr_info vs pr_cont below)
 */
static noinline void print_reg_file(long *reg_rev, int start_num)
{
	unsigned int i;
	char buf[512];
	int n = 0, len = sizeof(buf);

	for (i = start_num; i < start_num + 13; i++) {
		n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
			       i, (unsigned long)*reg_rev);

		if (((i + 1) % 3) == 0)
			n += scnprintf(buf + n, len - n, "\n");

		/* because pt_regs has regs reversed: r12..r0, r25..r13 */
		if (is_isa_arcv2() && start_num == 0)
			reg_rev++;
		else
			reg_rev--;
	}

	if (start_num != 0)
		n += scnprintf(buf + n, len - n, "\n\n");

	/* To continue printing callee regs on same line as scratch regs */
	if (start_num == 0)
		pr_info("%s", buf);
	else
		pr_cont("%s\n", buf);
}
Example #3
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}
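
The READ_BCR() calls above read an auxiliary Build Configuration Register and overlay a C bitfield struct on the raw 32-bit value; the CONFIG_CPU_BIG_ENDIAN variant exists because the compiler allocates bitfields in opposite directions on the two endiannesses. A minimal userspace sketch of that decode, assuming GCC's usual little-endian bitfield layout and a made-up register value:

#include <stdint.h>
#include <stdio.h>

/* Little-endian field layout from read_decode_cache_bcr() above.
 * Bitfield order is compiler/ABI specific, which is exactly why the
 * original needs the CONFIG_CPU_BIG_ENDIAN variant; this demo assumes
 * GCC's little-endian layout.
 */
struct bcr_cache {
	unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
};

int main(void)
{
	union {
		uint32_t raw;
		struct bcr_cache f;
	} ibcr;

	/* hypothetical raw BCR value: ver=4, config=3, sz=1, line_len=2 */
	ibcr.raw = 0x00021304;

	/* same decode math as the example above */
	printf("ver=%u assoc=%u sz=%uK line=%uB\n",
	       ibcr.f.ver,
	       1u << ibcr.f.config,	/* 1,2,4,8 way */
	       1u << (ibcr.f.sz - 1),	/* size in KB */
	       8u << ibcr.f.line_len);	/* line length in bytes */
	return 0;
}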
Example #4
static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];

	FIX_PTR(cpu);

	n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);

	if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
		n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
			       IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
			       IS_AVAIL1(cpu->extn.fpu_dp, "DP "));

	if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
			       IS_AVAIL1(cpu->extn.smart, "smaRT "),
			       IS_AVAIL1(cpu->extn.rtt, "RTT "));
		if (cpu->extn.ap_num) {
			n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
				       cpu->extn.ap_num,
				       cpu->extn.ap_full ? "full":"min");
		}
		n += scnprintf(buf + n, len - n, "\n");
	}

	if (cpu->dccm.sz || cpu->iccm.sz)
		n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
			       cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
			       cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));

	if (is_isa_arcv2()) {

		/* Error Protection: ECC/Parity */
		struct bcr_erp erp;
		READ_BCR(ARC_REG_ERP_BUILD, erp);

		if (erp.ver) {
			struct  ctl_erp ctl;
			READ_BCR(ARC_REG_ERP_CTRL, ctl);

			/* inverted bits: 0 means enabled */
			n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
				IS_AVAIL3(erp.ic,  !ctl.dpi, "IC "),
				IS_AVAIL3(erp.dc,  !ctl.dpd, "DC "),
				IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
		}
	}

	n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
			EF_ARC_OSABI_CURRENT >> 8,
			EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
			"no-legacy-syscalls" : "64-bit data any register aligned");

	return buf;
}
Example #5
void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
		       dma_addr_t dma_handle)
{
	if (is_isa_arcv2() && ioc_exists)
		return dma_free_noncoherent(dev, size, kvaddr, dma_handle);

	iounmap((void __force __iomem *)kvaddr);

	free_pages_exact((void *)dma_handle, size);
}
Example #6
void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}
}
Example #7
static void arc_chk_core_config(void)
{
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
	int fpu_enabled;

	if (!cpu->extn.timer0)
		panic("Timer0 is not present!\n");

	if (!cpu->extn.timer1)
		panic("Timer1 is not present!\n");

	if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
		panic("RTC is not present\n");

#ifdef CONFIG_ARC_HAS_DCCM
	/*
	 * DCCM can be arbitrarily placed in hardware.
	 * Make sure its placement/size matches what Linux is built with
	 */
	if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
		panic("Linux built with incorrect DCCM Base address\n");

	if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
		panic("Linux built with incorrect DCCM Size\n");
#endif

#ifdef CONFIG_ARC_HAS_ICCM
	if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
		panic("Linux built with incorrect ICCM Size\n");
#endif

	/*
	 * FP hardware/software config sanity
	 * -If hardware contains DPFP, kernel needs to save/restore FPU state
	 * -If not, it will crash trying to save/restore the non-existent regs
	 *
	 * (only DPFP checked since SP has no arch visible regs)
	 */
	fpu_enabled = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);

	if (cpu->extn.fpu_dp && !fpu_enabled)
		pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
	else if (!cpu->extn.fpu_dp && fpu_enabled)
		panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");

	if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
	    IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
	    !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
		panic("llock/scond livelock workaround missing\n");
}
Example #8
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	struct page *page = virt_to_page(dma_handle);
	int is_non_coh = 1;

	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
			(is_isa_arcv2() && ioc_exists);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}
Example #9
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);
	int is_non_coh = 1;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}
Example #10
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *paddr, *kvaddr;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, elides the need for cache maintenance, saving
	 *    cycles in flush code, and bus bandwidth as all the lines of a
	 *    buffer need to be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *   (vs. always going to memory - thus are faster)
	 */
	if (is_isa_arcv2() && ioc_exists)
		return dma_alloc_noncoherent(dev, size, dma_handle, gfp);

	/* This is linear addr (0x8000_0000 based) */
	paddr = alloc_pages_exact(size, gfp);
	if (!paddr)
		return NULL;

	/* This is kernel Virtual address (0x7000_0000 based) */
	kvaddr = ioremap_nocache((unsigned long)paddr, size);
	if (kvaddr == NULL) {
		/* don't leak the just-allocated pages on mapping failure */
		free_pages_exact(paddr, size);
		return NULL;
	}

	/* This is bus address, platform dependent */
	*dma_handle = (dma_addr_t)paddr;

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	dma_cache_wback_inv((unsigned long)paddr, size);

	return kvaddr;
}
Example #11
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, elides the need for cache maintenance, saving
	 *    cycles in flush code, and bus bandwidth as all the lines of a
	 *    buffer need to be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *   (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;

	/*
	 * - A coherent buffer needs MMU mapping to enforce non-cacheability
	 * - A highmem page needs a virtual handle (hence MMU mapping)
	 *   independent of cacheability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = paddr;

	/* This is kernel Virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}
Example #12
static inline int need_slc_flush(void)
{
	return is_isa_arcv2() && l2_line_sz;
}
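
Worth noting for all of these examples: is_isa_arcv2() is not a runtime probe. In the kernel sources it is defined as a compile-time constant, IS_ENABLED(CONFIG_ISA_ARCV2) in arch/arc/include/asm/arcregs.h, so a helper like need_slc_flush() compiles down to a test of l2_line_sz on ARCv2 builds and to a constant 0 on ARCompact builds. A minimal sketch of the pattern, with a hypothetical CONFIG_ISA_ARCV2_SIM macro standing in for the real Kconfig symbol:

#include <stdio.h>

/* CONFIG_ISA_ARCV2_SIM is a hypothetical stand-in for the Kconfig
 * symbol; in the kernel, IS_ENABLED() yields a preprocessing-time 0/1,
 * so the && below constant-folds rather than branching at runtime.
 */
#define CONFIG_ISA_ARCV2_SIM	1
#define is_isa_arcv2()		(CONFIG_ISA_ARCV2_SIM)

static unsigned long l2_line_sz;	/* probed from the SLC BCR at boot */

static inline int need_slc_flush(void)
{
	/* on an ARCompact build this folds to "return 0" */
	return is_isa_arcv2() && l2_line_sz;
}

int main(void)
{
	l2_line_sz = 128;	/* pretend the hardware reported a 128B SLC line */
	printf("need_slc_flush() = %d\n", need_slc_flush());
	return 0;
}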
Example #13
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
	struct bcr_identity *core = &cpu->core;
	int i, n = 0, ua = 0;

	FIX_PTR(cpu);

	n += scnprintf(buf + n, len - n,
		       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
		       core->family, core->cpu_id, core->chip_id);

	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
		       cpu_id, cpu->name, cpu->details,
		       is_isa_arcompact() ? "ARCompact" : "ARCv2",
		       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
		       IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));

	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
		       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
		       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));

#ifdef __ARC_UNALIGNED__
	ua = 1;
#endif
	n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
			   IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
			   IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
			   IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));

	if (i)
		n += scnprintf(buf + n, len - n, "\n\t\t: ");

	if (cpu->extn_mpy.ver) {
		if (cpu->extn_mpy.ver <= 0x2) {	/* ARCompact */
			n += scnprintf(buf + n, len - n, "mpy ");
		} else {
			int opt = 2;	/* stock MPY/MPYH */

			if (cpu->extn_mpy.dsp)	/* OPT 7-9 */
				opt = cpu->extn_mpy.dsp + 6;

			n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
		}
	}

	n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
		       IS_AVAIL1(cpu->isa.div_rem, "div_rem "),
		       IS_AVAIL1(cpu->extn.norm, "norm "),
		       IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
		       IS_AVAIL1(cpu->extn.swap, "swap "),
		       IS_AVAIL1(cpu->extn.minmax, "minmax "),
		       IS_AVAIL1(cpu->extn.crc, "crc "),
		       IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));

	if (cpu->bpu.ver)
		n += scnprintf(buf + n, len - n,
			      "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
			      IS_AVAIL1(cpu->bpu.full, "full"),
			      IS_AVAIL1(!cpu->bpu.full, "partial"),
			      cpu->bpu.num_cache, cpu->bpu.num_pred);

	if (is_isa_arcv2()) {
		struct bcr_lpb lpb;

		READ_BCR(ARC_REG_LPB_BUILD, lpb);
		if (lpb.ver) {
			unsigned int ctl;
			ctl = read_aux_reg(ARC_REG_LPB_CTRL);

			n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
				lpb.entries,
				IS_DISABLED_RUN(!ctl));
		}
	}

	n += scnprintf(buf + n, len - n, "\n");
	return buf;
}