Example #1
File: smp.c Project: Mellanox/linux
static void eznps_init_core(unsigned int cpu)
{
	u32 sync_value;
	struct nps_host_reg_aux_hw_comply hw_comply;
	struct nps_host_reg_aux_lpc lpc;

	if (NPS_CPU_TO_THREAD_NUM(cpu) != 0)
		return;

	hw_comply.value = read_aux_reg(AUX_REG_HW_COMPLY);
	hw_comply.me  = 1;
	hw_comply.le  = 1;
#ifdef CONFIG_EZNPS_SHARED_TIMER
	hw_comply.te  = 1;
#endif
	write_aux_reg(AUX_REG_HW_COMPLY, hw_comply.value);

	/* Enable MMU clock (read-modify-write so other LPC bits aren't clobbered) */
	lpc.value = read_aux_reg(CTOP_AUX_LPC);
	lpc.mep = 1;
	write_aux_reg(CTOP_AUX_LPC, lpc.value);

	/* Boot CPU only */
	if (!cpu) {
		/* Write to general purpose register in CRG */
		sync_value = ioread32be(REG_GEN_PURP_0);
		sync_value |= NPS_CRG_SYNC_BIT;
		iowrite32be(sync_value, REG_GEN_PURP_0);
	}
}
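All of these examples funnel through read_aux_reg()/write_aux_reg(), which access the ARC auxiliary register space. A minimal sketch of what such accessors could look like, assuming the lr/sr instructions do the access (hypothetical sketch_* names; the kernel's real definitions may use compiler builtins instead):

static inline unsigned int sketch_read_aux_reg(unsigned int reg)
{
	unsigned int val;

	/* lr = load from auxiliary register space */
	__asm__ __volatile__("lr %0, [%1]" : "=r"(val) : "r"(reg) : "memory");
	return val;
}

static inline void sketch_write_aux_reg(unsigned int reg, unsigned int val)
{
	/* sr = store to auxiliary register space */
	__asm__ __volatile__("sr %0, [%1]" : : "r"(val), "r"(reg) : "memory");
}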
Example #2
/*
 * Arm the timer to interrupt after @limit cycles
 * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
 */
static void arc_timer_event_setup(unsigned int limit)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */

	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}
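For context, a clockevents driver typically calls a helper like this from its set-next-event hook; a plausible wiring, with the callback name assumed:

static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);	/* arm TIMER0 for @delta cycles */
	return 0;
}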
Example #3
static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from the MMU.
	 * Otherwise, the Probe will have located the entry and set the INDEX
	 * reg to the existing location, causing the Write CMD to overwrite
	 * that entry with the new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}
Example #4
void arc_mmu_init(void)
{
	char str[256];
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/* For efficiency's sake, the kernel is built at compile time for one
	 * MMU version, which must match the hardware it runs on.
	 * Linux built for MMU V2, if run on MMU V1, will break down because V1
	 *  hardware doesn't understand cmds such as WriteNI or IVUTLB.
	 * On the other hand, Linux built for V1, if run on MMU V2, will do
	 *   unneeded workarounds to prevent memcpy thrashing.
	 * Similarly MMU V3 has new features which won't work on older MMUs
	 */
	if (mmu->ver != CONFIG_ARC_MMU_VER) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz != PAGE_SIZE)
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In SMP we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}
Example #5
/*
 * Early Hardware specific Interrupt setup
 * -Called very early (start_kernel -> setup_arch -> setup_processor)
 * -Platform Independent (a must for any ARC Core)
 * -Needed for each CPU (hence not foldable into init_IRQ)
 */
void arc_init_IRQ(void)
{
	unsigned int tmp, irq_prio, i;
	struct bcr_irq_arcv2 irq_bcr;

	struct aux_irq_ctrl {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int res3:18, save_idx_regs:1, res2:1,
			     save_u_to_u:1, save_lp_regs:1, save_blink:1,
			     res:4, save_nr_gpr_pairs:5;
#else
		unsigned int save_nr_gpr_pairs:5, res:4,
			     save_blink:1, save_lp_regs:1, save_u_to_u:1,
			     res2:1, save_idx_regs:1, res3:18;
#endif
	} ictrl;

	*(unsigned int *)&ictrl = 0;	/* zero all the bitfields via type-pun */

	ictrl.save_nr_gpr_pairs = 6;	/* r0 to r11 (r12 saved manually) */
	ictrl.save_blink = 1;
	ictrl.save_lp_regs = 1;		/* LP_COUNT, LP_START, LP_END */
	ictrl.save_u_to_u = 0;		/* user ctxt saved on kernel stack */
	ictrl.save_idx_regs = 1;	/* JLI, LDI, EI */

	WRITE_AUX(AUX_IRQ_CTRL, ictrl);

	/*
	 * ARCv2 core intc provides multiple interrupt priorities (up to 16).
	 * Typical builds though have only two levels (0-high, 1-low).
	 * Linux by default uses the lower prio 1 for most irqs, reserving 0
	 * for NMI-style interrupts in future (say perf)
	 */

	READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);

	irq_prio = irq_bcr.prio;	/* Encoded as N-1 for N levels */
	pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
		irq_prio + 1, ARCV2_IRQ_DEF_PRIO,
		irq_bcr.firq ? " FIRQ (not used)":"");

	/*
	 * Set a default priority for all available interrupts to prevent
	 * switching of register banks if Fast IRQ and multiple register banks
	 * are supported by the CPU.
	 * Also disable all IRQ lines so faulty external hardware won't
	 * trigger an interrupt that the kernel is not ready to handle.
	 */
	for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
		write_aux_reg(AUX_IRQ_SELECT, i);
		write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
		write_aux_reg(AUX_IRQ_ENABLE, 0);
	}

	/* setup status32; don't enable interrupts yet, the kernel isn't ready */
	tmp = read_aux_reg(ARC_REG_STATUS32);
	tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
	tmp &= ~STATUS_IE_MASK;
	asm volatile("kflag %0	\n"::"r"(tmp));
}
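WRITE_AUX() above presumably type-puns the bitfield struct into the raw u32 that write_aux_reg() takes; a sketch under that assumption (hypothetical name):

/* reinterpret a register-layout struct as its raw 32-bit value */
#define SKETCH_WRITE_AUX(auxr, x)	write_aux_reg(auxr, *((unsigned int *)&(x)))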
Example #6
static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (totally unrelated) returned a
	 * lkup err - because the entry didn't exist in the MMU.
	 * The workaround was to set the Index reg to some valid value prior
	 * to the flush. This was fixed in MMU v3, hence no longer needed
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}
Example #7
/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
				     unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
#if (CONFIG_ARC_MMU_VER > 2)
		aux_tag = ARC_REG_DC_PTAG;
#endif
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests,
	 * so that @paddr is floored to a cache-line boundary and @num_lines
	 * is integral. This can however be avoided for page-sized requests:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#else
	/* if V-P const for loop, PTAG can be written once outside loop */
	if (full_page_op)
		write_aux_reg(aux_tag, paddr);
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* MMUv3, cache ops require paddr separately */
		if (!full_page_op) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
#else
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
#endif
	}
}
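The floor/ceil adjustment above is easy to sanity-check in isolation. A standalone demo with made-up numbers, assuming 64-byte cache lines:

#include <stdio.h>

#define L1_CACHE_BYTES	64UL
#define CACHE_LINE_MASK	(~(L1_CACHE_BYTES - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long paddr = 0x1234, sz = 0x30;

	sz += paddr & ~CACHE_LINE_MASK;	/* grow by the offset within the line */
	paddr &= CACHE_LINE_MASK;	/* floor the start to a line boundary */

	/* prints: start=0x1200 lines=2 */
	printf("start=0x%lx lines=%lu\n", paddr, DIV_ROUND_UP(sz, L1_CACHE_BYTES));
	return 0;
}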
Example #8
/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int cacheop)
{
	unsigned int tmp = tmp;	/* self-init quells a bogus "uninitialized" warning */
	int aux;

	if (cacheop == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by the INV cmd but with IM=1
		 * The default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
}
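Since @cacheop is a compile-time constant at every call site, thin wrappers like the following (name assumed, mirroring the kernel's pattern) collapse to straight-line code with no branches:

static void sketch_flush_n_inv_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);	/* wback then discard everything */
}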
Example #9
/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int cacheop)
{
	unsigned long flags, tmp = tmp;	/* self-init quells a bogus "uninitialized" warning */

	local_irq_save(flags);

	if (cacheop == OP_FLUSH_N_INV) {
		/*
		 * Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by the INV cmd but with IM=1
		 * The default INV sub-mode is DISCARD, which needs to be toggled
		 */
		tmp = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
	}

	__cache_line_loop(paddr, vaddr, sz, cacheop);

	if (cacheop & OP_FLUSH)	/* flush / flush-n-inv both wait */
		wait_for_flush();

	/* Switch back the DISCARD ONLY Invalidate mode */
	if (cacheop == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

	local_irq_restore(flags);
}
Example #10
/*
 * set 32bit TIMER1 to keep counting monotonically and wrap around
 */
int __cpuinit arc_counter_setup(void)
{
	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);

	return is_usable_as_clocksource();
}
Example #11
/*
 * set 32bit TIMER1 to keep counting monotonically and wrap around
 */
int arc_counter_setup(void)
{
	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);

	/* Not usable in SMP */
	return !IS_ENABLED(CONFIG_SMP);
}
Example #12
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}
Example #13
void arcv2_irq_enable(struct irq_data *data)
{
	/* set default priority */
	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
	write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);

	/*
	 * hw auto-enables (linux unmask) all by default,
	 * so no need to do IRQ_ENABLE here.
	 * XXX: However OSCI LAN needs it
	 */
	write_aux_reg(AUX_IRQ_ENABLE, 1);
}
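The disable path is presumably symmetric: select the line, then clear its enable bit. A sketch under that assumption (hypothetical name):

static void sketch_arcv2_irq_disable(struct irq_data *data)
{
	write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
	write_aux_reg(AUX_IRQ_ENABLE, 0);
}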
Example #14
/*
 * Master kick-starting another CPU
 */
static void iss_model_smp_wakeup_cpu(int cpu, unsigned long pc)
{
	/* setup the start PC */
	write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc);

	/* Trigger WRITE_PC cmd for this cpu */
	write_aux_reg(ARC_AUX_XTL_REG_CMD,
			(ARC_XTL_CMD_WRITE_PC | (cpu << 8)));

	/* Take the cpu out of Halt */
	write_aux_reg(ARC_AUX_XTL_REG_CMD,
			(ARC_XTL_CMD_CLEAR_HALT | (cpu << 8)));
}
Example #15
int arc_counter_setup(void)
{
	write_aux_reg(AUX_RTC_CTRL, 1);

	/* Not usable in SMP */
	return !IS_ENABLED(CONFIG_SMP);
}
Example #16
/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests,
	 * so that @paddr is floored to a cache-line boundary and @num_lines
	 * is integral. This can however be avoided for page-sized requests:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
Example #17
static int __init arc_cs_setup_rtc(struct device_node *node)
{
	struct bcr_timer timer;
	int ret;

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	if (!timer.rtc) {
		pr_warn("Local-64-bit-Ctr clocksource not detected");
		return -ENXIO;
	}

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP)) {
		pr_warn("Local-64-bit-Ctr not usable in SMP");
		return -EINVAL;
	}

	ret = arc_get_timer_clk(node);
	if (ret)
		return ret;

	write_aux_reg(AUX_RTC_CTRL, 1);

	return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
Example #18
File: irq.c Project: 1youhun1/linux
/*
 * Early Hardware specific Interrupt setup
 * -Called very early (start_kernel -> setup_arch -> setup_processor)
 * -Platform Independent (a must for any ARC700)
 * -Needed for each CPU (hence not foldable into init_IRQ)
 *
 * What it does:
 * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
 * -Disable all IRQs (on CPU side)
 * -Optionally, setup the High priority Interrupts as Level 2 IRQs
 */
void arc_init_IRQ(void)
{
	int level_mask = 0;

	/* Disable all IRQs: enable them as devices request */
	write_aux_reg(AUX_IENABLE, 0);

	/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;

	if (level_mask) {
		pr_info("Level-2 interrupts bitset %x\n", level_mask);
		write_aux_reg(AUX_IRQ_LEV, level_mask);
	}
}
Example #19
static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests,
	 * so that @paddr is floored to a cache-line boundary and @num_lines
	 * is integral. This can however be avoided for page-sized requests:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
Example #20
static void arc_irq_unmask(struct irq_data *data)
{
	unsigned int ienb;

	ienb = read_aux_reg(AUX_IENABLE);
	ienb |= (1 << data->irq);	/* assumes virq == hwirq (identity map) */
	write_aux_reg(AUX_IENABLE, ienb);
}
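The mask counterpart would clear the same bit; a sketch mirroring the unmask above (hypothetical name):

static void sketch_arc_irq_mask(struct irq_data *data)
{
	unsigned int ienb;

	ienb = read_aux_reg(AUX_IENABLE);
	ienb &= ~(1 << data->irq);
	write_aux_reg(AUX_IENABLE, ienb);
}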
Example #21
static void __after_dc_op(const int op, unsigned int reg)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);

	/* Switch back to default Invalidate mode */
	if (op == OP_FLUSH_N_INV)
		write_aux_reg(ARC_REG_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
}
Example #22
File: time.c Project: 1314cc/linux
static void __init arc_cs_setup_timer1(struct device_node *node)
{
	int ret;

	/* Local to CPU hence not usable in SMP */
	if (IS_ENABLED(CONFIG_SMP))
		return;

	ret = arc_get_timer_clk(node);
	if (ret)
		return;

	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);

	clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}
Example #23
static void nps400_irq_unmask(struct irq_data *irqd)
{
	unsigned int ienb;
	unsigned int irq = irqd_to_hwirq(irqd);

	ienb = read_aux_reg(AUX_IENABLE);
	ienb |= (1 << irq);
	write_aux_reg(AUX_IENABLE, ienb);
}
Example #24
/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	unsigned long flags;
	int num_lines;

	/*
	 * Ensure we properly floor/ceil the non-line aligned/sized requests.
	 * Page-sized flushes, however, can be compile-time optimised since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~ICACHE_LINE_MASK;
		paddr &= ICACHE_LINE_MASK;
		vaddr &= ICACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	/* bits 17:13 of vaddr go as bits 4:0 of paddr */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	local_irq_save(flags);
	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/* tag comes from phy addr */
		write_aux_reg(ARC_REG_IC_PTAG, paddr);

		/* index bits come from vaddr */
		write_aux_reg(ARC_REG_IC_IVIL, vaddr);
		vaddr += ARC_ICACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddr bits */
		write_aux_reg(ARC_REG_IC_IVIL, paddr);
#endif
		paddr += ARC_ICACHE_LINE_LEN;
	}
	local_irq_restore(flags);
}
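The MMUv2 "stuffing" folds vaddr bits 17:13 into the low 5 bits of paddr. A standalone worked example with made-up addresses, assuming 8 KB pages (PAGE_SHIFT == 13):

#include <stdio.h>

int main(void)
{
	unsigned long vaddr = 0x7000a000, paddr = 0x80001200;

	paddr |= (vaddr >> 13) & 0x1F;	/* vaddr bits 17:13 -> paddr bits 4:0 */
	printf("0x%lx\n", paddr);	/* prints 0x80001205 */
	return 0;
}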
Example #25
noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	unsigned long flags;
	unsigned int ctrl;

	local_irq_save(flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the operation).
	 * END can't be the same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	local_irq_restore(flags);
#endif
}
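A hypothetical call site, writing back and discarding a page-sized buffer from the SLC (name and buffer made up):

static void sketch_purge_slc_buf(unsigned long buf_paddr)
{
	/* OP_FLUSH_N_INV = OP_FLUSH | OP_INV: wback, then discard */
	slc_op(buf_paddr, PAGE_SIZE, OP_FLUSH_N_INV);
}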
Example #26
static void nps400_irq_eoi_global(struct irq_data *irqd)
{
	unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);

	write_aux_reg(CTOP_AUX_IACK, 1 << irq);

	/* Don't ack GIC before all device access attempts are done */
	mb();

	nps_ack_gic();
}
Example #27
/*
 * Early Hardware specific Interrupt setup
 * -Called very early (start_kernel -> setup_arch -> setup_processor)
 * -Platform Independent (a must for any ARC700)
 * -Needed for each CPU (hence not foldable into init_IRQ)
 *
 * What it does:
 * -Disable all IRQs (on CPU side)
 * -Optionally, setup the High priority Interrupts as Level 2 IRQs
 */
void arc_init_IRQ(void)
{
	int level_mask = 0;

	/* Disable all IRQs: enable them as devices request */
	write_aux_reg(AUX_IENABLE, 0);

	/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3;
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5;
	level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6;

	/*
	 * Write to the register even if no LV2 IRQs are configured, to reset
	 * it in case the bootloader mucked with it
	 */
	write_aux_reg(AUX_IRQ_LEV, level_mask);

	if (level_mask)
		pr_info("Level-2 interrupts bitset %x\n", level_mask);
}
Example #28
static void nps_clkevent_add_thread(unsigned long delta)
{
	int thread;
	unsigned int cflags, enabled_threads;

	hw_schd_save(&cflags);

	/* add thread to TSI1 */
	thread = read_aux_reg(CTOP_AUX_THREAD_ID);
	enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
	enabled_threads |= (1 << thread);
	write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);

	/* set next timer event */
	write_aux_reg(NPS_REG_TIMER0_LIMIT, delta);
	write_aux_reg(NPS_REG_TIMER0_CNT, 0);
	write_aux_reg(NPS_REG_TIMER0_CTRL,
		      TIMER0_CTRL_IE | TIMER0_CTRL_NH);

	hw_schd_restore(cflags);
}
Example #29
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmds: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by the INV cmd but with IM=1
		 * So toggle the INV sub-mode depending on the op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}
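__after_dc_op() in Example #21 takes the saved DC_CTRL value as @reg, so in that kernel revision __before_dc_op() plausibly returned it. A sketch of the pairing under that assumption (hypothetical name):

static inline unsigned int sketch_before_dc_op(const int op)
{
	unsigned int reg = 0;

	if (op == OP_FLUSH_N_INV) {
		reg = read_aux_reg(ARC_REG_DC_CTRL);
		write_aux_reg(ARC_REG_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
	}
	return reg;	/* handed to __after_dc_op(op, reg) */
}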
Example #30
/*
 * Per Line Operation on D-Cache
 * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete.
 * Its sole purpose is to help gcc generate ZOL (zero-overhead loops)
 * (aliasing VIPT dcache flushing needs both vaddr and paddr)
 */
static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
				  unsigned long sz, const int aux_reg)
{
	int num_lines;

	/* Ensure we properly floor/ceil the non-line aligned/sized requests,
	 * so that @paddr is floored to a cache-line boundary and @num_lines
	 * is integral. This can however be avoided for page-sized requests:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be an integral multiple of line size (being page sized).
	 */
	if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
		sz += paddr & ~DCACHE_LINE_MASK;
		paddr &= DCACHE_LINE_MASK;
		vaddr &= DCACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);

#if (CONFIG_ARC_MMU_VER <= 2)
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#endif

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
		/*
		 * Just as for I$, in MMU v3, D$ ops also require
		 * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
		 */
		write_aux_reg(ARC_REG_DC_PTAG, paddr);

		write_aux_reg(aux_reg, vaddr);
		vaddr += ARC_DCACHE_LINE_LEN;
#else
		/* paddr contains stuffed vaddr bits */
		write_aux_reg(aux_reg, paddr);
#endif
		paddr += ARC_DCACHE_LINE_LEN;
	}
}
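A plausible caller, bracketing the loop the same way __dc_line_op() does in Example #9 (wiring assumed, hypothetical name):

static void sketch_dc_inv_line_range(unsigned long paddr, unsigned long vaddr,
				     unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	__dc_line_loop(paddr, vaddr, sz, ARC_REG_DC_IVDL);	/* discard lines */
	local_irq_restore(flags);
}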