Example #1
void
caches_on(void)
{
	uint32 config1;
	uint start, end, size, lsize;

	/* Read the cache configuration (Config1) for the probes below */
	config1 = MFC0(C0_CONFIG, 1);
	
	icache_probe(config1, &size, &lsize);
	icache_size = size;
	ic_lsize = lsize;
	
	dcache_probe(config1, &size, &lsize);
	dcache_size = size;
	dc_lsize = lsize;
	
	/* If caches are not in the default state then
	 * presume that caches are already init'd
	 */
	if ((MFC0(C0_CONFIG, 0) & CONF_CM_CMASK) != CONF_CM_UNCACHED) {
		blast_dcache();
		blast_icache();
		return;
	}

	/* init icache */
	start = KSEG0;
	end = (start + icache_size);
	MTC0(C0_TAGLO, 0, 0);
	MTC0(C0_TAGHI, 0, 0);
	while (start < end) {
		cache_op(start, Index_Store_Tag_I);
		start += ic_lsize;
	}
	
	/* init dcache */
	start = KSEG0;
	end = (start + dcache_size);
	MTC0(C0_TAGLO, 0, 0);
	MTC0(C0_TAGHI, 0, 0);
	while (start < end) {
		cache_op(start, Index_Store_Tag_D);
		start += dc_lsize;
	}

	/* Must be in KSEG1 to change cachability */
	change_cachability = (void (*)(uint32)) KSEG1ADDR(_change_cachability);
	change_cachability(CONF_CM_CACHABLE_NONCOHERENT);
}
Example #2
static void
_change_cachability(uint32 cm)
{
	uint32 prid, c0reg;

	/* Select the requested cacheability mode in Config0 */
	c0reg = MFC0(C0_CONFIG, 0);
	c0reg &= ~CONF_CM_CMASK;
	c0reg |= (cm & CONF_CM_CMASK);
	MTC0(C0_CONFIG, 0, c0reg);
	prid = MFC0(C0_PRID, 0);
	if ((prid & (PRID_COMP_MASK | PRID_IMP_MASK)) ==
	    (PRID_COMP_BROADCOM | PRID_IMP_BCM3302)) {
		c0reg = MFC0(C0_BROADCOM, 0);
		/* Enable icache & dcache */
		c0reg |= BRCM_IC_ENABLE | BRCM_DC_ENABLE;
		MTC0(C0_BROADCOM, 0, c0reg);
	}
}
Example #3
static int
ingenic_send_ipi(struct cpu_info *ci, int tag)
{
	uint32_t msg;

	msg = 1 << tag;

	mutex_enter(&ingenic_ipi_lock);
	if (kcpuset_isset(cpus_running, cpu_index(ci))) {
		/* write the IPI bit to the target core's mailbox (select 0 or 1) */
		if (cpu_index(ci) == 0) {
			MTC0(msg, CP0_CORE_MBOX, 0);
		} else {
			MTC0(msg, CP0_CORE_MBOX, 1);
		}
	}
	mutex_exit(&ingenic_ipi_lock);
	return 0;
}
Example #4
void
ingenic_clockintr(struct clockframe *cf)
{
	int s = splsched();
	struct cpu_info * const ci = curcpu();
#ifdef USE_OST
	uint32_t new_cnt;
#endif

	/* clear flags */
	writereg(JZ_TC_TFCR, TFR_OSTFLAG);

	ci->ci_next_cp0_clk_intr += (uint32_t)(ci->ci_cycles_per_hz & 0xffffffff);
#ifdef USE_OST
	writereg(JZ_OST_DATA, ci->ci_next_cp0_clk_intr);

	/* Check for lost clock interrupts */
	new_cnt = readreg(JZ_OST_CNT_LO);

	/*
	 * If we missed one or more clock interrupts, start counting
	 * again from the current value.
	 */
	if ((ci->ci_next_cp0_clk_intr - new_cnt) & 0x80000000) {
		ci->ci_next_cp0_clk_intr = new_cnt + curcpu()->ci_cycles_per_hz;
		writereg(JZ_OST_DATA, ci->ci_next_cp0_clk_intr);
		curcpu()->ci_ev_count_compare_missed.ev_count++;
	}
	writereg(JZ_TC_TFCR, TFR_OSTFLAG);
#else
	writereg(JZ_TC_TFCR, TFR_FFLAG5);
#endif

#ifdef INGENIC_CLOCK_DEBUG
	cnt++;
	if (cnt == 100) {
		cnt = 0;
		ingenic_puts("+");
	}
#endif
#ifdef MULTIPROCESSOR
	/*
	 * XXX
	 * needs to take the IPI lock and ping all online CPUs, not just core 1
	 */
	MTC0(1 << IPI_CLOCK, 20, 1);
#endif
	hardclock(cf);
	splx(s);
}
Example #5
static void
ingenic_cpu_init(struct cpu_info *ci)
{
	uint32_t reg;

	/* enable IPIs for this core */
	reg = MFC0(12, 4);	/* reset entry and interrupts */
	if (cpu_index(ci) == 1) {
		reg |= REIM_MIRQ1_M;
	} else {
		reg |= REIM_MIRQ0_M;
	}
	MTC0(reg, 12, 4);
	printf("%s %d %08x\n", __func__, cpu_index(ci), reg);
}
Example #6
void *
osl_init(void)
{
	uint32 c0reg;
	sb_t *sbh;

	/* Disable interrupts */
	c0reg = MFC0(C0_STATUS, 0);
	c0reg &= ~ST0_IE;
	MTC0(C0_STATUS, 0, c0reg);

	/* Scan backplane */
	sbh = sb_kattach(SB_OSH);

	sb_mips_init(sbh, 0);
	sb_serial_init(sbh, serial_add);

	/* Init malloc */
	free_mem_ptr = (ulong) bss_end;
	free_mem_ptr_end = ((ulong)&c0reg) - 8192;	/* Enough stack? */

	return ((void *)sbh);
}
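
All six examples access MIPS coprocessor 0 registers through MFC0/MTC0 macros rather than writing mfc0/mtc0 instructions inline. Note that the argument order differs between the code bases: the Broadcom-style examples (#1, #2, #6) call MTC0(register, select, value), while the Ingenic/NetBSD examples (#3, #4, #5) pass the value first, as in MTC0(msg, CP0_CORE_MBOX, 0). As a rough sketch, such macros could be built on GCC inline assembly along the following lines, using the Broadcom-style argument order; the bodies below are an illustrative assumption and are not taken from either code base.

/*
 * Sketch only (assumption, not from either code base): MFC0/MTC0 as
 * GCC inline-asm wrappers around the mfc0/mtc0 instructions.  "reg"
 * and "sel" must expand to compile-time constants such as C0_CONFIG
 * and 0/1; a nonzero select may need ".set mips32" on old assemblers.
 */
#include <stdint.h>

#define MFC0(reg, sel)						\
({								\
	uint32_t _val;						\
	__asm__ __volatile__("mfc0 %0, $%1, %2"			\
	    : "=r" (_val)					\
	    : "i" (reg), "i" (sel));				\
	_val;							\
})

#define MTC0(reg, sel, val)					\
	__asm__ __volatile__("mtc0 %0, $%1, %2"			\
	    : /* no outputs */					\
	    : "r" ((uint32_t)(val)), "i" (reg), "i" (sel)	\
	    : "memory")

Wrapping the read in a GCC statement expression lets MFC0 be used as an rvalue, e.g. c0reg = MFC0(C0_STATUS, 0), matching how the examples above use it.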