Example #1
void
cpu_intr(int ppl, vaddr_t pc, uint32_t status)
{
	struct cpu_info * const ci = curcpu();
	uint32_t pending;
	int ipl;
	struct clockframe cf;	/* frame passed to mips3_clockintr() below */
#ifdef DIAGNOSTIC
	const int mtx_count = ci->ci_mtx_count;
	const u_int biglock_count = ci->ci_biglock_count;
	const u_int blcnt = curlwp->l_blcnt;
#endif
	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	ci->ci_data.cpu_nintr++;

	while (ppl < (ipl = splintr(&pending))) {
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		splx(ipl);	/* lower to interrupt level */
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

		KASSERTMSG(ci->ci_cpl == ipl,
		    "%s: cpl (%d) != ipl (%d)", __func__, ci->ci_cpl, ipl);
		KASSERT(pending != 0);

		cf.pc = pc;
		cf.sr = status;
		cf.intr = (ci->ci_idepth > 1);

#ifdef MIPS3_ENABLE_CLOCK_INTR
		if (pending & MIPS_INT_MASK_5) {
			KASSERTMSG(ipl == IPL_SCHED,
			    "%s: ipl (%d) != IPL_SCHED (%d)",
			     __func__, ipl, IPL_SCHED);
			/* call the common MIPS3 clock interrupt handler */ 
			mips3_clockintr(&cf);
			pending ^= MIPS_INT_MASK_5;
		}
#endif

		if (pending != 0) {
			/* Process I/O and error interrupts. */
			evbmips_iointr(ipl, pc, pending);
		}
		KASSERT(biglock_count == ci->ci_biglock_count);
		KASSERT(blcnt == curlwp->l_blcnt);
		KASSERT(mtx_count == ci->ci_mtx_count);

		/*
		 * Even if our spl is higher now (because we interrupted
		 * while a spin lock was held and a higher-IPL spin lock
		 * was taken), that lock can no longer be held, so it is
		 * safe to lower the IPL back to ppl.
		 */
		(void) splhigh();	/* disable interrupts */
	}

	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
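
The loop above drains pending interrupts in priority order: splintr()
reports the IPL of the highest pending source, the handlers run at that
level, and the loop exits once nothing pending outranks the ppl the
routine was entered at. Below is a minimal userspace sketch of that
dispatch shape; fake_splintr() and run_handlers() are hypothetical
stand-ins for splintr()/evbmips_iointr(), not NetBSD APIs.

#include <stdint.h>
#include <stdio.h>

#define IPL_NONE  0

static uint32_t pending_mask = 0x24;	/* two fake pending sources */

/* Return the IPL of the highest pending source and its bit mask. */
static int
fake_splintr(uint32_t *pending)
{
	for (int ipl = 7; ipl > 0; ipl--) {
		if (pending_mask & (1u << ipl)) {
			*pending = 1u << ipl;
			return ipl;
		}
	}
	*pending = 0;
	return IPL_NONE;
}

static void
run_handlers(int ipl, uint32_t pending)
{
	printf("dispatch ipl=%d pending=%#x\n", ipl, pending);
	pending_mask &= ~pending;	/* the handler clears its source */
}

int
main(void)
{
	const int ppl = IPL_NONE;	/* priority we were entered at */
	uint32_t pending;
	int ipl;

	/* Same shape as the while loop in cpu_intr(). */
	while (ppl < (ipl = fake_splintr(&pending)))
		run_handlers(ipl, pending);
	return 0;
}

Run it and the two fake sources are dispatched highest level first (IPL
5, then 2), mirroring how cpu_intr() always services the most urgent
pending level before re-checking.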
Example #2
File: cpu_subr.c  Project: ryo/netbsd-src
void
cpu_vmspace_exec(lwp_t *l, vaddr_t start, vaddr_t end)
{
	/*
	 * We need to turn UX on or off so that copyout/copyin will work
	 * well before setregs() gets called.
	 */
	uint32_t sr = mips_cp0_status_read();
	if (end != (uint32_t) end) {
		mips_cp0_status_write(sr | MIPS3_SR_UX);
	} else {
		mips_cp0_status_write(sr & ~MIPS3_SR_UX);
	}
}
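
The test end != (uint32_t)end is a compact width check: it is true
exactly when the top of the new VM space does not fit in 32 bits, i.e.
when the process needs the 64-bit user segment (UX) enabled. A
standalone sketch of just that test, with illustrative values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* A 32-bit-clean address, and one just past 4 GiB. */
	uint64_t ends[] = { 0x7fff8000u, 0x100000000ull };

	for (int i = 0; i < 2; i++) {
		uint64_t end = ends[i];
		int needs_ux = (end != (uint32_t)end);
		printf("end=%#llx -> %s UX\n",
		    (unsigned long long)end,
		    needs_ux ? "enable" : "disable");
	}
	return 0;
}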
Example #3
File: cpu_subr.c  Project: ryo/netbsd-src
void
cpu_hatch(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	/*
	 * Invalidate all the TLB entries (even wired ones) and then reserve
	 * space for the wired TLB entries.
	 */
	mips3_cp0_wired_write(0);
	tlb_invalidate_all();
	mips3_cp0_wired_write(ti->ti_wired);

	/*
	 * Set up the HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
	 */
	cpu_hwrena_setup();

	/*
	 * If we are using register zero relative addressing to access cpu_info
	 * in the exception vectors, enter that mapping into the TLB now.
	 */
	if (ci->ci_tlb_slot >= 0) {
		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
		const struct tlbmask tlbmask = {
			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if (PGSHIFT & 1)
			.tlb_lo0 = tlb_lo,
			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
			.tlb_lo0 = 0,
			.tlb_lo1 = tlb_lo,
#endif
			.tlb_mask = -1,
		};

		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
	}

	/*
	 * Flush the icache just to be sure.
	 */
	mips_icache_sync_all();

	/*
	 * Let this CPU do its own initialization (for things that have to be
	 * done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_init)(ci);

	// Show this CPU as present.
	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);

	/*
	 * Announce we are hatched
	 */
	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));

	/*
	 * Now wait to be set free!
	 */
	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
		/* spin, spin, spin */
	}

	/*
	 * Initialize the MIPS count/compare clock.
	 */
	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
	KASSERT(ci->ci_cycles_per_hz != 0);
	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
	ci->ci_data.cpu_cc_skew = 0;

	/*
	 * Let this CPU do its own post-running initialization
	 * (for things that have to be done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_run)(ci);

	/*
	 * Now turn on interrupts (and verify they are on).
	 */
	spl0();
	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));

	/*
	 * And do a tail call to idle_loop
	 */
	idle_loop(NULL);
}
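
One detail worth unpacking is the wired mapping: tlb_hi is built as
-PAGE_SIZE | KERNEL_PID, and since -PAGE_SIZE is the page-aligned top
of the sign-extended address space, the entry maps the very last
virtual page, which the exception vectors can reach with
register-zero-relative (negative offset) addressing. A sketch of just
that arithmetic, using illustrative constants rather than NetBSD's:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096
#define KERNEL_PID 0		/* kernel ASID */

int
main(void)
{
	/* Same expression as .tlb_hi in cpu_hatch() above. */
	int64_t tlb_hi = -(int64_t)PAGE_SIZE | KERNEL_PID;

	/* Prints 0xfffffffffffff000: the highest page of the VA space. */
	printf("tlb_hi = %#llx\n", (unsigned long long)tlb_hi);
	return 0;
}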

void
cpu_boot_secondary_processors(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPU_IS_PRIMARY(ci))
			continue;
		KASSERT(ci->ci_data.cpu_idlelwp);

		/*
		 * Skip this CPU if it didn't successfully hatch.
		 */
		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
			continue;

		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		kcpuset_set(cpus_running, cpu_index(ci));
		// Spin until the cpu calls idle_loop
		for (u_int i = 0; i < 100; i++) {
			if (kcpuset_isset(cpus_running, cpu_index(ci)))
				break;
			delay(1000);
		}
	}
}
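
Together, cpu_hatch() and cpu_boot_secondary_processors() form a
two-step rendezvous: the secondary announces itself in cpus_hatched and
spins, and the boot CPU later sets it free via cpus_running. A portable
sketch of that handshake using C11 atomics and pthreads in place of
kcpuset_* (all names here are made up for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int hatched, running;

static void *
secondary(void *arg)
{
	(void)arg;
	atomic_store(&hatched, 1);	/* "announce we are hatched" */
	while (!atomic_load(&running))	/* "now wait to be set free" */
		;			/* spin, spin, spin */
	printf("secondary: entering idle loop\n");
	return NULL;
}

int
main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, secondary, NULL);

	/* Boot CPU: wait for the hatch announcement, then release. */
	while (!atomic_load(&hatched))
		;
	atomic_store(&running, 1);
	pthread_join(t, NULL);
	return 0;
}

Build with cc -pthread. Unlike the kernel, this sketch waits for the
hatch announcement instead of skipping CPUs that never made it.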