/*
 * Quiesce CPUs in a multiprocessor machine before resuming. We need to do
 * this since the APs will be hatched (but waiting for CPUF_GO), and we don't
 * want the APs to be executing code and causing side effects during the
 * unpack operation.
 */
void
hibernate_quiesce_cpus(void)
{
	struct cpu_info *ci;
	u_long i;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	pmap_kenter_pa(ACPI_TRAMPOLINE, ACPI_TRAMPOLINE, PROT_READ | PROT_EXEC);
	pmap_kenter_pa(ACPI_TRAMP_DATA, ACPI_TRAMP_DATA,
		PROT_READ | PROT_WRITE);

	for (i = 0; i < MAXCPUS; i++) {
		ci = cpu_info[i];
		if (ci == NULL)
			continue;
		if (ci->ci_idle_pcb == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP | CPUF_SP | CPUF_PRIMARY))
			continue;
		atomic_setbits_int(&ci->ci_flags, CPUF_GO | CPUF_PARK);
	}

	/* Wait a bit for the APs to park themselves */
	delay(500000);

	pmap_kremove(ACPI_TRAMPOLINE, PAGE_SIZE);
	pmap_kremove(ACPI_TRAMP_DATA, PAGE_SIZE);
}
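
A minimal sketch of the AP side of this handshake, assuming hypothetical names (hibernate_ap_park(), CPUF_PARKED) that do not appear in the snippet above: once an AP observes CPUF_PARK, it disables interrupts and halts so it cannot generate side effects while the primary CPU unpacks the image.

/*
 * Illustrative sketch only, not the actual OpenBSD implementation.
 */
void
hibernate_ap_park(struct cpu_info *ci)
{
	if ((ci->ci_flags & CPUF_PARK) == 0)
		return;

	intr_disable();					/* no further side effects */
	atomic_setbits_int(&ci->ci_flags, CPUF_PARKED);	/* hypothetical flag */
	for (;;)
		__asm volatile("hlt");			/* wait for the BSP */
}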
Example #2
/*
 * Start the real-time and statistics clocks. Leave stathz 0 since there
 * are no other timers available.
 */
void
cp0_startclock(struct cpu_info *ci)
{
	int s;

#ifdef MULTIPROCESSOR
	if (!CPU_IS_PRIMARY(ci)) {
		s = splhigh();
		nanouptime(&ci->ci_schedstate.spc_runtime);
		splx(s);

		/* try to avoid getting clock interrupts early */
		cp0_set_compare(cp0_get_count() - 1);

		cp0_calibrate(ci);
	}
#endif

	/* Start the clock. */
	s = splclock();
	ci->ci_cpu_counter_interval =
	    (ci->ci_hw.clock / CP0_CYCLE_DIVIDER) / hz;
	ci->ci_cpu_counter_last = cp0_get_count() + ci->ci_cpu_counter_interval;
	cp0_set_compare(ci->ci_cpu_counter_last);
	ci->ci_clock_started++;
	splx(s);
}
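
For context, a sketch (not the verbatim handler) of how the count/compare clock armed above is usually serviced: each interrupt advances the compare register by one interval and runs hardclock(). Missed-tick recovery and statistics are omitted.

void
cp0_clockintr_sketch(struct cpu_info *ci, struct clockframe *cf)
{
	ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
	cp0_set_compare(ci->ci_cpu_counter_last);	/* arm the next tick */
	hardclock(cf);
}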
/*
 * Quiesce CPUs in a multiprocessor machine before resuming. We need to do
 * this since the APs will be hatched (but waiting for CPUF_GO), and we don't
 * want the APs to be executing code and causing side effects during the
 * unpack operation.
 */
void
hibernate_quiesce_cpus(void)
{
	KASSERT(CPU_IS_PRIMARY(curcpu()));

	/* Start the hatched (but idling) APs */
	cpu_boot_secondary_processors();

	/* Now shut them down */
	acpi_sleep_mp();
}
Example #4
/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
	struct cpu_info *ci = curcpu();

	p = curproc;
	if (p && ((p->p_flag & (P_SYSTEM | P_WEXIT)) == 0)) {
		struct process *pr = p->p_p;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_ALRMPEND);
			need_proftick(p);
		}
		if (timerisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_PROF], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_PROFPEND);
			need_proftick(p);
		}
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	if (--ci->ci_schedstate.spc_rrticks <= 0)
		roundrobin(ci);

	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;

	tc_ticktock();
	ticks++;

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (timeout_hardclock_update())
		softintr_schedule(softclock_si);
}
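
hardclock() defers callout processing to a soft interrupt; a plausible setup sketch (softintr_establish() is the usual MD API, but this init routine and its name are illustrative):

void *softclock_si;			/* handle scheduled by hardclock() above */

void
softclock_init_sketch(void)
{
	softclock_si = softintr_establish(IPL_SOFTCLOCK, softclock, NULL);
	if (softclock_si == NULL)
		panic("softclock_init_sketch: softintr_establish failed");
}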
Example #5
/*
 * Quiesce CPUs in a multiprocessor machine before resuming. We need to do
 * this since the APs will be hatched (but waiting for CPUF_GO), and we don't
 * want the APs to be executing code and causing side effects during the
 * unpack operation.
 */
void
hibernate_quiesce_cpus(void)
{
	KASSERT(CPU_IS_PRIMARY(curcpu()));

	/* Start the hatched (but idling) APs */
	cpu_boot_secondary_processors();

	/* Demote the APs to real mode */
	x86_broadcast_ipi(X86_IPI_HALT_REALMODE);

	/* Wait a bit for the APs to park themselves */
	delay(1000000);

}
Example #6
void
obio_splx(int newipl)
{
	struct cpu_info *ci = curcpu();

	/* Update masks to new ipl. Order highly important! */
	__asm__ (".set noreorder\n");
	ci->ci_ipl = newipl;
	__asm__ ("sync\n\t.set reorder\n");
	if (CPU_IS_PRIMARY(ci))
		obio_setintrmask(newipl);
	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}
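
The matching raise path is not shown; a minimal sketch, assuming a hypothetical obio_splraise() that uses the same fields (the memory-ordering asm of the real code is omitted):

int
obio_splraise_sketch(int newipl)
{
	struct cpu_info *ci = curcpu();
	int oldipl = ci->ci_ipl;

	if (newipl > oldipl) {
		ci->ci_ipl = newipl;
		if (CPU_IS_PRIMARY(ci))
			obio_setintrmask(newipl);
	}
	return oldipl;		/* caller later hands this back to obio_splx() */
}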
Example #7
int
intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin,
    int *index)
{
	int start, slot, i;
	struct intrsource *isp;

	start = CPU_IS_PRIMARY(ci) ? NUM_LEGACY_IRQS : 0;
	slot = -1;

	for (i = 0; i < start; i++) {
		isp = ci->ci_isources[i];
		if (isp != NULL && isp->is_pic == pic && isp->is_pin == pin) {
			slot = i;
			start = MAX_INTR_SOURCES;
			break;
		}
	}
	for (i = start; i < MAX_INTR_SOURCES ; i++) {
		isp = ci->ci_isources[i];
		if (isp != NULL && isp->is_pic == pic && isp->is_pin == pin) {
			slot = i;
			break;
		}
		if (isp == NULL && slot == -1) {
			slot = i;
			continue;
		}
	}
	if (slot == -1) {
		return EBUSY;
	}

	isp = ci->ci_isources[slot];
	if (isp == NULL) {
		isp = malloc(sizeof (struct intrsource), M_DEVBUF,
		    M_NOWAIT|M_ZERO);
		if (isp == NULL) {
			return ENOMEM;
		}
		snprintf(isp->is_evname, sizeof (isp->is_evname),
		    "pin %d", pin);
		ci->ci_isources[slot] = isp;
	}

	*index = slot;
	return 0;
}
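
A hedged sketch of how a caller might use intr_allocate_slot_cpu(): walk the CPUs and take the first one that already has, or can allocate, a slot for this pic/pin pair. The wrapper name and the first-fit policy are illustrative, not the actual MD code.

int
intr_allocate_slot_sketch(struct pic *pic, int pin, struct cpu_info **cip,
    int *index)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	CPU_INFO_FOREACH(cii, ci) {
		if (intr_allocate_slot_cpu(ci, pic, pin, index) == 0) {
			*cip = ci;		/* interrupt will be routed here */
			return 0;
		}
	}
	return EBUSY;
}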
Example #8
/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct lwp *l;
	struct cpu_info *ci;

	ci = curcpu();
	l = ci->ci_data.cpu_onproc;

	timer_tick(l, CLKF_USERMODE(frame));

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	/*
	 * If no separate schedclock is provided, call it here
	 * at about 16 Hz.
	 */
	if (schedhz == 0) {
		if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
			schedclock(l);
			ci->ci_schedstate.spc_schedticks = hardscheddiv;
		}
	}
	if ((--ci->ci_schedstate.spc_ticks) <= 0)
		sched_tick(ci);

	if (CPU_IS_PRIMARY(ci)) {
		hardclock_ticks++;
		tc_ticktock();
	}

	/*
	 * Update real-time timeout queue.
	 */
	callout_hardclock();

#ifdef KDTRACE_HOOKS
	cyclic_clock_func_t func = cyclic_clock_func[cpu_index(ci)];
	if (func) {
		(*func)((struct clockframe *)frame);
	}
#endif
}
Example #9
/* ARGSUSED */
int
sys_reboot(struct proc *p, void *v, register_t *retval)
{
	struct sys_reboot_args /* {
		syscallarg(int) opt;
	} */ *uap = v;
	int error;

	if ((error = suser(p, 0)) != 0)
		return (error);

#ifdef MULTIPROCESSOR
	sched_stop_secondary_cpus();
	KASSERT(CPU_IS_PRIMARY(curcpu()));
#endif
	reboot(SCARG(uap, opt));
	/* NOTREACHED */
	return (0);
}
Example #10
void
cpu_boot_secondary_processors(void)
{
	for (struct cpu_info *ci = cpu_info_store.ci_next;
	     ci != NULL;
	     ci = ci->ci_next) {
		KASSERT(!CPU_IS_PRIMARY(ci));
		KASSERT(ci->ci_data.cpu_idlelwp);

		/*
		 * Skip this CPU if it didn't successfully hatch.
		 */
		if (! CPUSET_HAS_P(cpus_hatched, cpu_index(ci)))
			continue;

		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		CPUSET_ADD(cpus_running, cpu_index(ci));
	}
}
Example #11
static __inline void
__mp_lock_spin(struct __mp_lock *mpl)
{
#ifndef MP_LOCKDEBUG
	while (mpl->mpl_count != 0)
		SPINLOCK_SPIN_HOOK;
#else
	int ticks = __mp_lock_spinout;
	if (!CPU_IS_PRIMARY(curcpu()))				/* XXX */
		ticks += ticks;					/* XXX */

	while (mpl->mpl_count != 0 && --ticks > 0)
		SPINLOCK_SPIN_HOOK;

	if (ticks == 0) {
		db_printf("__mp_lock(%p): lock spun out", mpl);
		Debugger();
	}
#endif
}
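
For orientation, a simplified acquire path built on __mp_lock_spin(); the real __mp_lock additionally handles recursion by the owning CPU, which this sketch leaves out.

static __inline void
__mp_lock_sketch(struct __mp_lock *mpl)
{
	for (;;) {
		__mp_lock_spin(mpl);		/* wait until the lock looks free */
		if (atomic_cas_uint(&mpl->mpl_count, 0, 1) == 0)
			break;			/* we won the race */
	}
}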
Example #12
/*
 * Call hardclock on all CPUs.
 */
static void
handle_hardclock(struct clockframe *cap)
{
	int s;
#ifdef MULTIPROCESSOR
	struct cpu_info *cpi;
	CPU_INFO_ITERATOR n;

	for (CPU_INFO_FOREACH(n, cpi)) {
		if (cpi == cpuinfo.ci_self) {
			KASSERT(CPU_IS_PRIMARY(cpi));
			continue;
		}
		
		raise_ipi(cpi, IPL_HARDCLOCK);
	}
#endif
	s = splsched();
	hardclock(cap);
	splx(s);
}
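
A minimal sketch of the receiving side of that IPI (the handler name is hypothetical and the soft-interrupt plumbing is omitted): each secondary CPU runs hardclock() on its own frame at splsched().

void
ipi_hardclock_sketch(struct clockframe *cap)
{
	int s = splsched();
	hardclock(cap);
	splx(s);
}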
Example #13
static int
xen_timer_handler(void *arg, struct trapframe *regs)
{
	int64_t delta;

#if defined(I586_CPU) || defined(I686_CPU)
	static int microset_iter; /* call cc_microset once/sec */
	struct cpu_info *ci = curcpu();
	
	/*
	 * If we have a cycle counter, do the microset thing.
	 */
	if (ci->ci_feature_flags & CPUID_TSC) {
		if (
#if defined(MULTIPROCESSOR)
		    CPU_IS_PRIMARY(ci) &&
#endif
		    (microset_iter--) == 0) {
			microset_iter = hz - 1;
#if defined(MULTIPROCESSOR)
			x86_broadcast_ipi(X86_IPI_MICROSET);
#endif
			cc_microset_time = time;
			cc_microset(ci);
		}
	}
#endif

	get_time_values_from_xen();

	delta = (int64_t)(shadow_system_time + get_tsc_offset_ns() -
			  processed_system_time);
	while (delta >= NS_PER_TICK) {
		hardclock((struct clockframe *)regs);
		delta -= NS_PER_TICK;
		processed_system_time += NS_PER_TICK;
	}

	return 0;
}
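
The catch-up loop above runs one hardclock() per elapsed tick; NS_PER_TICK is assumed to be the tick length in nanoseconds, for example:

#define NS_PER_TICK	(1000000000ULL / hz)	/* assumed definition */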
Example #14
/*
 * NMI handler. Invoked with interrupts disabled.
 * Returns nonzero if NMIs have been reenabled, and the exception handler
 * is allowed to run soft interrupts and AST; zero otherwise.
 */
int
m197_nmi(struct trapframe *eframe)
{
	u_int8_t abort;
	int rc;

	/*
	 * Non-maskable interrupts are either the abort switch (on
	 * cpu0 only) or IPIs (on any cpu). We check for IPI first.
	 */
#ifdef MULTIPROCESSOR
	if ((*(volatile u_int8_t *)(BS_BASE + BS_CPINT)) & BS_CPI_INT) {
		/* disable further NMI for now */
		*(volatile u_int8_t *)(BS_BASE + BS_CPINT) = 0;

		rc = m197_ipi_handler(eframe);

		/* acknowledge */
		*(volatile u_int8_t *)(BS_BASE + BS_CPINT) = BS_CPI_ICLR;

		if (rc != 0)
			m197_nmi_wrapup(eframe);
	} else
#endif
		rc = 1;

	if (CPU_IS_PRIMARY(curcpu())) {
		abort = *(u_int8_t *)(BS_BASE + BS_ABORT);
		if (abort & BS_ABORT_INT) {
			*(u_int8_t *)(BS_BASE + BS_ABORT) =
			    (abort & ~BS_ABORT_IEN) | BS_ABORT_ICLR;
			nmihand(eframe);
			*(u_int8_t *)(BS_BASE + BS_ABORT) |= BS_ABORT_IEN;
		}
	}

	return rc;
}
Example #15
/*
 * Special version of delay() for MP kernels.
 * Processors need to use different timers, so we'll use the two
 * BusSwitch timers for this purpose.
 */
void
m197_delay(int us)
{
	if (CPU_IS_PRIMARY(curcpu())) {
		*(volatile u_int32_t *)(BS_BASE + BS_TCOMP1) = 0xffffffff;
		*(volatile u_int32_t *)(BS_BASE + BS_TCOUNT1) = 0;
		*(volatile u_int8_t *)(BS_BASE + BS_TCTRL1) |= BS_TCTRL_CEN;

		while ((*(volatile u_int32_t *)(BS_BASE + BS_TCOUNT1)) <
		    (u_int32_t)us)
			;
		*(volatile u_int8_t *)(BS_BASE + BS_TCTRL1) &= ~BS_TCTRL_CEN;
	} else {
		*(volatile u_int32_t *)(BS_BASE + BS_TCOMP2) = 0xffffffff;
		*(volatile u_int32_t *)(BS_BASE + BS_TCOUNT2) = 0;
		*(volatile u_int8_t *)(BS_BASE + BS_TCTRL2) |= BS_TCTRL_CEN;

		while ((*(volatile u_int32_t *)(BS_BASE + BS_TCOUNT2)) <
		    (u_int32_t)us)
			;
		*(volatile u_int8_t *)(BS_BASE + BS_TCTRL2) &= ~BS_TCTRL_CEN;
	}
}
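
Usage is identical on either CPU; for example, a caller would simply busy-wait with:

	m197_delay(10);		/* spin for 10 microseconds */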
Example #16
/*
 * Quiesce CPUs in a multiprocessor machine before resuming. We need to do
 * this since the APs will be hatched (but waiting for CPUF_GO), and we don't
 * want the APs to be executing code and causing side effects during the
 * unpack operation.
 */
void
hibernate_quiesce_cpus(void)
{
	struct cpu_info *ci;
	u_long i;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	for (i = 0; i < MAXCPUS; i++) {
		ci = cpu_info[i];
		if (ci == NULL)
			continue;
		if (ci->ci_idle_pcb == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP | CPUF_SP | CPUF_PRIMARY))
			continue;
		atomic_setbits_int(&ci->ci_flags, CPUF_GO | CPUF_PARK);
	}

	/* Wait a bit for the APs to park themselves */
	delay(500000);
}
Example #17
/*
 * arm32_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its expected
 *	destination.
 */
void
arm32_vector_init(vaddr_t va, int which)
{
#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
	/*
	 * If this processor has the security extension, don't bother
	 * to move/map the vector page.  Simply point VBAR to the copy
	 * that exists in the .text segment.
	 */
#ifndef ARM_HAS_VBAR
	if (va == ARM_VECTORS_LOW
	    && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) {
#endif
		extern const uint32_t page0rel[];
		vector_page = (vaddr_t)page0rel;
		KASSERT((vector_page & 0x1f) == 0);
		armreg_vbar_write(vector_page);
#ifdef VERBOSE_INIT_ARM
		printf(" vbar=%p", page0rel);
#endif
		cpu_control(CPU_CONTROL_VECRELOC, 0);
		return;
#ifndef ARM_HAS_VBAR
	}
#endif
#endif
#ifndef ARM_HAS_VBAR
	if (CPU_IS_PRIMARY(curcpu())) {
		extern unsigned int page0[], page0_data[];
		unsigned int *vectors = (int *) va;
		unsigned int *vectors_data = vectors + (page0_data - page0);
		int vec;

		/*
		 * Loop through the vectors we're taking over, and copy the
		 * vector's insn and data word.
		 */
		for (vec = 0; vec < ARM_NVEC; vec++) {
			if ((which & (1 << vec)) == 0) {
				/* Don't want to take over this vector. */
				continue;
			}
			vectors[vec] = page0[vec];
			vectors_data[vec] = page0_data[vec];
		}

		/* Now sync the vectors. */
		cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));

		vector_page = va;
	}

	if (va == ARM_VECTORS_HIGH) {
		/*
		 * Assume the MD caller knows what it's doing here, and
		 * really does want the vector page relocated.
		 *
		 * Note: This has to be done here (and not just in
		 * cpu_setup()) because the vector page needs to be
		 * accessible *before* cpu_startup() is called.
		 * Think ddb(9) ...
		 *
		 * NOTE: If the CPU control register is not readable,
		 * this will totally fail!  We'll just assume that
		 * any system that has high vector support has a
		 * readable CPU control register, for now.  If we
		 * ever encounter one that does not, we'll have to
		 * rethink this.
		 */
		cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
	}
#endif
}
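
A typical call, shown here as an illustrative usage rather than a quote from a specific port: take over every vector and place the page at the high vector address.

	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);	/* e.g. from initarm() */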
Example #18
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
	struct cpu_info *ci = curcpu();
	extern int schedhz;

	switch (a0) {
	case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
		atomic_add_ulong(&ci->ci_intrdepth, 1);

		alpha_ipi_process(ci, framep);

		/*
		 * Handle inter-console messages if we're the primary
		 * CPU.
		 */
		if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
		    hwrpb->rpb_txrdy != 0)
			cpu_iccb_receive();

		atomic_sub_ulong(&ci->ci_intrdepth, 1);
#else
		printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
		break;
		
	case ALPHA_INTR_CLOCK:	/* clock interrupt */
		atomic_add_int(&uvmexp.intrs, 1);
		if (CPU_IS_PRIMARY(ci))
			clk_count.ec_count++;
		if (platform.clockintr) {
			/*
			 * Call hardclock().  This will also call
			 * statclock(). On the primary CPU, it
			 * will also deal with time-of-day stuff.
			 */
			(*platform.clockintr)((struct clockframe *)framep);

			/*
			 * If it's time to call the scheduler clock,
			 * do so.
			 */
			if ((++ci->ci_schedstate.spc_schedticks & 0x3f) == 0 &&
			    schedhz != 0)
				schedclock(ci->ci_curproc);
		}
		break;

	case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
		atomic_add_ulong(&ci->ci_intrdepth, 1);
		a0 = alpha_pal_rdmces();
		if (platform.mcheck_handler)
			(*platform.mcheck_handler)(a0, framep, a1, a2);
		else
			machine_check(a0, framep, a1, a2);
		atomic_sub_ulong(&ci->ci_intrdepth, 1);
		break;

	case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
	    {
		struct scbvec *scb;

		KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

		atomic_add_ulong(&ci->ci_intrdepth, 1);
		atomic_add_int(&uvmexp.intrs, 1);
		scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)];
		(*scb->scb_func)(scb->scb_arg, a1);
		atomic_sub_ulong(&ci->ci_intrdepth, 1);
		break;
	    }

	case ALPHA_INTR_PERF:	/* performance counter interrupt */
		printf("WARNING: received performance counter interrupt!\n");
		break;

	case ALPHA_INTR_PASSIVE:
#if 0
		printf("WARNING: received passive release interrupt vec "
		    "0x%lx\n", a1);
#endif
		break;

	default:
		printf("unexpected interrupt: type 0x%lx vec 0x%lx "
		    "a2 0x%lx"
#if defined(MULTIPROCESSOR)
		    " cpu %lu"
#endif
		    "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
		    , ci->ci_cpuid
#endif
		    );
		panic("interrupt");
		/* NOTREACHED */
	}
}
Example #19
/*
 * Fetch the current mtrr settings from the current CPU (assumed to all
 * be in sync in the SMP case).  Note that if we are here, we assume
 * that MTRRs are enabled, and we may or may not have fixed MTRRs.
 */
void
mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc	*mrd;
	u_int64_t		 msrv;
	int			 i, j, msr, mrt;

	mrd = sc->mr_desc;

	/* We should never be fetching MTRRs from an AP */
	KASSERT(CPU_IS_PRIMARY(curcpu()));
	
	/* Get fixed-range MTRRs, if the CPU supports them */
	if (sc->mr_cap & MR_FIXMTRR) {
		msr = MSR_MTRRfix64K_00000;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrt = mtrr2mrt(msrv & 0xff);
				if (mrt == MDF_UNKNOWN)
					mrt = MDF_UNCACHEABLE;
				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
					mrt | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strlcpy(mrd->mr_owner, mem_owner_bios,
					    sizeof(mrd->mr_owner));
				msrv = msrv >> 8;
			}
		}

		msr = MSR_MTRRfix16K_80000;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrt = mtrr2mrt(msrv & 0xff);
				if (mrt == MDF_UNKNOWN)
					mrt = MDF_UNCACHEABLE;
				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
					mrt | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strlcpy(mrd->mr_owner, mem_owner_bios,
					    sizeof(mrd->mr_owner));
				msrv = msrv >> 8;
			}
		}

		msr = MSR_MTRRfix4K_C0000;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrt = mtrr2mrt(msrv & 0xff);
				if (mrt == MDF_UNKNOWN)
					mrt = MDF_UNCACHEABLE;
				mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
					mrt | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strlcpy(mrd->mr_owner, mem_owner_bios,
					    sizeof(mrd->mr_owner));
				msrv = msrv >> 8;
			}
		}
	}

	/*
	 * (The variable-range MTRR fetch that follows in the full function
	 *  is not included in this excerpt.)
	 */
}
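
The decoding above relies on mtrr2mrt() to turn each hardware type byte into an MDF_* attribute. A sketch of the mapping involved (the x86 type encodings are architectural; the helper name is illustrative of the real table-driven routine):

static int
mtrr2mrt_sketch(int val)
{
	switch (val) {
	case 0x00: return MDF_UNCACHEABLE;	/* UC */
	case 0x01: return MDF_WRITECOMBINE;	/* WC */
	case 0x04: return MDF_WRITETHROUGH;	/* WT */
	case 0x05: return MDF_WRITEPROTECT;	/* WP */
	case 0x06: return MDF_WRITEBACK;	/* WB */
	default:   return MDF_UNKNOWN;
	}
}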
Example #20
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
	struct proc *p;
	struct cpu_info *ci = curcpu();
	extern int schedhz;

	switch (a0) {
	case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
	    {
		u_long pending_ipis, bit;

#if 0
		printf("CPU %lu got IPI\n", cpu_id);
#endif

#ifdef DIAGNOSTIC
		if (ci->ci_dev == NULL) {
			/* XXX panic? */
			printf("WARNING: no device for ID %lu\n", ci->ci_cpuid);
			return;
		}
#endif

		pending_ipis = atomic_loadlatch_ulong(&ci->ci_ipis, 0);
		for (bit = 0; bit < ALPHA_NIPIS; bit++)
			if (pending_ipis & (1UL << bit))
				(*ipifuncs[bit])();

		/*
		 * Handle inter-console messages if we're the primary
		 * CPU.
		 */
		if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
		    hwrpb->rpb_txrdy != 0)
			cpu_iccb_receive();
	    }
#else
		printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
		break;
		
	case ALPHA_INTR_CLOCK:	/* clock interrupt */
#if defined(MULTIPROCESSOR)
		/* XXX XXX XXX */
		if (CPU_IS_PRIMARY(ci) == 0)
			return;
#endif
		uvmexp.intrs++;
		clk_count.ec_count++;
		if (platform.clockintr) {
			/*
			 * Call hardclock().  This will also call
			 * statclock(). On the primary CPU, it
			 * will also deal with time-of-day stuff.
			 */
			(*platform.clockintr)((struct clockframe *)framep);

			/*
			 * If it's time to call the scheduler clock,
			 * do so.
			 */
			if ((++schedclk2 & 0x3f) == 0 &&
			    (p = ci->ci_curproc) != NULL && schedhz != 0)
				schedclock(p);
		}
		break;

	case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
		a0 = alpha_pal_rdmces();
		if (platform.mcheck_handler)
			(*platform.mcheck_handler)(a0, framep, a1, a2);
		else
			machine_check(a0, framep, a1, a2);
		break;

	case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
	    {
		struct scbvec *scb;

		KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

#if defined(MULTIPROCESSOR)
		/* XXX XXX XXX */
		if (CPU_IS_PRIMARY(ci) == 0)
			return;
#endif
		uvmexp.intrs++;

		scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)];
		(*scb->scb_func)(scb->scb_arg, a1);
		break;
	    }

	case ALPHA_INTR_PERF:	/* performance counter interrupt */
		printf("WARNING: received performance counter interrupt!\n");
		break;

	case ALPHA_INTR_PASSIVE:
#if 0
		printf("WARNING: received passive release interrupt vec "
		    "0x%lx\n", a1);
#endif
		break;

	default:
		printf("unexpected interrupt: type 0x%lx vec 0x%lx "
		    "a2 0x%lx"
#if defined(MULTIPROCESSOR)
		    " cpu %lu"
#endif
		    "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
		    , ci->ci_cpuid
#endif
		    );
		panic("interrupt");
		/* NOTREACHED */
	}
}
Example #21
void
cpu_hatch(struct cpu_info *ci)
{
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	/*
	 * Invalidate all the TLB entries (even wired ones) and then reserve
	 * space for the wired TLB entries.
	 */
	mips3_cp0_wired_write(0);
	tlb_invalidate_all();
	mips3_cp0_wired_write(ti->ti_wired);

	/*
	 * Setup HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
	 */
	cpu_hwrena_setup();

	/*
	 * If we are using register zero relative addressing to access cpu_info
	 * in the exception vectors, enter that mapping into TLB now.
	 */
	if (ci->ci_tlb_slot >= 0) {
		const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
		    | mips3_paddr_to_tlbpfn((vaddr_t)ci);
		const struct tlbmask tlbmask = {
			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if (PGSHIFT & 1)
			.tlb_lo0 = tlb_lo,
			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
			.tlb_lo0 = 0,
			.tlb_lo1 = tlb_lo,
#endif
			.tlb_mask = -1,
		};

		tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
	}

	/*
	 * Flush the icache just to be sure.
	 */
	mips_icache_sync_all();

	/*
	 * Let this CPU do its own initialization (for things that have to be
	 * done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_init)(ci);

	// Show this CPU as present.
	atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);

	/*
	 * Announce we are hatched
	 */
	kcpuset_atomic_set(cpus_hatched, cpu_index(ci));

	/*
	 * Now wait to be set free!
	 */
	while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
		/* spin, spin, spin */
	}

	/*
	 * initialize the MIPS count/compare clock
	 */
	mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
	KASSERT(ci->ci_cycles_per_hz != 0);
	ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
	mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
	ci->ci_data.cpu_cc_skew = 0;

	/*
	 * Let this CPU do its own post-running initialization
	 * (for things that have to be done on the local CPU).
	 */
	(*mips_locoresw.lsw_cpu_run)(ci);

	/*
	 * Now turn on interrupts (and verify they are on).
	 */
	spl0();
	KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
	KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
	kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));

	/*
	 * And do a tail call to idle_loop
	 */
	idle_loop(NULL);
}

void
cpu_boot_secondary_processors(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	for (CPU_INFO_FOREACH(cii, ci)) {
		if (CPU_IS_PRIMARY(ci))
			continue;
		KASSERT(ci->ci_data.cpu_idlelwp);

		/*
		 * Skip this CPU if it didn't successfully hatch.
		 */
		if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
			continue;

		ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		kcpuset_set(cpus_running, cpu_index(ci));
		// Spin until the cpu calls idle_loop
		for (u_int i = 0; i < 100; i++) {
			if (kcpuset_isset(cpus_running, cpu_index(ci)))
				break;
			delay(1000);
		}
	}
}