Example #1
File: cpu.c  Project: argp/xnu
/*
 * Idle entry point: park this CPU in a low-power state until an
 * interrupt or a timer deadline arrives.
 */
void
cpu_idle(void)
{
	cpu_data_t     *cpu_data_ptr = getCpuDatap();
	uint64_t	new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
		Idle_load_context();
	if (!SetIdlePop())
		Idle_load_context();
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug)
		arm_debug_set(NULL);
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);

	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop)
			SetIdlePop();
	}

#if KPC
	kpc_idle();
#endif

	platform_cache_idle_enter();
	cpu_idle_wfi((boolean_t) wfi_fast);
	platform_cache_idle_exit();

	ClearIdlePop(TRUE);
	cpu_idle_exit(FALSE);
}
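
The idle-timer update above follows a simple convention: a returned timeout of zero disarms the timer, while any other value is a relative interval that is converted into an absolute deadline before the per-CPU timer queue is resynchronized. A minimal sketch of that idiom, reusing the two xnu calls from the listing but with a hypothetical caller and deadline field:

/*
 * Sketch only. clock_absolutetime_interval_to_deadline() and
 * timer_resync_deadlines() are the xnu routines used above; the struct
 * and the function here are hypothetical.
 */
#include <stdint.h>

extern void clock_absolutetime_interval_to_deadline(uint64_t interval,
    uint64_t *deadline);
extern void timer_resync_deadlines(void);

struct idle_timer {
	uint64_t deadline;		/* 0 means "timer disarmed" */
};

static void
idle_timer_program(struct idle_timer *t, uint64_t interval_ticks)
{
	if (interval_ticks == 0) {
		/* A zero interval turns the idle timer off. */
		t->deadline = 0;
	} else {
		/* Convert "ticks from now" into an absolute deadline. */
		clock_absolutetime_interval_to_deadline(interval_ticks,
		    &t->deadline);
	}
	/* Let the timer layer recompute the next timer pop. */
	timer_resync_deadlines();
}
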
static void
acpi_cpu_mwait_ipi_check_wakeup(void *arg)
{
	volatile uint32_t *mcpu_mwait = (volatile uint32_t *)arg;

	ASSERT(arg != NULL);
	if (*mcpu_mwait != MWAIT_WAKEUP_IPI) {
		/*
		 * CPU has been awakened, notify CPU idle notification system.
		 */
		cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
	} else {
		/*
		 * Toggle interrupt flag to detect pending interrupts.
		 * If interrupt happened, do_interrupt() will notify CPU idle
		 * notification framework so no need to call cpu_idle_exit()
		 * here.
		 */
		sti();
		SMT_PAUSE();
		cli();
	}
}
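
The else branch above exploits the one-instruction interrupt shadow of STI: interrupt delivery resumes only after the instruction that follows sti(), so the single SMT_PAUSE() is the only point where a pending interrupt can fire (and do_interrupt() will then notify the idle framework, so cpu_idle_exit() is not needed here). A stand-alone sketch of that window, assuming GCC-style x86 inline assembly rather than the kernel's sti()/SMT_PAUSE()/cli() wrappers:

/*
 * Illustrative only: open interrupt delivery for one instruction so any
 * pending interrupt can be taken, then mask interrupts again. Hypothetical
 * helper; the kernel uses its sti()/SMT_PAUSE()/cli() wrappers instead.
 */
static inline void
interrupt_drain_window(void)
{
	/*
	 * STI enables interrupt recognition only after the *next*
	 * instruction, so the PAUSE is the single spot where a pending
	 * interrupt may be delivered before CLI masks interrupts again.
	 */
	__asm__ __volatile__("sti; pause; cli" ::: "memory");
}
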
/*
 * enter deep c-state handler
 */
static void
acpi_cpu_cstate(cpu_acpi_cstate_t *cstate)
{
	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
	cpu_t			*cpup = CPU;
	processorid_t		cpu_sid = cpup->cpu_seqid;
	cpupart_t		*cp = cpup->cpu_part;
	hrtime_t		lapic_expire;
	uint8_t			type = cstate->cs_addrspace_id;
	uint32_t		cs_type = cstate->cs_type;
	int			hset_update = 1;
	boolean_t		using_timer;
	cpu_idle_check_wakeup_t check_func = &acpi_cpu_check_wakeup;

	/*
	 * Set our mcpu_mwait here, so we can tell if anyone tries to
	 * wake us between now and when we call mwait.  No other cpu will
	 * attempt to set our mcpu_mwait until we add ourselves to the haltset.
	 */
	if (mcpu_mwait) {
		if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
			*mcpu_mwait = MWAIT_WAKEUP_IPI;
			check_func = &acpi_cpu_mwait_ipi_check_wakeup;
		} else {
			*mcpu_mwait = MWAIT_HALTED;
			check_func = &acpi_cpu_mwait_check_wakeup;
		}
	}

	/*
	 * If this CPU is online, and there are multiple CPUs
	 * in the system, then we should note our halting
	 * by adding ourselves to the partition's halted CPU
	 * bitmap. This allows other CPUs to find/awaken us when
	 * work becomes available.
	 */
	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
		hset_update = 0;

	/*
	 * Add ourselves to the partition's halted CPUs bitmask
	 * and set our HALTED flag, if necessary.
	 *
	 * When a thread becomes runnable, it is placed on the queue
	 * and then the halted cpuset is checked to determine who
	 * (if anyone) should be awakened. We therefore need to first
	 * add ourselves to the halted cpuset, and then check if there
	 * is any work available.
	 *
	 * Note that memory barriers after updating the HALTED flag
	 * are not necessary since an atomic operation (updating the bitmap)
	 * immediately follows. On x86 the atomic operation acts as a
	 * memory barrier for the update of cpu_disp_flags.
	 */
	if (hset_update) {
		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
		bitset_atomic_add(&cp->cp_haltset, cpu_sid);
	}

	/*
	 * Check to make sure there's really nothing to do.
	 * Work destined for this CPU may become available after
	 * this check. We'll be notified through the clearing of our
	 * bit in the halted CPU bitmask, and a write to our mcpu_mwait.
	 *
	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
	 */
	if (disp_anywork()) {
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	/*
	 * We're on our way to being halted.
	 *
	 * The local APIC timer can stop in ACPI C2 and deeper c-states.
	 * Try to program the HPET hardware to substitute for this CPU's
	 * LAPIC timer.
	 * cstate_use_timer() could disable the LAPIC Timer.  Make sure
	 * to start the LAPIC Timer again before leaving this function.
	 *
	 * Disable interrupts here so we will awaken immediately after halting
	 * if someone tries to poke us between now and the time we actually
	 * halt.
	 */
	cli();
	using_timer = cstate_use_timer(&lapic_expire, CSTATE_USING_HPET);

	/*
	 * We check for the presence of our bit after disabling interrupts.
	 * If it's cleared, we'll return. If the bit is cleared after
	 * we check, then cstate_wakeup() will pop us out of the halted
	 * state.
	 *
	 * This means that the ordering of cstate_wakeup() and the clearing
	 * of the bit by cpu_wakeup() is important.
	 * cpu_wakeup() must clear our cp_haltset bit, and then call
	 * cstate_wakeup().
	 * acpi_cpu_cstate() must disable interrupts, then check for the bit.
	 */
	if (hset_update && bitset_in_set(&cp->cp_haltset, cpu_sid) == 0) {
		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		return;
	}

	/*
	 * The check for anything locally runnable is here for performance
	 * and isn't needed for correctness. disp_nrunnable ought to be
	 * in our cache still, so it's inexpensive to check, and if there
	 * is anything runnable we won't have to wait for the poke.
	 */
	if (cpup->cpu_disp->disp_nrunnable != 0) {
		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	if (using_timer == B_FALSE) {

		(void) cstate_use_timer(&lapic_expire,
		    CSTATE_USING_LAT);
		sti();

		/*
		 * We are currently unable to program the HPET to act as this
		 * CPU's proxy LAPIC timer.  This CPU cannot enter C2 or deeper
		 * because no timer is set to wake it up while its LAPIC timer
		 * stalls in deep C-States.
		 * Enter C1 instead.
		 *
		 * cstate_wake_cpu() will wake this CPU with an IPI which
		 * works with MWAIT.
		 */
		i86_monitor(mcpu_mwait, 0, 0);
		if ((*mcpu_mwait & ~MWAIT_WAKEUP_IPI) == MWAIT_HALTED) {
			if (cpu_idle_enter(IDLE_STATE_C1, 0,
			    check_func, (void *)mcpu_mwait) == 0) {
				if ((*mcpu_mwait & ~MWAIT_WAKEUP_IPI) ==
				    MWAIT_HALTED) {
					i86_mwait(0, 0);
				}
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}

		/*
		 * We're no longer halted
		 */
		if (hset_update) {
			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
			bitset_atomic_del(&cp->cp_haltset, cpu_sid);
		}
		return;
	}

	if (type == ACPI_ADR_SPACE_FIXED_HARDWARE) {
		/*
		 * We're on our way to being halted.
		 * To avoid a lost wakeup, arm the monitor before checking
		 * if another cpu wrote to mcpu_mwait to wake us up.
		 */
		i86_monitor(mcpu_mwait, 0, 0);
		if (*mcpu_mwait == MWAIT_HALTED) {
			if (cpu_idle_enter((uint_t)cs_type, 0,
			    check_func, (void *)mcpu_mwait) == 0) {
				if (*mcpu_mwait == MWAIT_HALTED) {
					i86_mwait(cstate->cs_address, 1);
				}
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}
	} else if (type == ACPI_ADR_SPACE_SYSTEM_IO) {
		uint32_t value;
		ACPI_TABLE_FADT *gbl_FADT;

		if (*mcpu_mwait == MWAIT_WAKEUP_IPI) {
			if (cpu_idle_enter((uint_t)cs_type, 0,
			    check_func, (void *)mcpu_mwait) == 0) {
				if (*mcpu_mwait == MWAIT_WAKEUP_IPI) {
					(void) cpu_acpi_read_port(
					    cstate->cs_address, &value, 8);
					acpica_get_global_FADT(&gbl_FADT);
					(void) cpu_acpi_read_port(
					    gbl_FADT->XPmTimerBlock.Address,
					    &value, 32);
				}
				cpu_idle_exit(CPU_IDLE_CB_FLAG_IDLE);
			}
		}
	}

	/*
	 * The LAPIC timer may have stopped in deep c-state.
	 * Reprogram this CPU's LAPIC here before enabling interrupts.
	 */
	(void) cstate_use_timer(&lapic_expire, CSTATE_USING_LAT);
	sti();

	/*
	 * We're no longer halted
	 */
	if (hset_update) {
		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
		bitset_atomic_del(&cp->cp_haltset, cpu_sid);
	}
}
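
The block comments above describe a lost-wakeup-free handshake: the idling CPU publishes its flag word, arms MONITOR on that word's cache line, re-checks the flag, and only then executes MWAIT; a waking CPU simply stores to the flag (deep C-states additionally get an IPI). A condensed sketch of both sides, keeping the i86_monitor()/i86_mwait() wrappers from the listing but using hypothetical flag values and function names:

/*
 * Sketch of the MONITOR/MWAIT handshake above. IDLE_HALTED/IDLE_WAKEUP
 * and both functions are hypothetical; i86_monitor()/i86_mwait() are the
 * wrappers called in the listing (prototypes assumed here).
 */
#include <stdint.h>

#define	IDLE_HALTED	0x0
#define	IDLE_WAKEUP	0x1

extern void i86_monitor(volatile uint32_t *addr, uint32_t ext, uint32_t hint);
extern void i86_mwait(uint32_t data, uint32_t ext);

/* Idling CPU: entered with *flag already set to IDLE_HALTED. */
static void
idle_wait(volatile uint32_t *flag)
{
	/*
	 * Arm the monitor before the final check: any later store to the
	 * monitored line terminates MWAIT, so a wakeup racing with the
	 * check below cannot be lost.
	 */
	i86_monitor(flag, 0, 0);
	if (*flag == IDLE_HALTED)
		i86_mwait(0, 0);
}

/* Waking CPU: make the work visible first, then write the monitored word. */
static void
wake_idle_cpu(volatile uint32_t *flag)
{
	*flag = IDLE_WAKEUP;	/* the store breaks the sleeper's MWAIT */
}
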
Example #4
/*
 * Interrupt service routine, called with interrupts disabled.
 */
void
apix_do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
{
	struct cpu *cpu = CPU;
	int vector = rp->r_trapno, newipl, oldipl = cpu->cpu_pri, ret;
	apix_vector_t *vecp = NULL;

#ifdef TRAPTRACE
	ttp->ttr_marker = TT_INTERRUPT;
	ttp->ttr_cpuid = cpu->cpu_id;
	ttp->ttr_ipl = 0xff;
	ttp->ttr_pri = (uchar_t)oldipl;
	ttp->ttr_spl = cpu->cpu_base_spl;
	ttp->ttr_vector = 0xff;
#endif	/* TRAPTRACE */

	cpu_idle_exit(CPU_IDLE_CB_FLAG_INTR);

	++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;

	/*
	 * If it's a softint go do it now.
	 */
	if (rp->r_trapno == T_SOFTINT) {
		/*
		 * It might be the case that when an interrupt is triggered,
		 * the spl is raised high by splhigh(). Later, when do_splx()
		 * is called to restore the spl, both hardware and software
		 * interrupt pending flags are checked and a T_SOFTINT is
		 * faked accordingly.
		 */
		(void) apix_do_pending_hilevel(cpu, rp);
		(void) apix_do_pending_hardint(cpu, rp);
		(void) apix_do_softint(rp);
		ASSERT(!interrupts_enabled());
#ifdef TRAPTRACE
		ttp->ttr_vector = T_SOFTINT;
#endif
		return;
	}

	/*
	 * Send EOI to the local APIC and get the new interrupt priority level.
	 */
	newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
#ifdef TRAPTRACE
	ttp->ttr_ipl = (uchar_t)newipl;
#endif	/* TRAPTRACE */

	/*
	 * Bail if it is a spurious interrupt
	 */
	if (newipl == -1)
		return;

	vector = rp->r_trapno;
	vecp = xv_vector(cpu->cpu_id, vector);
#ifdef TRAPTRACE
	ttp->ttr_vector = (short)vector;
#endif	/* TRAPTRACE */

	/*
	 * Direct dispatch for IPI, MSI, MSI-X
	 */
	if (vecp && vecp->v_type != APIX_TYPE_FIXED &&
	    newipl > MAX(oldipl, cpu->cpu_base_spl)) {
		caddr_t newsp;

		if (newipl > LOCK_LEVEL) {
			if (apix_hilevel_intr_prolog(cpu, newipl, oldipl, rp)
			    == 0) {
				newsp = cpu->cpu_intr_stack;
				switch_sp_and_call(newsp, apix_dispatch_hilevel,
				    vector, 0);
			} else {
				apix_dispatch_hilevel(vector, 0);
			}
			(void) apix_hilevel_intr_epilog(cpu, oldipl);
		} else {
			newsp = apix_intr_thread_prolog(cpu, newipl,
			    (caddr_t)rp);
			switch_sp_and_call(newsp, apix_dispatch_lowlevel,
			    vector, oldipl);
		}
	} else {
		/* Add to per-pil pending queue */
		apix_add_pending_hardint(vector);
		if (newipl <= MAX(oldipl, cpu->cpu_base_spl) ||
		    !apixs[cpu->cpu_id]->x_intr_pending)
			return;
	}

	if (apix_do_pending_hilevel(cpu, rp) < 0)
		return;

	do {
		ret = apix_do_pending_hardint(cpu, rp);

		/*
		 * Deliver any pending soft interrupts.
		 */
		(void) apix_do_softint(rp);
	} while (!ret && LOWLEVEL_PENDING(cpu));
}
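
apix_do_interrupt() above and do_interrupt() in the next example share the same dispatch rule: a vector whose new PIL exceeds LOCK_LEVEL is serviced to completion on the per-CPU interrupt stack (or in place when already on it), while lower-priority vectors are handed off to an interrupt thread that may block. A schematic sketch of that decision, with hypothetical helpers standing in for the prolog/epilog and switch_sp_and_call() machinery in the listings:

/*
 * Schematic only. Every identifier here is a hypothetical stand-in for
 * the prolog/epilog and stack-switch helpers used in the listings.
 */
#define	LOCK_LEVEL	10	/* assumed: highest PIL still run in a thread */

extern int  on_interrupt_stack(void);				/* hypothetical */
extern char *percpu_interrupt_stack(void);			/* hypothetical */
extern void run_on_stack(char *sp,
    void (*func)(unsigned int, int), unsigned int vec, int arg);	/* hypothetical */
extern void dispatch_in_place(unsigned int vec, int arg);		/* hypothetical */
extern void dispatch_to_intr_thread(unsigned int vec, int oldipl);	/* hypothetical */

static void
dispatch_vector(unsigned int vector, int newipl, int oldipl)
{
	if (newipl > LOCK_LEVEL) {
		/* High PIL: run to completion on the interrupt stack. */
		if (!on_interrupt_stack())
			run_on_stack(percpu_interrupt_stack(),
			    dispatch_in_place, vector, 0);
		else
			dispatch_in_place(vector, 0);
	} else {
		/*
		 * Low PIL: switch to an interrupt thread's stack so the
		 * handler may block; oldipl is restored when it returns.
		 */
		dispatch_to_intr_thread(vector, oldipl);
	}
}
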
Example #5
/*ARGSUSED*/
void
do_interrupt(struct regs *rp, trap_trace_rec_t *ttp)
{
	struct cpu *cpu = CPU;
	int newipl, oldipl = cpu->cpu_pri;
	uint_t vector;
	caddr_t newsp;

#ifdef TRAPTRACE
	ttp->ttr_marker = TT_INTERRUPT;
	ttp->ttr_ipl = 0xff;
	ttp->ttr_pri = oldipl;
	ttp->ttr_spl = cpu->cpu_base_spl;
	ttp->ttr_vector = 0xff;
#endif	/* TRAPTRACE */

	cpu_idle_exit(CPU_IDLE_CB_FLAG_INTR);

	++*(uint16_t *)&cpu->cpu_m.mcpu_istamp;

	/*
	 * If it's a softint go do it now.
	 */
	if (rp->r_trapno == T_SOFTINT) {
		dosoftint(rp);
		ASSERT(!interrupts_enabled());
		return;
	}

	/*
	 * Raise the interrupt priority.
	 */
	newipl = (*setlvl)(oldipl, (int *)&rp->r_trapno);
#ifdef TRAPTRACE
	ttp->ttr_ipl = newipl;
#endif	/* TRAPTRACE */

	/*
	 * Bail if it is a spurious interrupt
	 */
	if (newipl == -1)
		return;
	cpu->cpu_pri = newipl;
	vector = rp->r_trapno;
#ifdef TRAPTRACE
	ttp->ttr_vector = vector;
#endif	/* TRAPTRACE */
	if (newipl > LOCK_LEVEL) {
		/*
		 * High priority interrupts run on this cpu's interrupt stack.
		 */
		if (hilevel_intr_prolog(cpu, newipl, oldipl, rp) == 0) {
			newsp = cpu->cpu_intr_stack;
			switch_sp_and_call(newsp, dispatch_hilevel, vector, 0);
		} else { /* already on the interrupt stack */
			dispatch_hilevel(vector, 0);
		}
		(void) hilevel_intr_epilog(cpu, newipl, oldipl, vector);
	} else {
		/*
		 * Run this interrupt in a separate thread.
		 */
		newsp = intr_thread_prolog(cpu, (caddr_t)rp, newipl);
		switch_sp_and_call(newsp, dispatch_hardint, vector, oldipl);
	}

#if !defined(__xpv)
	/*
	 * Deliver any pending soft interrupts.
	 */
	if (cpu->cpu_softinfo.st_pending)
		dosoftint(rp);
#endif	/* !__xpv */
}