Example #1
File: pmCPU.c Project: CptFrazz/xnu
/*
 * Called when the CPU is idle.  It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t		*my_cpu		= current_cpu_datap();

    if (my_cpu == NULL)
	goto out;

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    if (pmInitDone) {
	/*
	 * Handle case where ml_set_maxbusdelay() or ml_set_maxintdelay()
	 * were called prior to the CPU PM kext being registered.  We do
	 * this here since idle is where the decisions using these values
	 * are made, so this is the first point at which they will be used.
	 */
	if (earlyMaxBusDelay != DELAY_UNSET)
	    ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));

	if (earlyMaxIntDelay != DELAY_UNSET)
	    ml_set_maxintdelay(earlyMaxIntDelay);
    }

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->MachineIdle != NULL)
	(*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
	/*
	 * If no power management, re-enable interrupts and halt.
	 * This will keep the CPU from spinning through the scheduler
	 * and will allow at least some minimal power savings (but it
	 * can cause problems in some MP configurations w.r.t. the APIC
	 * stopping during a GV3 transition).
	 */
	pal_hlt();

	/* Once woken, re-disable interrupts. */
	pal_cli();
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    my_cpu->lcpu.state = LCPU_RUN;

    /*
     * Re-enable interrupts.
     */
  out:
    pal_sti();
}
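
The (*pmDispatch->MachineIdle)(...) callout above is a dispatch-table pattern: the CPU power-management kext installs its idle implementation at load time, and the kernel falls back to a plain halt until it does. A minimal sketch of that pattern follows, under simplified assumptions: the real pmDispatch_t carries many more callouts, and pm_sketch_register() is a hypothetical name, not xnu's registration entry point.

/* Sketch of the PM kext callout-table pattern (illustrative only). */
#include <stdint.h>
#include <stddef.h>

typedef struct {
    /* Idle handler; the argument is the maximum idle duration.
     * The caller above passes 0x7FFFFFFFFFFFFFFF, i.e. "no limit". */
    void (*MachineIdle)(uint64_t maxIdleDuration);
} pm_dispatch_sketch_t;

static pm_dispatch_sketch_t *pm_sketch = NULL;

/* Called by the PM kext once it loads, to install its callouts. */
void pm_sketch_register(pm_dispatch_sketch_t *callouts)
{
    pm_sketch = callouts;
}

/* Idle path: prefer the kext's handler if one has been registered. */
void pm_sketch_idle(void)
{
    if (pm_sketch != NULL && pm_sketch->MachineIdle != NULL)
        (*pm_sketch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
}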
Example #2
File: pmCPU.c Project: SbIm/xnu-env
/*
 * Called when the CPU is idle.  It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
    cpu_data_t		*my_cpu		= current_cpu_datap();

    if (my_cpu == NULL)
	goto out;

    my_cpu->lcpu.state = LCPU_IDLE;
    DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
    MARK_CPU_IDLE(cpu_number());

    if (pmInitDone
	&& pmDispatch != NULL
	&& pmDispatch->MachineIdle != NULL)
	(*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
    else {
	/*
	 * If no power management, re-enable interrupts and halt.
	 * This will keep the CPU from spinning through the scheduler
	 * and will allow at least some minimal power savings (but it
	 * can cause problems in some MP configurations w.r.t. the APIC
	 * stopping during a GV3 transition).
	 */
	__asm__ volatile ("sti; hlt");
    }

    /*
     * Mark the CPU as running again.
     */
    MARK_CPU_ACTIVE(cpu_number());
    DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
    my_cpu->lcpu.state = LCPU_RUN;

    /*
     * Re-enable interrupts.
     */
  out:
    __asm__ volatile ("sti; hlt");
}
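
Example #2 predates the platform abstraction layer and issues the halt sequence as raw inline assembly, where Examples #1 and #3 go through pal_hlt()/pal_cli()/pal_sti(). A sketch of those wrappers, assuming GCC-style inline assembly on x86 (the bodies here are illustrative; in xnu they live in the PAL headers):

/* Illustrative PAL wrappers, assuming GCC-style inline asm on x86. */
static inline void pal_sti(void) { __asm__ volatile ("sti"); }  /* enable interrupts */
static inline void pal_cli(void) { __asm__ volatile ("cli"); }  /* disable interrupts */

/* "sti; hlt" enables interrupts and halts atomically enough: sti
 * takes effect only after the following instruction, so no interrupt
 * can slip in between and leave the CPU halted with nothing to wake it. */
static inline void pal_hlt(void) { __asm__ volatile ("sti; hlt"); }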
Example #3
/*
 * Called when the CPU is idle.  It calls into the power management kext
 * to determine the best way to idle the CPU.
 */
void
machine_idle(void)
{
	cpu_data_t		*my_cpu		= current_cpu_datap();
	__unused uint32_t	cnum = my_cpu->cpu_number;
	uint64_t		ctime, rtime, itime;
#if CST_DEMOTION_DEBUG
	processor_t		cproc = my_cpu->cpu_processor;
	uint64_t		cwakeups = PROCESSOR_DATA(cproc, wakeups_issued_total);
#endif /* CST_DEMOTION_DEBUG */
	uint64_t esdeadline, ehdeadline;
	boolean_t do_process_pending_timers = FALSE;

	ctime = mach_absolute_time();
	esdeadline = my_cpu->rtclock_timer.queue.earliest_soft_deadline;
	ehdeadline = my_cpu->rtclock_timer.deadline;
	/* Determine if pending timers exist */
	if ((ctime >= esdeadline) && (ctime < ehdeadline) &&
	    ((ehdeadline - ctime) < idle_entry_timer_processing_hdeadline_threshold)) {
		idle_pending_timers_processed++;
		do_process_pending_timers = TRUE;
		goto machine_idle_exit;
	} else {
		TCOAL_DEBUG(0xCCCC0000, ctime, my_cpu->rtclock_timer.queue.earliest_soft_deadline, my_cpu->rtclock_timer.deadline, idle_pending_timers_processed, 0);
	}
    
	my_cpu->lcpu.state = LCPU_IDLE;
	DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
	MARK_CPU_IDLE(cnum);

	rtime = ctime - my_cpu->cpu_ixtime;

	my_cpu->cpu_rtime_total += rtime;
	machine_classify_interval(rtime, &my_cpu->cpu_rtimes[0], &cpu_rtime_bins[0], CPU_RTIME_BINS);
#if CST_DEMOTION_DEBUG
	uint32_t cl = 0, ch = 0;
	uint64_t c3res, c6res, c7res;
	rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch);
	c3res = ((uint64_t)ch << 32) | cl;
	rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY, &cl, &ch);
	c6res = ((uint64_t)ch << 32) | cl;
	rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch);
	c7res = ((uint64_t)ch << 32) | cl;
#endif

	if (pmInitDone) {
		/*
		 * Handle case where ml_set_maxbusdelay() or ml_set_maxintdelay()
		 * were called prior to the CPU PM kext being registered.  We do
		 * this here since idle is where the decisions using these values
		 * are made, so this is the first point at which they will be used.
		 */
		if (earlyMaxBusDelay != DELAY_UNSET)
			ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF));
		if (earlyMaxIntDelay != DELAY_UNSET)
			ml_set_maxintdelay(earlyMaxIntDelay);
	}

	if (pmInitDone
	    && pmDispatch != NULL
	    && pmDispatch->MachineIdle != NULL)
		(*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL);
	else {
		/*
		 * If no power management, re-enable interrupts and halt.
		 * This will keep the CPU from spinning through the scheduler
		 * and will allow at least some minimal power savings (but it
		 * can cause problems in some MP configurations w.r.t. the APIC
		 * stopping during a GV3 transition).
		 */
		pal_hlt();
		/* Once woken, re-disable interrupts. */
		pal_cli();
	}

	/*
	 * Mark the CPU as running again.
	 */
	MARK_CPU_ACTIVE(cnum);
	DBGLOG(cpu_handle, cnum, MP_UNIDLE);
	my_cpu->lcpu.state = LCPU_RUN;
	uint64_t ixtime = my_cpu->cpu_ixtime = mach_absolute_time();
	itime = ixtime - ctime;
	my_cpu->cpu_idle_exits++;
	my_cpu->cpu_itime_total += itime;
	machine_classify_interval(itime, &my_cpu->cpu_itimes[0], &cpu_itime_bins[0], CPU_ITIME_BINS);
#if CST_DEMOTION_DEBUG
	cl = ch = 0;
	rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch);
	c3res = (((uint64_t)ch << 32) | cl) - c3res;
	rdmsr_carefully(MSR_IA32_CORE_C6_RESIDENCY, &cl, &ch);
	c6res = (((uint64_t)ch << 32) | cl) - c6res;
	rdmsr_carefully(MSR_IA32_CORE_C7_RESIDENCY, &cl, &ch);
	c7res = (((uint64_t)ch << 32) | cl) - c7res;

	uint64_t ndelta = itime - tmrCvt(c3res + c6res + c7res, tscFCvtt2n);
	KERNEL_DEBUG_CONSTANT(0xcead0000, ndelta, itime, c7res, c6res, c3res);
	if ((itime > 1000000) && (ndelta > 250000))
		KERNEL_DEBUG_CONSTANT(0xceae0000, ndelta, itime, c7res, c6res, c3res);
#endif

	machine_idle_exit:
	/*
	 * Re-enable interrupts.
	 */

	pal_sti();

	if (do_process_pending_timers) {
		TCOAL_DEBUG(0xBBBB0000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, idle_pending_timers_processed, 0);

		/* Adjust to reflect that this isn't truly a package idle exit */
		__sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1);
		lapic_timer_swi(); /* Trigger software timer interrupt */
		__sync_fetch_and_add(&my_cpu->lcpu.package->num_idle, 1);

		TCOAL_DEBUG(0xBBBB0000 | DBG_FUNC_END, ctime, esdeadline, idle_pending_timers_processed, 0, 0);
	}
#if CST_DEMOTION_DEBUG
	uint64_t nwakeups = PROCESSOR_DATA(cproc, wakeups_issued_total);

	if ((nwakeups == cwakeups) && (topoParms.nLThreadsPerPackage == my_cpu->lcpu.package->num_idle)) {
		KERNEL_DEBUG_CONSTANT(0xceaa0000, cwakeups, 0, 0, 0, 0);
	}
#endif    
}
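
The CST_DEMOTION_DEBUG blocks above measure how much of the idle interval the core actually spent in C3/C6/C7: each residency MSR is sampled as two 32-bit halves before and after the idle period, reassembled into 64 bits, and differenced. A minimal sketch of that sample-and-delta pattern, with a hypothetical read_msr_halves() standing in for xnu's rdmsr_carefully():

#include <stdint.h>

/* Hypothetical stand-in for rdmsr_carefully(): reads one MSR and
 * returns its low/high 32-bit halves through out-parameters. */
extern void read_msr_halves(uint32_t msr, uint32_t *lo, uint32_t *hi);

/* Assemble a 64-bit counter from its halves, as the debug code does. */
static uint64_t read_msr_u64(uint32_t msr)
{
    uint32_t lo = 0, hi = 0;
    read_msr_halves(msr, &lo, &hi);
    return ((uint64_t)hi << 32) | lo;
}

/* Sample-and-delta: residency accumulated across one idle period.
 * Counter wraparound is ignored, matching the debug code above. */
static uint64_t residency_during_idle(uint32_t msr, void (*idle)(void))
{
    uint64_t before = read_msr_u64(msr);
    idle();                     /* the idle period being measured */
    return read_msr_u64(msr) - before;
}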
Example #4
kern_return_t
pal_efi_call_in_32bit_mode(uint32_t func,
                           struct pal_efi_registers *efi_reg,
                           void *stack_contents,
                           size_t stack_contents_size, /* 16-byte multiple */
                           uint32_t *efi_status)
{
    DBG("pal_efi_call_in_32bit_mode(0x%08x, %p, %p, %lu, %p)\n",
	func, efi_reg, stack_contents, stack_contents_size, efi_status);

    if (func == 0) {
        return KERN_INVALID_ADDRESS;
    }

    if ((efi_reg == NULL)
        || (stack_contents == NULL)
        || (stack_contents_size % 16 != 0)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (!gPEEFISystemTable || !gPEEFIRuntimeServices) {
        return KERN_NOT_SUPPORTED;
    }

    DBG("pal_efi_call_in_32bit_mode() efi_reg:\n");
    DBG("  rcx: 0x%016llx\n", efi_reg->rcx);
    DBG("  rdx: 0x%016llx\n", efi_reg->rdx);
    DBG("   r8: 0x%016llx\n", efi_reg->r8);
    DBG("   r9: 0x%016llx\n", efi_reg->r9);
    DBG("  rax: 0x%016llx\n", efi_reg->rax);

    DBG("pal_efi_call_in_32bit_mode() stack:\n");
#if PAL_DEBUG
    size_t i;
    for (i = 0; i < stack_contents_size; i += sizeof(uint32_t)) {
	uint32_t *p = (uint32_t *) ((uintptr_t)stack_contents + i);
	DBG("  %p: 0x%08x\n", p, *p);
    } 
#endif

#ifdef __x86_64__
    /*
     * Ensure no interruptions.
     * Taking a spinlock for serialization is technically unnecessary
     * because the EFIRuntime kext should serialize.
     */
    boolean_t istate = ml_set_interrupts_enabled(FALSE);
    simple_lock(&pal_efi_lock);

    /*
     * Switch to special page tables with the entire high kernel space
     * double-mapped into the bottom 4GB.
     *
     * NB: We assume that all data exchanged with RuntimeServices is
     * located in the 4GB of KVA based at VM_MIN_ADDRESS. In particular, kexts
     * loaded in the basement (below VM_MIN_ADDRESS) cannot pass static data.
     * Kernel stack and heap space is OK.
     */
    MARK_CPU_IDLE(cpu_number());
    pal_efi_saved_cr3 = get_cr3_raw();
    pal_efi_saved_cr0 = get_cr0();
    IDPML4[KERNEL_PML4_INDEX] = IdlePML4[KERNEL_PML4_INDEX];
    IDPML4[0]		      = IdlePML4[KERNEL_PML4_INDEX];
    clear_ts();
    set_cr3_raw((uint64_t) ID_MAP_VTOP(IDPML4));
    
    swapgs();			/* Save kernel's GS base */

    /* Set segment state ready for compatibility mode */
    set_gs(NULL_SEG);
    set_fs(NULL_SEG);
    set_es(KERNEL_DS);
    set_ds(KERNEL_DS);
    set_ss(KERNEL_DS);

    _pal_efi_call_in_32bit_mode_asm(func,
                                    efi_reg,
                                    stack_contents,
                                    stack_contents_size);
    
    /* Restore NULL segment state */
    set_ss(NULL_SEG);
    set_es(NULL_SEG);
    set_ds(NULL_SEG);

    swapgs();			/* Restore kernel's GS base */

    /* Restore the 64-bit user GS base we just destroyed */
    wrmsr64(MSR_IA32_KERNEL_GS_BASE,
	    current_cpu_datap()->cpu_uber.cu_user_gs_base);

    /* End of mapping games */
    set_cr3_raw(pal_efi_saved_cr3);
    set_cr0(pal_efi_saved_cr0);
    MARK_CPU_ACTIVE(cpu_number());
    
    simple_unlock(&pal_efi_lock);
    ml_set_interrupts_enabled(istate);
#else
    _pal_efi_call_in_32bit_mode_asm(func,
                                    efi_reg,
                                    stack_contents,
                                    stack_contents_size);
#endif

    *efi_status = (uint32_t)efi_reg->rax;
    DBG("pal_efi_call_in_32bit_mode() efi_status: 0x%x\n", *efi_status);

    return KERN_SUCCESS;
}
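
A hedged sketch of how a caller might drive this entry point. The register-block field order follows the DBG dump in the function above; the stack-image contents, the kern_return_t typedef, and the helper name are illustrative assumptions, not taken from the EFI runtime kext.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

typedef int kern_return_t;   /* stand-in for the Mach return type */

/* Field order follows the DBG output in the function above. */
struct pal_efi_registers { uint64_t rcx, rdx, r8, r9, rax; };

extern kern_return_t pal_efi_call_in_32bit_mode(uint32_t func,
    struct pal_efi_registers *efi_reg, void *stack_contents,
    size_t stack_contents_size, uint32_t *efi_status);

/* Hypothetical caller: invoke a 32-bit EFI runtime service at
 * func_addr, passing its arguments via a 16-byte-multiple stack
 * image (lowest argument first, per the 32-bit calling convention). */
kern_return_t call_efi_service_sketch(uint32_t func_addr,
                                      const uint32_t args[4])
{
    struct pal_efi_registers regs;
    uint32_t stack_image[4];         /* 16 bytes: a 16-byte multiple */
    uint32_t efi_status = 0;

    memset(&regs, 0, sizeof(regs));
    memcpy(stack_image, args, sizeof(stack_image));

    kern_return_t kr = pal_efi_call_in_32bit_mode(func_addr, &regs,
        stack_image, sizeof(stack_image), &efi_status);
    /* On success, the 32-bit EFI_STATUS comes back in efi_status,
     * truncated from efi_reg->rax by the callee. */
    return kr;
}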