Example #1
void mwait_idle_with_hints(unsigned int eax, unsigned int ecx)
{
    unsigned int cpu = smp_processor_id();
    s_time_t expires = per_cpu(timer_deadline, cpu);

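    /*
     * On CPUs that need it, CLFLUSH the line to be monitored before arming
     * MONITOR (cf. the erratum AAI65 note in the acpi_dead_idle examples
     * below); CLFLUSH is not serializing, so it is bracketed with fences.
     */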
    if ( boot_cpu_has(X86_FEATURE_CLFLUSH_MONITOR) )
    {
        mb();
        clflush((void *)&mwait_wakeup(cpu));
        mb();
    }

    __monitor((void *)&mwait_wakeup(cpu), 0, 0);
    smp_mb();

    /*
     * Timer deadline passing is the event on which we will be woken via
     * cpuidle_mwait_wakeup. So check it now that the location is armed.
     */
    if ( (expires > NOW() || expires == 0) && !softirq_pending(cpu) )
    {
        cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
        __mwait(eax, ecx);
        cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
    }

    if ( expires <= NOW() && expires > 0 )
        raise_softirq(TIMER_SOFTIRQ);
}
Example #2
void cpuidle_wakeup_mwait(cpumask_t *mask)
{
    cpumask_t target;
    unsigned int cpu;

    cpumask_and(&target, mask, &cpuidle_mwait_flags);

    /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
    for_each_cpu(cpu, &target)
        mwait_wakeup(cpu) = 0;

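    /*
     * CPUs woken through the monitored flag are stripped from the caller's
     * mask; only the remainder still needs an explicit IPI.
     */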
    cpumask_andnot(mask, mask, &target);
}
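
For context (not part of the examples above), here is a minimal, hypothetical sketch of how a timer-broadcast wakeup path could combine cpuidle_wakeup_mwait() with a softirq IPI: the helper strips from the mask every CPU it woke through the monitored flag, and only the remainder gets an explicit TIMER_SOFTIRQ kick. The function name and exact call sequence are illustrative assumptions, not taken from the examples.

/* Illustrative sketch: wake the CPUs in @mask whose timer deadline passed. */
static void broadcast_timer_wakeup(cpumask_t *mask)
{
    unsigned int cpu = smp_processor_id();

    /* The local CPU is clearly not in MWAIT; raise its softirq directly. */
    if ( cpumask_test_and_clear_cpu(cpu, mask) )
        raise_softirq(TIMER_SOFTIRQ);

    /* Wake every CPU parked in MWAIT by clearing its wakeup flag... */
    cpuidle_wakeup_mwait(mask);

    /* ...and send an IPI only to those not caught in MWAIT. */
    if ( !cpumask_empty(mask) )
        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
}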
Example #3
void cpuidle_wakeup_mwait(cpumask_t *mask)
{
    cpumask_t target;
    unsigned int cpu;

    cpus_and(target, *mask, cpuidle_mwait_flags);

    /* CPU is MWAITing on the cpuidle_mwait_wakeup flag. */
    for_each_cpu_mask(cpu, target)
        mwait_wakeup(cpu) = 0;

    cpus_andnot(*mask, *mask, target);
}
Example #4
static void acpi_dead_idle(void)
{
    struct acpi_processor_power *power;
    struct acpi_processor_cx *cx;
    void *mwait_ptr;

    if ( (power = processor_powers[smp_processor_id()]) == NULL )
        goto default_halt;

    if ( (cx = &power->states[power->count-1]) == NULL )
        goto default_halt;

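    /*
     * The per-CPU wakeup flag doubles as the monitored address; a spurious
     * wakeup simply re-enters MWAIT via the dead-idle loop below.
     */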
    mwait_ptr = (void *)&mwait_wakeup(smp_processor_id());

    if ( cx->entry_method == ACPI_CSTATE_EM_FFH )
    {
        /*
         * The cache must be flushed as the last operation before the CPU
         * goes into the dead state; otherwise the CPU may go offline with
         * dirty data, breaking cache coherency and leading to strange
         * errors.
         */
        wbinvd();

        while ( 1 )
        {
            /*
             * 1. The CLFLUSH is a workaround for erratum AAI65 for
             * the Xeon 7400 series.
             * 2. The WBINVD is insufficient due to the spurious-wakeup
             * case where we return around the loop.
             * 3. Unlike WBINVD, CLFLUSH is a lightweight but non-serializing
             * instruction, hence a memory fence is necessary to make sure
             * all loads/stores are visible before flushing the cache line.
             */
            mb();
            clflush(mwait_ptr);
            __monitor(mwait_ptr, 0, 0);
            mb();
            __mwait(cx->address, 0);
        }
    }

default_halt:
    wbinvd();
    for ( ; ; )
        halt();
}
Example #5
static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
    unsigned int cpu = smp_processor_id();
    s_time_t expires = per_cpu(timer_deadline, cpu);

    __monitor((void *)&mwait_wakeup(cpu), 0, 0);
    smp_mb();

    /*
     * Timer deadline passing is the event on which we will be woken via
     * cpuidle_mwait_wakeup. So check it now that the location is armed.
     */
    if ( expires > NOW() || expires == 0 )
    {
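        /*
         * Advertise in cpuidle_mwait_flags that this CPU can be woken by a
         * write to its mwait_wakeup() flag (see cpuidle_wakeup_mwait above).
         */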
        cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
        __mwait(eax, ecx);
        cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
    }

    if ( expires <= NOW() && expires > 0 )
        raise_softirq(TIMER_SOFTIRQ);
}
Example #6
static void acpi_dead_idle(void)
{
    struct acpi_processor_power *power;
    struct acpi_processor_cx *cx;

    if ( (power = processor_powers[smp_processor_id()]) == NULL )
        goto default_halt;

    if ( (cx = &power->states[power->count-1]) == NULL )
        goto default_halt;

    if ( cx->entry_method == ACPI_CSTATE_EM_FFH )
    {
        void *mwait_ptr = &mwait_wakeup(smp_processor_id());

        /*
         * The cache must be flushed as the last operation before sleeping.
         * Otherwise, the CPU may still hold dirty data, breaking cache
         * coherency and leading to strange errors.
         */
        wbinvd();

        while ( 1 )
        {
            /*
             * 1. The CLFLUSH is a workaround for erratum AAI65 for
             * the Xeon 7400 series.
             * 2. The WBINVD is insufficient due to the spurious-wakeup
             * case where we return around the loop.
             * 3. Unlike WBINVD, CLFLUSH is a lightweight but non-serializing
             * instruction, hence a memory fence is necessary to make sure
             * all loads/stores are visible before flushing the cache line.
             */
            mb();
            clflush(mwait_ptr);
            __monitor(mwait_ptr, 0, 0);
            mb();
            __mwait(cx->address, 0);
        }
    }
    else if ( current_cpu_data.x86_vendor == X86_VENDOR_AMD &&
              cx->entry_method == ACPI_CSTATE_EM_SYSIO )
    {
        /* Intel prefers not to use SYSIO */

        /* Avoid references to shared data after the cache flush */
        u32 address = cx->address;
        u32 pmtmr_ioport_local = pmtmr_ioport;

        wbinvd();

        while ( 1 )
        {
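            /*
             * Read the C-state I/O port to enter the C-state, then do the
             * customary dummy PM-timer read so the chipset has actually
             * acted on the port access before looping.
             */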
            inb(address);
            inl(pmtmr_ioport_local);
        }
    }

default_halt:
    for ( ; ; )
        halt();
}