Code example #1
/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}
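A note on the macro all of these examples revolve around: ACPI_FLUSH_CPU_CACHE() is platform-defined. Below is a minimal sketch of the usual definitions, assuming x86; the exact spellings vary by tree and version, so treat it as illustrative rather than authoritative.

/*
 * Illustrative only: on x86 the macro typically executes WBINVD to write
 * back and invalidate all CPU caches before entering a sleep or C3 state;
 * on platforms whose caches stay coherent in hardware it is a no-op.
 */
#ifdef __x86_64__
#define ACPI_FLUSH_CPU_CACHE()	asm volatile("wbinvd" ::: "memory")
#else
#define ACPI_FLUSH_CPU_CACHE()	/* caches assumed coherent */
#endif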
Code example #2
File: main.c  Project: Tigrouzen/k1099
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
	/* do we have a wakeup address for S2 and S3? */
	if (acpi_state == ACPI_STATE_S3) {
		if (!acpi_wakeup_address) {
			return -EFAULT;
		}
		acpi_set_firmware_waking_vector((acpi_physical_address)
						virt_to_phys((void *)
							     acpi_wakeup_address));

	}
	ACPI_FLUSH_CPU_CACHE();
	acpi_enable_wakeup_device_prep(acpi_state);
#endif
	printk(KERN_INFO PREFIX "Preparing to enter system sleep state S%d\n",
		acpi_state);
	acpi_enter_sleep_state_prep(acpi_state);
	return 0;
}
Code example #3
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}
Code example #4
File: processor_idle.c  Project: AlexShiLucky/linux
static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
				acpi_idle_enter_bm(pr, cx, true);
				return index;
			} else if (drv->safe_state_index >= 0) {
				index = drv->safe_state_index;
				cx = per_cpu(acpi_cstate[index], dev->cpu);
			} else {
				acpi_safe_halt();
				return -EBUSY;
			}
		}
	}

	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
Code example #5
static int acpi_pm_enter(suspend_state_t pm_state)
{
	acpi_status status = AE_OK;
	unsigned long flags = 0;
	u32 acpi_state = acpi_suspend_states[pm_state];

	ACPI_FLUSH_CPU_CACHE();

	/* Do arch specific saving of state. */
	if (pm_state > PM_SUSPEND_STANDBY) {
		int error = acpi_save_state_mem();
		if (error)
			return error;
	}

	local_irq_save(flags);
	acpi_enable_wakeup_device(acpi_state);
	switch (pm_state) {
	case PM_SUSPEND_STANDBY:
		barrier();
		status = acpi_enter_sleep_state(acpi_state);
		break;

	case PM_SUSPEND_MEM:
		if (unlikely(acpi_simulate_suspend_to_ram)) {
			printk(KERN_INFO "ACPI: simulating suspend-to-RAM: "
					 "not calling BIOS.\n");
		} else {
			do_suspend_lowlevel();
		}
		break;

	case PM_SUSPEND_DISK:
		if (acpi_pm_ops.pm_disk_mode == PM_DISK_PLATFORM)
			status = acpi_enter_sleep_state(acpi_state);
		break;
	case PM_SUSPEND_MAX:
		acpi_power_off();
		break;

	default:
		return -EINVAL;
	}

	/* ACPI 3.0 spec (p. 62) says that it's the responsibility
	 * of the OSPM to clear the status bit [ implying that the
	 * POWER_BUTTON event should not reach userspace ]
	 */
	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
		acpi_clear_event(ACPI_EVENT_POWER_BUTTON);

	local_irq_restore(flags);
	printk(KERN_DEBUG "Back to C!\n");

	/* restore processor state
	 * We should only be here if we're coming back from STR or STD.
	 * And, in the case of the latter, the memory image should have already
	 * been loaded from disk.
	 */
	if (pm_state > PM_SUSPEND_STANDBY)
		acpi_restore_state_mem();

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}
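For context, the acpi_suspend_states[] array indexed at the top of acpi_pm_enter() maps the kernel's suspend_state_t values onto ACPI S-states. A reconstruction of that table as it looked in kernels of this vintage follows; the exact initializers are recalled from memory and should be treated as an assumption.

/* Hypothetical reconstruction of the table used by acpi_pm_enter() */
static u32 acpi_suspend_states[] = {
	[PM_SUSPEND_ON]      = ACPI_STATE_S0,
	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
	[PM_SUSPEND_MEM]     = ACPI_STATE_S3,
	[PM_SUSPEND_DISK]    = ACPI_STATE_S4,
	[PM_SUSPEND_MAX]     = ACPI_STATE_S5
};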
Code example #6
ACPI_STATUS
AcpiHwLegacySleep (
    UINT8                   SleepState,
    UINT8                   Flags)
{
    ACPI_BIT_REGISTER_INFO  *SleepTypeRegInfo;
    ACPI_BIT_REGISTER_INFO  *SleepEnableRegInfo;
    UINT32                  Pm1aControl;
    UINT32                  Pm1bControl;
    UINT32                  InValue;
    UINT32                  Retry;
    ACPI_STATUS             Status;


    ACPI_FUNCTION_TRACE (HwLegacySleep);


    SleepTypeRegInfo = AcpiHwGetBitRegisterInfo (ACPI_BITREG_SLEEP_TYPE);
    SleepEnableRegInfo = AcpiHwGetBitRegisterInfo (ACPI_BITREG_SLEEP_ENABLE);

    /* Clear wake status */

    Status = AcpiWriteBitRegister (ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Clear all fixed and general purpose status bits */

    Status = AcpiHwClearAcpiStatus ();
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    if (SleepState != ACPI_STATE_S5)
    {
        /*
         * Disable BM arbitration. This feature is contained within an
         * optional register (PM2 Control), so ignore a BAD_ADDRESS
         * exception.
         */
        Status = AcpiWriteBitRegister (ACPI_BITREG_ARB_DISABLE, 1);
        if (ACPI_FAILURE (Status) && (Status != AE_BAD_ADDRESS))
        {
            return_ACPI_STATUS (Status);
        }
    }

    /*
     * 1) Disable/Clear all GPEs
     * 2) Enable all wakeup GPEs
     */
    Status = AcpiHwDisableAllGpes ();
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }
    AcpiGbl_SystemAwakeAndRunning = FALSE;

    Status = AcpiHwEnableAllWakeupGpes ();
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Optionally execute _GTS (Going To Sleep) */

    if (Flags & ACPI_EXECUTE_GTS)
    {
        AcpiHwExecuteSleepMethod (METHOD_PATHNAME__GTS, SleepState);
    }

    /* Get current value of PM1A control */

    Status = AcpiHwRegisterRead (ACPI_REGISTER_PM1_CONTROL,
                                 &Pm1aControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }
    ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
                       "Entering sleep state [S%u]\n", SleepState));

    /* Clear the SLP_EN and SLP_TYP fields */

    Pm1aControl &= ~(SleepTypeRegInfo->AccessBitMask |
                     SleepEnableRegInfo->AccessBitMask);
    Pm1bControl = Pm1aControl;

    /* Insert the SLP_TYP bits */

    Pm1aControl |= (AcpiGbl_SleepTypeA << SleepTypeRegInfo->BitPosition);
    Pm1bControl |= (AcpiGbl_SleepTypeB << SleepTypeRegInfo->BitPosition);

    /*
     * We split the writes of SLP_TYP and SLP_EN to workaround
     * poorly implemented hardware.
     */

    /* Write #1: write the SLP_TYP data to the PM1 Control registers */

    Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Insert the sleep enable (SLP_EN) bit */

    Pm1aControl |= SleepEnableRegInfo->AccessBitMask;
    Pm1bControl |= SleepEnableRegInfo->AccessBitMask;

    /* Flush caches, as per ACPI specification */

    ACPI_FLUSH_CPU_CACHE ();

    /* Write #2: Write both SLP_TYP + SLP_EN */

    Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    if (SleepState > ACPI_STATE_S3)
    {
        /*
         * We wanted to sleep > S3, but it didn't happen (by virtue of the
         * fact that we are still executing!)
         *
         * Wait ten seconds, then try again. This is to get S4/S5 to work on
         * all machines.
         *
         * We wait so long to allow chipsets that poll this reg very slowly
         * to still read the right value. Ideally, this block would go
         * away entirely.
         */
        AcpiOsStall (10000000);

        Status = AcpiHwRegisterWrite (ACPI_REGISTER_PM1_CONTROL,
                                      SleepEnableRegInfo->AccessBitMask);
        if (ACPI_FAILURE (Status))
        {
            return_ACPI_STATUS (Status);
        }
    }

    /* Wait for transition back to Working State */

    Retry = 1000;
    do
    {
        Status = AcpiReadBitRegister (ACPI_BITREG_WAKE_STATUS, &InValue);
        if (ACPI_FAILURE (Status))
        {
            return_ACPI_STATUS (Status);
        }

        if (AcpiGbl_EnableInterpreterSlack)
        {
            /*
             * Some BIOSs don't set WAK_STS at all.  Give up waiting after
             * 1000 retries if it still isn't set.
             */
            if (Retry-- == 0)
            {
                break;
            }
        }

    } while (!InValue);

    return_ACPI_STATUS (AE_OK);
}
Code example #7
File: hwsleep.c  Project: cywzl/spice4xen
/*******************************************************************************
 *
 * FUNCTION:    acpi_enter_sleep_state
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
 *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/
acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
{
	u32 PM1Acontrol;
	u32 PM1Bcontrol;
	struct acpi_bit_register_info *sleep_type_reg_info;
	struct acpi_bit_register_info *sleep_enable_reg_info;
#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
	u32 in_value;
#else
	int err;
#endif
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);

	if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
	    (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
		ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X",
			    acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
	}

	sleep_type_reg_info =
	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE_A);
	sleep_enable_reg_info =
	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);

	/* Clear wake status */

	status =
	    acpi_set_register(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Clear all fixed and general purpose status bits */

	status = acpi_hw_clear_acpi_status(ACPI_MTX_DO_NOT_LOCK);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * 1) Disable/Clear all GPEs
	 * 2) Enable all wakeup GPEs
	 */
	status = acpi_hw_disable_all_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	acpi_gbl_system_awake_and_running = FALSE;

	status = acpi_hw_enable_all_wakeup_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Get current value of PM1A control */

	status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
				       ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "Entering sleep state [S%d]\n", sleep_state));

	/* Clear SLP_EN and SLP_TYP fields */

	PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask |
			 sleep_enable_reg_info->access_bit_mask);
	PM1Bcontrol = PM1Acontrol;

	/* Insert SLP_TYP bits */

	PM1Acontrol |=
	    (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
	PM1Bcontrol |=
	    (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);

	/*
	 * We split the writes of SLP_TYP and SLP_EN to workaround
	 * poorly implemented hardware.
	 */

	/* Write #1: fill in SLP_TYP data */

	status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
					ACPI_REGISTER_PM1A_CONTROL,
					PM1Acontrol);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
					ACPI_REGISTER_PM1B_CONTROL,
					PM1Bcontrol);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Insert SLP_ENABLE bit */

	PM1Acontrol |= sleep_enable_reg_info->access_bit_mask;
	PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask;

	/* Write #2: SLP_TYP + SLP_EN */

	ACPI_FLUSH_CPU_CACHE();

#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
	status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
					ACPI_REGISTER_PM1A_CONTROL,
					PM1Acontrol);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
					ACPI_REGISTER_PM1B_CONTROL,
					PM1Bcontrol);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (sleep_state > ACPI_STATE_S3) {
		/*
		 * We wanted to sleep > S3, but it didn't happen (by virtue of the
		 * fact that we are still executing!)
		 *
		 * Wait ten seconds, then try again. This is to get S4/S5 to work on
		 * all machines.
		 *
		 * We wait so long to allow chipsets that poll this reg very slowly to
		 * still read the right value. Ideally, this block would go
		 * away entirely.
		 */
		acpi_os_stall(10000000);

		status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
						ACPI_REGISTER_PM1_CONTROL,
						sleep_enable_reg_info->
						access_bit_mask);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/* Wait until we enter sleep state */

	do {
		status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value,
					   ACPI_MTX_DO_NOT_LOCK);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		/* Spin until we wake */

	} while (!in_value);
#else
	/* PV ACPI just needs to check the hypercall return value */
	err = acpi_notify_hypervisor_state(sleep_state,
			PM1Acontrol, PM1Bcontrol);
	if (err) {
		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
				  "Hypervisor failure [%d]\n", err));
		return_ACPI_STATUS(AE_ERROR);
	}
#endif

	return_ACPI_STATUS(AE_OK);
}
Code example #8
File: hwsleep.c  Project: Luwak/linux
/*******************************************************************************
 *
 * FUNCTION:    acpi_hw_legacy_sleep
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enter a system sleep state via the legacy FADT PM registers
 *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/
acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
{
	struct acpi_bit_register_info *sleep_type_reg_info;
	struct acpi_bit_register_info *sleep_enable_reg_info;
	u32 pm1a_control;
	u32 pm1b_control;
	u32 in_value;
	acpi_status status;

	ACPI_FUNCTION_TRACE(hw_legacy_sleep);

	sleep_type_reg_info =
	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
	sleep_enable_reg_info =
	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);

	/* Clear wake status */

	status =
	    acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Clear all fixed and general purpose status bits */

	status = acpi_hw_clear_acpi_status();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * 1) Disable/Clear all GPEs
	 * 2) Enable all wakeup GPEs
	 */
	status = acpi_hw_disable_all_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	acpi_gbl_system_awake_and_running = FALSE;

	status = acpi_hw_enable_all_wakeup_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Get current value of PM1A control */

	status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
				       &pm1a_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "Entering sleep state [S%u]\n", sleep_state));

	/* Clear the SLP_EN and SLP_TYP fields */

	pm1a_control &= ~(sleep_type_reg_info->access_bit_mask |
			  sleep_enable_reg_info->access_bit_mask);
	pm1b_control = pm1a_control;

	/* Insert the SLP_TYP bits */

	pm1a_control |=
	    (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
	pm1b_control |=
	    (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);

	/*
	 * We split the writes of SLP_TYP and SLP_EN to workaround
	 * poorly implemented hardware.
	 */

	/* Write #1: write the SLP_TYP data to the PM1 Control registers */

	status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Insert the sleep enable (SLP_EN) bit */

	pm1a_control |= sleep_enable_reg_info->access_bit_mask;
	pm1b_control |= sleep_enable_reg_info->access_bit_mask;

	/* Flush caches, as per ACPI specification */

	ACPI_FLUSH_CPU_CACHE();

	status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
				       pm1b_control);
	if (ACPI_SKIP(status))
		return_ACPI_STATUS(AE_OK);
	if (ACPI_FAILURE(status))
		return_ACPI_STATUS(status);
	/* Write #2: Write both SLP_TYP + SLP_EN */

	status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (sleep_state > ACPI_STATE_S3) {
		/*
		 * We wanted to sleep > S3, but it didn't happen (by virtue of the
		 * fact that we are still executing!)
		 *
		 * Wait ten seconds, then try again. This is to get S4/S5 to work on
		 * all machines.
		 *
		 * We wait so long to allow chipsets that poll this reg very slowly
		 * to still read the right value. Ideally, this block would go
		 * away entirely.
		 */
		acpi_os_stall(10000000);

		status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
						sleep_enable_reg_info->
						access_bit_mask);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/* Wait for transition back to Working State */

	do {
		status =
		    acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

	} while (!in_value);

	return_ACPI_STATUS(AE_OK);
}
Code example #9
File: hwsleep.c  Project: iPodLinux/linux-2.6.7-ipod
acpi_status asmlinkage
acpi_enter_sleep_state (
    u8                              sleep_state)
{
    u32                             PM1Acontrol;
    u32                             PM1Bcontrol;
    struct acpi_bit_register_info   *sleep_type_reg_info;
    struct acpi_bit_register_info   *sleep_enable_reg_info;
    u32                             in_value;
    acpi_status                     status;


    ACPI_FUNCTION_TRACE ("acpi_enter_sleep_state");


    if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
            (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
        ACPI_REPORT_ERROR (("Sleep values out of range: A=%X B=%X\n",
                            acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
        return_ACPI_STATUS (AE_AML_OPERAND_VALUE);
    }

    sleep_type_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_TYPE_A);
    sleep_enable_reg_info = acpi_hw_get_bit_register_info (ACPI_BITREG_SLEEP_ENABLE);

    if (sleep_state != ACPI_STATE_S5) {
        /* Clear wake status */

        status = acpi_set_register (ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_DO_NOT_LOCK);
        if (ACPI_FAILURE (status)) {
            return_ACPI_STATUS (status);
        }

        status = acpi_hw_clear_acpi_status (ACPI_MTX_DO_NOT_LOCK);
        if (ACPI_FAILURE (status)) {
            return_ACPI_STATUS (status);
        }

        /* Disable BM arbitration */

        status = acpi_set_register (ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
        if (ACPI_FAILURE (status)) {
            return_ACPI_STATUS (status);
        }
    }

    /*
     * 1) Disable all runtime GPEs
     * 2) Enable all wakeup GPEs
     */
    status = acpi_hw_prepare_gpes_for_sleep ();
    if (ACPI_FAILURE (status)) {
        return_ACPI_STATUS (status);
    }

    /* Get current value of PM1A control */

    status = acpi_hw_register_read (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
    if (ACPI_FAILURE (status)) {
        return_ACPI_STATUS (status);
    }
    ACPI_DEBUG_PRINT ((ACPI_DB_INIT, "Entering sleep state [S%d]\n", sleep_state));

    /* Clear SLP_EN and SLP_TYP fields */

    PM1Acontrol &= ~(sleep_type_reg_info->access_bit_mask | sleep_enable_reg_info->access_bit_mask);
    PM1Bcontrol = PM1Acontrol;

    /* Insert SLP_TYP bits */

    PM1Acontrol |= (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
    PM1Bcontrol |= (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);

    /*
     * We split the writes of SLP_TYP and SLP_EN to workaround
     * poorly implemented hardware.
     */

    /* Write #1: fill in SLP_TYP data */

    status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol);
    if (ACPI_FAILURE (status)) {
        return_ACPI_STATUS (status);
    }

    status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1B_CONTROL, PM1Bcontrol);
    if (ACPI_FAILURE (status)) {
        return_ACPI_STATUS (status);
    }

    /* Insert SLP_ENABLE bit */

    PM1Acontrol |= sleep_enable_reg_info->access_bit_mask;
    PM1Bcontrol |= sleep_enable_reg_info->access_bit_mask;

    /* Write #2: SLP_TYP + SLP_EN */

    ACPI_FLUSH_CPU_CACHE ();

    status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1A_CONTROL, PM1Acontrol);
    if (ACPI_FAILURE (status)) {
        return_ACPI_STATUS (status);
    }

    status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1B_CONTROL, PM1Bcontrol);
    if (ACPI_FAILURE (status)) {
        return_ACPI_STATUS (status);
    }

    if (sleep_state > ACPI_STATE_S3) {
        /*
         * We wanted to sleep > S3, but it didn't happen (by virtue of the fact that
         * we are still executing!)
         *
         * Wait ten seconds, then try again. This is to get S4/S5 to work on all machines.
         *
         * We wait so long to allow chipsets that poll this reg very slowly to
         * still read the right value. Ideally, this block would go
         * away entirely.
         */
        acpi_os_stall (10000000);

        status = acpi_hw_register_write (ACPI_MTX_DO_NOT_LOCK, ACPI_REGISTER_PM1_CONTROL,
                                         sleep_enable_reg_info->access_bit_mask);
        if (ACPI_FAILURE (status)) {
            return_ACPI_STATUS (status);
        }
    }

    /* Wait until we enter sleep state */

    do {
        status = acpi_get_register (ACPI_BITREG_WAKE_STATUS, &in_value, ACPI_MTX_DO_NOT_LOCK);
        if (ACPI_FAILURE (status)) {
            return_ACPI_STATUS (status);
        }

        /* Spin until we wake */

    } while (!in_value);

    return_ACPI_STATUS (AE_OK);
}
Code example #10
File: hwesleep.c  Project: 99corps/runtime
ACPI_STATUS
AcpiHwExtendedSleep (
    UINT8                   SleepState)
{
    ACPI_STATUS             Status;
    UINT8                   SleepTypeValue;
    UINT64                  SleepStatus;


    ACPI_FUNCTION_TRACE (HwExtendedSleep);


    /* Extended sleep registers must be valid */

    if (!AcpiGbl_FADT.SleepControl.Address ||
        !AcpiGbl_FADT.SleepStatus.Address)
    {
        return_ACPI_STATUS (AE_NOT_EXIST);
    }

    /* Clear wake status (WAK_STS) */

    Status = AcpiWrite ((UINT64) ACPI_X_WAKE_STATUS, &AcpiGbl_FADT.SleepStatus);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    AcpiGbl_SystemAwakeAndRunning = FALSE;

    /* Flush caches, as per ACPI specification */

    ACPI_FLUSH_CPU_CACHE ();

    /*
     * Set the SLP_TYP and SLP_EN bits.
     *
     * Note: We only use the first value returned by the \_Sx method
     * (AcpiGbl_SleepTypeA) - As per ACPI specification.
     */
    ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
        "Entering sleep state [S%u]\n", SleepState));

    SleepTypeValue = ((AcpiGbl_SleepTypeA << ACPI_X_SLEEP_TYPE_POSITION) &
        ACPI_X_SLEEP_TYPE_MASK);

    Status = AcpiWrite ((UINT64) (SleepTypeValue | ACPI_X_SLEEP_ENABLE),
        &AcpiGbl_FADT.SleepControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Wait for transition back to Working State */

    do
    {
        Status = AcpiRead (&SleepStatus, &AcpiGbl_FADT.SleepStatus);
        if (ACPI_FAILURE (Status))
        {
            return_ACPI_STATUS (Status);
        }

    } while (!(((UINT8) SleepStatus) & ACPI_X_WAKE_STATUS));

    return_ACPI_STATUS (AE_OK);
}
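The SleepTypeValue computation above packs the SLP_TYP value into a field of the FADT sleep control register and then ORs in the sleep enable bit. The ACPICA constants involved look roughly like this; the values are quoted from memory, so verify them against the actual ACPICA headers before relying on them.

/* Assumed layout of the extended (reduced-hardware) sleep registers */
#define ACPI_X_WAKE_STATUS          0x80	/* WAK_STS in the sleep status register */
#define ACPI_X_SLEEP_TYPE_MASK      0x1C	/* SLP_TYP field */
#define ACPI_X_SLEEP_TYPE_POSITION  0x02
#define ACPI_X_SLEEP_ENABLE         0x20	/* SLP_EN */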
Code example #11
File: hwsleep.c  Project: CoryXie/BarrelfishOS
ACPI_STATUS
AcpiEnterSleepState (
    UINT8                   SleepState)
{
    UINT32                  Pm1aControl;
    UINT32                  Pm1bControl;
    ACPI_BIT_REGISTER_INFO  *SleepTypeRegInfo;
    ACPI_BIT_REGISTER_INFO  *SleepEnableRegInfo;
    UINT32                  InValue;
    ACPI_OBJECT_LIST        ArgList;
    ACPI_OBJECT             Arg;
    ACPI_STATUS             Status;


    ACPI_FUNCTION_TRACE (AcpiEnterSleepState);


    if ((AcpiGbl_SleepTypeA > ACPI_SLEEP_TYPE_MAX) ||
        (AcpiGbl_SleepTypeB > ACPI_SLEEP_TYPE_MAX))
    {
        ACPI_ERROR ((AE_INFO, "Sleep values out of range: A=%X B=%X",
            AcpiGbl_SleepTypeA, AcpiGbl_SleepTypeB));
        return_ACPI_STATUS (AE_AML_OPERAND_VALUE);
    }

    SleepTypeRegInfo   = AcpiHwGetBitRegisterInfo (ACPI_BITREG_SLEEP_TYPE);
    SleepEnableRegInfo = AcpiHwGetBitRegisterInfo (ACPI_BITREG_SLEEP_ENABLE);

    /* Clear wake status */

    Status = AcpiWriteBitRegister (ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Clear all fixed and general purpose status bits */

    Status = AcpiHwClearAcpiStatus ();
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    if (SleepState != ACPI_STATE_S5)
    {
        /*
         * Disable BM arbitration. This feature is contained within an
         * optional register (PM2 Control), so ignore a BAD_ADDRESS
         * exception.
         */
        Status = AcpiWriteBitRegister (ACPI_BITREG_ARB_DISABLE, 1);
        if (ACPI_FAILURE (Status) && (Status != AE_BAD_ADDRESS))
        {
            return_ACPI_STATUS (Status);
        }
    }

    /*
     * 1) Disable/Clear all GPEs
     * 2) Enable all wakeup GPEs
     */
    Status = AcpiHwDisableAllGpes ();
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }
    AcpiGbl_SystemAwakeAndRunning = FALSE;

    Status = AcpiHwEnableAllWakeupGpes ();
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Execute the _GTS method (Going To Sleep) */

    ArgList.Count = 1;
    ArgList.Pointer = &Arg;
    Arg.Type = ACPI_TYPE_INTEGER;
    Arg.Integer.Value = SleepState;

    Status = AcpiEvaluateObject (NULL, METHOD_NAME__GTS, &ArgList, NULL);
    if (ACPI_FAILURE (Status) && Status != AE_NOT_FOUND)
    {
        return_ACPI_STATUS (Status);
    }

    /* Get current value of PM1A control */

    Status = AcpiHwRegisterRead (ACPI_REGISTER_PM1_CONTROL,
                &Pm1aControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }
    ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
        "Entering sleep state [S%d]\n", SleepState));

    /* Clear the SLP_EN and SLP_TYP fields */

    Pm1aControl &= ~(SleepTypeRegInfo->AccessBitMask |
                     SleepEnableRegInfo->AccessBitMask);
    Pm1bControl = Pm1aControl;

    /* Insert the SLP_TYP bits */

    Pm1aControl |= (AcpiGbl_SleepTypeA << SleepTypeRegInfo->BitPosition);
    Pm1bControl |= (AcpiGbl_SleepTypeB << SleepTypeRegInfo->BitPosition);

    /*
     * We split the writes of SLP_TYP and SLP_EN to workaround
     * poorly implemented hardware.
     */

    /* Write #1: write the SLP_TYP data to the PM1 Control registers */

    Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Insert the sleep enable (SLP_EN) bit */

    Pm1aControl |= SleepEnableRegInfo->AccessBitMask;
    Pm1bControl |= SleepEnableRegInfo->AccessBitMask;

    /* Flush caches, as per ACPI specification */

    ACPI_FLUSH_CPU_CACHE ();

    /* Write #2: Write both SLP_TYP + SLP_EN */

    Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    if (SleepState > ACPI_STATE_S3)
    {
        /*
         * We wanted to sleep > S3, but it didn't happen (by virtue of the
         * fact that we are still executing!)
         *
         * Wait ten seconds, then try again. This is to get S4/S5 to work on
         * all machines.
         *
         * We wait so long to allow chipsets that poll this reg very slowly
         * to still read the right value. Ideally, this block would go
         * away entirely.
         */
        AcpiOsStall (10000000);

        Status = AcpiHwRegisterWrite (ACPI_REGISTER_PM1_CONTROL,
                    SleepEnableRegInfo->AccessBitMask);
        if (ACPI_FAILURE (Status))
        {
            return_ACPI_STATUS (Status);
        }
    }

    /* Wait until we enter sleep state */

    do
    {
        Status = AcpiReadBitRegister (ACPI_BITREG_WAKE_STATUS, &InValue);
        if (ACPI_FAILURE (Status))
        {
            return_ACPI_STATUS (Status);
        }

        /* Spin until we wake */

    } while (!InValue);

    return_ACPI_STATUS (AE_OK);
}
Code example #12
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;


	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}
Code example #13
File: acpi_cpu_cstate.c  Project: wan721/DragonFlyBSD
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cst_idle(void)
{
    struct	acpi_cst_softc *sc;
    struct	acpi_cst_cx *cx_next;
    union microtime_pcpu start, end;
    int		cx_next_idx, i, tdiff, bm_arb_disabled = 0;

    /* If disabled, return immediately. */
    if (acpi_cst_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no Cx state for this processor.
     */
    sc = acpi_cst_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cst_c1_halt();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cst_c1_halt();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity if needed for the selected state.
     * If there was activity, clear the bit and use the lowest non-C3 state.
     */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) {
	int bm_active;

	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = sc->cst_non_c3;
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    sc->cst_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4;
	cx_next->enter(cx_next);
	return;
    }

    /* Execute the proper preamble before entering the selected state. */
    if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	bm_arb_disabled = 1;
    } else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) {
	ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Enter the selected state and check time spent asleep.
     */
    microtime_pcpu_get(&start);
    cpu_mfence();

    cx_next->enter(cx_next);

    cpu_mfence();
    microtime_pcpu_get(&end);

    /* Enable bus master arbitration, if it was disabled. */
    if (bm_arb_disabled)
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    tdiff = microtime_pcpu_diff(&start, &end);
    sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + tdiff) / 4;
}
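Two details above work as a pair: a state qualifies only if trans_lat * 3 <= cst_prev_sleep, and cst_prev_sleep itself is refreshed at the end as a weighted average of measured sleep times. That estimator, pulled out as a standalone sketch (hypothetical helper name, same arithmetic as the source):

/*
 * Exponential moving average used by acpi_cst_idle(): three parts
 * history, one part new measurement, so the estimate adapts smoothly
 * to changing idle patterns.
 */
static inline int
update_prev_sleep(int prev_sleep, int measured_us)
{
    return (prev_sleep * 3 + measured_us) / 4;
}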
Code example #14
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;


	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return -EINVAL;
		}
	}

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return -EINVAL;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}
Code example #15
/*
 * The main idle loop.
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		return;

	KASSERT(ci->ci_ilevel == IPL_NONE);
	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);

	if (__predict_false(sc->sc_cold != false))
		return;

	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		return;

	state = acpicpu_cstate_latency(sc);
	mutex_exit(&sc->sc_mtx);

	/*
	 * Apply AMD C1E quirk.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_C1E) != 0)
		acpicpu_md_quirk_c1e();

	/*
	 * Check for bus master activity. Note that particularly usb(4)
	 * causes high activity, which may prevent the use of C3 states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs caches are shared
	 * by CPUs and bus master control is required to
	 * keep these coherent while in C3. Flushing the
	 * CPU caches is only the last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
}
Code example #16
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
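Examples #16 and #17 time the idle period with the ACPI PM timer via ticks_elapsed(). For reference, here is a sketch of that helper, which must handle the 24-bit PM timer wrapping around when the FADT does not advertise a 32-bit timer; it is reconstructed from memory and hedged accordingly.

/* Sketch of the PM-timer delta helper assumed by the snippets above */
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return t2 - t1;
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;	/* 24-bit wrap */
	else
		return (0xFFFFFFFF - t1) + t2;			/* 32-bit wrap */
}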
Code example #17
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
Code example #18
acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
{
	struct acpi_bit_register_info *sleep_type_reg_info;
	struct acpi_bit_register_info *sleep_enable_reg_info;
	u32 pm1a_control;
	u32 pm1b_control;
	u32 in_value;
	acpi_status status;

	ACPI_FUNCTION_TRACE(hw_legacy_sleep);

	sleep_type_reg_info =
	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
	sleep_enable_reg_info =
	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);

	/* Clear wake status */

	status =
	    acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Clear all fixed and general purpose status bits */

	status = acpi_hw_clear_acpi_status();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (sleep_state != ACPI_STATE_S5) {
		/*
		 * Disable BM arbitration. This feature is contained within an
		 * optional register (PM2 Control), so ignore a BAD_ADDRESS
		 * exception.
		 */
		status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * 1) Disable/Clear all GPEs
	 * 2) Enable all wakeup GPEs
	 */
	status = acpi_hw_disable_all_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	acpi_gbl_system_awake_and_running = FALSE;

	status = acpi_hw_enable_all_wakeup_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Optionally execute _GTS (Going To Sleep) */

	if (flags & ACPI_EXECUTE_GTS) {
		acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state);
	}

	/* Get current value of PM1A control */

	status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
				       &pm1a_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "Entering sleep state [S%u]\n", sleep_state));

	/* Clear the SLP_EN and SLP_TYP fields */

	pm1a_control &= ~(sleep_type_reg_info->access_bit_mask |
			  sleep_enable_reg_info->access_bit_mask);
	pm1b_control = pm1a_control;

	/* Insert the SLP_TYP bits */

	pm1a_control |=
	    (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
	pm1b_control |=
	    (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);

	/*
	 * We split the writes of SLP_TYP and SLP_EN to workaround
	 * poorly implemented hardware.
	 */

	/* Write #1: write the SLP_TYP data to the PM1 Control registers */

	status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Insert the sleep enable (SLP_EN) bit */

	pm1a_control |= sleep_enable_reg_info->access_bit_mask;
	pm1b_control |= sleep_enable_reg_info->access_bit_mask;

	/* Flush caches, as per ACPI specification */

	ACPI_FLUSH_CPU_CACHE();

	status = acpi_os_prepare_sleep(sleep_state, pm1a_control,
				       pm1b_control);
	if (ACPI_SKIP(status))
		return_ACPI_STATUS(AE_OK);
	if (ACPI_FAILURE(status))
		return_ACPI_STATUS(status);
	/* Write #2: Write both SLP_TYP + SLP_EN */

	status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (sleep_state > ACPI_STATE_S3) {
		/*
		 * We wanted to sleep > S3, but it didn't happen (by virtue of the
		 * fact that we are still executing!)
		 *
		 * Wait ten seconds, then try again. This is to get S4/S5 to work on
		 * all machines.
		 *
		 * We wait so long to allow chipsets that poll this reg very slowly
		 * to still read the right value. Ideally, this block would go
		 * away entirely.
		 */
		acpi_os_stall(10000000);

		status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
						sleep_enable_reg_info->
						access_bit_mask);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/* Wait for transition back to Working State */

	do {
		status =
		    acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS, &in_value);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

	} while (!in_value);

	return_ACPI_STATUS(AE_OK);
}
Code example #19
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t  kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;

	pr = __get_cpu_var(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return -EBUSY;
	}

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();
	}

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return -EINVAL;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time =  ktime_to_us(ktime_sub(kt2, kt1));

	sleep_ticks = us_to_pm_timer_ticks(idle_time);

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}
Code example #20
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle()
{
    struct	acpi_cpu_softc *sc;
    struct	acpi_cx *cx_next;
    uint32_t	start_time, end_time;
    int		bm_active, cx_next_idx, i;

    /* If disabled, return immediately. */
    if (cpu_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = min(cx_next_idx, sc->cpu_non_c3);
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + 500000 / hz) / 4;
	acpi_cpu_c1();
	return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	} else
	    ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    AcpiRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
	(cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
	AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    end_time = acpi_TimerDelta(end_time, start_time);
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}
Code example #21
File: hwsleep.c  Project: 1703011/asuswrt-merlin
/*******************************************************************************
 *
 * FUNCTION:    acpi_enter_sleep_state
 *
 * PARAMETERS:  sleep_state         - Which sleep state to enter
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enter a system sleep state (see ACPI 2.0 spec p 231)
 *              THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
 *
 ******************************************************************************/
acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
{
	u32 pm1a_control;
	u32 pm1b_control;
	struct acpi_bit_register_info *sleep_type_reg_info;
	struct acpi_bit_register_info *sleep_enable_reg_info;
	u32 in_value;
	struct acpi_object_list arg_list;
	union acpi_object arg;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_enter_sleep_state);

	if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
	    (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
		ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
			    acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
		return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
	}

	sleep_type_reg_info =
	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
	sleep_enable_reg_info =
	    acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_ENABLE);

	/* Clear wake status */

	status =
	    acpi_write_bit_register(ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Clear all fixed and general purpose status bits */

	status = acpi_hw_clear_acpi_status();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * 1) Disable/Clear all GPEs
	 * 2) Enable all wakeup GPEs
	 */
	status = acpi_hw_disable_all_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	acpi_gbl_system_awake_and_running = FALSE;

	status = acpi_hw_enable_all_wakeup_gpes();
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (gts) {
		/* Execute the _GTS method */

		arg_list.count = 1;
		arg_list.pointer = &arg;
		arg.type = ACPI_TYPE_INTEGER;
		arg.integer.value = sleep_state;

		status = acpi_evaluate_object(NULL, METHOD_NAME__GTS, &arg_list, NULL);
		if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
			return_ACPI_STATUS(status);
		}
	}

	/* Get current value of PM1A control */

	status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
				       &pm1a_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}
	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "Entering sleep state [S%u]\n", sleep_state));

	/* Clear the SLP_EN and SLP_TYP fields */

	pm1a_control &= ~(sleep_type_reg_info->access_bit_mask |
			  sleep_enable_reg_info->access_bit_mask);
	pm1b_control = pm1a_control;

	/* Insert the SLP_TYP bits */

	pm1a_control |=
	    (acpi_gbl_sleep_type_a << sleep_type_reg_info->bit_position);
	pm1b_control |=
	    (acpi_gbl_sleep_type_b << sleep_type_reg_info->bit_position);


	/* Write #1: write the SLP_TYP data to the PM1 Control registers */

	status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Insert the sleep enable (SLP_EN) bit */

	pm1a_control |= sleep_enable_reg_info->access_bit_mask;
	pm1b_control |= sleep_enable_reg_info->access_bit_mask;

	/* Flush caches, as per ACPI specification */

	ACPI_FLUSH_CPU_CACHE();

	tboot_sleep(sleep_state, pm1a_control, pm1b_control);

	/* Write #2: Write both SLP_TYP + SLP_EN */

	status = acpi_hw_write_pm1_control(pm1a_control, pm1b_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (sleep_state > ACPI_STATE_S3) {
		/*
		 * We wanted to sleep > S3, but it didn't happen (by virtue of the
		 * fact that we are still executing!)
		 *
		 * Wait ten seconds, then try again. This is to get S4/S5 to work on
		 * all machines.
		 *
		 * We wait so long to allow chipsets that poll this reg very slowly
		 * to still read the right value. Ideally, this block would go
		 * away entirely.
		 */
		acpi_os_stall(10000000);

		status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
						sleep_enable_reg_info->
						access_bit_mask);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/* Wait for the transition back to the working state */

	do {
		status = acpi_read_bit_register(ACPI_BITREG_WAKE_STATUS,
						    &in_value);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

		/* Spin until we wake */

	} while (!in_value);

	return_ACPI_STATUS(AE_OK);
}
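The split write #1 / write #2 sequence above exists, as the workaround comment notes, because some poorly implemented chipsets mishandle SLP_TYP and SLP_EN changing in a single write. As a minimal, self-contained sketch of just the bit manipulation (the field positions are the standard PM1 control layout from the ACPI specification, SLP_TYP in bits 10..12 and SLP_EN at bit 13; the macro and helper names here are ours, not a kernel API):

#include <stdint.h>

/* PM1 control layout per the ACPI specification. */
#define SLP_TYP_SHIFT	10
#define SLP_TYP_MASK	(0x7u << SLP_TYP_SHIFT)
#define SLP_EN		(1u << 13)

/*
 * Compute the two values used by the split-write workaround: write #1
 * carries only the new SLP_TYP field, write #2 adds SLP_EN (the machine
 * actually enters the sleep state on the second write).
 */
static void pm1_sleep_writes(uint32_t pm1_current, uint32_t slp_typ,
			     uint32_t *write1, uint32_t *write2)
{
	uint32_t value = pm1_current & ~(SLP_TYP_MASK | SLP_EN);

	value |= (slp_typ << SLP_TYP_SHIFT) & SLP_TYP_MASK;
	*write1 = value;
	*write2 = value | SLP_EN;
}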
Code example #22
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t  kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}
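The TS_POLLING handling in the example above guards against a lost wakeup: a remote CPU that queues work looks at the polling flag to decide whether it must send a reschedule IPI, so the idling CPU has to clear the flag before it re-tests need_resched(), with a full memory barrier in between. A toy user-space model of that ordering (the atomics and names are our stand-ins; the kernel uses thread_info flags and smp_mb()):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool polling = true;		/* models TS_POLLING */
static _Atomic bool need_resched_flag = false;	/* models NEED_RESCHED */

/*
 * Returns true when it is safe to enter the idle state. Clearing
 * `polling` must be globally visible before `need_resched_flag` is
 * read, otherwise a wakeup posted in between could be missed.
 */
static bool enter_idle_or_abort(void)
{
	atomic_store(&polling, false);
	atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() */
	if (atomic_load(&need_resched_flag)) {
		atomic_store(&polling, true);		/* abort idle */
		return false;
	}
	return true;
}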
Code example #23
File: hwesleep.c Project: AlexShiLucky/linux
acpi_status acpi_hw_extended_sleep(u8 sleep_state)
{
	acpi_status status;
	u8 sleep_control;
	u64 sleep_status;

	ACPI_FUNCTION_TRACE(hw_extended_sleep);

	/* Extended sleep registers must be valid */

	if (!acpi_gbl_FADT.sleep_control.address ||
	    !acpi_gbl_FADT.sleep_status.address) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	/* Clear wake status (WAK_STS) */

	status = acpi_write((u64)ACPI_X_WAKE_STATUS,
			    &acpi_gbl_FADT.sleep_status);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	acpi_gbl_system_awake_and_running = FALSE;

	/*
	 * Set the SLP_TYP and SLP_EN bits.
	 *
	 * Note: We only use the first value returned by the \_Sx method
	 * (acpi_gbl_sleep_type_a) - As per ACPI specification.
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
			  "Entering sleep state [S%u]\n", sleep_state));

	sleep_control = ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
			 ACPI_X_SLEEP_TYPE_MASK) | ACPI_X_SLEEP_ENABLE;

	/* Flush caches, as per ACPI specification */

	ACPI_FLUSH_CPU_CACHE();

	status = acpi_os_enter_sleep(sleep_state, sleep_control, 0);
	if (status == AE_CTRL_TERMINATE) {
		return_ACPI_STATUS(AE_OK);
	}
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	status = acpi_write((u64)sleep_control, &acpi_gbl_FADT.sleep_control);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Wait for transition back to Working State */

	do {
		status = acpi_read(&sleep_status, &acpi_gbl_FADT.sleep_status);
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}

	} while (!(((u8)sleep_status) & ACPI_X_WAKE_STATUS));

	return_ACPI_STATUS(AE_OK);
}
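With the extended (FADT) sleep registers the whole transaction collapses into one byte. Below is a self-contained restatement of the sleep_control computation above, with the ACPICA constant values copied in so the sketch compiles on its own (we believe these match include/acpi/actbl.h, but treat them as assumptions):

#include <stdint.h>

/* ACPICA values for the FADT extended sleep registers (assumed). */
#define ACPI_X_SLEEP_TYPE_MASK		0x1C
#define ACPI_X_SLEEP_TYPE_POSITION	0x02
#define ACPI_X_SLEEP_ENABLE		0x20

/*
 * Build the SLEEP_CONTROL byte written above: SLP_TYPa in bits 2..4
 * plus SLP_EN at bit 5. Only sleep type A is used for extended sleep.
 */
static uint8_t x_sleep_control(uint8_t sleep_type_a)
{
	return ((sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
		ACPI_X_SLEEP_TYPE_MASK) | ACPI_X_SLEEP_ENABLE;
}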
Code example #24
File: power.c Project: lwhibernate/xen
/* Main interface to do xen specific suspend/resume */
static int enter_state(u32 state)
{
    unsigned long flags;
    int error;
    unsigned long cr4;

    if ( (state <= ACPI_STATE_S0) || (state > ACPI_S_STATES_MAX) )
        return -EINVAL;

    if ( !spin_trylock(&pm_lock) )
        return -EBUSY;

    BUG_ON(system_state != SYS_STATE_active);
    system_state = SYS_STATE_suspend;

    printk(XENLOG_INFO "Preparing system for ACPI S%d state.\n", state);

    freeze_domains();

    acpi_dmar_reinstate();

    if ( (error = disable_nonboot_cpus()) )
    {
        system_state = SYS_STATE_resume;
        goto enable_cpu;
    }

    cpufreq_del_cpu(0);

    hvm_cpu_down();

    acpi_sleep_prepare(state);

    console_start_sync();
    printk("Entering ACPI S%d state.\n", state);

    local_irq_save(flags);
    spin_debug_disable();

    if ( (error = device_power_down()) )
    {
        printk(XENLOG_ERR "Some devices failed to power down.\n");
        system_state = SYS_STATE_resume;
        goto done;
    }

    ACPI_FLUSH_CPU_CACHE();

    switch ( state )
    {
    case ACPI_STATE_S3:
        do_suspend_lowlevel();
        system_reset_counter++;
        error = tboot_s3_resume();
        break;
    case ACPI_STATE_S5:
        acpi_enter_sleep_state(ACPI_STATE_S5);
        break;
    default:
        error = -EINVAL;
        break;
    }

    system_state = SYS_STATE_resume;

    /* Restore CR4 and EFER from cached values. */
    cr4 = read_cr4();
    write_cr4(cr4 & ~X86_CR4_MCE);
    write_efer(read_efer());

    device_power_up();

    mcheck_init(&boot_cpu_data, 0);
    write_cr4(cr4);

    printk(XENLOG_INFO "Finishing wakeup from ACPI S%d state.\n", state);

    if ( (state == ACPI_STATE_S3) && error )
        tboot_s3_error(error);

 done:
    spin_debug_enable();
    local_irq_restore(flags);
    console_end_sync();
    acpi_sleep_post(state);
    if ( hvm_cpu_up() )
        BUG();

 enable_cpu:
    cpufreq_add_cpu(0);
    microcode_resume_cpu(0);
    rcu_barrier();
    mtrr_aps_sync_begin();
    enable_nonboot_cpus();
    mtrr_aps_sync_end();
    adjust_vtd_irq_affinities();
    acpi_dmar_zap();
    thaw_domains();
    system_state = SYS_STATE_active;
    spin_unlock(&pm_lock);
    return error;
}
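One subtle step in the resume path above is the CR4 handling: machine-check exceptions stay disabled until mcheck_init() has reinstalled the handlers, and only then is the cached pre-suspend CR4 value written back. A minimal model of that ordering (the accessor stubs and the shadow variable are ours; CR4.MCE being bit 6 is x86 architectural):

#include <stdint.h>

#define X86_CR4_MCE	(1u << 6)	/* machine-check enable */

/* User-space stand-ins for the privileged CR4 accessors. */
static uint32_t cr4_shadow = X86_CR4_MCE | 0x20;
static uint32_t read_cr4(void) { return cr4_shadow; }
static void write_cr4(uint32_t v) { cr4_shadow = v; }
static void mcheck_init(void) { /* reinstall the MCE handlers */ }

/*
 * Resume-path ordering: run the early wakeup code with MCE masked so a
 * stray machine check cannot fire before its handler exists, then
 * restore the full pre-suspend CR4.
 */
static void resume_restore_cr4(void)
{
	uint32_t cr4 = read_cr4();

	write_cr4(cr4 & ~X86_CR4_MCE);
	mcheck_init();
	write_cr4(cr4);
}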
Code example #25
File: power.c Project: a2k2/xen-unstable
/* Main interface to do xen specific suspend/resume */
static int enter_state(u32 state)
{
    unsigned long flags;
    int error;

    if ( (state <= ACPI_STATE_S0) || (state > ACPI_S_STATES_MAX) )
        return -EINVAL;

    if ( !spin_trylock(&pm_lock) )
        return -EBUSY;

    printk(XENLOG_INFO "Preparing system for ACPI S%d state.\n", state);

    freeze_domains();

    disable_nonboot_cpus();
    if ( num_online_cpus() != 1 )
    {
        error = -EBUSY;
        goto enable_cpu;
    }

    cpufreq_del_cpu(0);

    hvm_cpu_down();

    acpi_sleep_prepare(state);

    console_start_sync();
    printk("Entering ACPI S%d state.\n", state);

    local_irq_save(flags);
    spin_debug_disable();

    if ( (error = device_power_down()) )
    {
        printk(XENLOG_ERR "Some devices failed to power down.\n");
        goto done;
    }

    ACPI_FLUSH_CPU_CACHE();

    switch ( state )
    {
    case ACPI_STATE_S3:
        do_suspend_lowlevel();
        system_reset_counter++;
        error = tboot_s3_resume();
        break;
    case ACPI_STATE_S5:
        acpi_enter_sleep_state(ACPI_STATE_S5);
        break;
    default:
        error = -EINVAL;
        break;
    }

    /* Restore CR4 and EFER from cached values. */
    write_cr4(read_cr4());
    if ( cpu_has_efer )
        write_efer(read_efer());

    device_power_up();

    printk(XENLOG_INFO "Finishing wakeup from ACPI S%d state.\n", state);

    if ( (state == ACPI_STATE_S3) && error )
        panic("Memory integrity was lost on resume (%d)\n", error);

 done:
    spin_debug_enable();
    local_irq_restore(flags);
    console_end_sync();
    acpi_sleep_post(state);
    if ( !hvm_cpu_up() )
        BUG();

 enable_cpu:
    cpufreq_add_cpu(0);
    microcode_resume_cpu(0);
    enable_nonboot_cpus();
    thaw_domains();
    spin_unlock(&pm_lock);
    return error;
}
Code example #26
File: cpu_idle.c Project: Ultra-Seven/gxen
static void acpi_processor_idle(void)
{
    struct acpi_processor_power *power = processor_powers[smp_processor_id()];
    struct acpi_processor_cx *cx = NULL;
    int next_state;
    uint64_t t1, t2 = 0;
    u32 exp = 0, pred = 0;
    u32 irq_traced[4] = { 0 };

    if ( max_cstate > 0 && power && !sched_has_urgent_vcpu() &&
         (next_state = cpuidle_current_governor->select(power)) > 0 )
    {
        cx = &power->states[next_state];
        if ( power->flags.bm_check && acpi_idle_bm_check()
             && cx->type == ACPI_STATE_C3 )
            cx = power->safe_state;
        if ( cx->idx > max_cstate )
            cx = &power->states[max_cstate];
        menu_get_trace_data(&exp, &pred);
    }
    if ( !cx )
    {
        if ( pm_idle_save )
            pm_idle_save();
        else
            safe_halt();
        return;
    }

    cpufreq_dbs_timer_suspend();

    sched_tick_suspend();
    /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
    process_pending_softirqs();

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    if ( !cpu_is_haltable(smp_processor_id()) )
    {
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
        cx = power->safe_state;

    power->last_state = cx;

    /*
     * Sleep:
     * ------
     * Invoke the current Cx state to put the processor to sleep.
     */
    switch ( cx->type )
    {
    case ACPI_STATE_C1:
    case ACPI_STATE_C2:
        if ( cx->type == ACPI_STATE_C1 || local_apic_timer_c2_ok )
        {
            /* Get start time (ticks) */
            t1 = get_tick();
            /* Trace cpu idle entry */
            TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
            /* Invoke C2 */
            acpi_idle_do_entry(cx);
            /* Get end time (ticks) */
            t2 = get_tick();
            trace_exit_reason(irq_traced);
            /* Trace cpu idle exit */
            TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                     irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);
            /* Update statistics */
            acpi_update_idle_stats(power, cx, ticks_elapsed(t1, t2));
            /* Re-enable interrupts */
            local_irq_enable();
            break;
        }

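    /*
     * Fall through: a C2 that cannot keep the local APIC timer running
     * is handled via the C3 path below.
     */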
    case ACPI_STATE_C3:
        /*
         * Before invoking C3, be aware that the TSC/APIC timer may be
         * stopped by hardware. Without careful handling of TSC/APIC-stop
         * issues, deep C-states can't work correctly.
         */
        /* preparing APIC stop */
        lapic_timer_off();

        /* Get start time (ticks) */
        t1 = get_tick();
        /* Trace cpu idle entry */
        TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);

        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if ( cx->type != ACPI_STATE_C3 )
            /* nothing to be done here */;
        else if ( power->flags.bm_check && power->flags.bm_control )
        {
            spin_lock(&c3_cpu_status.lock);
            if ( ++c3_cpu_status.count == num_online_cpus() )
            {
                /*
                 * All CPUs are trying to go to C3
                 * Disable bus master arbitration
                 */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
            }
            spin_unlock(&c3_cpu_status.lock);
        }
        else if ( !power->flags.bm_check )
        {
            /* SMP with no shared cache... Invalidate cache  */
            ACPI_FLUSH_CPU_CACHE();
        }

        /* Invoke C3 */
        acpi_idle_do_entry(cx);

        if ( (cx->type == ACPI_STATE_C3) &&
             power->flags.bm_check && power->flags.bm_control )
        {
            /* Enable bus master arbitration */
            spin_lock(&c3_cpu_status.lock);
            acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
            c3_cpu_status.count--;
            spin_unlock(&c3_cpu_status.lock);
        }

        /* Get end time (ticks) */
        t2 = get_tick();

        /* recovering TSC */
        cstate_restore_tsc();
        trace_exit_reason(irq_traced);
        /* Trace cpu idle exit */
        TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                 irq_traced[0], irq_traced[1], irq_traced[2], irq_traced[3]);

        /* Update statistics */
        acpi_update_idle_stats(power, cx, ticks_elapsed(t1, t2));
        /* Re-enable interrupts */
        local_irq_enable();
        /* recovering APIC */
        lapic_timer_on();

        break;

    default:
        /* Now in C0 */
        power->last_state = &power->states[0];
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    /* Now in C0 */
    power->last_state = &power->states[0];

    sched_tick_resume();
    cpufreq_dbs_timer_resume();

    if ( cpuidle_current_governor->reflect )
        cpuidle_current_governor->reflect(power);
}
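The c3_cpu_status bookkeeping in the example above enforces "bus-master arbitration is disabled only while every online CPU sits in C3". Stripped of the ACPI register calls, the invariant looks like this (struct and function names are ours; the real code holds the c3_cpu_status.lock spinlock around both updates):

#include <stdbool.h>

struct c3_coord {
	unsigned int in_c3;	/* CPUs currently in C3 */
	unsigned int online;	/* num_online_cpus() */
	bool arb_disabled;	/* mirrors ACPI_BITREG_ARB_DISABLE */
};

/* Called with the coordination lock held, on the way into C3. */
static void c3_enter(struct c3_coord *c)
{
	if (++c->in_c3 == c->online)
		c->arb_disabled = true;	/* last CPU in: set ARB_DIS */
}

/* Called with the coordination lock held, on the way out of C3. */
static void c3_exit(struct c3_coord *c)
{
	c->arb_disabled = false;	/* first CPU out: clear ARB_DIS */
	c->in_c3--;
}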
Code example #27
/**
 * acpi_system_save_state - save OS specific state and power down devices
 * @state:	sleep state we're entering.
 *
 * This handles saving all context to memory, and possibly disk.
 * First, we call into the device driver layer to save device state.
 * Once we have that, we save whatever processor and kernel state we
 * need to memory.
 * If we're entering S4, we then write the memory image to disk.
 *
 * Only then is it safe for us to power down devices, since we may still
 * need the disks and upstream buses to write to.
 */
acpi_status
acpi_system_save_state(
	u32			state)
{
	int			error = 0;

	/* Send notification to devices that they will be suspended.
	 * If any device or driver cannot make the transition, either up
	 * or down, we'll get an error back.
	 */
	if (state > ACPI_STATE_S1) {
		error = pm_send_all(PM_SAVE_STATE, (void *)3);
		if (error)
			return AE_ERROR;
	}

	if (state <= ACPI_STATE_S5) {
		/* Tell devices to stop I/O and actually save their state.
		 * It is theoretically possible that something could fail,
		 * so handle that gracefully..
		 */
		if (state > ACPI_STATE_S1 && state != ACPI_STATE_S5) {
			error = pm_send_all(PM_SUSPEND, (void *)3);
			if (error) {
				/* Tell devices to restore state if they have
				 * it saved and to start taking I/O requests.
				 */
				pm_send_all(PM_RESUME, (void *)0);
				return AE_ERROR;
			}
		}
		
		/* flush caches */
		ACPI_FLUSH_CPU_CACHE();

		/* Do arch specific saving of state. */
		if (state > ACPI_STATE_S1) {
			error = acpi_save_state_mem();

			/* TBD: if no s4bios, write codes for
			 * acpi_save_state_disk()...
			 */
#if 0
			if (!error && (state == ACPI_STATE_S4))
				error = acpi_save_state_disk();
#endif
			if (error) {
				pm_send_all(PM_RESUME, (void *)0);
				return AE_ERROR;
			}
		}
	}
	/* disable interrupts
	 * Note that acpi_suspend -- our caller -- will do this once we return.
	 * But we want it done early, so we don't get any surprises during
	 * the device suspend sequence.
	 */
	ACPI_DISABLE_IRQS();

	/* Unconditionally turn off devices.
	 * Obvious if we enter a sleep state.
	 * If entering S5 (soft off), this should put devices in a
	 * quiescent state.
	 */

	if (state > ACPI_STATE_S1) {
		error = pm_send_all(PM_SUSPEND, (void *)3);

		/* We're pretty screwed if we got an error from this.
		 * We try to recover by simply calling our own restore_state
		 * function; see above for definition.
		 *
		 * If it's S5 though, go through with it anyway..
		 */
		if (error && state != ACPI_STATE_S5)
			acpi_system_restore_state(state);
	}
	return error ? AE_ERROR : AE_OK;
}
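The doc comment above describes a transactional pattern: every stage that can fail must roll the earlier stages back before reporting the error. Reduced to its skeleton (pm_send_all() is stubbed here; the wrapper name and the printf are illustrative only):

#include <stdio.h>

enum pm_request { PM_SAVE_STATE, PM_SUSPEND, PM_RESUME };

/* Stand-in for the old pm_send_all(); returns 0 on success. */
static int pm_send_all(enum pm_request rq, void *data)
{
	(void)data;
	printf("pm request %d\n", (int)rq);
	return 0;
}

/*
 * The rollback idiom used above: if suspending devices fails part way
 * through, immediately ask them to resume so the system stays usable,
 * then propagate the original error.
 */
static int suspend_devices_or_rollback(void)
{
	int error = pm_send_all(PM_SUSPEND, (void *)3);

	if (error)
		pm_send_all(PM_RESUME, (void *)0);
	return error;
}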
Code example #28
ACPI_STATUS
AcpiHwLegacySleep (
    UINT8                   SleepState)
{
    ACPI_BIT_REGISTER_INFO  *SleepTypeRegInfo;
    ACPI_BIT_REGISTER_INFO  *SleepEnableRegInfo;
    UINT32                  Pm1aControl;
    UINT32                  Pm1bControl;
    UINT32                  InValue;
    ACPI_STATUS             Status;


    ACPI_FUNCTION_TRACE (HwLegacySleep);


    SleepTypeRegInfo = AcpiHwGetBitRegisterInfo (ACPI_BITREG_SLEEP_TYPE);
    SleepEnableRegInfo = AcpiHwGetBitRegisterInfo (ACPI_BITREG_SLEEP_ENABLE);

    /* Clear wake status */

    Status = AcpiWriteBitRegister (ACPI_BITREG_WAKE_STATUS, ACPI_CLEAR_STATUS);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Clear all fixed and general purpose status bits */

    Status = AcpiHwClearAcpiStatus ();
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /*
     * 1) Disable/Clear all GPEs
     * 2) Enable all wakeup GPEs
     */
    Status = AcpiHwDisableAllGpes ();
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }
    AcpiGbl_SystemAwakeAndRunning = FALSE;

    Status = AcpiHwEnableAllWakeupGpes ();
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Get current value of PM1A control */

    Status = AcpiHwRegisterRead (ACPI_REGISTER_PM1_CONTROL,
                                 &Pm1aControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }
    ACPI_DEBUG_PRINT ((ACPI_DB_INIT,
                       "Entering sleep state [S%u]\n", SleepState));

    /* Clear the SLP_EN and SLP_TYP fields */

    Pm1aControl &= ~(SleepTypeRegInfo->AccessBitMask |
                     SleepEnableRegInfo->AccessBitMask);
    Pm1bControl = Pm1aControl;

    /* Insert the SLP_TYP bits */

    Pm1aControl |= (AcpiGbl_SleepTypeA << SleepTypeRegInfo->BitPosition);
    Pm1bControl |= (AcpiGbl_SleepTypeB << SleepTypeRegInfo->BitPosition);

    /*
     * We split the writes of SLP_TYP and SLP_EN to workaround
     * poorly implemented hardware.
     */

    /* Write #1: write the SLP_TYP data to the PM1 Control registers */

    Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    /* Insert the sleep enable (SLP_EN) bit */

    Pm1aControl |= SleepEnableRegInfo->AccessBitMask;
    Pm1bControl |= SleepEnableRegInfo->AccessBitMask;

    /* Flush caches, as per ACPI specification */

    ACPI_FLUSH_CPU_CACHE ();

    /* Write #2: Write both SLP_TYP + SLP_EN */

    Status = AcpiHwWritePm1Control (Pm1aControl, Pm1bControl);
    if (ACPI_FAILURE (Status))
    {
        return_ACPI_STATUS (Status);
    }

    if (SleepState > ACPI_STATE_S3)
    {
        /*
         * We wanted to sleep > S3, but it didn't happen (by virtue of the
         * fact that we are still executing!)
         *
         * Wait ten seconds, then try again. This is to get S4/S5 to work on
         * all machines.
         *
         * We wait so long to allow chipsets that poll this reg very slowly
         * to still read the right value. Ideally, this block would go
         * away entirely.
         */
        AcpiOsStall (10 * ACPI_USEC_PER_SEC);

        Status = AcpiHwRegisterWrite (ACPI_REGISTER_PM1_CONTROL,
                                      SleepEnableRegInfo->AccessBitMask);
        if (ACPI_FAILURE (Status))
        {
            return_ACPI_STATUS (Status);
        }
    }

    /* Wait for transition back to Working State */

    do
    {
        Status = AcpiReadBitRegister (ACPI_BITREG_WAKE_STATUS, &InValue);
        if (ACPI_FAILURE (Status))
        {
            return_ACPI_STATUS (Status);
        }

    } while (!InValue);

    return_ACPI_STATUS (AE_OK);
}
Code example #29
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			acpi_safe_halt();
			return -EBUSY;
		}
	}

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	sched_clock_idle_wakeup_event(0);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
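The bm_check/bm_control decision table recurs in several of the examples above; spelled out as code it is simply the following (the enum and function are a restatement of the comment, not a kernel API):

#include <stdbool.h>

enum c3_prep { C3_ARB_DISABLE, C3_CACHE_FLUSH, C3_ENTER_AS_IS };

/*
 * bm_check && bm_control: count CPUs and disable bus-master arbitration.
 * !bm_check: no BM detection, so flush caches before entering C3.
 * bm_check && !bm_control: nothing can be done; enter C3 as-is.
 */
static enum c3_prep c3_preparation(bool bm_check, bool bm_control)
{
	if (bm_check && bm_control)
		return C3_ARB_DISABLE;
	if (!bm_check)
		return C3_CACHE_FLUSH;
	return C3_ENTER_AS_IS;
}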