Example #1
int
acpi_poweroff(void)
{
	extern int acpica_use_safe_delay;
	ACPI_STATUS status;

	PSM_VERBOSE_POWEROFF(("acpi_poweroff: starting poweroff\n"));

	acpica_use_safe_delay = 1;

	status = AcpiEnterSleepStatePrep(ACPI_STATE_S5);
	if (status != AE_OK) {
		PSM_VERBOSE_POWEROFF(("acpi_poweroff: failed to prepare for "
		    "poweroff, status=0x%x\n", status));
		return (1);
	}
	ACPI_DISABLE_IRQS();
	status = AcpiEnterSleepState(ACPI_STATE_S5);
	ACPI_ENABLE_IRQS();

	/* we should be off; if we get here it's an error */
	PSM_VERBOSE_POWEROFF(("acpi_poweroff: failed to actually power "
	    "off, status=0x%x\n", status));
	return (1);
}
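
The function above follows the standard ACPICA S5 sequence: prepare, mask interrupts, enter the sleep state; returning at all means the transition failed. Below is a minimal sketch of how a hypothetical shutdown path might consume that contract; machine_poweroff() and legacy_halt() are assumed names, not part of the source.

static void
machine_poweroff(void)
{
	if (acpi_poweroff() != 0) {
		/* ACPI S5 entry failed; fall back to a hard halt. */
		legacy_halt();	/* hypothetical fallback, not in the source */
	}
	/* On success this point is never reached: the machine is off. */
}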
Example #2
/**
 * acpi_suspend - OS-agnostic system suspend/resume support (S? states)
 * @state:	state we're entering
 *
 */
acpi_status
acpi_suspend (
	u32			state)
{
	acpi_status status;

	/* we only support S1, S4 (s4bios only), and S5 on kernel 2.4 */
	if (state != ACPI_STATE_S1 && state != ACPI_STATE_S4
	    && state != ACPI_STATE_S5)
		return AE_ERROR;

	if (ACPI_STATE_S4 == state) {
		/* For s4bios, we need a wakeup address. */
		if (1 == acpi_gbl_FACS->S4bios_f &&
		    0 != acpi_gbl_FADT->smi_cmd) {
			if (!acpi_wakeup_address)
				return AE_ERROR;
			acpi_set_firmware_waking_vector((acpi_physical_address) acpi_wakeup_address);
		} else
			/* We don't support S4 under 2.4.  Give up */
			return AE_ERROR;
	}

	status = acpi_system_save_state(state);
	if (!ACPI_SUCCESS(status) && state != ACPI_STATE_S5)
		return status;

	acpi_enter_sleep_state_prep(state);

	/* disable interrupts and flush caches */
	ACPI_DISABLE_IRQS();
	ACPI_FLUSH_CPU_CACHE();

	/* perform OS-specific sleep actions */
	status = acpi_system_suspend(state);

	/* Even if we failed to go to sleep, all of the devices are in a
	 * suspended state, so we run these unconditionally to make sure we
	 * have a usable system no matter what.
	 */
	acpi_leave_sleep_state(state);
	acpi_system_restore_state(state);

	/* make sure interrupts are enabled */
	ACPI_ENABLE_IRQS();

	/* reset firmware waking vector */
	acpi_set_firmware_waking_vector((acpi_physical_address) 0);

	return status;
}
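
acpi_suspend() accepts only S1, S4 (via s4bios), and S5, so a caller has to map its own sleep request onto one of those states first. A short hypothetical caller, sketched under that assumption; do_sleep_request() and its level table are illustrative only.

static acpi_status
do_sleep_request(int level)
{
	/* Illustrative mapping from a user-visible sleep level to the
	 * three states acpi_suspend() accepts. */
	static const u32 request_to_state[] = {
		ACPI_STATE_S1,	/* standby */
		ACPI_STATE_S4,	/* s4bios, if the firmware allows it */
		ACPI_STATE_S5,	/* soft-off */
	};

	if (level < 0 || level > 2)
		return AE_BAD_PARAMETER;
	return acpi_suspend(request_to_state[level]);
}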
Example #3
/*
 * Release a spinlock. See above.
 */
void
acpi_os_release_lock (
	acpi_handle	handle,
	u32		flags)
{
	ACPI_FUNCTION_TRACE ("os_release_lock");

	ACPI_DEBUG_PRINT ((ACPI_DB_MUTEX, "Releasing spinlock[%p] from %s level\n", handle,
		((flags & ACPI_NOT_ISR) ? "non-interrupt" : "interrupt")));

	spin_unlock((spinlock_t *)handle);

	if (flags & ACPI_NOT_ISR)
		ACPI_ENABLE_IRQS();

	return_VOID;
}
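
The release path re-enables IRQs only when the caller passes ACPI_NOT_ISR, which implies the acquire side (the "See above" counterpart) disabled them under the same flag. A sketch of the expected pairing, assuming the acquire signature mirrors the release shown here:

static void
example_critical_section(acpi_handle lock)
{
	/* ACPI_NOT_ISR tells the OSL we are in process context, so the
	 * acquire side disables IRQs and the release side above
	 * re-enables them. */
	acpi_os_acquire_lock(lock, ACPI_NOT_ISR);
	/* ... touch data shared with the SCI handler ... */
	acpi_os_release_lock(lock, ACPI_NOT_ISR);
}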
Example #4
/**
 * acpi_system_restore_state - OS-specific restoration of state
 * @state:	sleep state we're exiting
 *
 * Note that if we're coming back from S4, the memory image should have
 * already been loaded from the disk and is already in place.  (Otherwise how
 * else would we be here?).
 */
acpi_status
acpi_system_restore_state(
	u32			state)
{
	/* 
	 * We should only be here if we're coming back from STR or STD.
	 * And, in the case of the latter, the memory image should have already
	 * been loaded from disk.
	 */
	if (state > ACPI_STATE_S1) {
		acpi_restore_state_mem();

		/* Do _early_ resume for irqs.  Required by
		 * ACPI specs.
		 */
		/* TBD: call arch-dependent reinitialization of the
		 * interrupts.
		 */
#ifdef CONFIG_X86
		init_8259A(0);
#endif
		/* wait for power to come back */
		mdelay(1000);

	}

	/* Be really sure that irqs are disabled. */
	ACPI_DISABLE_IRQS();

	/* Wait a little again, just in case... */
	mdelay(1000);

	/* enable interrupts once again */
	ACPI_ENABLE_IRQS();

	/* turn all the devices back on */
	if (state > ACPI_STATE_S1)
		pm_send_all(PM_RESUME, (void *)0);

	return AE_OK;
}
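
acpi_system_restore_state() undoes the work of the acpi_system_save_state() call made from acpi_suspend() above. Below is a hedged sketch of what that save side might look like, inferred purely from the restore path's pm_send_all(PM_RESUME, ...) call; the function name suffix and the PM_SUSPEND data argument are assumptions.

acpi_status
acpi_system_save_state_sketch(u32 state)
{
	/* Quiesce drivers before the sleep transition; this mirrors the
	 * pm_send_all(PM_RESUME, ...) call in the restore path above. */
	if (state > ACPI_STATE_S1) {
		if (pm_send_all(PM_SUSPEND, (void *)3) != 0)
			return AE_ERROR;	/* a driver refused to suspend */
	}
	return AE_OK;
}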
Example #5
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct	acpi_cpu_softc *sc;
    struct	acpi_cx *cx_next;
    uint32_t	start_time, end_time;
    int		bm_active, cx_next_idx, i;

    /* If disabled, return immediately. */
    if (cpu_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].trans_lat * 3 <= sc->cpu_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = min(cx_next_idx, sc->cpu_non_c3);
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + 500000 / hz) / 4;
	acpi_cpu_c1();
	return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
	if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
	} else
	    ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    AcpiRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock);
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);
    AcpiRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
	(cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
	AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
    }
    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    end_time = acpi_TimerDelta(end_time, start_time);
    sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4;
}
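
The cpu_prev_sleep update used twice above, (prev * 3 + sample) / 4, is an exponential moving average that gives the newest measurement a weight of 1/4 and history a weight of 3/4. The same filter as a standalone helper; ewma_sleep_update() is a hypothetical name, not part of the driver.

static uint32_t
ewma_sleep_update(uint32_t prev, uint32_t sample_us)
{
	/* new average = 3/4 * history + 1/4 * latest sample */
	return ((prev * 3 + sample_us) / 4);
}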
Example #6
/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cst_idle(void)
{
    struct	acpi_cst_softc *sc;
    struct	acpi_cst_cx *cx_next;
    union microtime_pcpu start, end;
    int		cx_next_idx, i, tdiff, bm_arb_disabled = 0;

    /* If disabled, return immediately. */
    if (acpi_cst_disable_idle) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no Cx state for this processor.
     */
    sc = acpi_cst_softc[mdcpu->mi.gd_cpuid];
    if (sc == NULL) {
	acpi_cst_c1_halt();
	return;
    }

    /* Still probing; use C1 */
    if (sc->cst_flags & ACPI_CST_FLAG_PROBING) {
	acpi_cst_c1_halt();
	return;
    }

    /* Find the lowest state that has small enough latency. */
    cx_next_idx = 0;
    for (i = sc->cst_cx_lowest; i >= 0; i--) {
	if (sc->cst_cx_states[i].trans_lat * 3 <= sc->cst_prev_sleep) {
	    cx_next_idx = i;
	    break;
	}
    }

    /*
     * Check for bus master activity if needed for the selected state.
     * If there was activity, clear the bit and use the lowest non-C3 state.
     */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    if (cx_next->flags & ACPI_CST_CX_FLAG_BM_STS) {
	int bm_active;

	AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active);
	if (bm_active != 0) {
	    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	    cx_next_idx = sc->cst_non_c3;
	}
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cst_cx_states[cx_next_idx];
    sc->cst_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept half of quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
	sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + 500000 / hz) / 4;
	cx_next->enter(cx_next);
	return;
    }

    /* Execute the proper preamble before enter the selected state. */
    if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_BM_ARB) {
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
	bm_arb_disabled = 1;
    } else if (cx_next->preamble == ACPI_CST_CX_PREAMBLE_WBINVD) {
	ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Enter the selected state and check time spent asleep.
     */
    microtime_pcpu_get(&start);
    cpu_mfence();

    cx_next->enter(cx_next);

    cpu_mfence();
    microtime_pcpu_get(&end);

    /* Enable bus master arbitration, if it was disabled. */
    if (bm_arb_disabled)
	AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds. */
    tdiff = microtime_pcpu_diff(&start, &end);
    sc->cst_prev_sleep = (sc->cst_prev_sleep * 3 + tdiff) / 4;
}
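
Both idle loops pick the deepest C-state whose transition latency is at most one third of the recent average sleep time. That selection rule, extracted into a standalone sketch with hypothetical type and function names:

struct cx_state_sketch {
	int	trans_lat;	/* worst-case transition latency, in us */
};

static int
pick_cx_index(const struct cx_state_sketch *states, int lowest,
    int prev_sleep_us)
{
	int i;

	/* Scan from the deepest permitted state toward C1. */
	for (i = lowest; i >= 0; i--) {
		if (states[i].trans_lat * 3 <= prev_sleep_us)
			return (i);	/* latency amortizes well enough */
	}
	return (0);	/* fall back to the shallowest state */
}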