static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS	 gas;
    struct acpi_cx		*cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /* 
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
	return; 

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.BitWidth = 8;
    if (AcpiGbl_FADT.C2Latency <= 100) {
	gas.Address = sc->cpu_p_blk + 4;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &sc->cpu_rid,
	    &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cpu_rid++;
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
    if (sc->cpu_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) {
	gas.Address = sc->cpu_p_blk + 5;
	acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &sc->cpu_rid, &gas,
	    &cx_ptr->p_lvlx, RF_SHAREABLE);
	if (cx_ptr->p_lvlx != NULL) {
	    sc->cpu_rid++;
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
}
示例#2
0
/*
 * Fetch the Generic Address Structure stored in package element "idx"
 * of "res" and allocate a bus resource for it, returned via *dst with
 * its resource id in *rid.
 *
 * Returns 0 on success or -1 if the element is missing/malformed or
 * the resource allocation fails.
 */
int
acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *rid,
	    struct resource **dst)
{
    ACPI_GENERIC_ADDRESS gas;
    ACPI_OBJECT		*obj;

    /*
     * The address of a package element is never NULL, so the old
     * "obj == NULL" test was dead code; bound-check the index against
     * the package count before dereferencing instead.
     */
    if (idx < 0 || (UINT32)idx >= res->Package.Count) {
	ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		    "PkgGas error at %d\n", idx);
	return (-1);
    }
    obj = &res->Package.Elements[idx];
    if (obj->Type != ACPI_TYPE_BUFFER ||
	obj->Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) {

	ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		    "PkgGas error at %d\n", idx);
	return (-1);
    }

    /* The GAS payload starts after a 3-byte buffer header. */
    memcpy(&gas, obj->Buffer.Pointer + 3, sizeof(gas));
    *dst = acpi_bus_alloc_gas(dev, rid, &gas);
    if (*dst == NULL) {
	ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		    "PkgGas error at %d\n", idx);
	return (-1);
    }

    return (0);
}
示例#3
0
/*
 * Fetch the Generic Address Structure stored in package element "idx"
 * of "res" and allocate a bus resource for it via acpi_bus_alloc_gas(),
 * which fills in *type, *rid and *dst.
 *
 * Returns 0 on success, EINVAL if the element is missing or malformed,
 * or the error from acpi_bus_alloc_gas().
 */
int
acpi_PkgGas(device_t dev, ACPI_OBJECT *res, int idx, int *type, int *rid,
    struct resource **dst, u_int flags)
{
    ACPI_GENERIC_ADDRESS gas;
    ACPI_OBJECT *obj;

    /*
     * The address of a package element is never NULL, so the old
     * "obj == NULL" test was dead code; bound-check the index against
     * the package count before dereferencing instead.
     */
    if (idx < 0 || (UINT32)idx >= res->Package.Count)
	return (EINVAL);
    obj = &res->Package.Elements[idx];
    if (obj->Type != ACPI_TYPE_BUFFER ||
	obj->Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3)
	return (EINVAL);

    /* The GAS payload starts after a 3-byte buffer header. */
    memcpy(&gas, obj->Buffer.Pointer + 3, sizeof(gas));

    return (acpi_bus_alloc_gas(dev, type, rid, &gas, dst, flags));
}
示例#4
0
/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 *
 * Returns 0 on success, or ENXIO if _CST is absent or its top-level
 * package is malformed.
 */
static int
acpi_cst_cx_probe_cst(struct acpi_cst_softc *sc, int reprobe)
{
    struct	 acpi_cst_cx *cx_ptr;
    ACPI_STATUS	 status;
    ACPI_BUFFER	 buf;
    ACPI_OBJECT	*top;
    ACPI_OBJECT	*pkg;
    uint32_t	 count;
    int		 i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

#ifdef INVARIANTS
    /* Reprobing must run on the target CPU's netisr message port. */
    if (reprobe)
	KKASSERT(&curthread->td_msgport == netisr_cpuport(sc->cst_cpuid));
#endif

    /* Let ACPICA allocate the result buffer; freed with AcpiOsFree(). */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cst_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cst_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    /* Trust the actual element count over the advertised count. */
    if (count != top->Package.Count - 1) {
	device_printf(sc->cst_dev, "invalid _CST state count (%d != %d)\n",
	       count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    /* Clamp to the size of sc->cst_cx_states. */
    if (count > MAX_CX_STATES) {
	device_printf(sc->cst_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    /*
     * Publish "probing in progress" before rebuilding the state table.
     * MATCH_HT is set optimistically here and cleared below whenever a
     * state disqualifies hyperthread C-state matching.  The store fence
     * orders the flag update ahead of the table rewrite for concurrent
     * readers.
     */
    sc->cst_flags |= ACPI_CST_FLAG_PROBING | ACPI_CST_FLAG_MATCH_HT;
    cpu_sfence();

    /*
     * Free all previously allocated resources
     *
     * NOTE: It is needed for _CST reprobing.
     */
    acpi_cst_free_resource(sc, 0);

    /* Set up all valid states. */
    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;
    for (i = 0; i < count; i++) {
	int error;

	/* Element 0 is the count; Cx sub-packages start at element 1. */
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {

	    device_printf(sc->cst_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    /* Track the index of the deepest non-C3 state seen. */
	    sc->cst_non_c3 = i;
	    cx_ptr->enter = acpi_cst_c1_halt_enter;
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C1 CST HALT setup failed: %d", error);
	    if (sc->cst_cx_count != 0) {
		/*
		 * C1 is not the first C-state; something really stupid
		 * is going on ...
		 */
		sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;
	    }
	    cx_ptr++;
	    sc->cst_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    sc->cst_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    /* C3(+) may be disabled by a platform quirk. */
	    if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "cpu_cst%d: C3[%d] not available.\n",
				 device_get_unit(sc->cst_dev), i));
		continue;
	    }
	    break;
	}

	/*
	 * Allocate the control register for C2 or C3(+).
	 */
	KASSERT(cx_ptr->res == NULL, ("still has res"));
	/* Control-register GAS is package element 0. */
	acpi_PkgRawGas(pkg, 0, &cx_ptr->gas);

	/*
	 * We match number of C2/C3 for hyperthreads, only if the
	 * register is "Fixed Hardware", e.g. on most of the Intel
	 * CPUs.  We don't have much to do for the rest of the
	 * register types.
	 */
	if (cx_ptr->gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE)
	    sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    /* I/O-resource-backed state: enter via port read. */
	    sc->cst_parent->cpu_next_rid++;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			     "cpu_cst%d: Got C%d - %d latency\n",
			     device_get_unit(sc->cst_dev), cx_ptr->type,
			     cx_ptr->trans_lat));
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C%d CST I/O setup failed: %d", cx_ptr->type, error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	} else {
	    /*
	     * No I/O resource; keep the state only if setup installed
	     * an alternative enter method (asserted below).
	     */
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (!error) {
		KASSERT(cx_ptr->enter != NULL,
		    ("C%d enter is not set", cx_ptr->type));
		cx_ptr++;
		sc->cst_cx_count++;
	    }
	}
    }
    AcpiOsFree(buf.Pointer);

    /*
     * If HT matching is still allowed, make all attached siblings in
     * the same core agree on the C-state table by copying the larger
     * table over the smaller one.
     */
    if (sc->cst_flags & ACPI_CST_FLAG_MATCH_HT) {
	cpumask_t mask;

	mask = get_cpumask_from_level(sc->cst_cpuid, CORE_LEVEL);
	if (CPUMASK_TESTNZERO(mask)) {
	    int cpu;

	    for (cpu = 0; cpu < ncpus; ++cpu) {
		struct acpi_cst_softc *sc1 = acpi_cst_softc[cpu];

		if (sc1 == NULL || sc1 == sc ||
		    (sc1->cst_flags & ACPI_CST_FLAG_ATTACHED) == 0 ||
		    (sc1->cst_flags & ACPI_CST_FLAG_MATCH_HT) == 0)
		    continue;
		if (!CPUMASK_TESTBIT(mask, sc1->cst_cpuid))
		    continue;

		if (sc1->cst_cx_count != sc->cst_cx_count) {
		    struct acpi_cst_softc *src_sc, *dst_sc;

		    if (bootverbose) {
			/*
			 * NOTE(review): "inconstent" below is a typo
			 * for "inconsistent" (runtime string; left
			 * unchanged in this documentation pass).
			 */
			device_printf(sc->cst_dev,
			    "inconstent C-state count: %d, %s has %d\n",
			    sc->cst_cx_count,
			    device_get_nameunit(sc1->cst_dev),
			    sc1->cst_cx_count);
		    }
		    if (sc1->cst_cx_count > sc->cst_cx_count) {
			src_sc = sc1;
			dst_sc = sc;
		    } else {
			src_sc = sc;
			dst_sc = sc1;
		    }
		    acpi_cst_copy(dst_sc, src_sc);
		}
	    }
	}
    }

    if (reprobe) {
	/* If there are C3(+) states, always enable bus master wakeup */
	if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
	    for (i = 0; i < sc->cst_cx_count; ++i) {
		struct acpi_cst_cx *cx = &sc->cst_cx_states[i];

		if (cx->type >= ACPI_STATE_C3) {
		    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
		    break;
		}
	    }
	}

	/* Fix up the lowest Cx being used */
	acpi_cst_set_lowest_oncpu(sc, sc->cst_cx_lowest_req);
    }

    /*
     * Cache the lowest non-C3 state.
     * NOTE: must after cst_cx_lowest is set.
     */
    acpi_cst_non_c3(sc);

    /* Order the table rewrite before clearing the PROBING flag. */
    cpu_sfence();
    sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

    return (0);
}
示例#5
0
static void
acpi_cst_cx_probe_fadt(struct acpi_cst_softc *sc)
{
    struct acpi_cst_cx *cx_ptr;
    int error;

    /*
     * Free all previously allocated resources.
     *
     * NITE:
     * It is needed, since we could enter here because of other
     * cpu's _CST probing failure.
     */
    acpi_cst_free_resource(sc, 0);

    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cst_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_FIXED_HARDWARE;
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr->enter = acpi_cst_c1_halt_enter;
    error = acpi_cst_cx_setup(cx_ptr);
    if (error)
	panic("C1 FADT HALT setup failed: %d", error);
    cx_ptr++;
    sc->cst_cx_count++;

    /* C2(+) is not supported on MP system */
    if (ncpus > 1 && (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
	return;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cst_p_blk_len < 5)
	return; 

    /* Validate and allocate resources for C2 (P_LVL2). */
    if (AcpiGbl_FADT.C2Latency <= 100) {
	cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	cx_ptr->gas.BitWidth = 8;
	cx_ptr->gas.Address = sc->cst_p_blk + 4;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    cx_ptr->type = ACPI_STATE_C2;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency;
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C2 FADT I/O setup failed: %d", error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	    sc->cst_non_c3 = 1;
	}
    }
    if (sc->cst_p_blk_len < 6)
	return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT.C3Latency <= 1000 &&
        !(acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3)) {
	cx_ptr->gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	cx_ptr->gas.BitWidth = 8;
	cx_ptr->gas.Address = sc->cst_p_blk + 5;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    cx_ptr->type = ACPI_STATE_C3;
	    cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency;
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C3 FADT I/O setup failed: %d", error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	}
    }
}