Example #1
static void
acpi_cst_set_lowest_handler(struct cpuhelper_msg *msg)
{
    int error;

    error = acpi_cst_set_lowest_oncpu(msg->ch_cbarg, msg->ch_cbarg1);
    cpuhelper_replymsg(msg, error);
}
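
A dispatch-side sketch for Example #1 (not part of the example above): it assumes DragonFly's cpuhelper API (cpuhelper_initmsg()/cpuhelper_domsg()) and that the caller fills in msg->ch_cbarg/ch_cbarg1; every name outside the handler itself is an assumption.

/*
 * Hypothetical caller: run acpi_cst_set_lowest_handler() on the target
 * CPU and wait for its cpuhelper_replymsg() reply.  Assumes DragonFly's
 * cpuhelper_initmsg()/cpuhelper_domsg(); not taken from the source.
 */
static int
acpi_cst_set_lowest(struct acpi_cst_softc *sc, int val)
{
    struct cpuhelper_msg msg;

    cpuhelper_initmsg(&msg, &curthread->td_msgport,
	acpi_cst_set_lowest_handler, sc, MSGF_PRIORITY);
    msg.ch_cbarg1 = val;	/* consumed as msg->ch_cbarg1 above */
    return (cpuhelper_domsg(&msg, sc->cst_cpuid));
}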
Example #2
static void
acpi_cst_set_lowest_handler(netmsg_t msg)
{
    struct netmsg_acpi_cst *rmsg = (struct netmsg_acpi_cst *)msg;
    int error;

    error = acpi_cst_set_lowest_oncpu(rmsg->sc, rmsg->val);
    lwkt_replymsg(&rmsg->base.lmsg, error);
}
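
Example #2 is the older netmsg-based variant of the same handler. A minimal sketch of the message type and dispatch it implies follows; the struct layout is inferred from the rmsg->sc/rmsg->val accesses in the handler, and the dispatch path is an assumption based on the netisr_cpuport() assertion seen in Example #3.

/*
 * Sketch of the message type and dispatch implied by Example #2.
 * Everything outside the handler itself is an assumption.
 */
struct netmsg_acpi_cst {
    struct netmsg_base		base;
    struct acpi_cst_softc	*sc;
    int				val;
};

static int
acpi_cst_set_lowest(struct acpi_cst_softc *sc, int val)
{
    struct netmsg_acpi_cst msg;

    netmsg_init(&msg.base, NULL, &curthread->td_msgport, MSGF_PRIORITY,
	acpi_cst_set_lowest_handler);
    msg.sc = sc;
    msg.val = val;

    /* Run the handler on the target CPU's netisr port; wait for reply. */
    return (lwkt_domsg(netisr_cpuport(sc->cst_cpuid), &msg.base.lmsg, 0));
}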
Example #3
/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cst_cx_probe_cst(struct acpi_cst_softc *sc, int reprobe)
{
    struct	 acpi_cst_cx *cx_ptr;
    ACPI_STATUS	 status;
    ACPI_BUFFER	 buf;
    ACPI_OBJECT	*top;
    ACPI_OBJECT	*pkg;
    uint32_t	 count;
    int		 i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

#ifdef INVARIANTS
    if (reprobe)
	KKASSERT(&curthread->td_msgport == netisr_cpuport(sc->cst_cpuid));
#endif

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cst_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
	device_printf(sc->cst_dev, "invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
	device_printf(sc->cst_dev, "invalid _CST state count (%d != %d)\n",
	       count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cst_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    /*
     * Mark the softc as being probed and publish the flag updates
     * before tearing down the old Cx states, so that readers on
     * other CPUs do not see half-constructed state.
     */
    sc->cst_flags |= ACPI_CST_FLAG_PROBING | ACPI_CST_FLAG_MATCH_HT;
    cpu_sfence();

    /*
     * Free all previously allocated resources.
     *
     * NOTE: This is needed for _CST reprobing.
     */
    acpi_cst_free_resource(sc, 0);

    /* Set up all valid states. */
    sc->cst_cx_count = 0;
    cx_ptr = sc->cst_cx_states;
    for (i = 0; i < count; i++) {
	int error;

	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4) ||
	    acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
	    acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
	    acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {
	    device_printf(sc->cst_dev, "skipping invalid Cx state package\n");
	    continue;
	}

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    sc->cst_non_c3 = i;
	    cx_ptr->enter = acpi_cst_c1_halt_enter;
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C1 CST HALT setup failed: %d", error);
	    if (sc->cst_cx_count != 0) {
		/*
		 * C1 is not the first C-state; the BIOS-supplied _CST
		 * is bogus, so don't try to match C-state counts
		 * across hyperthreads.
		 */
		sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;
	    }
	    cx_ptr++;
	    sc->cst_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    sc->cst_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) != 0) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "cpu_cst%d: C3[%d] not available.\n",
				 device_get_unit(sc->cst_dev), i));
		continue;
	    }
	    break;
	}

	/*
	 * Allocate the control register for C2 or C3(+).
	 */
	KASSERT(cx_ptr->res == NULL, ("still has res"));
	acpi_PkgRawGas(pkg, 0, &cx_ptr->gas);

	/*
	 * We only match the number of C2/C3 states across hyperthreads
	 * when the register is "Fixed Hardware", as it is on most
	 * Intel CPUs.  There is not much we can do for the other
	 * register types.
	 */
	if (cx_ptr->gas.SpaceId != ACPI_ADR_SPACE_FIXED_HARDWARE)
	    sc->cst_flags &= ~ACPI_CST_FLAG_MATCH_HT;

	cx_ptr->rid = sc->cst_parent->cpu_next_rid;
	acpi_bus_alloc_gas(sc->cst_dev, &cx_ptr->res_type, &cx_ptr->rid,
	    &cx_ptr->gas, &cx_ptr->res, RF_SHAREABLE);
	if (cx_ptr->res != NULL) {
	    sc->cst_parent->cpu_next_rid++;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			     "cpu_cst%d: Got C%d - %d latency\n",
			     device_get_unit(sc->cst_dev), cx_ptr->type,
			     cx_ptr->trans_lat));
	    cx_ptr->enter = acpi_cst_cx_io_enter;
	    cx_ptr->btag = rman_get_bustag(cx_ptr->res);
	    cx_ptr->bhand = rman_get_bushandle(cx_ptr->res);
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (error)
		panic("C%d CST I/O setup failed: %d", cx_ptr->type, error);
	    cx_ptr++;
	    sc->cst_cx_count++;
	} else {
	    error = acpi_cst_cx_setup(cx_ptr);
	    if (!error) {
		KASSERT(cx_ptr->enter != NULL,
		    ("C%d enter is not set", cx_ptr->type));
		cx_ptr++;
		sc->cst_cx_count++;
	    }
	}
    }
    AcpiOsFree(buf.Pointer);

    if (sc->cst_flags & ACPI_CST_FLAG_MATCH_HT) {
	cpumask_t mask;

	mask = get_cpumask_from_level(sc->cst_cpuid, CORE_LEVEL);
	if (CPUMASK_TESTNZERO(mask)) {
	    int cpu;

	    for (cpu = 0; cpu < ncpus; ++cpu) {
		struct acpi_cst_softc *sc1 = acpi_cst_softc[cpu];

		if (sc1 == NULL || sc1 == sc ||
		    (sc1->cst_flags & ACPI_CST_FLAG_ATTACHED) == 0 ||
		    (sc1->cst_flags & ACPI_CST_FLAG_MATCH_HT) == 0)
		    continue;
		if (!CPUMASK_TESTBIT(mask, sc1->cst_cpuid))
		    continue;

		if (sc1->cst_cx_count != sc->cst_cx_count) {
		    struct acpi_cst_softc *src_sc, *dst_sc;

		    if (bootverbose) {
			device_printf(sc->cst_dev,
			    "inconstent C-state count: %d, %s has %d\n",
			    sc->cst_cx_count,
			    device_get_nameunit(sc1->cst_dev),
			    sc1->cst_cx_count);
		    }
		    if (sc1->cst_cx_count > sc->cst_cx_count) {
			src_sc = sc1;
			dst_sc = sc;
		    } else {
			src_sc = sc;
			dst_sc = sc1;
		    }
		    acpi_cst_copy(dst_sc, src_sc);
		}
	    }
	}
    }

    if (reprobe) {
	/* If there are C3(+) states, always enable bus master wakeup */
	if ((acpi_cst_quirks & ACPI_CST_QUIRK_NO_BM) == 0) {
	    for (i = 0; i < sc->cst_cx_count; ++i) {
		struct acpi_cst_cx *cx = &sc->cst_cx_states[i];

		if (cx->type >= ACPI_STATE_C3) {
		    AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
		    break;
		}
	    }
	}

	/* Fix up the lowest Cx being used */
	acpi_cst_set_lowest_oncpu(sc, sc->cst_cx_lowest_req);
    }

    /*
     * Cache the lowest non-C3 state.
     *
     * NOTE: This must be done after cst_cx_lowest is set.
     */
    acpi_cst_non_c3(sc);

    cpu_sfence();
    sc->cst_flags &= ~ACPI_CST_FLAG_PROBING;

    return (0);
}
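
For reference, the _CST object parsed in Example #3 has the following shape per the ACPI specification, and the two package helpers used in the loop behave roughly as the simplified sketches below (approximations for illustration, not the real acpivar.h definitions):

/*
 * _CST layout (ACPI spec):
 *	Package {
 *	    Count,					// Integer
 *	    Package { Register, Type, Latency, Power },	// one per Cx state
 *	    ...
 *	}
 */

/* Approximation of ACPI_PKG_VALID(): a package with >= size elements. */
#define ACPI_PKG_VALID_SKETCH(pkg, size)			\
    ((pkg) != NULL && (pkg)->Type == ACPI_TYPE_PACKAGE &&	\
     (pkg)->Package.Count >= (size))

/* Approximation of acpi_PkgInt32(): fetch element idx as a uint32_t. */
static int
acpi_PkgInt32_sketch(ACPI_OBJECT *pkg, int idx, uint32_t *dst)
{
    ACPI_OBJECT *obj = &pkg->Package.Elements[idx];

    if (obj->Type != ACPI_TYPE_INTEGER)
	return (EINVAL);
    *dst = (uint32_t)obj->Integer.Value;
    return (0);
}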