Example #1
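This first example appears to be the machine-independent attach routine from the NetBSD apm(4) driver: it gates the optional APM 1.1/1.2 interfaces on the version reported by the BIOS, enables power management through the bus-supplied ops vector, performs an initial event check, and then starts a kernel thread that polls for APM events.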
void
apm_attach(struct apm_softc *sc)
{
	u_int numbatts, capflags;

	aprint_normal(": ");

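	/*
	 * Disable the optional APM 1.1/1.2 interfaces when the BIOS
	 * reports an older specification version.
	 */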
	switch ((APM_MAJOR_VERS(sc->sc_vers) << 8) + APM_MINOR_VERS(sc->sc_vers)) {
	case 0x0100:
		apm_v11_enabled = 0;
		apm_v12_enabled = 0;
		break;
	case 0x0101:
		apm_v12_enabled = 0;
		/* fall through */
	case 0x0102:
	default:
		break;
	}

	apm_set_ver(sc);	/* prints version info */
	aprint_normal("\n");
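	/*
	 * The capabilities call (battery count, capability flags) is
	 * only available from APM 1.2 on.
	 */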
	if (apm_minver >= 2)
		(*sc->sc_ops->aa_get_capabilities)(sc->sc_cookie, &numbatts,
		    &capflags);

	/*
	 * enable power management
	 */
	(*sc->sc_ops->aa_enable)(sc->sc_cookie, 1);

	if (sc->sc_ops->aa_cpu_busy)
		(*sc->sc_ops->aa_cpu_busy)(sc->sc_cookie);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Initial state is `resumed'. */
	sc->sc_power_state = PWR_RESUME;
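
	/* Set up the selinfo records used to notify select/poll waiters. */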
	selinit(&sc->sc_rsel);
	selinit(&sc->sc_xsel);

	/* Do an initial check. */
	apm_periodic_check(sc);

	/*
	 * Create a kernel thread to periodically check for APM events,
	 * and notify other subsystems when they occur.
	 */
	if (kthread_create(PRI_NONE, 0, NULL, apm_thread, sc,
	    &sc->sc_thread, "%s", device_xname(sc->sc_dev)) != 0) {
		/*
		 * We were unable to create the APM thread; bail out.
		 */
		if (sc->sc_ops->aa_disconnect)
			(*sc->sc_ops->aa_disconnect)(sc->sc_cookie);
		aprint_error_dev(sc->sc_dev, "unable to create thread, "
		    "kernel APM support disabled\n");
	}

	if (!pmf_device_register(sc->sc_dev, NULL, NULL))
		aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n");
}
Example #2
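The second example looks like the i386 attach routine from the OpenBSD apm driver. When the BIOS advertises a 32-bit interface it maps the BIOS code and data segments, installs matching GDT descriptors, enables and engages power management, and finally hooks up the APM idle loop and the /dev/apm entry points; otherwise it installs null descriptors in the APM GDT slots.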
void
apmattach(struct device *parent, struct device *self, void *aux)
{
	struct bios_attach_args *ba = aux;
	bios_apminfo_t *ap = ba->ba_apmp;
	struct apm_softc *sc = (void *)self;
	struct apmregs regs;
	u_int cbase, clen, l;
	bus_space_handle_t ch16, ch32, dh;

	apm_flags = ap->apm_detail;
	/*
	 * set up GDT descriptors for APM
	 */
	if (apm_flags & APM_32BIT_SUPPORTED) {
		/* truncate segments' limits to a page */
		ap->apm_code_len -= (ap->apm_code32_base +
		    ap->apm_code_len + 1) & 0xfff;
		ap->apm_code16_len -= (ap->apm_code16_base +
		    ap->apm_code16_len + 1) & 0xfff;
		ap->apm_data_len -= (ap->apm_data_base +
		    ap->apm_data_len + 1) & 0xfff;

		/* adjust version */
		if ((sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK) &&
		    (apm_flags & APM_VERMASK) !=
		    (sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK))
			apm_flags = (apm_flags & ~APM_VERMASK) |
			    (sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK);
		if (sc->sc_dev.dv_cfdata->cf_flags & APM_NOCLI) {
			extern int apm_cli; /* from apmcall.S */
			apm_cli = 0;
		}
		if (sc->sc_dev.dv_cfdata->cf_flags & APM_BEBATT)
			sc->be_batt = 1;
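		/*
		 * Protected-mode entry point: 32-bit code selector plus the
		 * offset supplied by the BIOS.
		 */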
		apm_ep.seg = GSEL(GAPM32CODE_SEL,SEL_KPL);
		apm_ep.entry = ap->apm_entry;
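		/* Compute one span covering both the 32- and 16-bit code segments. */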
		cbase = min(ap->apm_code32_base, ap->apm_code16_base);
		clen = max(ap->apm_code32_base + ap->apm_code_len,
			   ap->apm_code16_base + ap->apm_code16_len) - cbase;
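		/*
		 * If the code span overlaps the data segment, map the whole
		 * region once and derive both handles from that mapping;
		 * otherwise map code and data separately.
		 */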
		if ((cbase <= ap->apm_data_base &&
		     cbase + clen >= ap->apm_data_base) ||
		    (ap->apm_data_base <= cbase &&
		     ap->apm_data_base + ap->apm_data_len >= cbase)) {
			l = max(ap->apm_data_base + ap->apm_data_len + 1,
				cbase + clen + 1) -
			    min(ap->apm_data_base, cbase);
			bus_space_map(ba->ba_memt,
				min(ap->apm_data_base, cbase),
				l, 1, &dh);
			ch16 = dh;
			if (ap->apm_data_base < cbase)
				ch16 += cbase - ap->apm_data_base;
			else
				dh += ap->apm_data_base - cbase;
		} else {
			bus_space_map(ba->ba_memt, cbase, clen + 1, 1, &ch16);
			bus_space_map(ba->ba_memt, ap->apm_data_base,
			    ap->apm_data_len + 1, 1, &dh);
		}
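		/* Derive the 32- and 16-bit code handles from the common base. */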
		ch32 = ch16;
		if (ap->apm_code16_base == cbase)
			ch32 += ap->apm_code32_base - cbase;
		else
			ch16 += ap->apm_code16_base - cbase;

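		/* Install the APM code and data descriptors in the GDT. */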
		setgdt(GAPM32CODE_SEL, (void *)ch32, ap->apm_code_len,
		    SDT_MEMERA, SEL_KPL, 1, 0);
		setgdt(GAPM16CODE_SEL, (void *)ch16, ap->apm_code16_len,
		    SDT_MEMERA, SEL_KPL, 0, 0);
		setgdt(GAPMDATA_SEL, (void *)dh, ap->apm_data_len, SDT_MEMRWA,
		    SEL_KPL, 1, 0);
		DPRINTF((": flags %x code 32:%x/%x[%x] 16:%x/%x[%x] "
		    "data %x/%x/%x ep %x (%x:%x)\n%s", apm_flags,
		    ap->apm_code32_base, ch32, ap->apm_code_len,
		    ap->apm_code16_base, ch16, ap->apm_code16_len,
		    ap->apm_data_base, dh, ap->apm_data_len,
		    ap->apm_entry, apm_ep.seg, ap->apm_entry+ch32,
		    sc->sc_dev.dv_xname));

		apm_set_ver(sc);

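		/* Re-enable power management if the BIOS reports it disabled. */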
		if (apm_flags & APM_BIOS_PM_DISABLED)
			apm_powmgt_enable(1);

		/* Engage cooperative power management on all devices (v1.1) */
		apm_powmgt_engage(1, APM_DEV_ALLDEVS);

		bzero(&regs, sizeof(regs));
		if (apm_get_powstat(&regs) != 0)
			apm_perror("get power status", &regs);
		apm_cpu_busy();

		rw_init(&sc->sc_lock, "apmlk");

		/*
		 * Do a check once, ignoring any errors. This avoids
		 * gratuitous APM disconnects on laptops where the first
		 * event in the queue (after a boot) is non-recognizable.
		 * The IBM ThinkPad 770Z is one of those.
		 */
		apm_periodic_check(sc);

		if (apm_periodic_check(sc) == -1) {
			apm_disconnect(sc);

			/* Failed, nuke APM idle loop */
			cpu_idle_enter_fcn = NULL;
			cpu_idle_cycle_fcn = NULL;
			cpu_idle_leave_fcn = NULL;
		} else {
			kthread_create_deferred(apm_thread_create, sc);

			/* Setup APM idle loop */
			if (apm_flags & APM_IDLE_SLOWS) {
				cpu_idle_enter_fcn = apm_cpu_slow;
				cpu_idle_cycle_fcn = NULL;
				cpu_idle_leave_fcn = apm_cpu_busy;
			} else {
				cpu_idle_enter_fcn = NULL;
				cpu_idle_cycle_fcn = apm_cpu_idle;
				cpu_idle_leave_fcn = NULL;
			}

			/* All is well, let the rest of the world know */
			acpiapm_open = apmopen;
			acpiapm_close = apmclose;
			acpiapm_ioctl = apmioctl;
			acpiapm_kqfilter = apmkqfilter;
			apm_attached = 1;
		}
	} else {
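		/* No 32-bit interface: leave the APM GDT slots as null descriptors. */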
		setgdt(GAPM32CODE_SEL, NULL, 0, 0, 0, 0, 0);
		setgdt(GAPM16CODE_SEL, NULL, 0, 0, 0, 0, 0);
		setgdt(GAPMDATA_SEL, NULL, 0, 0, 0, 0, 0);
	}
}