static void
nvme_notify_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	struct nvme_namespace	*ns;
	void			*ctrlr_cookie;
	int			dev_idx, ns_idx, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		if (cons->ctrlr_fn != NULL)
			ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
		else
			ctrlr_cookie = NULL;
		ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
		for (ns_idx = 0; ns_idx < ctrlr->cdata.nn; ns_idx++) {
			ns = &ctrlr->ns[ns_idx];
			if (cons->ns_fn != NULL)
				ns->cons_cookie[cons->id] =
				    (*cons->ns_fn)(ns, ctrlr_cookie);
		}
	}

	free(devlist, M_TEMP);
}
static void
usbpf_uninit(void *arg)
{
	int devlcnt;
	device_t *devlp;
	devclass_t dc;
	struct usb_bus *ubus;
	int error;
	int i;

	if_clone_detach(usbpf_cloner);

	dc = devclass_find(usbusname);
	if (dc == NULL)
		return;
	error = devclass_get_devices(dc, &devlp, &devlcnt);
	if (error)
		return;
	for (i = 0; i < devlcnt; i++) {
		ubus = device_get_softc(devlp[i]);
		if (ubus != NULL && ubus->ifp != NULL)
			usbpf_clone_destroy(usbpf_cloner, ubus->ifp);
	}
	free(devlp, M_TEMP);
}
static void
nvme_shutdown(void)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	union cc_register	cc;
	union csts_register	csts;
	int			dev, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev = 0; dev < devcount; dev++) {
		/*
		 * Only notify controller of shutdown when a real shutdown is
		 * in process, not when a module unload occurs.  It seems at
		 * least some controllers (Chatham at least) don't let you
		 * re-enable the controller after shutdown notification has
		 * been received.
		 */
		ctrlr = DEVICE2SOFTC(devlist[dev]);
		cc.raw = nvme_mmio_read_4(ctrlr, cc);
		cc.bits.shn = NVME_SHN_NORMAL;
		nvme_mmio_write_4(ctrlr, cc, cc.raw);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
		while (csts.bits.shst != NVME_SHST_COMPLETE) {
			DELAY(5);
			csts.raw = nvme_mmio_read_4(ctrlr, csts);
		}
	}

	free(devlist, M_TEMP);
}
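/*
 * A minimal sketch of the same completion poll with a bounded wait; the loop
 * above spins forever if a controller never reports SHST_COMPLETE.  The
 * helper name and the ~5 second budget are assumptions for illustration,
 * not taken from the driver.
 */
static void
nvme_wait_shutdown_complete(struct nvme_controller *ctrlr)
{
	union csts_register	csts;
	int			timeout = 5 * 1000 * 1000 / 5; /* ~5 s of 5 us polls */

	csts.raw = nvme_mmio_read_4(ctrlr, csts);
	while (csts.bits.shst != NVME_SHST_COMPLETE) {
		if (--timeout < 0) {
			printf("nvme: controller did not complete shutdown\n");
			break;
		}
		DELAY(5);
		csts.raw = nvme_mmio_read_4(ctrlr, csts);
	}
}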
/*
 * Thermal zone monitor thread.
 */
static void
acpi_tz_thread(void *arg)
{
	device_t	*devs;
	int		devcount, i;
	int		flags;
	struct acpi_tz_softc **sc;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	devs = NULL;
	devcount = 0;
	sc = NULL;

	for (;;) {
		/* If the number of devices has changed, re-evaluate. */
		if (devclass_get_count(acpi_tz_devclass) != devcount) {
			if (devs != NULL) {
				free(devs, M_TEMP);
				free(sc, M_TEMP);
			}
			devclass_get_devices(acpi_tz_devclass, &devs,
			    &devcount);
			sc = malloc(sizeof(struct acpi_tz_softc *) * devcount,
			    M_TEMP, M_WAITOK | M_ZERO);
			for (i = 0; i < devcount; i++)
				sc[i] = device_get_softc(devs[i]);
		}

		/* Check for temperature events and act on them. */
		for (i = 0; i < devcount; i++) {
			ACPI_LOCK(thermal);
			flags = sc[i]->tz_flags;
			sc[i]->tz_flags &= TZ_FLAG_NO_SCP;
			ACPI_UNLOCK(thermal);
			acpi_tz_timeout(sc[i], flags);
		}

		/* If more work to do, don't go to sleep yet. */
		ACPI_LOCK(thermal);
		for (i = 0; i < devcount; i++) {
			if (sc[i]->tz_flags & ~TZ_FLAG_NO_SCP)
				break;
		}

		/*
		 * If we have no more work, sleep for a while, setting PDROP
		 * so that the mutex will not be reacquired.  Otherwise, drop
		 * the mutex and loop to handle more events.
		 */
		if (i == devcount)
			msleep(&acpi_tz_proc, &thermal_mutex, PZERO | PDROP,
			    "tzpoll", hz * acpi_tz_polling_rate);
		else
			ACPI_UNLOCK(thermal);
	}
}
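/*
 * A minimal sketch of how a monitor thread like acpi_tz_thread() is
 * typically started, using the stock kproc_create(9) API.  The helper name
 * is hypothetical; acpi_tz_proc is the same global that the msleep() above
 * sleeps on.
 */
static void
acpi_tz_start_thread(void)
{
	int error;

	error = kproc_create(acpi_tz_thread, NULL, &acpi_tz_proc,
	    RFHIGHPID, 0, "acpi_thermal");
	if (error != 0)
		printf("acpi_tz: thread creation failed: %d\n", error);
}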
static void
nvme_notify_new_consumer(struct nvme_consumer *cons)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev_idx, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
		ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
		nvme_notify(cons, ctrlr);
	}

	free(devlist, M_TEMP);
}
static void
nvme_shutdown(void)
{
	device_t		*devlist;
	struct nvme_controller	*ctrlr;
	int			dev, devcount;

	if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
		return;

	for (dev = 0; dev < devcount; dev++) {
		ctrlr = DEVICE2SOFTC(devlist[dev]);
		nvme_ctrlr_shutdown(ctrlr);
	}

	free(devlist, M_TEMP);
}
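/*
 * The snippets above all share one idiom: snapshot a devclass's device list,
 * walk it, free it.  A minimal generic sketch, assuming a hypothetical "foo"
 * devclass and callback; devclass_get_devices() allocates the snapshot with
 * M_TEMP, so the caller must free it with the same malloc type.
 */
static void
foo_foreach_softc(void (*cb)(void *softc))
{
	devclass_t	dc;
	device_t	*devlist;
	int		devcount, i;

	dc = devclass_find("foo");
	if (dc == NULL)
		return;
	if (devclass_get_devices(dc, &devlist, &devcount))
		return;		/* allocation failed; nothing to free */
	for (i = 0; i < devcount; i++)
		cb(device_get_softc(devlist[i]));
	free(devlist, M_TEMP);
}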
static int
clkrun_hack(int run)
{
#ifdef __i386__
	devclass_t		pci_devclass;
	device_t		*pci_devices, *pci_children, *busp, *childp;
	int			pci_count = 0, pci_childcount = 0;
	int			i, j, port;
	u_int16_t		control;
	bus_space_tag_t		btag;

	if ((pci_devclass = devclass_find("pci")) == NULL) {
		return ENXIO;
	}
	/* Check the result so we never free an uninitialized list pointer. */
	if (devclass_get_devices(pci_devclass, &pci_devices, &pci_count)) {
		return ENXIO;
	}

	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
		pci_childcount = 0;
		if (device_get_children(*busp, &pci_children, &pci_childcount))
			continue;
		for (j = 0, childp = pci_children; j < pci_childcount;
		    j++, childp++) {
			if (pci_get_vendor(*childp) == 0x8086 &&
			    pci_get_device(*childp) == 0x7113) {
				port = (pci_read_config(*childp, 0x41, 1) << 8)
				    + 0x10;
				/* XXX */
				btag = X86_BUS_SPACE_IO;
				control = bus_space_read_2(btag, 0x0, port);
				control &= ~0x2000;
				control |= run ? 0 : 0x2000;
				bus_space_write_2(btag, 0x0, port, control);
				free(pci_devices, M_TEMP);
				free(pci_children, M_TEMP);
				return 0;
			}
		}
		free(pci_children, M_TEMP);
	}
	free(pci_devices, M_TEMP);
	return ENXIO;
#else
	return 0;
#endif
}
static int
smapi_modevent (module_t mod, int what, void *arg)
{
	device_t *	devs;
	int		count;
	int		i;

	switch (what) {
	case MOD_LOAD:
		break;
	case MOD_UNLOAD:
		if (devclass_get_devices(smapi_devclass, &devs, &count))
			break;
		for (i = 0; i < count; i++) {
			device_delete_child(device_get_parent(devs[i]),
			    devs[i]);
		}
		/* The device list is malloc'd for us; don't leak it. */
		free(devs, M_TEMP);
		break;
	default:
		break;
	}

	return (0);
}
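/*
 * A sketch of how such a handler is typically hooked up, assuming the
 * standard moduledata_t / DECLARE_MODULE boilerplate; the subsystem and
 * order values here are illustrative, not taken from the driver.
 */
static moduledata_t smapi_mod = {
	"smapi",		/* module name */
	smapi_modevent,		/* event handler */
	NULL			/* extra data */
};
DECLARE_MODULE(smapi, smapi_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);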
	/* Probe for Cx state support. */
	acpi_cpu_cx_probe(sc);

	return (0);
}

static void
acpi_cpu_postattach(void *unused __unused)
{
	device_t *devices;
	int err;
	int i, n;
	int attached;

	err = devclass_get_devices(acpi_cpu_devclass, &devices, &n);
	if (err != 0) {
		printf("devclass_get_devices(acpi_cpu_devclass) failed\n");
		return;
	}
	attached = 0;
	for (i = 0; i < n; i++)
		if (device_is_attached(devices[i]) &&
		    device_get_driver(devices[i]) == &acpi_cpu_driver)
			attached = 1;
	for (i = 0; i < n; i++)
		bus_generic_probe(devices[i]);
	for (i = 0; i < n; i++)
		bus_generic_attach(devices[i]);
	free(devices, M_TEMP);
}
/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
	struct acpi_cpu_softc *sc;
	int i;

	/* Get set of CPU devices */
	devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

	/*
	 * Setup any quirks that might be necessary now that we have probed
	 * all the CPUs.
	 */
	acpi_cpu_quirks();

	cpu_cx_count = 0;
	if (cpu_cx_generic) {
		/*
		 * We are using generic Cx mode, probe for available Cx states
		 * for all processors.
		 */
		for (i = 0; i < cpu_ndevices; i++) {
			sc = device_get_softc(cpu_devices[i]);
			acpi_cpu_generic_cx_probe(sc);
			if (sc->cpu_cx_count > cpu_cx_count)
				cpu_cx_count = sc->cpu_cx_count;
		}

		/*
		 * Find the highest Cx state common to all CPUs
		 * in the system, taking quirks into account.
		 */
		for (i = 0; i < cpu_ndevices; i++) {
			sc = device_get_softc(cpu_devices[i]);
			if (sc->cpu_cx_count < cpu_cx_count)
				cpu_cx_count = sc->cpu_cx_count;
		}
	} else {
		/*
		 * We are using _CST mode, remove C3 state if necessary.
		 * Update the largest Cx state supported in the global
		 * cpu_cx_count.  It will be used in the global Cx sysctl
		 * handler.  As we now know for sure that we will be using
		 * _CST mode, install our notify handler.
		 */
		for (i = 0; i < cpu_ndevices; i++) {
			sc = device_get_softc(cpu_devices[i]);
			if (cpu_quirks & CPU_QUIRK_NO_C3) {
				sc->cpu_cx_count = sc->cpu_non_c3 + 1;
			}
			if (sc->cpu_cx_count > cpu_cx_count)
				cpu_cx_count = sc->cpu_cx_count;
			AcpiInstallNotifyHandler(sc->cpu_handle,
			    ACPI_DEVICE_NOTIFY, acpi_cpu_notify, sc);
		}
	}

	/* Perform Cx final initialization. */
	for (i = 0; i < cpu_ndevices; i++) {
		sc = device_get_softc(cpu_devices[i]);
		acpi_cpu_startup_cx(sc);
	}

	/* Add a sysctl handler to handle global Cx lowest setting */
	SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
	    OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
	    NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
	    "Global lowest Cx sleep state to use");

	/* Take over idling from cpu_idle_default(). */
	cpu_cx_lowest = 0;
	cpu_disable_idle = FALSE;
	cpu_idle_hook = acpi_cpu_idle;
}
/*
 * Call this *after* all CPUs' Cx states have been attached.
 */
static void
acpi_cst_postattach(void *arg)
{
	struct acpi_cst_softc *sc;
	int i;

	/* Get set of Cx state devices */
	devclass_get_devices(acpi_cst_devclass, &acpi_cst_devices,
	    &acpi_cst_ndevices);

	/*
	 * Setup any quirks that might be necessary now that we have probed
	 * all the CPUs' Cx states.
	 */
	acpi_cst_set_quirks();

	if (acpi_cst_use_fadt) {
		/*
		 * We are using Cx mode from FADT, probe for available Cx
		 * states for all processors.
		 */
		for (i = 0; i < acpi_cst_ndevices; i++) {
			sc = device_get_softc(acpi_cst_devices[i]);
			acpi_cst_cx_probe_fadt(sc);
		}
	} else {
		/*
		 * We are using _CST mode, remove C3 state if necessary.
		 *
		 * As we now know for sure that we will be using _CST mode,
		 * install our notify handler.
		 */
		for (i = 0; i < acpi_cst_ndevices; i++) {
			sc = device_get_softc(acpi_cst_devices[i]);
			if (acpi_cst_quirks & ACPI_CST_QUIRK_NO_C3) {
				/* Free part of unused resources */
				acpi_cst_free_resource(sc, sc->cst_non_c3 + 1);
				sc->cst_cx_count = sc->cst_non_c3 + 1;
			}
			sc->cst_parent->cpu_cst_notify = acpi_cst_notify;
		}
	}
	acpi_cst_global_cx_count();

	/* Perform Cx final initialization. */
	for (i = 0; i < acpi_cst_ndevices; i++) {
		sc = device_get_softc(acpi_cst_devices[i]);
		acpi_cst_startup(sc);

		if (sc->cst_parent->glob_sysctl_tree != NULL) {
			struct acpi_cpu_softc *cpu = sc->cst_parent;

			/* Add a sysctl handler for global Cx lowest setting */
			SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest",
			    CTLTYPE_STRING | CTLFLAG_RW, NULL, 0,
			    acpi_cst_global_lowest_sysctl, "A",
			    "Requested global lowest Cx sleep state");
			SYSCTL_ADD_PROC(&cpu->glob_sysctl_ctx,
			    SYSCTL_CHILDREN(cpu->glob_sysctl_tree),
			    OID_AUTO, "cx_lowest_use",
			    CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
			    acpi_cst_global_lowest_use_sysctl, "A",
			    "Global lowest Cx sleep state to use");
		}
	}

	/* Take over idling from cpu_idle_default(). */
	acpi_cst_cx_lowest = 0;
	acpi_cst_cx_lowest_req = 0;
	acpi_cst_disable_idle = FALSE;

	cpu_sfence();
	cpu_idle_hook = acpi_cst_idle;
}
/*
 * Convert the IP-related info in hmsg from UTF-16 to UTF-8 and store it
 * in umsg.
 */
static int
hv_kvp_convert_utf16_ipinfo_to_utf8(struct hv_kvp_ip_msg *host_ip_msg,
				    struct hv_kvp_msg *umsg)
{
	int err_ip, err_subnet, err_gway, err_dns, err_adap;
	int UNUSED_FLAG = 1;
	int guid_index;
	struct hv_device *hv_dev;	/* GUID Data Structure */
	hn_softc_t *sc;			/* hn softc structure */
	char if_name[4];
	unsigned char guid_instance[40];
	char *guid_data = NULL;
	char buf[39];

	struct guid_extract {
		char a1[2];
		char a2[2];
		char a3[2];
		char a4[2];
		char b1[2];
		char b2[2];
		char c1[2];
		char c2[2];
		char d[4];
		char e[12];
	};
	struct guid_extract *id;
	device_t *devs;
	int devcnt;

	/* IP Address */
	utf16_to_utf8((char *)umsg->body.kvp_ip_val.ip_addr,
	    MAX_IP_ADDR_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.ip_addr,
	    MAX_IP_ADDR_SIZE,
	    UNUSED_FLAG,
	    &err_ip);

	/* Adapter ID : GUID */
	utf16_to_utf8((char *)umsg->body.kvp_ip_val.adapter_id,
	    MAX_ADAPTER_ID_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.adapter_id,
	    MAX_ADAPTER_ID_SIZE,
	    UNUSED_FLAG,
	    &err_adap);

	if (devclass_get_devices(devclass_find("hn"), &devs, &devcnt) == 0) {
		for (devcnt = devcnt - 1; devcnt >= 0; devcnt--) {
			sc = device_get_softc(devs[devcnt]);

			/* Trying to find the GUID of the network device */
			hv_dev = sc->hn_dev_obj;

			for (guid_index = 0; guid_index < 16; guid_index++) {
				sprintf((char *)&guid_instance[guid_index * 2],
				    "%02x",
				    hv_dev->device_id.data[guid_index]);
			}

			guid_data = (char *)guid_instance;
			id = (struct guid_extract *)guid_data;
			snprintf(buf, sizeof(buf),
			    "{%.2s%.2s%.2s%.2s-%.2s%.2s-%.2s%.2s-%.4s-%s}",
			    id->a4, id->a3, id->a2, id->a1, id->b2, id->b1,
			    id->c2, id->c1, id->d, id->e);

			guid_data = NULL;
			/*
			 * if_name only fits "hn" plus a single-digit unit;
			 * use snprintf so larger units truncate instead of
			 * overflowing the buffer.
			 */
			snprintf(if_name, sizeof(if_name), "%s%d", "hn",
			    device_get_unit(devs[devcnt]));

			if (strncmp(buf,
			    (char *)umsg->body.kvp_ip_val.adapter_id,
			    39) == 0) {
				strcpy((char *)umsg->body.kvp_ip_val.adapter_id,
				    if_name);
				break;
			}
		}
		free(devs, M_TEMP);
	}

	/* Address Family, DHCP, SUBNET, Gateway, DNS */
	umsg->kvp_hdr.operation = host_ip_msg->operation;
	umsg->body.kvp_ip_val.addr_family =
	    host_ip_msg->kvp_ip_val.addr_family;
	umsg->body.kvp_ip_val.dhcp_enabled =
	    host_ip_msg->kvp_ip_val.dhcp_enabled;

	utf16_to_utf8((char *)umsg->body.kvp_ip_val.sub_net,
	    MAX_IP_ADDR_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.sub_net,
	    MAX_IP_ADDR_SIZE,
	    UNUSED_FLAG,
	    &err_subnet);

	utf16_to_utf8((char *)umsg->body.kvp_ip_val.gate_way,
	    MAX_GATEWAY_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.gate_way,
	    MAX_GATEWAY_SIZE,
	    UNUSED_FLAG,
	    &err_gway);

	utf16_to_utf8((char *)umsg->body.kvp_ip_val.dns_addr,
	    MAX_IP_ADDR_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.dns_addr,
	    MAX_IP_ADDR_SIZE,
	    UNUSED_FLAG,
	    &err_dns);

	return (err_ip | err_subnet | err_gway | err_dns | err_adap);
}
/*
 * Convert the IP-related info in hmsg from UTF-16 to UTF-8 and store it
 * in umsg.
 */
static int
hv_kvp_convert_utf16_ipinfo_to_utf8(struct hv_kvp_ip_msg *host_ip_msg,
				    struct hv_kvp_msg *umsg)
{
	int err_ip, err_subnet, err_gway, err_dns, err_adap;
	int UNUSED_FLAG = 1;
	struct hv_device *hv_dev;	/* GUID Data Structure */
	hn_softc_t *sc;			/* hn softc structure */
	char if_name[4];
	char buf[39];
	device_t *devs;
	int devcnt;

	/* IP Address */
	utf16_to_utf8((char *)umsg->body.kvp_ip_val.ip_addr,
	    MAX_IP_ADDR_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.ip_addr,
	    MAX_IP_ADDR_SIZE,
	    UNUSED_FLAG,
	    &err_ip);

	/* Adapter ID : GUID */
	utf16_to_utf8((char *)umsg->body.kvp_ip_val.adapter_id,
	    MAX_ADAPTER_ID_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.adapter_id,
	    MAX_ADAPTER_ID_SIZE,
	    UNUSED_FLAG,
	    &err_adap);

	if (devclass_get_devices(devclass_find("hn"), &devs, &devcnt) == 0) {
		for (devcnt = devcnt - 1; devcnt >= 0; devcnt--) {
			sc = device_get_softc(devs[devcnt]);

			/* Trying to find the GUID of the network device */
			hv_dev = sc->hn_dev_obj;
			snprintf_hv_guid(buf, sizeof(buf), &hv_dev->device_id);

			/*
			 * if_name only fits "hn" plus a single-digit unit;
			 * use snprintf so larger units truncate instead of
			 * overflowing the buffer.
			 */
			snprintf(if_name, sizeof(if_name), "%s%d", "hn",
			    device_get_unit(devs[devcnt]));

			if (strncmp(buf,
			    (char *)umsg->body.kvp_ip_val.adapter_id,
			    39) == 0) {
				strcpy((char *)umsg->body.kvp_ip_val.adapter_id,
				    if_name);
				break;
			}
		}
		free(devs, M_TEMP);
	}

	/* Address Family, DHCP, SUBNET, Gateway, DNS */
	umsg->kvp_hdr.operation = host_ip_msg->operation;
	umsg->body.kvp_ip_val.addr_family =
	    host_ip_msg->kvp_ip_val.addr_family;
	umsg->body.kvp_ip_val.dhcp_enabled =
	    host_ip_msg->kvp_ip_val.dhcp_enabled;

	utf16_to_utf8((char *)umsg->body.kvp_ip_val.sub_net,
	    MAX_IP_ADDR_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.sub_net,
	    MAX_IP_ADDR_SIZE,
	    UNUSED_FLAG,
	    &err_subnet);

	utf16_to_utf8((char *)umsg->body.kvp_ip_val.gate_way,
	    MAX_GATEWAY_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.gate_way,
	    MAX_GATEWAY_SIZE,
	    UNUSED_FLAG,
	    &err_gway);

	utf16_to_utf8((char *)umsg->body.kvp_ip_val.dns_addr,
	    MAX_IP_ADDR_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.dns_addr,
	    MAX_IP_ADDR_SIZE,
	    UNUSED_FLAG,
	    &err_dns);

	return (err_ip | err_subnet | err_gway | err_dns | err_adap);
}
/*
 * Convert the IP-related info in hmsg from UTF-16 to UTF-8 and store it
 * in umsg.
 */
static int
hv_kvp_convert_utf16_ipinfo_to_utf8(struct hv_kvp_ip_msg *host_ip_msg,
				    struct hv_kvp_msg *umsg)
{
	int err_ip, err_subnet, err_gway, err_dns, err_adap;
	int UNUSED_FLAG = 1;
	device_t *devs;
	int devcnt;

	/* IP Address */
	utf16_to_utf8((char *)umsg->body.kvp_ip_val.ip_addr,
	    MAX_IP_ADDR_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.ip_addr,
	    MAX_IP_ADDR_SIZE,
	    UNUSED_FLAG,
	    &err_ip);

	/* Adapter ID : GUID */
	utf16_to_utf8((char *)umsg->body.kvp_ip_val.adapter_id,
	    MAX_ADAPTER_ID_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.adapter_id,
	    MAX_ADAPTER_ID_SIZE,
	    UNUSED_FLAG,
	    &err_adap);

	if (devclass_get_devices(devclass_find("hn"), &devs, &devcnt) == 0) {
		for (devcnt = devcnt - 1; devcnt >= 0; devcnt--) {
			device_t dev = devs[devcnt];
			struct vmbus_channel *chan;
			char buf[HYPERV_GUID_STRLEN];
			int n;

			chan = vmbus_get_channel(dev);
			n = hyperv_guid2str(vmbus_chan_guid_inst(chan), buf,
			    sizeof(buf));

			/*
			 * The string in the 'kvp_ip_val.adapter_id' has
			 * braces around the GUID; skip the leading brace
			 * in 'kvp_ip_val.adapter_id'.
			 */
			if (strncmp(buf,
			    ((char *)&umsg->body.kvp_ip_val.adapter_id) + 1,
			    n) == 0) {
				strlcpy((char *)umsg->body.kvp_ip_val.adapter_id,
				    device_get_nameunit(dev),
				    MAX_ADAPTER_ID_SIZE);
				break;
			}
		}
		free(devs, M_TEMP);
	}

	/* Address Family, DHCP, SUBNET, Gateway, DNS */
	umsg->kvp_hdr.operation = host_ip_msg->operation;
	umsg->body.kvp_ip_val.addr_family =
	    host_ip_msg->kvp_ip_val.addr_family;
	umsg->body.kvp_ip_val.dhcp_enabled =
	    host_ip_msg->kvp_ip_val.dhcp_enabled;

	utf16_to_utf8((char *)umsg->body.kvp_ip_val.sub_net,
	    MAX_IP_ADDR_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.sub_net,
	    MAX_IP_ADDR_SIZE,
	    UNUSED_FLAG,
	    &err_subnet);

	utf16_to_utf8((char *)umsg->body.kvp_ip_val.gate_way,
	    MAX_GATEWAY_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.gate_way,
	    MAX_GATEWAY_SIZE,
	    UNUSED_FLAG,
	    &err_gway);

	utf16_to_utf8((char *)umsg->body.kvp_ip_val.dns_addr,
	    MAX_IP_ADDR_SIZE,
	    (uint16_t *)host_ip_msg->kvp_ip_val.dns_addr,
	    MAX_IP_ADDR_SIZE,
	    UNUSED_FLAG,
	    &err_dns);

	return (err_ip | err_subnet | err_gway | err_dns | err_adap);
}