/*
 * ral_pci_attach --
 *	Autoconf attach for Ralink RT2560/RT2661/RT2860 PCI wireless parts.
 *	Selects the chip-family ops vector from the PCI product ID, enables
 *	bus mastering and memory space, maps BAR0, and hooks the interrupt
 *	before handing off to the family-specific attach routine.
 */
void
ral_pci_attach(device_t parent, device_t self, void *aux)
{
	struct ral_pci_softc *psc = device_private(self);
	struct rt2560_softc *sc = &psc->sc_sc;
	const struct pci_attach_args *pa = aux;
	const char *intrstr;
	bus_addr_t base;
	pci_intr_handle_t ih;
	pcireg_t memtype, reg;
	int error;
	char intrbuf[PCI_INTRSTR_LEN];

	pci_aprint_devinfo(pa, NULL);

	/* Pick the ops vector matching the chip generation. */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_RALINK) {
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_RALINK_RT2560:
			psc->sc_opns = &ral_rt2560_opns;
			break;
		case PCI_PRODUCT_RALINK_RT2561:
		case PCI_PRODUCT_RALINK_RT2561S:
		case PCI_PRODUCT_RALINK_RT2661:
			psc->sc_opns = &ral_rt2661_opns;
			break;
		default:
			psc->sc_opns = &ral_rt2860_opns;
			break;
		}
	} else {
		/* all other vendors are RT2860 only */
		psc->sc_opns = &ral_rt2860_opns;
	}
	sc->sc_dev = self;
	sc->sc_dmat = pa->pa_dmat;
	psc->sc_pc = pa->pa_pc;

	/* enable the appropriate bits in the PCI CSR */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg);

	/* map control/status registers */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RAL_PCI_BAR0);
	error = pci_mapreg_map(pa, RAL_PCI_BAR0, memtype, 0, &sc->sc_st,
	    &sc->sc_sh, &base, &psc->sc_mapsize);
	if (error != 0) {
		aprint_error(": could not map memory space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error(": could not map interrupt\n");
		return;
	}

	/*
	 * NOTE(review): on the failure paths below the BAR0 mapping is not
	 * unmapped; harmless for an attach failure but worth confirming
	 * against the driver's detach path.
	 */
	intrstr = pci_intr_string(psc->sc_pc, ih, intrbuf, sizeof(intrbuf));
	psc->sc_ih = pci_intr_establish(psc->sc_pc, ih, IPL_NET,
	    psc->sc_opns->intr, sc);
	if (psc->sc_ih == NULL) {
		aprint_error(": could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/* Chip-family attach does the rest (EEPROM, net80211, ...). */
	(*psc->sc_opns->attach)(sc, PCI_PRODUCT(pa->pa_id));
}
/*
 * sapcic_event_thread --
 *	Kernel thread that watches a PCMCIA socket for card insertion and
 *	removal.  Debounces the interrupt-driven `so->event' flag, samples
 *	the card status at splhigh, and attaches/detaches the card on a
 *	status change.  Exits when `so->shutdown' is set.
 */
static void
sapcic_event_thread(void *arg)
{
	struct sapcic_socket *so = arg;
	int newstatus, s;

	while (so->shutdown == 0) {
		/*
		 * Serialize event processing on the PCIC.  We may
		 * sleep while we hold this lock.
		 */
		mutex_enter(&so->sc->sc_lock);

		/*
		 * Sleep 0.25s so that a burst of chattering interrupts is
		 * coalesced before we sample the card status.  (The function
		 * address is used purely as a unique wait channel.)
		 */
		(void) tsleep(sapcic_event_thread, PWAIT, "pcicss", hz / 4);

		/* Clear the pending-event flag and sample status atomically
		 * with respect to the interrupt handler. */
		s = splhigh();
		so->event = 0;

		/* we don't rely on interrupt type */
		newstatus = (so->pcictag->read)(so, SAPCIC_STATUS_CARD);
		splx(s);

		if (so->laststatus == newstatus) {
			/*
			 * No events to process; release the PCIC lock.
			 */
			mutex_exit(&so->sc->sc_lock);
			/* Wait (up to 1s) for the next interrupt wakeup. */
			(void) tsleep(&so->event, PWAIT, "pcicev", hz);
			continue;
		}
		so->laststatus = newstatus;
		switch (newstatus) {
		case SAPCIC_CARD_VALID:
			aprint_normal_dev(so->sc->sc_dev, "insertion event\n");
			pcmcia_card_attach(so->pcmcia);
			break;
		case SAPCIC_CARD_INVALID:
			aprint_normal_dev(so->sc->sc_dev, "removal event\n");
			pcmcia_card_detach(so->pcmcia, DETACH_FORCE);
			break;
		default:
			panic("sapcic_event_thread: unknown status %d",
			    newstatus);
		}
		mutex_exit(&so->sc->sc_lock);
	}
	so->event_thread = NULL;

	/* In case parent is waiting for us to exit. */
	wakeup(so->sc);

	kthread_exit(0);
}
static void apm_power_print(struct apm_softc *sc, struct apm_power_info *pi) { if (pi->battery_life != APM_BATT_LIFE_UNKNOWN) { aprint_normal_dev(sc->sc_dev, "battery life expectancy: %d%%\n", pi->battery_life); } aprint_normal_dev(sc->sc_dev, "A/C state: "); switch (pi->ac_state) { case APM_AC_OFF: printf("off\n"); break; case APM_AC_ON: printf("on\n"); break; case APM_AC_BACKUP: printf("backup power\n"); break; default: case APM_AC_UNKNOWN: printf("unknown\n"); break; } aprint_normal_dev(sc->sc_dev, "battery charge state:"); if (apm_minver == 0) switch (pi->battery_state) { case APM_BATT_HIGH: printf("high\n"); break; case APM_BATT_LOW: printf("low\n"); break; case APM_BATT_CRITICAL: printf("critical\n"); break; case APM_BATT_CHARGING: printf("charging\n"); break; case APM_BATT_UNKNOWN: printf("unknown\n"); break; default: printf("undecoded state %x\n", pi->battery_state); break; } else if (apm_minver >= 1) { if (pi->battery_flags & APM_BATT_FLAG_NO_SYSTEM_BATTERY) printf(" no battery"); else { if (pi->battery_flags & APM_BATT_FLAG_HIGH) printf(" high"); if (pi->battery_flags & APM_BATT_FLAG_LOW) printf(" low"); if (pi->battery_flags & APM_BATT_FLAG_CRITICAL) printf(" critical"); if (pi->battery_flags & APM_BATT_FLAG_CHARGING) printf(" charging"); } printf("\n"); if (pi->minutes_valid) { aprint_normal_dev(sc->sc_dev, "estimated "); if (pi->minutes_left / 60) printf("%dh ", pi->minutes_left / 60); printf("%dm\n", pi->minutes_left % 60); } } return; }
/*
 * url_attach --
 *	Attach a Realtek RTL8150 USB Ethernet adapter: configure the device,
 *	locate its bulk RX/TX and interrupt-in endpoints, read the MAC
 *	address from the chip, and attach the ifnet/MII/random-source
 *	machinery.  On any failure the device is marked dying via `bad:'.
 */
void
url_attach(device_t parent, device_t self, void *aux)
{
	struct url_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle dev = uaa->device;
	usbd_interface_handle iface;
	usbd_status err;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	char *devinfop;
	struct ifnet *ifp;
	struct mii_data *mii;
	u_char eaddr[ETHER_ADDR_LEN];
	int i, s;

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	/* Move the device into the configured state. */
	err = usbd_set_config_no(dev, URL_CONFIG_NO, 1);
	if (err) {
		aprint_error_dev(self, "failed to set configuration"
		    ", err=%s\n", usbd_errstr(err));
		goto bad;
	}

	/* Deferred-work hooks used by the periodic tick and stop paths. */
	usb_init_task(&sc->sc_tick_task, url_tick_task, sc, 0);
	rw_init(&sc->sc_mii_rwlock);
	usb_init_task(&sc->sc_stop_task, (void (*)(void *))url_stop_task,
	    sc, 0);

	/* get control interface */
	err = usbd_device2interface_handle(dev, URL_IFACE_INDEX, &iface);
	if (err) {
		aprint_error_dev(self, "failed to get interface, err=%s\n",
		    usbd_errstr(err));
		goto bad;
	}

	sc->sc_udev = dev;
	sc->sc_ctl_iface = iface;
	sc->sc_flags = url_lookup(uaa->vendor, uaa->product)->url_flags;

	/* get interface descriptor */
	id = usbd_get_interface_descriptor(sc->sc_ctl_iface);

	/* find endpoints: bulk-in (RX), bulk-out (TX), interrupt-in (status) */
	sc->sc_bulkin_no = sc->sc_bulkout_no = sc->sc_intrin_no = -1;
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(sc->sc_ctl_iface, i);
		if (ed == NULL) {
			aprint_error_dev(self,
			    "couldn't get endpoint %d\n", i);
			goto bad;
		}
		if ((ed->bmAttributes & UE_XFERTYPE) == UE_BULK &&
		    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
			sc->sc_bulkin_no = ed->bEndpointAddress; /* RX */
		else if ((ed->bmAttributes & UE_XFERTYPE) == UE_BULK &&
		    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT)
			sc->sc_bulkout_no = ed->bEndpointAddress; /* TX */
		else if ((ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT &&
		    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
			sc->sc_intrin_no = ed->bEndpointAddress; /* Status */
	}

	if (sc->sc_bulkin_no == -1 || sc->sc_bulkout_no == -1 ||
	    sc->sc_intrin_no == -1) {
		aprint_error_dev(self, "missing endpoint\n");
		goto bad;
	}

	/* Block network interrupts while wiring up the interface. */
	s = splnet();

	/* reset the adapter */
	url_reset(sc);

	/* Get Ethernet Address from the chip's ID registers. */
	err = url_mem(sc, URL_CMD_READMEM, URL_IDR0, (void *)eaddr,
	    ETHER_ADDR_LEN);
	if (err) {
		aprint_error_dev(self, "read MAC address failed\n");
		splx(s);
		goto bad;
	}

	/* Print Ethernet Address */
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	/* initialize interface information */
	ifp = GET_IFP(sc);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	strncpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = url_start;
	ifp->if_ioctl = url_ioctl;
	ifp->if_watchdog = url_watchdog;
	ifp->if_init = url_init;
	ifp->if_stop = url_stop;

	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do ifmedia setup.
	 */
	mii = &sc->sc_mii;
	mii->mii_ifp = ifp;
	mii->mii_readreg = url_int_miibus_readreg;
	mii->mii_writereg = url_int_miibus_writereg;
#if 0
	/* External PHY access is disabled; internal PHY is always used. */
	if (sc->sc_flags & URL_EXT_PHY) {
		mii->mii_readreg = url_ext_miibus_readreg;
		mii->mii_writereg = url_ext_miibus_writereg;
	}
#endif
	mii->mii_statchg = url_miibus_statchg;
	mii->mii_flags = MIIF_AUTOTSLEEP;
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, url_ifmedia_change,
	    url_ifmedia_status);
	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY found: offer only a "none" media option. */
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	/* attach the interface */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, 0);

	callout_init(&sc->sc_stat_ch, 0);
	sc->sc_attached = 1;
	splx(s);

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, dev, sc->sc_dev);

	return;

 bad:
	/* Leave the softc marked dying so later entry points bail out. */
	sc->sc_dying = 1;
	return;
}
/*
 * urio_attach --
 *	Attach a Diamond Rio USB MP3 player: select its configuration,
 *	grab the control interface, and record the addresses of the
 *	bulk-in and bulk-out endpoints used for transfers.
 */
void
urio_attach(device_t parent, device_t self, void *aux)
{
	struct urio_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle dev = uaa->device;
	usbd_interface_handle iface;
	usb_endpoint_descriptor_t *ed;
	char *devinfop;
	usbd_status err;
	u_int8_t epcount;
	int idx;

	DPRINTFN(10,("urio_attach: sc=%p\n", sc));

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	/* Announce the device on the console. */
	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	/* Select the configuration the device operates in. */
	err = usbd_set_config_no(dev, URIO_CONFIG_NO, 1);
	if (err != 0) {
		aprint_error_dev(self, "failed to set configuration"
		    ", err=%s\n", usbd_errstr(err));
		return;
	}

	/* Obtain a handle on the interface we will drive. */
	err = usbd_device2interface_handle(dev, URIO_IFACE_IDX, &iface);
	if (err != 0) {
		aprint_error_dev(self, "getting interface handle failed\n");
		return;
	}

	sc->sc_udev = dev;
	sc->sc_iface = iface;

	/* Walk the endpoint descriptors and pick out the bulk pair. */
	sc->sc_in_addr = -1;
	sc->sc_out_addr = -1;
	epcount = 0;
	(void)usbd_endpoint_count(iface, &epcount);

	for (idx = 0; idx < epcount; idx++) {
		ed = usbd_interface2endpoint_descriptor(iface, idx);
		if (ed == NULL) {
			aprint_error_dev(self, "couldn't get ep %d\n", idx);
			return;
		}
		if (UE_GET_XFERTYPE(ed->bmAttributes) != UE_BULK)
			continue;
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
			sc->sc_in_addr = ed->bEndpointAddress;
		else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT)
			sc->sc_out_addr = ed->bEndpointAddress;
	}

	/* Both directions are required for the device to be usable. */
	if (sc->sc_in_addr == -1 || sc->sc_out_addr == -1) {
		aprint_error_dev(self, "missing endpoint\n");
		return;
	}

	DPRINTFN(10, ("urio_attach: %p\n", sc->sc_udev));
	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

	return;
}
/*
 * rtsx_pci_attach --
 *	Attach a Realtek RTS52xx/RTL84xx PCIe SD card reader: verify the
 *	ASIC check bit, map the register BAR, establish the interrupt,
 *	enable bus mastering, power the device to D0, select the per-chip
 *	feature flags, and hand off to the MI rtsx_attach().
 */
static void
rtsx_pci_attach(device_t parent, device_t self, void *aux)
{
	struct rtsx_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t reg;
	char const *intrstr;
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	bus_size_t size;
	uint32_t flags;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc.sc_dev = self;
	sc->sc_pc = pc;

	pci_aprint_devinfo(pa, NULL);

	/* The driver only supports real ASIC parts (bit clear = ASIC). */
	if ((pci_conf_read(pc, tag, RTSX_CFG_PCI) & RTSX_CFG_ASIC) != 0) {
		aprint_error_dev(self, "no asic\n");
		return;
	}

	if (pci_mapreg_map(pa, RTSX_PCI_BAR, PCI_MAPREG_TYPE_MEM, 0,
	    &iot, &ioh, NULL, &size)) {
		aprint_error_dev(self, "couldn't map registers\n");
		return;
	}

	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, sc->sc_pihp[0], IPL_SDMMC,
	    rtsx_intr, &sc->sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* Enable the device */
	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);

	/* Power up the device */
	pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0);

	/* Map the PCI product ID to the chip-specific feature flags. */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_REALTEK_RTS5209:
		flags = RTSX_F_5209;
		break;
	case PCI_PRODUCT_REALTEK_RTS5227:
		flags = RTSX_F_5227;
		break;
	case PCI_PRODUCT_REALTEK_RTS5229:
		flags = RTSX_F_5229;
		break;
	case PCI_PRODUCT_REALTEK_RTL8402:
		flags = RTSX_F_8402;
		break;
	case PCI_PRODUCT_REALTEK_RTL8411:
		flags = RTSX_F_8411;
		break;
	case PCI_PRODUCT_REALTEK_RTL8411B:
		flags = RTSX_F_8411B;
		break;
	default:
		flags = 0;
		break;
	}

	/*
	 * NOTE(review): on this failure path the mapped BAR and the
	 * established interrupt are not released; confirm against the
	 * detach path whether that is acceptable here.
	 */
	if (rtsx_attach(&sc->sc, iot, ioh, size, pa->pa_dmat, flags) != 0) {
		aprint_error_dev(self, "couldn't initialize chip\n");
		return;
	}

	if (!pmf_device_register1(self, rtsx_suspend, rtsx_resume,
	    rtsx_shutdown))
		aprint_error_dev(self, "couldn't establish powerhook\n");
}
/*
 * ichsmb_attach --
 *	Attach the Intel ICH SMBus controller: check that the BIOS left the
 *	host controller enabled, map its I/O space, set up either SMI,
 *	PCI-interrupt, or polled operation, and attach the i2c bus on top.
 *	All exit paths register a (null) power handler via `out:'.
 */
static void
ichsmb_attach(device_t parent, device_t self, void *aux)
{
	struct ichsmb_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct i2cbus_attach_args iba;
	pcireg_t conf;
	bus_size_t iosize;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;

	pci_aprint_devinfo(pa, NULL);

	/* Read configuration */
	conf = pci_conf_read(pa->pa_pc, pa->pa_tag, LPCIB_SMB_HOSTC);
	DPRINTF(("%s: conf 0x%08x\n", device_xname(sc->sc_dev), conf));

	/* Bail if the BIOS did not enable the host controller. */
	if ((conf & LPCIB_SMB_HOSTC_HSTEN) == 0) {
		aprint_error_dev(self, "SMBus disabled\n");
		goto out;
	}

	/* Map I/O space */
	if (pci_mapreg_map(pa, LPCIB_SMB_BASE, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize)) {
		aprint_error_dev(self, "can't map I/O space\n");
		goto out;
	}

	/* Default to polled mode; cleared below if an IRQ is wired up. */
	sc->sc_poll = 1;
	if (conf & LPCIB_SMB_HOSTC_SMIEN) {
		/* No PCI IRQ */
		aprint_normal_dev(self, "interrupting at SMI\n");
	} else {
		/* Install interrupt handler */
		if (pci_intr_map(pa, &ih) == 0) {
			intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf,
			    sizeof(intrbuf));
			sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
			    ichsmb_intr, sc);
			if (sc->sc_ih != NULL) {
				aprint_normal_dev(self,
				    "interrupting at %s\n", intrstr);
				sc->sc_poll = 0;
			}
		}
		/* Fall back to polling if no interrupt could be set up. */
		if (sc->sc_poll)
			aprint_normal_dev(self, "polling\n");
	}

	/* Attach I2C bus */
	mutex_init(&sc->sc_i2c_mutex, MUTEX_DEFAULT, IPL_NONE);
	sc->sc_i2c_tag.ic_cookie = sc;
	sc->sc_i2c_tag.ic_acquire_bus = ichsmb_i2c_acquire_bus;
	sc->sc_i2c_tag.ic_release_bus = ichsmb_i2c_release_bus;
	sc->sc_i2c_tag.ic_exec = ichsmb_i2c_exec;

	memset(&iba, 0, sizeof(iba));
	iba.iba_type = I2C_TYPE_SMBUS;
	iba.iba_tag = &sc->sc_i2c_tag;
	config_found(self, &iba, iicbus_print);

	/* Register the power handler even when attach failed above. */
out:
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
/*
 * bcmusb_ccb_attach --
 *	Attach the Broadcom on-chip dual USB host block: carve the EHCI and
 *	OHCI register subregions out of the CCB mapping, bring the PHYs out
 *	of reset, quiesce both controllers' interrupts, attach the ohci and
 *	ehci child devices, and establish the shared interrupt handler.
 */
void
bcmusb_ccb_attach(device_t parent, device_t self, void *aux)
{
	struct bcmusb_softc * const usbsc = device_private(self);
	const struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;

	usbsc->usbsc_bst = ccbaa->ccbaa_ccb_bst;
	usbsc->usbsc_dmat = ccbaa->ccbaa_dmat;

	/* EHCI registers at loc_offset, OHCI at a fixed offset beyond. */
	bus_space_subregion(usbsc->usbsc_bst, ccbaa->ccbaa_ccb_bsh,
	    loc->loc_offset, 0x1000, &usbsc->usbsc_ehci_bsh);
	bus_space_subregion(usbsc->usbsc_bst, ccbaa->ccbaa_ccb_bsh,
	    loc->loc_offset + OHCI_OFFSET, 0x1000, &usbsc->usbsc_ohci_bsh);

	/*
	 * Bring the PHYs out of reset.
	 */
	bus_space_write_4(usbsc->usbsc_bst, usbsc->usbsc_ehci_bsh,
	    USBH_PHY_CTRL_P0, USBH_PHY_CTRL_INIT);
	bus_space_write_4(usbsc->usbsc_bst, usbsc->usbsc_ehci_bsh,
	    USBH_PHY_CTRL_P1, USBH_PHY_CTRL_INIT);

	/*
	 * Disable interrupts on both controllers before children attach.
	 * The EHCI operational registers start CAPLENGTH bytes in.
	 */
	bus_space_write_4(usbsc->usbsc_bst, usbsc->usbsc_ohci_bsh,
	    OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
	bus_size_t caplength = bus_space_read_1(usbsc->usbsc_bst,
	    usbsc->usbsc_ehci_bsh, EHCI_CAPLENGTH);
	bus_space_write_4(usbsc->usbsc_bst, usbsc->usbsc_ehci_bsh,
	    caplength + EHCI_USBINTR, 0);

	aprint_naive("\n");
	aprint_normal("\n");

	/* Attach the OHCI (companion) controller first. */
	struct bcmusb_attach_args usbaa_ohci = {
		.usbaa_name = "ohci",
		.usbaa_dmat = usbsc->usbsc_dmat,
		.usbaa_bst = usbsc->usbsc_bst,
		.usbaa_bsh = usbsc->usbsc_ohci_bsh,
		.usbaa_size = 0x100,
	};

	usbsc->usbsc_ohci_dev = config_found(self, &usbaa_ohci, NULL);
	if (usbsc->usbsc_ohci_dev != NULL)
		usbsc->usbsc_ohci_sc = device_private(usbsc->usbsc_ohci_dev);

	struct bcmusb_attach_args usbaa_ehci = {
		.usbaa_name = "ehci",
		.usbaa_dmat = usbsc->usbsc_dmat,
		.usbaa_bst = usbsc->usbsc_bst,
		.usbaa_bsh = usbsc->usbsc_ehci_bsh,
		.usbaa_size = 0x100,
	};

	usbsc->usbsc_ehci_dev = config_found(self, &usbaa_ehci, NULL);
	if (usbsc->usbsc_ehci_dev != NULL)
		usbsc->usbsc_ehci_sc = device_private(usbsc->usbsc_ehci_dev);

	/* One shared level-triggered interrupt serves both controllers. */
	usbsc->usbsc_ih = intr_establish(loc->loc_intrs[0], IPL_USB,
	    IST_LEVEL, bcmusb_intr, usbsc);
	if (usbsc->usbsc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt %d\n",
		    loc->loc_intrs[0]);
		return;
	}
	aprint_normal_dev(self, "interrupting on irq %d\n",
	    loc->loc_intrs[0]);
}
/*
 * spdmem_attach --
 *	Attach an SPD (Serial Presence Detect) EEPROM found on a memory
 *	module: read the SPD contents over i2c, expose them via a
 *	hw.spdmemN sysctl subtree, and decode/print the module type, size,
 *	speed rating, geometry, latencies, and (pre-DDR3) voltage/refresh.
 *
 *	Fix: the DEBUG hex dump computed its row limit with the ternary
 *	arms swapped (`? spd_len : i + 16'), i.e. the MAX of the two, so it
 *	printed bytes past spd_len (reading beyond the data actually read
 *	from the EEPROM, and past the struct on the last row).  The limit
 *	is now min(i + 16, spd_len).
 */
static void
spdmem_attach(device_t parent, device_t self, void *aux)
{
	struct spdmem_softc *sc = device_private(self);
	struct i2c_attach_args *ia = aux;
	struct spdmem *s = &(sc->sc_spd_data);
	const char *type;
	const char *voltage;
	const char *refresh;
	const char *ddr_type_string = NULL;
	const char *rambus_rev = "Reserved";
	int num_banks = 0;
	int per_chip = 0;
	int dimm_size, cycle_time, d_clk, p_clk, bits;
	int i;
	unsigned int spd_len, spd_size;
	unsigned int tAA, tRCD, tRP, tRAS;
	const struct sysctlnode *node = NULL;

	sc->sc_tag = ia->ia_tag;
	sc->sc_addr = ia->ia_addr;

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * FBDIMM and DDR3 (and probably all newer) have a different
	 * encoding of the SPD EEPROM used/total sizes
	 */
	s->sm_len = spdmem_read(sc, 0);
	s->sm_size = spdmem_read(sc, 1);
	s->sm_type = spdmem_read(sc, 2);

	if (s->sm_type >= SPDMEM_MEMTYPE_FBDIMM) {
		spd_size = 64 << (s->sm_len & SPDMEM_SPDSIZE_MASK);
		switch (s->sm_len & SPDMEM_SPDLEN_MASK) {
		case SPDMEM_SPDLEN_128:
			spd_len = 128;
			break;
		case SPDMEM_SPDLEN_176:
			spd_len = 176;
			break;
		case SPDMEM_SPDLEN_256:
			spd_len = 256;
			break;
		default:
			spd_len = 64;
			break;
		}
	} else {
		spd_size = 1 << s->sm_size;
		spd_len = s->sm_len;
		if (spd_len < 64)
			spd_len = 64;
	}
	/* Clamp to both the EEPROM size and our in-core structure. */
	if (spd_len > spd_size)
		spd_len = spd_size;
	if (spd_len > sizeof(struct spdmem))
		spd_len = sizeof(struct spdmem);
	/* Bytes 0-2 were read above; fetch the rest of the SPD image. */
	for (i = 3; i < spd_len; i++)
		((uint8_t *)s)[i] = spdmem_read(sc, i);

#ifdef DEBUG
	/* Hex-dump the SPD image, 16 bytes per row. */
	for (i = 0; i < spd_len; i += 16) {
		int j, k;
		aprint_debug("\n");
		aprint_debug_dev(self, "0x%02x:", i);
		/* Row limit is min(i + 16, spd_len) — do not run past
		 * the bytes actually read. */
		k = (spd_len > i + 16) ? i + 16 : spd_len;
		for (j = i; j < k; j++)
			aprint_debug(" %02x", ((uint8_t *)s)[j]);
	}
	aprint_debug("\n");
	aprint_debug_dev(self, "");
#endif

	/*
	 * Setup our sysctl subtree, hw.spdmemN
	 */
	if (hw_node != CTL_EOL)
		sysctl_createv(NULL, 0, NULL, &node,
		    0, CTLTYPE_NODE,
		    device_xname(self), NULL, NULL, 0, NULL, 0,
		    CTL_HW, CTL_CREATE, CTL_EOL);
	if (node != NULL && spd_len != 0)
		sysctl_createv(NULL, 0, NULL, NULL,
		    0, CTLTYPE_STRUCT, "spd_data",
		    SYSCTL_DESCR("raw spd data"), NULL,
		    0, s, spd_len,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);

	/*
	 * Decode and print key SPD contents
	 */
	if (IS_RAMBUS_TYPE) {
		if (s->sm_type == SPDMEM_MEMTYPE_RAMBUS)
			type = "Rambus";
		else if (s->sm_type == SPDMEM_MEMTYPE_DIRECTRAMBUS)
			type = "Direct Rambus";
		else
			type = "Rambus (unknown)";

		switch (s->sm_len) {
		case 0:
			rambus_rev = "Invalid";
			break;
		case 1:
			rambus_rev = "0.7";
			break;
		case 2:
			rambus_rev = "1.0";
			break;
		default:
			rambus_rev = "Reserved";
			break;
		}
	} else {
		if (s->sm_type < __arraycount(spdmem_basic_types))
			type = spdmem_basic_types[s->sm_type];
		else
			type = "unknown memory type";

		/* Refine the name for "superset" variants of older types. */
		if (s->sm_type == SPDMEM_MEMTYPE_EDO &&
		    s->sm_fpm.fpm_superset == SPDMEM_SUPERSET_EDO_PEM)
			type = spdmem_superset_types[SPDMEM_SUPERSET_EDO_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_SDRAM_PEM)
			type =
			    spdmem_superset_types[SPDMEM_SUPERSET_SDRAM_PEM];
		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM &&
		    s->sm_ddr.ddr_superset == SPDMEM_SUPERSET_DDR_ESDRAM)
			type =
			    spdmem_superset_types[SPDMEM_SUPERSET_DDR_ESDRAM];
		if (s->sm_type == SPDMEM_MEMTYPE_SDRAM &&
		    s->sm_sdr.sdr_superset == SPDMEM_SUPERSET_ESDRAM) {
			type = spdmem_superset_types[SPDMEM_SUPERSET_ESDRAM];
		}
	}

	aprint_normal("\n");
	aprint_normal_dev(self, "%s memory", type);
	strlcpy(sc->sc_type, type, SPDMEM_TYPE_MAXLEN);
	if (node != NULL)
		sysctl_createv(NULL, 0, NULL, NULL,
		    0, CTLTYPE_STRING, "mem_type",
		    SYSCTL_DESCR("memory module type"), NULL,
		    0, sc->sc_type, 0,
		    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
	if (IS_RAMBUS_TYPE)
		aprint_normal(", SPD Revision %s", rambus_rev);
	else if (s->sm_config < __arraycount(spdmem_parity_types) &&
	    (s->sm_type == SPDMEM_MEMTYPE_SDRAM ||
	     s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM ||
	     s->sm_type == SPDMEM_MEMTYPE_DDR2SDRAM))
		aprint_normal(", %s", spdmem_parity_types[s->sm_config]);
	else if (s->sm_type == SPDMEM_MEMTYPE_DDR3SDRAM)
		aprint_normal(", %sECC", s->sm_ddr3.ddr3_hasECC?"":"no ");

	/* Extract module size info */
	dimm_size = 0;
	if (IS_RAMBUS_TYPE) {
		dimm_size = s->sm_rdr.rdr_rows + s->sm_rdr.rdr_cols - 13;
		num_banks = 1;
		per_chip = 1;
	} else if (s->sm_type == SPDMEM_MEMTYPE_SDRAM) {
		dimm_size = s->sm_sdr.sdr_rows + s->sm_sdr.sdr_cols - 17;
		num_banks = s->sm_sdr.sdr_banks;
		per_chip = s->sm_sdr.sdr_banks_per_chip;
	} else if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM) {
		dimm_size = s->sm_ddr.ddr_rows + s->sm_ddr.ddr_cols - 17;
		num_banks = s->sm_ddr.ddr_ranks;
		per_chip = s->sm_ddr.ddr_banks_per_chip;
	} else if (s->sm_type == SPDMEM_MEMTYPE_DDR2SDRAM) {
		dimm_size = s->sm_ddr2.ddr2_rows + s->sm_ddr2.ddr2_cols - 17;
		num_banks = s->sm_ddr2.ddr2_ranks + 1;
		per_chip = s->sm_ddr2.ddr2_banks_per_chip;
	} else if (s->sm_type == SPDMEM_MEMTYPE_DDR3SDRAM) {
		/*
		 * DDR3 size specification is quite different from DDR2
		 *
		 * Module capacity is defined as
		 *	Chip_Capacity_in_bits / 8bits-per-byte *
		 *	external_bus_width / internal_bus_width
		 * We further divide by 2**20 to get our answer in MB
		 */
		dimm_size = (s->sm_ddr3.ddr3_chipsize + 28 - 20) - 3 +
		    (s->sm_ddr3.ddr3_datawidth + 3) -
		    (s->sm_ddr3.ddr3_chipwidth + 2);
		num_banks = s->sm_ddr3.ddr3_physbanks;
		per_chip = 1;
	} else if (s->sm_type == SPDMEM_MEMTYPE_FBDIMM ||
	    s->sm_type == SPDMEM_MEMTYPE_FBDIMM_PROBE) {
		/*
		 * FB-DIMM is very much like DDR3
		 */
		dimm_size = s->sm_fbd.fbdimm_rows + 12 +
		    s->sm_fbd.fbdimm_cols + 9 - 20 - 3;
		num_banks = 1 << (s->sm_fbd.fbdimm_banks + 2);
		per_chip = 1;
	}
	/* Sanity-check before shifting: dimm_size is a log2 at this point. */
	if (IS_RAMBUS_TYPE ||
	    (num_banks <= 8 && per_chip <= 8 && dimm_size > 0 &&
	     dimm_size <= 12)) {
		dimm_size = (1 << dimm_size) * num_banks * per_chip;
		aprint_normal(", %dMB", dimm_size);
		if (node != NULL)
			sysctl_createv(NULL, 0, NULL, NULL,
			    CTLFLAG_IMMEDIATE,
			    CTLTYPE_INT, "size",
			    SYSCTL_DESCR("module size in MB"), NULL,
			    dimm_size, NULL, 0,
			    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
	}

	/* Nothing further for RAMBUS memory */
	if (IS_RAMBUS_TYPE) {
		aprint_normal("\n");
		return;
	}

	cycle_time = 0;		/* cycle_time in units of 0.001 ns */
	tAA = tRCD = tRP = tRAS = 0;	/* Initialize latency values */
	if (s->sm_type == SPDMEM_MEMTYPE_SDRAM) {
		cycle_time = s->sm_sdr.sdr_cycle_whole * 1000 +
		    s->sm_sdr.sdr_cycle_tenths * 100;
		tRCD = s->sm_sdr.sdr_tRCD;
		tRP = s->sm_sdr.sdr_tRP;
		tRAS = s->sm_sdr.sdr_tRAS;
		/* tCAS is a bitmask of supported CL values; take highest. */
		tAA = 0;
		for (i = 0; i < 8; i++)
			if (s->sm_sdr.sdr_tCAS & (1 << i))
				tAA = i;
		tAA++;
	} else if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM ||
	    s->sm_type == SPDMEM_MEMTYPE_DDR2SDRAM) {
		cycle_time = s->sm_ddr2.ddr2_cycle_whole * 1000 +
		    spdmem_cycle_frac[s->sm_ddr2.ddr2_cycle_frac];
		/* Convert quarter-/full-ns fields to cycles, rounding up. */
		tRCD = ( 250 * s->sm_ddr2.ddr2_tRCD + cycle_time - 1) /
		    cycle_time;
		tRP = ( 250 * s->sm_ddr2.ddr2_tRP + cycle_time - 1) /
		    cycle_time;
		tRAS = (1000 * s->sm_ddr2.ddr2_tRAS + cycle_time - 1) /
		    cycle_time;
		tAA = 0;
		for (i = 2; i < 8; i++)
			if (s->sm_ddr2.ddr2_tCAS & (1 << i))
				tAA = i;

		/* DDR_SDRAM measures tAA in half-cycles */
		if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM)
			tAA /= 2;
	} else if (s->sm_type == SPDMEM_MEMTYPE_DDR3SDRAM) {
		/* Timings are multiples of the medium timebase (MTB). */
		cycle_time = (1000 * s->sm_ddr3.ddr3_mtb_dividend +
		    (s->sm_ddr3.ddr3_mtb_divisor / 2)) /
		    s->sm_ddr3.ddr3_mtb_divisor;
		cycle_time *= s->sm_ddr3.ddr3_tCKmin;
		tAA = s->sm_ddr3.ddr3_tAAmin / s->sm_ddr3.ddr3_tCKmin;
		tRCD = s->sm_ddr3.ddr3_tRCDmin / s->sm_ddr3.ddr3_tCKmin;
		tRP = s->sm_ddr3.ddr3_tRPmin / s->sm_ddr3.ddr3_tCKmin;
		tRAS = (s->sm_ddr3.ddr3_tRAS_msb * 256 +
		    s->sm_ddr3.ddr3_tRAS_lsb) / s->sm_ddr3.ddr3_tCKmin;
	} else if (s->sm_type == SPDMEM_MEMTYPE_FBDIMM ||
	    s->sm_type == SPDMEM_MEMTYPE_FBDIMM_PROBE) {
		cycle_time = (1000 * s->sm_fbd.fbdimm_mtb_dividend +
		    (s->sm_fbd.fbdimm_mtb_divisor / 2)) /
		    s->sm_fbd.fbdimm_mtb_divisor;
		tAA = s->sm_fbd.fbdimm_tAAmin / s->sm_fbd.fbdimm_tCKmin;
		tRCD = s->sm_fbd.fbdimm_tRCDmin / s->sm_fbd.fbdimm_tCKmin;
		tRP = s->sm_fbd.fbdimm_tRPmin / s->sm_fbd.fbdimm_tCKmin;
		tRAS = (s->sm_fbd.fbdimm_tRAS_msb * 256 +
		    s->sm_fbd.fbdimm_tRAS_lsb) / s->sm_fbd.fbdimm_tCKmin;
	}

	if (cycle_time != 0) {
		/*
		 * cycle time is scaled by a factor of 1000 to avoid using
		 * floating point.  Calculate memory speed as the number
		 * of cycles per microsecond.
		 */
		d_clk = 1000 * 1000;
		if (s->sm_type == SPDMEM_MEMTYPE_FBDIMM ||
		    s->sm_type == SPDMEM_MEMTYPE_FBDIMM_PROBE) {
			/* DDR2 FB-DIMM uses a dual-pumped clock */
			d_clk *= 2;
			bits = 1 << (s->sm_fbd.fbdimm_dev_width + 2);
			ddr_type_string = "PC2";
		} else if (s->sm_type == SPDMEM_MEMTYPE_DDR3SDRAM) {
			/* DDR3 uses a dual-pumped clock */
			d_clk *= 2;
			bits = 1 << (s->sm_ddr3.ddr3_datawidth + 3);
			ddr_type_string = "PC3";
		} else if (s->sm_type == SPDMEM_MEMTYPE_DDR2SDRAM) {
			/* DDR2 uses a dual-pumped clock */
			d_clk *= 2;
			bits = s->sm_ddr2.ddr2_datawidth;
			/* Exclude ECC width from the data-bus width. */
			if ((s->sm_config & 0x03) != 0)
				bits -= 8;
			ddr_type_string = "PC2";
		} else if (s->sm_type == SPDMEM_MEMTYPE_DDRSDRAM) {
			/* DDR uses a dual-pumped clock */
			d_clk *= 2;
			bits = le16toh(s->sm_ddr.ddr_datawidth);
			if (s->sm_config == 1 || s->sm_config == 2)
				bits -= 8;
			ddr_type_string = "PC";
		} else {	/* SPDMEM_MEMTYPE_SDRAM */
			bits = le16toh(s->sm_sdr.sdr_datawidth);
			if (s->sm_config == 1 || s->sm_config == 2)
				bits -= 8;
			ddr_type_string = "PC";
		}
		/*
		 * Calculate p_clk first, since for DDR3 we need maximum
		 * significance.  DDR3 rating is not rounded to a multiple
		 * of 100.  This results in cycle_time of 1.5ns displayed
		 * as PC3-10666.
		 */
		p_clk = (d_clk * bits) / 8 / cycle_time;
		d_clk = ((d_clk + cycle_time / 2) ) / cycle_time;
		if ( s->sm_type != SPDMEM_MEMTYPE_DDR3SDRAM) {
			/* Round the bandwidth rating to a multiple of 100. */
			if ((p_clk % 100) >= 50)
				p_clk += 50;
			p_clk -= p_clk % 100;
		}
		aprint_normal(", %dMHz (%s-%d)\n",
		    d_clk, ddr_type_string, p_clk);
		if (node != NULL)
			sysctl_createv(NULL, 0, NULL, NULL,
			    CTLFLAG_IMMEDIATE,
			    CTLTYPE_INT, "speed",
			    SYSCTL_DESCR("memory speed in MHz"),
			    NULL, d_clk, NULL, 0,
			    CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
	}

	/* Per-type geometry and latency details (verbose boot only). */
	aprint_verbose_dev(self, "");
	switch (s->sm_type) {
	case SPDMEM_MEMTYPE_EDO:
	case SPDMEM_MEMTYPE_FPM:
		aprint_verbose(
		    "%d rows, %d cols, %d banks, %dns tRAC, %dns tCAC\n",
		    s->sm_fpm.fpm_rows, s->sm_fpm.fpm_cols,
		    s->sm_fpm.fpm_banks, s->sm_fpm.fpm_tRAC,
		    s->sm_fpm.fpm_tCAC);
		break;
	case SPDMEM_MEMTYPE_ROM:
		aprint_verbose("%d rows, %d cols, %d banks\n",
		    s->sm_rom.rom_rows, s->sm_rom.rom_cols,
		    s->sm_rom.rom_banks);
		break;
	case SPDMEM_MEMTYPE_SDRAM:
		aprint_verbose(
		    "%d rows, %d cols, %d banks, %d banks/chip, "
		    "%d.%dns cycle time\n",
		    s->sm_sdr.sdr_rows, s->sm_sdr.sdr_cols,
		    s->sm_sdr.sdr_banks, s->sm_sdr.sdr_banks_per_chip,
		    cycle_time/1000, (cycle_time % 1000) / 100);
		aprint_verbose_dev(self, latency, tAA, tRCD, tRP, tRAS);
		break;
	case SPDMEM_MEMTYPE_DDRSDRAM:
		aprint_verbose(
		    "%d rows, %d cols, %d ranks, %d banks/chip, "
		    "%d.%dns cycle time\n",
		    s->sm_ddr.ddr_rows, s->sm_ddr.ddr_cols,
		    s->sm_ddr.ddr_ranks, s->sm_ddr.ddr_banks_per_chip,
		    cycle_time/1000, (cycle_time % 1000 + 50) / 100);
		aprint_verbose_dev(self, latency, tAA, tRCD, tRP, tRAS);
		break;
	case SPDMEM_MEMTYPE_DDR2SDRAM:
		aprint_verbose(
		    "%d rows, %d cols, %d ranks, %d banks/chip, "
		    "%d.%02dns cycle time\n",
		    s->sm_ddr2.ddr2_rows, s->sm_ddr2.ddr2_cols,
		    s->sm_ddr2.ddr2_ranks + 1,
		    s->sm_ddr2.ddr2_banks_per_chip, cycle_time / 1000,
		    (cycle_time % 1000 + 5) /10 );
		aprint_verbose_dev(self, latency, tAA, tRCD, tRP, tRAS);
		break;
	case SPDMEM_MEMTYPE_DDR3SDRAM:
		aprint_verbose(
		    "%d rows, %d cols, %d internal banks, "
		    "%d physical banks, "
		    "%d.%03dns cycle time\n",
		    s->sm_ddr3.ddr3_rows + 9, s->sm_ddr3.ddr3_cols + 12,
		    8 << s->sm_ddr3.ddr3_logbanks,
		    s->sm_ddr3.ddr3_physbanks,
		    cycle_time/1000, cycle_time % 1000);
		aprint_verbose_dev(self, latency, tAA, tRCD, tRP, tRAS);
		break;
	default:
		break;
	}

	/* Voltage/refresh bytes only exist on pre-DDR3 SPD layouts. */
	if (s->sm_type < SPDMEM_MEMTYPE_DDR3SDRAM) {
		if (s->sm_voltage < __arraycount(spdmem_voltage_types))
			voltage = spdmem_voltage_types[s->sm_voltage];
		else
			voltage = "unknown";
		if (s->sm_refresh < __arraycount(spdmem_refresh_types))
			refresh = spdmem_refresh_types[s->sm_refresh];
		else
			refresh = "unknown";
		aprint_verbose_dev(self, "voltage %s, refresh time %s",
		    voltage, refresh);
		if (s->sm_selfrefresh)
			aprint_verbose(" (self-refreshing)");
		aprint_verbose("\n");
	}
}
/*
 * brgphyattach --
 *	Attach a Broadcom BCM54xx-family PHY: fill in the mii_softc,
 *	fetch bge/bnx-specific device properties (phyflags, chipid),
 *	reset the PHY, read its capabilities, and add the supported
 *	media, including optional 2.5G fiber media on bnx controllers.
 */
static void
brgphyattach(device_t parent, device_t self, void *aux)
{
	struct brgphy_softc *bsc = device_private(self);
	struct mii_softc *sc = &bsc->sc_mii;
	struct mii_attach_args *ma = aux;
	struct mii_data *mii = ma->mii_data;
	const struct mii_phydesc *mpd;
	prop_dictionary_t dict;

	/*
	 * NOTE(review): mpd is dereferenced without a NULL check —
	 * presumably the probe already guaranteed a table match; confirm
	 * against the match routine.
	 */
	mpd = mii_phy_match(ma, brgphys);
	aprint_naive(": Media interface\n");
	aprint_normal(": %s, rev. %d\n", mpd->mpd_name,
	    MII_REV(ma->mii_id2));

	sc->mii_dev = self;
	sc->mii_inst = mii->mii_instance;
	sc->mii_phy = ma->mii_phyno;
	sc->mii_mpd_oui = MII_OUI(ma->mii_id1, ma->mii_id2);
	sc->mii_mpd_model = MII_MODEL(ma->mii_id2);
	sc->mii_mpd_rev = MII_REV(ma->mii_id2);
	sc->mii_pdata = mii;
	sc->mii_flags = ma->mii_flags;
	sc->mii_anegticks = MII_ANEGTICKS;

	sc->mii_funcs = &brgphy_funcs;

	/* Remember which MAC driver we sit under; it affects workarounds. */
	if (device_is_a(parent, "bge"))
		bsc->sc_isbge = true;
	else if (device_is_a(parent, "bnx"))
		bsc->sc_isbnx = true;

	if (bsc->sc_isbge || bsc->sc_isbnx) {
		/* The MAC driver passes chip details via device properties. */
		dict = device_properties(parent);
		if (!prop_dictionary_get_uint32(dict, "phyflags",
		    &bsc->sc_phyflags))
			aprint_error_dev(self, "failed to get phyflags\n");
		if (!prop_dictionary_get_uint32(dict, "chipid",
		    &bsc->sc_chipid))
			aprint_error_dev(self, "failed to get chipid\n");
	}

	PHY_RESET(sc);

	sc->mii_capabilities = PHY_READ(sc, MII_BMSR) & ma->mii_capmask;
	if (sc->mii_capabilities & BMSR_EXTSTAT)
		sc->mii_extcapabilities = PHY_READ(sc, MII_EXTSR);
	aprint_normal_dev(self, "");
	if ((sc->mii_capabilities & BMSR_MEDIAMASK) == 0 &&
	    (sc->mii_extcapabilities & EXTSR_MEDIAMASK) == 0)
		aprint_error("no media present");
	else {
		if (sc->mii_flags & MIIF_HAVEFIBER) {
			sc->mii_flags |= MIIF_NOISOLATE | MIIF_NOLOOP;
			/*
			 * Set the proper bits for capabilities so that the
			 * correct media get selected by mii_phy_add_media()
			 */
			sc->mii_capabilities |= BMSR_ANEG;
			sc->mii_capabilities &= ~BMSR_100T4;
			sc->mii_extcapabilities |= EXTSR_1000XFDX;

			if (bsc->sc_isbnx) {
				/*
				 * 2.5Gb support is a software enabled feature
				 * on the BCM5708S and BCM5709S controllers.
				 */
#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL)
				if (bsc->sc_phyflags &
				    BNX_PHY_2_5G_CAPABLE_FLAG) {
					ADD(IFM_MAKEWORD(IFM_ETHER,
					    IFM_2500_SX, IFM_FDX,
					    sc->mii_inst), 0);
					aprint_normal("2500baseSX-FDX, ");
#undef ADD
				}
			}
		}
		mii_phy_add_media(sc);
	}
	aprint_normal("\n");
}
/*
 * Map and configure an ALi/Acer Labs IDE controller.
 *
 * Probes DMA capability by chip revision, applies revision-specific
 * errata workarounds (48-bit DMA bug, reset bug, cable detection) and
 * sets up both IDE channels.  Register writes below are order-sensitive.
 */
static void
acer_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	struct aceride_softc *acer_sc = (struct aceride_softc *)sc;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		if (rev >= 0x20) {
			/* UDMA mode ceiling depends on chip revision. */
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			if (rev >= 0xC7)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			else if (rev >= 0xC4)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			else if (rev >= 0xC2)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			else
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
		}
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (rev <= 0xc4) {
			/* Errata: 48-bit LBA DMA is broken on old revs. */
			sc->sc_wdcdev.dma_init = acer_dma_init;
			aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "using PIO transfers above 137GB as workaround for "
			    "48bit DMA access bug, expect reduced performance\n");
		}
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = acer_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	/* Enable CD-ROM DMA, keep the FIFO enabled. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
	    ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) |
	    ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
	{
		/*
		 * some BIOSes (port-cats ABLE) enable native mode, but don't
		 * setup everything correctly, so allow the forcing of
		 * compat mode
		 */
		bool force_compat_mode;
		bool property_is_set;
		property_is_set = prop_dictionary_get_bool(
		    device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
		    "ali1543-ide-force-compat-mode", &force_compat_mode);
		if (property_is_set && force_compat_mode) {
			cr &= ~((PCIIDE_INTERFACE_PCI(0) |
			    PCIIDE_INTERFACE_PCI(1)) << PCI_INTERFACE_SHIFT);
		}
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	/* From linux: enable "Cable Detection" */
	if (rev >= 0xC2) {
		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) |
		    ACER_0x4B_CDETECT);
	}

	wdc_allocate_regs(&sc->sc_wdcdev);
	if (rev == 0xC3) {
		/* install reset bug workaround */
		if (pci_find_device(&acer_sc->pcib_pa, acer_pcib_match) == 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "WARNING: can't find pci-isa bridge\n");
		} else
			sc->sc_wdcdev.reset = acer_do_reset;
	}

	/* Attach each enabled channel. */
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	    channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		/* newer controllers seems to lack the ACER_CHIDS. Sigh */
		pciide_mapchan(pa, cp, interface,
		    (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
	}
}
/*
 * Channel initialization/deinitialization per user device.
 *
 * Allocate DMAC channel `ch' for the named user device: record the
 * caller's normal/error interrupt vectors and handlers, program the
 * channel's device registers with default transfer parameters, and
 * hook the two interrupt vectors.  Panics (under DIAGNOSTIC) on an
 * invalid or already-claimed channel.  Returns the channel status
 * structure, owned by the DMAC driver.
 */
struct dmac_channel_stat *
dmac_alloc_channel(device_t self, int ch, const char *name, int normalv,
    dmac_intr_handler_t normal, void *normalarg, int errorv,
    dmac_intr_handler_t error, void *errorarg)
{
	struct intio_softc *intio = device_private(self);
	struct dmac_softc *sc = device_private(intio->sc_dmac);
	struct dmac_channel_stat *chan = &sc->sc_channels[ch];
#ifdef DMAC_ARRAYCHAIN
	int r, dummy;
#endif

	aprint_normal_dev(sc->sc_dev, "allocating ch %d for %s.\n", ch, name);
	DPRINTF(3, ("dmamap=%p\n", (void *)chan->ch_xfer.dx_dmamap));
#ifdef DIAGNOSTIC
	if (ch < 0 || ch >= DMAC_NCHAN)
		panic("Invalid DMAC channel.");
	if (chan->ch_name[0])
		panic("DMAC: channel in use.");
	/*
	 * NOTE(review): this check still allows an 8-character name;
	 * the strcpy() below then writes 9 bytes (incl. NUL) — verify
	 * ch_name has room for that, or the check should be >= 8.
	 */
	if (strlen(name) > 8)
		panic("DMAC: wrong user name.");
#endif

#ifdef DMAC_ARRAYCHAIN
	/* allocate the DMAC arraychaining map */
	r = bus_dmamem_alloc(intio->sc_dmat,
	    sizeof(struct dmac_sg_array) * DMAC_MAPSIZE, 4, 0,
	    &chan->ch_seg[0], 1, &dummy, BUS_DMA_NOWAIT);
	if (r)
		panic("DMAC: cannot alloc DMA safe memory");
	r = bus_dmamem_map(intio->sc_dmat, &chan->ch_seg[0], 1,
	    sizeof(struct dmac_sg_array) * DMAC_MAPSIZE,
	    (void **) &chan->ch_map, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (r)
		panic("DMAC: cannot map DMA safe memory");
#endif

	/* fill the channel status structure by the default values. */
	strcpy(chan->ch_name, name);
	chan->ch_dcr = (DMAC_DCR_XRM_CSWH | DMAC_DCR_OTYP_EASYNC |
	    DMAC_DCR_OPS_8BIT);
	chan->ch_ocr = (DMAC_OCR_SIZE_BYTE | DMAC_OCR_REQG_EXTERNAL);
	chan->ch_normalv = normalv;
	chan->ch_errorv = errorv;
	chan->ch_normal = normal;
	chan->ch_error = error;
	chan->ch_normalarg = normalarg;
	chan->ch_errorarg = errorarg;
	chan->ch_xfer.dx_dmamap = 0;

	/* setup the device-specific registers */
	bus_space_write_1(sc->sc_bst, chan->ch_bht, DMAC_REG_CSR, 0xff);
	bus_space_write_1(sc->sc_bst, chan->ch_bht,
	    DMAC_REG_DCR, chan->ch_dcr);
	bus_space_write_1(sc->sc_bst, chan->ch_bht, DMAC_REG_CPR, 0);

	/*
	 * X68k physical user space is a subset of the kernel space;
	 * the memory is always included in the physical user space,
	 * while the device is not.
	 */
	bus_space_write_1(sc->sc_bst, chan->ch_bht,
	    DMAC_REG_BFCR, DMAC_FC_USER_DATA);
	bus_space_write_1(sc->sc_bst, chan->ch_bht,
	    DMAC_REG_MFCR, DMAC_FC_USER_DATA);
	bus_space_write_1(sc->sc_bst, chan->ch_bht,
	    DMAC_REG_DFCR, DMAC_FC_KERNEL_DATA);

	/* setup the interrupt handlers */
	bus_space_write_1(sc->sc_bst, chan->ch_bht, DMAC_REG_NIVR, normalv);
	bus_space_write_1(sc->sc_bst, chan->ch_bht, DMAC_REG_EIVR, errorv);

	intio_intr_establish_ext(normalv, name, "dma", dmac_done, chan);
	intio_intr_establish_ext(errorv, name, "dmaerr", dmac_error, chan);

	return chan;
}
static void udsir_attach(device_t parent, device_t self, void *aux) { struct udsir_softc *sc = device_private(self); struct usbif_attach_arg *uaa = aux; usbd_device_handle dev = uaa->device; usbd_interface_handle iface = uaa->iface; char *devinfop; usb_endpoint_descriptor_t *ed; uint8_t epcount; int i; struct ir_attach_args ia; DPRINTFN(10, ("udsir_attach: sc=%p\n", sc)); sc->sc_dev = self; aprint_naive("\n"); aprint_normal("\n"); devinfop = usbd_devinfo_alloc(dev, 0); aprint_normal_dev(self, "%s\n", devinfop); usbd_devinfo_free(devinfop); sc->sc_udev = dev; sc->sc_iface = iface; epcount = 0; (void)usbd_endpoint_count(iface, &epcount); sc->sc_rd_addr = -1; sc->sc_wr_addr = -1; for (i = 0; i < epcount; i++) { ed = usbd_interface2endpoint_descriptor(iface, i); if (ed == NULL) { aprint_error_dev(self, "couldn't get ep %d\n", i); return; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->sc_rd_addr = ed->bEndpointAddress; sc->sc_rd_maxpsz = UGETW(ed->wMaxPacketSize); } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->sc_wr_addr = ed->bEndpointAddress; sc->sc_wr_maxpsz = UGETW(ed->wMaxPacketSize); } } if (sc->sc_rd_addr == -1 || sc->sc_wr_addr == -1) { aprint_error_dev(self, "missing endpoint\n"); return; } DPRINTFN(10, ("udsir_attach: %p\n", sc->sc_udev)); usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev); ia.ia_type = IR_TYPE_IRFRAME; ia.ia_methods = &udsir_methods; ia.ia_handle = sc; sc->sc_child = config_found(self, &ia, ir_print); selinit(&sc->sc_rd_sel); selinit(&sc->sc_wr_sel); return; }
/*
 * Attach the DECchip 2117x (CIA/Pyxis) core-logic chipset: initialize
 * the chipset configuration, report capabilities, apply the Miata 1
 * Pyxis DMA erratum workaround, set up DMA and platform interrupt
 * routing, and attach the primary PCI bus.
 */
void
ciaattach(device_t parent, device_t self, void *aux)
{
	struct cia_softc *sc = device_private(self);
	struct cia_config *ccp;
	struct pcibus_attach_args pba;
	char bits[64];
	const char *name;
	int pass;

	/* note that we've attached the chipset; can't have 2 CIAs. */
	ciafound = 1;

	sc->sc_dev = self;

	/*
	 * set up the chipset's info; done once at console init time
	 * (maybe), but we must do it here as well to take care of things
	 * that need to use memory allocation.
	 */
	ccp = sc->sc_ccp = &cia_configuration;
	cia_init(ccp, 1);

	/* Pyxis counts passes from cc_rev; ALCOR is offset by one. */
	if (ccp->cc_flags & CCF_ISPYXIS) {
		name = "Pyxis";
		pass = ccp->cc_rev;
	} else {
		name = "ALCOR/ALCOR2";
		pass = ccp->cc_rev + 1;
	}

	aprint_normal(": DECchip 2117x Core Logic Chipset (%s), pass %d\n",
	    name, pass);
	if (ccp->cc_cnfg) {
		snprintb(bits, sizeof(bits), CIA_CSR_CNFG_BITS, ccp->cc_cnfg);
		aprint_normal_dev(self, "extended capabilities: %s\n", bits);
	}

	/* Report where byte/word (BWX) access is in use. */
	switch (ccp->cc_flags & (CCF_PCI_USE_BWX|CCF_BUS_USE_BWX)) {
	case CCF_PCI_USE_BWX|CCF_BUS_USE_BWX:
		name = "PCI config and bus";
		break;
	case CCF_PCI_USE_BWX:
		name = "PCI config";
		break;
	case CCF_BUS_USE_BWX:
		name = "bus";
		break;
	default:
		name = NULL;
		break;
	}
	if (name != NULL)
		aprint_normal_dev(self, "using BWX for %s access\n", name);

#ifdef DEC_550
	if (cputype == ST_DEC_550 &&
	    (hwrpb->rpb_variation & SV_ST_MASK) < SV_ST_MIATA_1_5) {
		/*
		 * Miata 1 systems have a bug: DMA cannot cross
		 * an 8k boundary!  Make sure PCI read prefetching
		 * is disabled on these chips.  Note that secondary
		 * PCI busses don't have this problem, because of
		 * the way PPBs handle PCI read requests.
		 *
		 * In the 21174 Technical Reference Manual, this is
		 * actually documented as "Pyxis Pass 1", but apparently
		 * there are chips that report themselves as "Pass 1"
		 * which do not have the bug!  Miatas with the Cypress
		 * PCI-ISA bridge (i.e. Miata 1.5 and Miata 2) do not
		 * have the bug, so we use this check.
		 *
		 * NOTE: This bug is actually worked around in cia_dma.c,
		 * when direct-mapped DMA maps are created.
		 *
		 * XXX WE NEED TO THINK ABOUT HOW TO HANDLE THIS FOR
		 * XXX SGMAP DMA MAPPINGS!
		 */
		uint32_t ctrl;

		/* XXX no bets... */
		aprint_error_dev(self,
		    "WARNING: Pyxis pass 1 DMA bug; no bets...\n");

		ccp->cc_flags |= CCF_PYXISBUG;

		alpha_mb();
		ctrl = REGVAL(CIA_CSR_CTRL);
		ctrl &= ~(CTRL_RD_TYPE|CTRL_RL_TYPE|CTRL_RM_TYPE);
		REGVAL(CIA_CSR_CTRL) = ctrl;
		alpha_mb();
	}
#endif /* DEC_550 */

	cia_dma_init(ccp);

	/* Platform-specific PCI interrupt routing. */
	switch (cputype) {
#ifdef DEC_KN20AA
	case ST_DEC_KN20AA:
		pci_kn20aa_pickintr(ccp);
		break;
#endif
#ifdef DEC_EB164
	case ST_EB164:
		pci_eb164_pickintr(ccp);
		break;
#endif
#ifdef DEC_550
	case ST_DEC_550:
		pci_550_pickintr(ccp);
		break;
#endif
#ifdef DEC_1000A
	case ST_DEC_1000A:
		pci_1000a_pickintr(ccp, &ccp->cc_iot, &ccp->cc_memt,
		    &ccp->cc_pc);
		break;
#endif
#ifdef DEC_1000
	case ST_DEC_1000:
		pci_1000_pickintr(ccp, &ccp->cc_iot, &ccp->cc_memt,
		    &ccp->cc_pc);
		break;
#endif
	default:
		panic("ciaattach: shouldn't be here, really...");
	}

	/* Attach the primary PCI bus below us. */
	pba.pba_iot = &ccp->cc_iot;
	pba.pba_memt = &ccp->cc_memt;
	pba.pba_dmat =
	    alphabus_dma_get_tag(&ccp->cc_dmat_direct, ALPHA_BUS_PCI);
	pba.pba_dmat64 = NULL;
	pba.pba_pc = &ccp->cc_pc;
	pba.pba_bus = 0;
	pba.pba_bridgetag = NULL;
	pba.pba_flags = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY;
	/* Prefetch-style cycles are only safe without the Pyxis bug. */
	if ((ccp->cc_flags & CCF_PYXISBUG) == 0)
		pba.pba_flags |= PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY |
		    PCI_FLAGS_MWI_OKAY;
	config_found_ia(self, "pcibus", &pba, pcibusprint);
}
/*
 * Attach the Exynos on-chip USB2 host block: carve EHCI/OHCI/PHY
 * subregions out of the SoC register window, power up and initialize
 * the PHY, quiesce both controllers' interrupts, claim the shared IRQ
 * and attach the ohci(4)/ehci(4) children.
 */
static void
exynos_usb_attach(device_t parent, device_t self, void *aux)
{
	struct exynos_usb_softc * const sc = &exynos_usb_sc;
	struct exyo_attach_args *exyoaa = (struct exyo_attach_args *) aux;
	struct exyo_locators *loc = &exyoaa->exyo_loc;
	bus_size_t ehci_offset, ohci_offset, usb2phy_offset;

	/* no locators expected */
	KASSERT(loc->loc_port == EXYOCF_PORT_DEFAULT);
	KASSERT(loc->loc_intr != EXYOCF_INTR_DEFAULT);

	/* copy our device handle */
	sc->sc_self = self;
	sc->sc_irq = loc->loc_intr;

	/* get our bushandles */
	sc->sc_bst = exyoaa->exyo_core_bst;
	sc->sc_dmat = exyoaa->exyo_dmat;
//	sc->sc_dmat = exyoaa->exyo_coherent_dmat;

	/* Register offsets differ between the Exynos4 and Exynos5 SoCs. */
#ifdef EXYNOS4
	ehci_offset = EXYNOS4_USB2_HOST_EHCI_OFFSET;
	ohci_offset = EXYNOS4_USB2_HOST_OHCI_OFFSET;
	usb2phy_offset = EXYNOS4_USB2_HOST_PHYCTRL_OFFSET;
#endif
#ifdef EXYNOS5
	ehci_offset = EXYNOS5_USB2_HOST_EHCI_OFFSET;
	ohci_offset = EXYNOS5_USB2_HOST_OHCI_OFFSET;
	usb2phy_offset = EXYNOS5_USB2_HOST_PHYCTRL_OFFSET;
#endif

	bus_space_subregion(sc->sc_bst, exyoaa->exyo_core_bsh,
	    ehci_offset, EXYNOS_BLOCK_SIZE, &sc->sc_ehci_bsh);
	bus_space_subregion(sc->sc_bst, exyoaa->exyo_core_bsh,
	    ohci_offset, EXYNOS_BLOCK_SIZE, &sc->sc_ohci_bsh);
	bus_space_subregion(sc->sc_bst, exyoaa->exyo_core_bsh,
	    usb2phy_offset, EXYNOS_BLOCK_SIZE, &sc->sc_usb2phy_bsh);

	aprint_naive("\n");
	aprint_normal("\n");

	/* power up USB subsystem */
	exynos_usb_soc_powerup();

	/* init USB phys */
	exynos_usb_phy_init(sc->sc_usb2phy_bsh);

	/*
	 * Disable interrupts
	 *
	 * To prevent OHCI lockups on Exynos5 SoCs, we first have to read the
	 * address before we set it; this is most likely a bug in the SoC
	 */
#if NOHCI > 0
	int regval;

	/*
	 * HcInterruptDisable is a 32-bit register; use a full-width
	 * read (was bus_space_read_1, a partial-width access that also
	 * defeats the read-before-write workaround above).
	 */
	regval = bus_space_read_4(sc->sc_bst, sc->sc_ohci_bsh,
	    OHCI_INTERRUPT_DISABLE);
	regval = OHCI_ALL_INTRS;
	bus_space_write_4(sc->sc_bst, sc->sc_ohci_bsh,
	    OHCI_INTERRUPT_DISABLE, regval);
#endif
#if NEHCI > 0
	/* CAPLENGTH is a single byte; locate the operational registers. */
	bus_size_t caplength = bus_space_read_1(sc->sc_bst, sc->sc_ehci_bsh,
	    EHCI_CAPLENGTH);
	bus_space_write_4(sc->sc_bst, sc->sc_ehci_bsh,
	    caplength + EHCI_USBINTR, 0);
#endif

	/* claim shared interrupt for OHCI/EHCI */
	sc->sc_intrh = intr_establish(sc->sc_irq,
	    IPL_USB, IST_LEVEL, exynos_usb_intr, sc);
	if (!sc->sc_intrh) {
		aprint_error(": unable to establish interrupt at irq %d\n",
		    sc->sc_irq);
		/* disable? TBD */
		return;
	}
	aprint_normal_dev(sc->sc_self, "USB2 host interrupting on irq %d\n",
	    sc->sc_irq);

#if NOHCI > 0
	/* attach OHCI */
	struct exynos_usb_attach_args usb_ohci = {
		.name = "ohci",
	};
	sc->sc_ohci_dev = config_found(self, &usb_ohci, NULL);
#endif

#if NEHCI > 0
	/* attach EHCI */
	struct exynos_usb_attach_args usb_ehci = {
		.name = "ehci",
	};
	sc->sc_ehci_dev = config_found(self, &usb_ehci, NULL);
#endif
}

/*
 * Shared interrupt handler for the USB2 host block: offer the
 * interrupt to EHCI first, then always to OHCI as well.
 */
static int
exynos_usb_intr(void *arg)
{
	struct exynos_usb_softc *sc = (struct exynos_usb_softc *) arg;
	void *private;
	int ret = 0;

#if NEHCI > 0
	private = device_private(sc->sc_ehci_dev);
	if (private)
		ret = ehci_intr(private);
#endif
	/* XXX should we always deliver to ohci even if ehci takes it? */
//	if (ret)
//		return ret;

#if NOHCI > 0
	private = device_private(sc->sc_ohci_dev);
	if (private)
		ret = ohci_intr(private);
#endif

	return ret;
}
/*
 * Attach the iTE Super I/O: probe chip ID and the hardware-monitor EC
 * base address through the MB PNP configuration ports, map the EC
 * registers, enable monitoring, register with sysmon_envsys, and (on
 * chips that support it) register the watchdog timer.
 */
static void
itesio_isa_attach(device_t parent, device_t self, void *aux)
{
	struct itesio_softc *sc = device_private(self);
	struct isa_attach_args *ia = aux;
	int i;
	uint8_t cr;

	sc->sc_iot = ia->ia_iot;
	/*
	 * Fix: sc_ec_iot was never initialized before being passed to
	 * bus_space_map() below (uninitialized read); the EC registers
	 * live in the same ISA I/O space as the config ports.
	 */
	sc->sc_ec_iot = ia->ia_iot;

	if (bus_space_map(sc->sc_iot, ia->ia_io[0].ir_addr, 2, 0,
	    &sc->sc_ioh)) {
		aprint_error(": can't map i/o space\n");
		return;
	}

	aprint_naive("\n");

	/*
	 * Enter to the Super I/O MB PNP mode.
	 */
	itesio_enter(sc->sc_iot, sc->sc_ioh);
	/*
	 * Get info from the Super I/O Global Configuration Registers:
	 * Chip IDs and Device Revision.
	 */
	sc->sc_chipid = (itesio_readreg(sc->sc_iot, sc->sc_ioh,
	    ITESIO_CHIPID1) << 8);
	sc->sc_chipid |= itesio_readreg(sc->sc_iot, sc->sc_ioh,
	    ITESIO_CHIPID2);
	sc->sc_devrev = (itesio_readreg(sc->sc_iot, sc->sc_ioh,
	    ITESIO_DEVREV) & 0x0f);
	/*
	 * Select the EC LDN to get the Base Address.
	 */
	itesio_writereg(sc->sc_iot, sc->sc_ioh, ITESIO_LDNSEL,
	    ITESIO_EC_LDN);
	sc->sc_hwmon_baseaddr = (itesio_readreg(sc->sc_iot, sc->sc_ioh,
	    ITESIO_EC_MSB) << 8);
	sc->sc_hwmon_baseaddr |= itesio_readreg(sc->sc_iot, sc->sc_ioh,
	    ITESIO_EC_LSB);
	/*
	 * We are done, exit MB PNP mode.
	 */
	itesio_exit(sc->sc_iot, sc->sc_ioh);

	aprint_normal(": iTE IT%4xF Super I/O (rev %d)\n",
	    sc->sc_chipid, sc->sc_devrev);
	aprint_normal_dev(self, "Hardware Monitor registers at 0x%x\n",
	    sc->sc_hwmon_baseaddr);

	if (bus_space_map(sc->sc_ec_iot, sc->sc_hwmon_baseaddr, 8, 0,
	    &sc->sc_ec_ioh)) {
		aprint_error_dev(self, "cannot map hwmon i/o space\n");
		goto out2;
	}

	sc->sc_hwmon_mapped = true;

	/* Activate monitoring */
	cr = itesio_ecreadreg(sc, ITESIO_EC_CONFIG);
	SET(cr, 0x01);
	itesio_ecwritereg(sc, ITESIO_EC_CONFIG, cr);

#ifdef notyet
	/* Enable beep alarms */
	cr = itesio_ecreadreg(sc, ITESIO_EC_BEEPEER);
	SET(cr, 0x02);	/* Voltage exceeds limit */
	SET(cr, 0x04);	/* Temperature exceeds limit */
	itesio_ecwritereg(sc, ITESIO_EC_BEEPEER, cr);
#endif

	/*
	 * Initialize and attach sensors.
	 */
	itesio_setup_sensors(sc);
	sc->sc_sme = sysmon_envsys_create();
	for (i = 0; i < IT_NUM_SENSORS; i++) {
		if (sysmon_envsys_sensor_attach(sc->sc_sme,
		    &sc->sc_sensor[i])) {
			sysmon_envsys_destroy(sc->sc_sme);
			goto out;
		}
	}
	/*
	 * Hook into the system monitor.
	 */
	sc->sc_sme->sme_name = device_xname(self);
	sc->sc_sme->sme_cookie = sc;
	sc->sc_sme->sme_refresh = itesio_refresh;

	if ((i = sysmon_envsys_register(sc->sc_sme))) {
		aprint_error_dev(self,
		    "unable to register with sysmon (%d)\n", i);
		sysmon_envsys_destroy(sc->sc_sme);
		goto out;
	}
	sc->sc_hwmon_enabled = true;

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* The IT8705 doesn't support the WDT */
	if (sc->sc_chipid == ITESIO_ID8705)
		goto out2;

	/*
	 * Initialize the watchdog timer.
	 */
	sc->sc_smw.smw_name = device_xname(self);
	sc->sc_smw.smw_cookie = sc;
	sc->sc_smw.smw_setmode = itesio_wdt_setmode;
	sc->sc_smw.smw_tickle = itesio_wdt_tickle;
	sc->sc_smw.smw_period = 60;

	if (sysmon_wdog_register(&sc->sc_smw)) {
		aprint_error_dev(self, "unable to register watchdog timer\n");
		goto out2;
	}
	sc->sc_wdt_enabled = true;
	aprint_normal_dev(self, "Watchdog Timer present\n");

	return;

out:
	/* Sensor setup failed: release the EC window. */
	bus_space_unmap(sc->sc_ec_iot, sc->sc_ec_ioh, 8);
out2:
	/* Config-port window is no longer needed past this point. */
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, 2);
}
/* * Initialise our interface to the controller. */ int cac_init(struct cac_softc *sc, const char *intrstr, int startfw) { struct cac_controller_info cinfo; int error, rseg, size, i; bus_dma_segment_t seg; struct cac_ccb *ccb; char firm[8]; if (intrstr != NULL) aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); SIMPLEQ_INIT(&sc->sc_ccb_free); SIMPLEQ_INIT(&sc->sc_ccb_queue); mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM); cv_init(&sc->sc_ccb_cv, "cacccb"); size = sizeof(struct cac_ccb) * CAC_MAX_CCBS; if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { aprint_error_dev(sc->sc_dev, "unable to allocate CCBs, error = %d\n", error); return (-1); } if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, (void **)&sc->sc_ccbs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) { aprint_error_dev(sc->sc_dev, "unable to map CCBs, error = %d\n", error); return (-1); } if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) { aprint_error_dev(sc->sc_dev, "unable to create CCB DMA map, error = %d\n", error); return (-1); } if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_ccbs, size, NULL, BUS_DMA_NOWAIT)) != 0) { aprint_error_dev(sc->sc_dev, "unable to load CCB DMA map, error = %d\n", error); return (-1); } sc->sc_ccbs_paddr = sc->sc_dmamap->dm_segs[0].ds_addr; memset(sc->sc_ccbs, 0, size); ccb = (struct cac_ccb *)sc->sc_ccbs; for (i = 0; i < CAC_MAX_CCBS; i++, ccb++) { /* Create the DMA map for this CCB's data */ error = bus_dmamap_create(sc->sc_dmat, CAC_MAX_XFER, CAC_SG_SIZE, CAC_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap_xfer); if (error) { aprint_error_dev(sc->sc_dev, "can't create ccb dmamap (%d)\n", error); break; } ccb->ccb_flags = 0; ccb->ccb_paddr = sc->sc_ccbs_paddr + i * sizeof(struct cac_ccb); SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_chain); } /* Start firmware background tasks, if needed. 
*/ if (startfw) { if (cac_cmd(sc, CAC_CMD_START_FIRMWARE, &cinfo, sizeof(cinfo), 0, 0, CAC_CCB_DATA_IN, NULL)) { aprint_error_dev(sc->sc_dev, "CAC_CMD_START_FIRMWARE failed\n"); return (-1); } } if (cac_cmd(sc, CAC_CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo), 0, 0, CAC_CCB_DATA_IN, NULL)) { aprint_error_dev(sc->sc_dev, "CAC_CMD_GET_CTRL_INFO failed\n"); return (-1); } strlcpy(firm, cinfo.firm_rev, 4+1); printf("%s: %d channels, firmware <%s>\n", device_xname(sc->sc_dev), cinfo.scsi_chips, firm); /* Limit number of units to size of our sc_unitmask */ sc->sc_nunits = cinfo.num_drvs; if (sc->sc_nunits > sizeof(sc->sc_unitmask) * NBBY) sc->sc_nunits = sizeof(sc->sc_unitmask) * NBBY; /* Attach our units */ sc->sc_unitmask = 0; cac_rescan(sc->sc_dev, "cac", 0); /* Set our `shutdownhook' before we start any device activity. */ if (cac_sdh == NULL) cac_sdh = shutdownhook_establish(cac_shutdown, NULL); mutex_enter(&sc->sc_mutex); (*sc->sc_cl.cl_intr_enable)(sc, CAC_INTR_ENABLE); mutex_exit(&sc->sc_mutex); #if NBIO > 0 if (bio_register(sc->sc_dev, cac_ioctl) != 0) aprint_error_dev(sc->sc_dev, "controller registration failed"); else sc->sc_ioctl = cac_ioctl; if (cac_create_sensors(sc) != 0) aprint_error_dev(sc->sc_dev, "unable to create sensors\n"); #endif return (0); }
/*
 * Attach a Yamaha DS-1 (YMF7xx) PCI audio controller: map registers,
 * hook the interrupt, initialize the chip, probe primary/secondary
 * AC97 codecs (the secondary, used for 4ch audio, is optional) and
 * attach the audio and legacy (OPL/MPU) layers.
 */
static void
yds_attach(device_t parent, device_t self, void *aux)
{
	struct yds_softc *sc;
	struct pci_attach_args *pa;
	pci_chipset_tag_t pc;
	char const *intrstr;
	pci_intr_handle_t ih;
	pcireg_t reg;
	struct yds_codec_softc *codec;
	int i, r, to;
	int revision;
	int ac97_id2;
	char intrbuf[PCI_INTRSTR_LEN];

	sc = device_private(self);
	sc->sc_dev = self;
	pa = (struct pci_attach_args *)aux;
	pc = pa->pa_pc;
	revision = PCI_REVISION(pa->pa_class);

	pci_aprint_devinfo(pa, NULL);

	/* Map register to memory */
	if (pci_mapreg_map(pa, YDS_PCI_MBA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->memt, &sc->memh, NULL, NULL)) {
		aprint_error_dev(self, "can't map memory space\n");
		return;
	}

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_AUDIO); /* XXX IPL_NONE? */
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_AUDIO);

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_AUDIO, yds_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		mutex_destroy(&sc->sc_lock);
		mutex_destroy(&sc->sc_intr_lock);
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmatag = pa->pa_dmat;
	sc->sc_pc = pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_id = pa->pa_id;
	sc->sc_revision = revision;
	sc->sc_flags = yds_get_dstype(sc->sc_id);
#ifdef AUDIO_DEBUG
	if (ydsdebug) {
		char bits[80];

		snprintb(bits, sizeof(bits), YDS_CAP_BITS, sc->sc_flags);
		printf("%s: chip has %s\n", device_xname(self), bits);
	}
#endif

	/* Disable legacy mode */
	reg = pci_conf_read(pc, pa->pa_tag, YDS_PCI_LEGACY);
	pci_conf_write(pc, pa->pa_tag, YDS_PCI_LEGACY,
	    reg & YDS_PCI_LEGACY_LAD);

	/* Enable the device. */
	reg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	reg |= (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE);
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg);
	reg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	/* Mute all volumes */
	for (i = 0x80; i < 0xc0; i += 2)
		YWRITE2(sc, i, 0);

	/* Initialize the device */
	if (yds_init(sc)) {
		aprint_error_dev(self, "initialize failed\n");
		mutex_destroy(&sc->sc_lock);
		mutex_destroy(&sc->sc_intr_lock);
		return;
	}

	/*
	 * Detect primary/secondary AC97
	 * YMF754 Hardware Specification Rev 1.01 page 24
	 */
	reg = pci_conf_read(pc, pa->pa_tag, YDS_PCI_DSCTRL);
	pci_conf_write(pc, pa->pa_tag, YDS_PCI_DSCTRL, reg & ~YDS_DSCTRL_CRST);
	delay(400000);		/* Needed for 740C. */

	/* Primary codec: spin until its status port goes idle. */
	for (to = 0; to < AC97_TIMEOUT; to++) {
		if ((YREAD2(sc, AC97_STAT_ADDR1) & AC97_BUSY) == 0)
			break;
		delay(1);
	}
	if (to == AC97_TIMEOUT) {
		aprint_error_dev(self, "no AC97 available\n");
		mutex_destroy(&sc->sc_lock);
		mutex_destroy(&sc->sc_intr_lock);
		return;
	}

	/* Secondary */
	/* Secondary AC97 is used for 4ch audio. Currently unused. */
	ac97_id2 = -1;
	/* No dock activity means no secondary codec; skip the probe. */
	if ((YREAD2(sc, YDS_ACTIVITY) & YDS_ACTIVITY_DOCKA) == 0)
		goto detected;
#if 0				/* reset secondary... */
	YWRITE2(sc, YDS_GPIO_OCTRL,
	    YREAD2(sc, YDS_GPIO_OCTRL) & ~YDS_GPIO_GPO2);
	YWRITE2(sc, YDS_GPIO_FUNCE,
	    (YREAD2(sc, YDS_GPIO_FUNCE)&(~YDS_GPIO_GPC2))|YDS_GPIO_GPE2);
#endif
	for (to = 0; to < AC97_TIMEOUT; to++) {
		if ((YREAD2(sc, AC97_STAT_ADDR2) & AC97_BUSY) == 0)
			break;
		delay(1);
	}
	if (to < AC97_TIMEOUT) {
		/* detect id */
		for (ac97_id2 = 1; ac97_id2 < 4; ac97_id2++) {
			YWRITE2(sc, AC97_CMD_ADDR,
			    AC97_CMD_READ | AC97_ID(ac97_id2) | 0x28);

			for (to = 0; to < AC97_TIMEOUT; to++) {
				if ((YREAD2(sc, AC97_STAT_ADDR2) & AC97_BUSY)
				    == 0)
					goto detected;
				delay(1);
			}
		}
		if (ac97_id2 == 4)
			ac97_id2 = -1;
detected:
		;
	}

	/* Pulse the codec cold-reset line, then wait for idle again. */
	pci_conf_write(pc, pa->pa_tag, YDS_PCI_DSCTRL, reg | YDS_DSCTRL_CRST);
	delay (20);
	pci_conf_write(pc, pa->pa_tag, YDS_PCI_DSCTRL, reg & ~YDS_DSCTRL_CRST);
	delay (400000);
	for (to = 0; to < AC97_TIMEOUT; to++) {
		if ((YREAD2(sc, AC97_STAT_ADDR1) & AC97_BUSY) == 0)
			break;
		delay(1);
	}

	/*
	 * Attach ac97 codec
	 */
	for (i = 0; i < 2; i++) {
		static struct {
			int data;
			int addr;
		} statregs[] = {
			{AC97_STAT_DATA1, AC97_STAT_ADDR1},
			{AC97_STAT_DATA2, AC97_STAT_ADDR2},
		};

		if (i == 1 && ac97_id2 == -1)
			break;		/* secondary ac97 not available */

		codec = &sc->sc_codec[i];
		codec->sc = sc;
		codec->id = i == 1 ? ac97_id2 : 0;
		codec->status_data = statregs[i].data;
		codec->status_addr = statregs[i].addr;
		codec->host_if.arg = codec;
		codec->host_if.attach = yds_attach_codec;
		codec->host_if.read = yds_read_codec;
		codec->host_if.write = yds_write_codec;
		codec->host_if.reset = yds_reset_codec;

		r = ac97_attach(&codec->host_if, self, &sc->sc_lock);
		if (r != 0) {
			aprint_error_dev(self,
			    "can't attach codec (error 0x%X)\n", r);
			mutex_destroy(&sc->sc_lock);
			mutex_destroy(&sc->sc_intr_lock);
			return;
		}
	}

	if (0 != auconv_create_encodings(yds_formats, YDS_NFORMATS,
	    &sc->sc_encodings)) {
		mutex_destroy(&sc->sc_lock);
		mutex_destroy(&sc->sc_intr_lock);
		return;
	}

	audio_attach_mi(&yds_hw_if, sc, self);

	/* Legacy (OPL/MPU) resources are probed after interrupts settle. */
	sc->sc_legacy_iot = pa->pa_iot;
	config_defer(self, yds_configure_legacy);

	if (!pmf_device_register(self, yds_suspend, yds_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
/*
 * Attach a Forte Media FM-801 PCI audio controller: map I/O space,
 * hook the interrupt, reset the codec, program default volumes and
 * interrupt masks, attach the AC'97 codec and the audio/OPL/MPU
 * child devices.
 */
static void
fms_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa;
	struct fms_softc *sc;
	struct audio_attach_args aa;
	const char *intrstr;
	pci_chipset_tag_t pc;
	pcitag_t pt;
	pci_intr_handle_t ih;
	uint16_t k1;
	/* Buffer for the modern, caller-supplied pci_intr_string(). */
	char intrbuf[PCI_INTRSTR_LEN];

	pa = aux;
	sc = device_private(self);
	sc->sc_dev = self;
	intrstr = NULL;
	pc = pa->pa_pc;
	pt = pa->pa_tag;
	aprint_naive(": Audio controller\n");
	aprint_normal(": Forte Media FM-801\n");

	if (pci_mapreg_map(pa, 0x10, PCI_MAPREG_TYPE_IO, 0, &sc->sc_iot,
	    &sc->sc_ioh, &sc->sc_ioaddr, &sc->sc_iosize)) {
		aprint_error_dev(sc->sc_dev, "can't map i/o space\n");
		return;
	}

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0x30, 2,
	    &sc->sc_mpu_ioh))
		panic("fms_attach: can't get mpu subregion handle");

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0x68, 4,
	    &sc->sc_opl_ioh))
		panic("fms_attach: can't get opl subregion handle");

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n");
		return;
	}
	/*
	 * Fix: use the buffer-taking pci_intr_string() API instead of
	 * the obsolete two-argument form with its shared static buffer.
	 */
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_AUDIO);

	sc->sc_ih = pci_intr_establish(pc, ih, IPL_AUDIO, fms_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		mutex_destroy(&sc->sc_lock);
		mutex_destroy(&sc->sc_intr_lock);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/* Disable legacy audio (SBPro compatibility) */
	pci_conf_write(pc, pt, 0x40, 0);

	/* Reset codec and AC'97 */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_CODEC_CTL, 0x0020);
	delay(2);		/* > 1us according to AC'97 documentation */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_CODEC_CTL, 0x0000);
	delay(1);		/* > 168.2ns according to AC'97 documentation */

	/* Set up volume */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_PCM_VOLUME, 0x0808);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_FM_VOLUME, 0x0808);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_I2S_VOLUME, 0x0808);

	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_RECORD_SOURCE, 0x0000);

	/* Unmask playback, record and mpu interrupts, mask the rest */
	k1 = bus_space_read_2(sc->sc_iot, sc->sc_ioh, FM_INTMASK);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_INTMASK,
	    (k1 & ~(FM_INTMASK_PLAY | FM_INTMASK_REC | FM_INTMASK_MPU)) |
	    FM_INTMASK_VOL);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_INTSTATUS,
	    FM_INTSTATUS_PLAY | FM_INTSTATUS_REC | FM_INTSTATUS_MPU |
	    FM_INTSTATUS_VOL);

	sc->host_if.arg = sc;
	sc->host_if.attach = fms_attach_codec;
	sc->host_if.read = fms_read_codec;
	sc->host_if.write = fms_write_codec;
	sc->host_if.reset = fms_reset_codec;

	if (ac97_attach(&sc->host_if, self, &sc->sc_lock) != 0) {
		mutex_destroy(&sc->sc_intr_lock);
		mutex_destroy(&sc->sc_lock);
		return;
	}

	audio_attach_mi(&fms_hw_if, sc, sc->sc_dev);

	aa.type = AUDIODEV_TYPE_OPL;
	aa.hwif = NULL;
	aa.hdl = NULL;
	config_found(sc->sc_dev, &aa, audioprint);

	aa.type = AUDIODEV_TYPE_MPU;
	aa.hwif = NULL;
	aa.hdl = NULL;
	sc->sc_mpu_dev = config_found(sc->sc_dev, &aa, audioprint);
}
/*
 * Attach an SMC EPIC/100 PCI Ethernet controller: power it up, map
 * its registers (preferring memory space over I/O space), enable bus
 * mastering, hook the interrupt and hand off to the chip-independent
 * epic_attach().
 */
static void
epic_pci_attach(device_t parent, device_t self, void *aux)
{
	struct epic_pci_softc *psc = device_private(self);
	struct epic_softc *sc = &psc->sc_epic;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const struct epic_pci_product *epp;
	const struct epic_pci_subsys_info *esp;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	int ioh_valid, memh_valid;
	int error;
	/* Buffer for the modern, caller-supplied pci_intr_string(). */
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;

	epp = epic_pci_lookup(pa);
	if (epp == NULL) {
		aprint_normal("\n");
		panic("%s: impossible", __func__);
	}

	pci_aprint_devinfo_fancy(pa, "Ethernet controller", epp->epp_name, 1);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL)) &&
	    error != EOPNOTSUPP) {
		aprint_error_dev(self, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map the device.
	 */
	ioh_valid = (pci_mapreg_map(pa, EPIC_PCI_IOBA, PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, EPIC_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	/* Prefer memory space when both mappings succeeded. */
	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "unable to map interrupt\n");
		return;
	}
	/*
	 * Fix: use the buffer-taking pci_intr_string() API instead of
	 * the obsolete two-argument form with its shared static buffer.
	 */
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	psc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, epic_intr, sc);
	if (psc->sc_ih == NULL) {
		aprint_error_dev(self, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* Board-specific quirks keyed on the PCI subsystem ID. */
	esp = epic_pci_subsys_lookup(pa);
	if (esp)
		sc->sc_hwflags = esp->flags;

	/*
	 * Finish off the attach.
	 */
	epic_attach(sc);
}
/*
 * sf_pci_attach:
 *
 *	Attach an Adaptec AIC-6915 "Starfire" Ethernet controller on the
 *	PCI bus, then call the bus-independent sf_attach().
 */
static void
sf_pci_attach(device_t parent, device_t self, void *aux)
{
	struct sf_pci_softc *psc = device_private(self);
	struct sf_softc *sc = &psc->sc_starfire;
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const struct sf_pci_product *spp;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	pcireg_t reg;
	int error, ioh_valid, memh_valid;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;

	spp = sf_pci_lookup(pa);
	if (spp == NULL) {
		/* match() already accepted this device; lookup must work. */
		printf("\n");
		panic("sf_pci_attach: impossible");
	}

	printf(": %s, rev. %d\n", spp->spp_name, PCI_REVISION(pa->pa_class));

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL)) &&
	    error != EOPNOTSUPP) {
		aprint_error_dev(self, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map the device.  The memory BAR may be 32-bit or 64-bit; any
	 * other type means no usable memory mapping.
	 */
	reg = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SF_PCI_MEMBA);
	switch (reg) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, SF_PCI_MEMBA,
		    reg, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	/*
	 * A 64-bit memory BAR occupies two config words, which shifts the
	 * I/O BAR's offset; compensate when picking the I/O BAR address.
	 */
	ioh_valid = (pci_mapreg_map(pa,
	    (reg == (PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT)) ?
	    SF_PCI_IOBA : SF_PCI_IOBA - 0x04,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);

	/* Prefer memory space; fall back to I/O space. */
	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_iomapped = 0;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
		sc->sc_iomapped = 1;
	} else {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, sizeof(intrbuf));
	psc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, sf_intr, sc);
	if (psc->sc_ih == NULL) {
		aprint_error_dev(self, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/*
	 * Finish off the attach.
	 */
	sf_attach(sc);
}
static void hdaudio_pci_attach(device_t parent, device_t self, void *opaque) { struct hdaudio_pci_softc *sc = device_private(self); struct pci_attach_args *pa = opaque; pci_intr_handle_t ih; const char *intrstr; pcireg_t csr; int err; aprint_naive("\n"); aprint_normal(": HD Audio Controller\n"); sc->sc_pc = pa->pa_pc; sc->sc_tag = pa->pa_tag; sc->sc_id = pa->pa_id; sc->sc_hdaudio.sc_subsystem = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG); /* Enable busmastering and MMIO access */ csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG); csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_BACKTOBACK_ENABLE; pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, csr); /* Map MMIO registers */ err = pci_mapreg_map(pa, HDAUDIO_PCI_AZBARL, PCI_MAPREG_TYPE_MEM, 0, &sc->sc_hdaudio.sc_memt, &sc->sc_hdaudio.sc_memh, &sc->sc_hdaudio.sc_membase, &sc->sc_hdaudio.sc_memsize); if (err) { aprint_error_dev(self, "couldn't map mmio space\n"); return; } sc->sc_hdaudio.sc_memvalid = true; sc->sc_hdaudio.sc_dmat = pa->pa_dmat; /* Map interrupt and establish handler */ err = pci_intr_map(pa, &ih); if (err) { aprint_error_dev(self, "couldn't map interrupt\n"); return; } intrstr = pci_intr_string(pa->pa_pc, ih); sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_AUDIO, hdaudio_pci_intr, sc); if (sc->sc_ih == NULL) { aprint_error_dev(self, "couldn't establish interrupt"); if (intrstr) aprint_error(" at %s", intrstr); aprint_error("\n"); return; } aprint_normal_dev(self, "interrupting at %s\n", intrstr); if (!pmf_device_register(self, NULL, hdaudio_pci_resume)) aprint_error_dev(self, "couldn't establish power handler\n"); hdaudio_pci_reinit(sc); /* Attach bus-independent HD audio layer */ hdaudio_attach(self, &sc->sc_hdaudio); }
/*
 * ohci_pci_attach:
 *
 *	Attach an OHCI USB host controller on the PCI bus: verify memory
 *	access is enabled, map the registers, quiet the chip's interrupts,
 *	establish the IRQ, initialize the HC, and attach the USB bus.
 *	On failure after resources are held, unwind via the "fail" label.
 */
static void
ohci_pci_attach(device_t parent, device_t self, void *aux)
{
	struct ohci_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	char const *intrstr;
	pci_intr_handle_t ih;
	pcireg_t csr;
	usbd_status r;
	const char *vendor;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc.sc_dev = self;
	sc->sc.sc_bus.hci_private = sc;

	/* National Semiconductor SuperI/O parts need special handling. */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_USB) {
		sc->sc.sc_flags = OHCIF_SUPERIO;
	}

	pci_aprint_devinfo(pa, "USB Controller");

	/* check if memory space access is enabled */
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
#ifdef DEBUG
	printf("csr: %08x\n", csr);
#endif
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0) {
		aprint_error_dev(self, "memory access is disabled\n");
		return;
	}

	/* Map I/O registers */
	if (pci_mapreg_map(pa, PCI_CBMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc.iot, &sc->sc.ioh, NULL, &sc->sc.sc_size)) {
		/* sc_size == 0 marks "nothing mapped" for the fail path. */
		sc->sc.sc_size = 0;
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	/* Disable interrupts, so we don't get any spurious ones. */
	bus_space_write_4(sc->sc.iot, sc->sc.ioh, OHCI_INTERRUPT_DISABLE,
	    OHCI_ALL_INTRS);

	sc->sc_pc = pc;
	sc->sc_tag = tag;
	sc->sc.sc_bus.dmatag = pa->pa_dmat;

	/* Enable the device. */
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG,
	    csr | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		goto fail;
	}

	/*
	 * Allocate IRQ
	 */
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_SCHED, ohci_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/*
	 * Figure out vendor for root hub descriptor.
	 */
	vendor = pci_findvendor(pa->pa_id);
	sc->sc.sc_id_vendor = PCI_VENDOR(pa->pa_id);
	if (vendor)
		strlcpy(sc->sc.sc_vendor, vendor, sizeof(sc->sc.sc_vendor));
	else
		snprintf(sc->sc.sc_vendor, sizeof(sc->sc.sc_vendor),
		    "vendor 0x%04x", PCI_VENDOR(pa->pa_id));

	r = ohci_init(&sc->sc);
	if (r != USBD_NORMAL_COMPLETION) {
		aprint_error_dev(self, "init failed, error=%d\n", r);
		goto fail;
	}

#if NEHCI > 0
	/* Register with the EHCI companion-controller machinery. */
	usb_pci_add(&sc->sc_pci, pa, self);
#endif

	if (!pmf_device_register1(self, ohci_suspend, ohci_resume,
	    ohci_shutdown))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Attach usb device. */
	sc->sc.sc_child = config_found(self, &sc->sc.sc_bus, usbctlprint);
	return;

fail:
	/* Release whatever was acquired, in reverse order. */
	if (sc->sc_ih) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc.sc_size) {
		bus_space_unmap(sc->sc.iot, sc->sc.ioh, sc->sc.sc_size);
		sc->sc.sc_size = 0;
	}
	return;
}
/*
 * sonic_attach:
 *
 *	Attach a SONIC interface to the system.
 *
 *	Allocates and maps the DMA control data and the transmit/receive
 *	buffer DMA maps, resets the chip, and attaches the network
 *	interface.  On failure, unwinds all acquired resources via the
 *	fall-through fail_* labels.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	uint8_t *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	/* Control-data layout differs between 16-bit and 32-bit mode. */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

	/*
	 * ETHER_PAD_LEN extra bytes hold a zeroed pad buffer used to pad
	 * short frames; 64KB boundary per chip DMA constraints.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	    PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	/* The pad buffer lives just past the control data; keep it zero. */
	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	/* The pad buffer is written once here and never changed again. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(sonic_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error_dev(sc->sc_dev,
		    "WARNING: unable to establish shutdown hook\n");
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
fail_5:
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
fail_0:
	return;
}
/*
 * ubsa_attach:
 *
 *	Attach a Belkin USB serial adapter: configure the device, locate
 *	the interrupt-in, bulk-in, and bulk-out endpoints on the main
 *	interface, and attach a ucom(4) child for the serial port.
 */
void
ubsa_attach(device_t parent, device_t self, void *aux)
{
	struct ubsa_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	usbd_device_handle dev = uaa->device;
	usb_config_descriptor_t *cdesc;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	char *devinfop;
	usbd_status err;
	struct ucom_attach_args uca;
	int i;

	sc->sc_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	sc->sc_udev = dev;
	sc->sc_config_index = UBSA_DEFAULT_CONFIG_INDEX;
	sc->sc_numif = 1; /* default device has one interface */

	/*
	 * initialize rts, dtr variables to something
	 * different from boolean 0, 1
	 */
	sc->sc_dtr = -1;
	sc->sc_rts = -1;

	/*
	 * Quad UMTS cards use different requests to
	 * control com settings and only some.
	 */
	sc->sc_quadumts = 0;
	if (uaa->vendor == USB_VENDOR_OPTIONNV) {
		switch (uaa->product) {
		case USB_PRODUCT_OPTIONNV_QUADUMTS:
		case USB_PRODUCT_OPTIONNV_QUADUMTS2:
			sc->sc_quadumts = 1;
			break;
		}
	}

	DPRINTF(("ubsa attach: sc = %p\n", sc));

	/*
	 * Move the device into the configured state.
	 */
	err = usbd_set_config_index(dev, sc->sc_config_index, 1);
	if (err) {
		aprint_error_dev(self, "failed to set configuration: %s\n",
		    usbd_errstr(err));
		sc->sc_dying = 1;
		goto error;
	}

	/* get the config descriptor */
	cdesc = usbd_get_config_descriptor(sc->sc_udev);
	if (cdesc == NULL) {
		aprint_error_dev(self,
		    "failed to get configuration descriptor\n");
		sc->sc_dying = 1;
		goto error;
	}

	/* -1/-NULL mark "not found yet" for the endpoint scan below. */
	sc->sc_intr_number = -1;
	sc->sc_intr_pipe = NULL;

	/* get the interfaces */
	err = usbd_device2interface_handle(dev, UBSA_IFACE_INDEX_OFFSET,
	    &sc->sc_iface[0]);
	if (err) {
		/* can not get main interface */
		sc->sc_dying = 1;
		goto error;
	}

	/* Find the endpoints */
	id = usbd_get_interface_descriptor(sc->sc_iface[0]);
	sc->sc_iface_number[0] = id->bInterfaceNumber;

	/* initialize endpoints */
	uca.bulkin = uca.bulkout = -1;

	/*
	 * Scan all endpoints of the interface; record the interrupt-in
	 * endpoint and the bulk-in/bulk-out data pair.
	 */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(sc->sc_iface[0], i);
		if (ed == NULL) {
			aprint_error_dev(self,
			    "no endpoint descriptor for %d\n", i);
			break;
		}

		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->sc_intr_number = ed->bEndpointAddress;
			sc->sc_isize = UGETW(ed->wMaxPacketSize);
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			uca.bulkin = ed->bEndpointAddress;
			uca.ibufsize = UGETW(ed->wMaxPacketSize);
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			uca.bulkout = ed->bEndpointAddress;
			uca.obufsize = UGETW(ed->wMaxPacketSize);
		}
	} /* end of Endpoint loop */

	/* All three endpoints are required; bail if any is missing. */
	if (sc->sc_intr_number == -1) {
		aprint_error_dev(self, "Could not find interrupt in\n");
		sc->sc_dying = 1;
		goto error;
	}

	if (uca.bulkin == -1) {
		aprint_error_dev(self, "Could not find data bulk in\n");
		sc->sc_dying = 1;
		goto error;
	}

	if (uca.bulkout == -1) {
		aprint_error_dev(self, "Could not find data bulk out\n");
		sc->sc_dying = 1;
		goto error;
	}

	/* Fill in the ucom attach args and attach the child. */
	uca.portno = 0;
	/* bulkin, bulkout set above */
	uca.ibufsizepad = uca.ibufsize;
	uca.opkthdrlen = 0;
	uca.device = dev;
	uca.iface = sc->sc_iface[0];
	uca.methods = &ubsa_methods;
	uca.arg = sc;
	uca.info = NULL;
	DPRINTF(("ubsa: int#=%d, in = 0x%x, out = 0x%x, intr = 0x%x\n",
	    i, uca.bulkin, uca.bulkout, sc->sc_intr_number));
	sc->sc_subdevs[0] = config_found_sm_loc(self, "ucombus", NULL,
	    &uca, ucomprint, ucomsubmatch);

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev,
	    sc->sc_dev);

	return;

error:
	return;
}
void fdcfinishattach(device_t self) { struct fdc_softc *fdc = device_private(self); bus_space_tag_t iot = fdc->sc_iot; bus_space_handle_t ioh = fdc->sc_ioh; struct fdc_attach_args fa; /* * Reset the controller to get it into a known state. Not all * probes necessarily need do this to discover the controller up * front, so don't assume anything. */ bus_space_write_1(iot, ioh, fdout, 0); delay(100); bus_space_write_1(iot, ioh, fdout, FDO_FRST); /* see if it can handle a command */ if (out_fdc(iot, ioh, NE7CMD_SPECIFY) < 0) { aprint_normal_dev(fdc->sc_dev, "can't reset controller\n"); return; } out_fdc(iot, ioh, 0xdf); out_fdc(iot, ioh, 2); #if defined(i386) || defined(x86_64) /* * The NVRAM info only tells us about the first two disks on the * `primary' floppy controller. */ /* XXX device_unit() abuse */ if (device_unit(fdc->sc_dev) == 0) { int type = mc146818_read(NULL, NVRAM_DISKETTE); /* XXX softc */ fdc->sc_known = 1; fdc->sc_knownfds[0] = fd_nvtotype(device_xname(fdc->sc_dev), type, 0); if (fdc->sc_knownfds[0] != NULL) fdc->sc_present |= 1; fdc->sc_knownfds[1] = fd_nvtotype(device_xname(fdc->sc_dev), type, 1); if (fdc->sc_knownfds[1] != NULL) fdc->sc_present |= 2; } #endif /* i386 || x86_64 */ /* physical limit: four drives per controller. */ fdc->sc_state = PROBING; for (fa.fa_drive = 0; fa.fa_drive < 4; fa.fa_drive++) { if (fdc->sc_known) { if (fdc->sc_present & (1 << fa.fa_drive)) { fa.fa_deftype = fdc->sc_knownfds[fa.fa_drive]; config_found(fdc->sc_dev, (void *)&fa, fdprint); } } else { #if defined(atari) /* * Atari has a different ordening, defaults to 1.44 */ fa.fa_deftype = &fd_types[2]; #else /* * Default to 1.44MB on Alpha and BeBox. How do we tell * on these platforms? */ fa.fa_deftype = &fd_types[0]; #endif (void)config_found_ia(fdc->sc_dev, "fdc", (void *)&fa, fdprint); } } fdc->sc_state = DEVIDLE; }
/* ARGSUSED */ void flash_attach(device_t parent, device_t self, void *aux) { struct flash_softc * const sc = device_private(self); struct flash_attach_args * const faa = aux; char pbuf[2][sizeof("9999 KB")]; sc->sc_dev = self; sc->sc_parent_dev = parent; sc->flash_if = faa->flash_if; sc->sc_partinfo = faa->partinfo; sc->hw_softc = device_private(parent); format_bytes(pbuf[0], sizeof(pbuf[0]), sc->sc_partinfo.part_size); format_bytes(pbuf[1], sizeof(pbuf[1]), sc->flash_if->erasesize); aprint_naive("\n"); switch (sc->flash_if->type) { case FLASH_TYPE_NOR: aprint_normal(": NOR flash partition size %s, offset %#jx", pbuf[0], (uintmax_t )sc->sc_partinfo.part_offset); break; case FLASH_TYPE_NAND: aprint_normal(": NAND flash partition size %s, offset %#jx", pbuf[0], (uintmax_t )sc->sc_partinfo.part_offset); break; default: aprint_normal(": %s unknown flash", pbuf[0]); } if (sc->sc_partinfo.part_flags & FLASH_PART_READONLY) { sc->sc_readonly = true; aprint_normal(", read only"); } else { sc->sc_readonly = false; } aprint_normal("\n"); if (sc->sc_partinfo.part_size == 0) { aprint_error_dev(self, "partition size must be larger than 0\n"); return; } switch (sc->flash_if->type) { case FLASH_TYPE_NOR: aprint_normal_dev(sc->sc_dev, "erase size %s bytes, write size %d bytes\n", pbuf[1], sc->flash_if->writesize); break; case FLASH_TYPE_NAND: default: aprint_normal_dev(sc->sc_dev, "erase size %s, page size %d bytes, write size %d bytes\n", pbuf[1], sc->flash_if->page_size, sc->flash_if->writesize); break; } if (!pmf_device_register1(sc->sc_dev, NULL, NULL, flash_shutdown)) aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n"); }
static void auacer_attach(struct device *parent, struct device *self, void *aux) { struct auacer_softc *sc; struct pci_attach_args *pa; pci_intr_handle_t ih; bus_size_t aud_size; pcireg_t v; const char *intrstr; int i; sc = (struct auacer_softc *)self; pa = aux; aprint_normal(": Acer Labs M5455 Audio controller\n"); if (pci_mapreg_map(pa, 0x10, PCI_MAPREG_TYPE_IO, 0, &sc->iot, &sc->aud_ioh, NULL, &aud_size)) { aprint_error(": can't map i/o space\n"); return; } sc->sc_pc = pa->pa_pc; sc->sc_pt = pa->pa_tag; sc->dmat = pa->pa_dmat; sc->sc_dmamap_flags = BUS_DMA_COHERENT; /* XXX remove */ /* enable bus mastering */ v = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, v | PCI_COMMAND_MASTER_ENABLE); /* Map and establish the interrupt. */ if (pci_intr_map(pa, &ih)) { aprint_error_dev(&sc->sc_dev, "can't map interrupt\n"); return; } intrstr = pci_intr_string(pa->pa_pc, ih); sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_AUDIO, auacer_intr, sc); if (sc->sc_ih == NULL) { aprint_error_dev(&sc->sc_dev, "can't establish interrupt"); if (intrstr != NULL) aprint_normal(" at %s", intrstr); aprint_normal("\n"); return; } aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr); strlcpy(sc->sc_audev.name, "M5455 AC97", MAX_AUDIO_DEV_LEN); snprintf(sc->sc_audev.version, MAX_AUDIO_DEV_LEN, "0x%02x", PCI_REVISION(pa->pa_class)); strlcpy(sc->sc_audev.config, device_xname(&sc->sc_dev), MAX_AUDIO_DEV_LEN); /* Set up DMA lists. 
*/ auacer_alloc_cdata(sc); sc->sc_pcmo.dmalist = sc->sc_cdata->ic_dmalist_pcmo; sc->sc_pcmo.ptr = 0; sc->sc_pcmo.port = ALI_BASE_PO; DPRINTF(ALI_DEBUG_DMA, ("auacer_attach: lists %p\n", sc->sc_pcmo.dmalist)); sc->host_if.arg = sc; sc->host_if.attach = auacer_attach_codec; sc->host_if.read = auacer_read_codec; sc->host_if.write = auacer_write_codec; sc->host_if.reset = auacer_reset_codec; if (ac97_attach(&sc->host_if, self) != 0) return; /* setup audio_format */ memcpy(sc->sc_formats, auacer_formats, sizeof(auacer_formats)); if (!AC97_IS_4CH(sc->codec_if)) AUFMT_INVALIDATE(&sc->sc_formats[AUACER_FORMATS_4CH]); if (!AC97_IS_6CH(sc->codec_if)) AUFMT_INVALIDATE(&sc->sc_formats[AUACER_FORMATS_6CH]); if (AC97_IS_FIXED_RATE(sc->codec_if)) { for (i = 0; i < AUACER_NFORMATS; i++) { sc->sc_formats[i].frequency_type = 1; sc->sc_formats[i].frequency[0] = 48000; } } if (0 != auconv_create_encodings(sc->sc_formats, AUACER_NFORMATS, &sc->sc_encodings)) { return; } audio_attach_mi(&auacer_hw_if, sc, &sc->sc_dev); auacer_reset(sc); if (!pmf_device_register(self, NULL, auacer_resume)) aprint_error_dev(self, "couldn't establish power handler\n"); }
/*
 * Attach the interface. Allocate softc structures, do
 * setup and ethernet/BPF attach.
 */
void
kue_attach(device_t parent, device_t self, void *aux)
{
	struct kue_softc *sc = device_private(self);
	struct usb_attach_arg *uaa = aux;
	char *devinfop;
	int s;
	struct ifnet *ifp;
	usbd_device_handle dev = uaa->device;
	usbd_interface_handle iface;
	usbd_status err;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	int i;

	DPRINTFN(5,(" : kue_attach: sc=%p, dev=%p", sc, dev));

	sc->kue_dev = self;

	aprint_naive("\n");
	aprint_normal("\n");

	devinfop = usbd_devinfo_alloc(dev, 0);
	aprint_normal_dev(self, "%s\n", devinfop);
	usbd_devinfo_free(devinfop);

	err = usbd_set_config_no(dev, KUE_CONFIG_NO, 1);
	if (err) {
		aprint_error_dev(self, "failed to set configuration"
		    ", err=%s\n", usbd_errstr(err));
		return;
	}

	sc->kue_udev = dev;
	sc->kue_product = uaa->product;
	sc->kue_vendor = uaa->vendor;

	/* Load the firmware into the NIC. */
	if (kue_load_fw(sc)) {
		aprint_error_dev(self, "loading firmware failed\n");
		return;
	}

	err = usbd_device2interface_handle(dev, KUE_IFACE_IDX, &iface);
	if (err) {
		aprint_error_dev(self, "getting interface handle failed\n");
		return;
	}

	sc->kue_iface = iface;
	id = usbd_get_interface_descriptor(iface);

	/*
	 * Find endpoints.  Record the bulk-in/bulk-out data pair and the
	 * interrupt endpoint.
	 */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			aprint_error_dev(self, "couldn't get ep %d\n", i);
			return;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->kue_ed[KUE_ENDPT_RX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->kue_ed[KUE_ENDPT_TX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->kue_ed[KUE_ENDPT_INTR] = ed->bEndpointAddress;
		}
	}

	/* Both data endpoints are required. */
	if (sc->kue_ed[KUE_ENDPT_RX] == 0 || sc->kue_ed[KUE_ENDPT_TX] == 0) {
		aprint_error_dev(self, "missing endpoint\n");
		return;
	}

	/* Read ethernet descriptor */
	err = kue_ctl(sc, KUE_CTL_READ, KUE_CMD_GET_ETHER_DESCRIPTOR,
	    0, &sc->kue_desc, sizeof(sc->kue_desc));
	if (err) {
		aprint_error_dev(self, "could not read Ethernet descriptor\n");
		return;
	}

	sc->kue_mcfilters = malloc(KUE_MCFILTCNT(sc) * ETHER_ADDR_LEN,
	    M_USBDEV, M_NOWAIT);
	if (sc->kue_mcfilters == NULL) {
		aprint_error_dev(self,
		    "no memory for multicast filter buffer\n");
		return;
	}

	s = splnet();

	/*
	 * A KLSI chip was detected. Inform the world.
	 */
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->kue_desc.kue_macaddr));

	/* Initialize interface info.*/
	ifp = GET_IFP(sc);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = kue_ioctl;
	ifp->if_start = kue_start;
	ifp->if_watchdog = kue_watchdog;
	strncpy(ifp->if_xname, device_xname(sc->kue_dev), IFNAMSIZ);

	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->kue_desc.kue_macaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->kue_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	sc->kue_attached = true;
	splx(s);

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->kue_udev,
	    sc->kue_dev);

	return;
}
/* * Interface exists: make available by filling in network interface * record. System will initialize the interface when it is ready * to accept packets. */ void sgec_attach(struct ze_softc *sc) { struct ifnet *ifp = &sc->sc_if; struct ze_tdes *tp; struct ze_rdes *rp; bus_dma_segment_t seg; int i, rseg, error; /* * Allocate DMA safe memory for descriptors and setup memory. */ error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata), PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT); if (error) { aprint_error(": unable to allocate control data, error = %d\n", error); goto fail_0; } error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata), (void **)&sc->sc_zedata, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); if (error) { aprint_error( ": unable to map control data, error = %d\n", error); goto fail_1; } error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1, sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap); if (error) { aprint_error( ": unable to create control data DMA map, error = %d\n", error); goto fail_2; } error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata, sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT); if (error) { aprint_error( ": unable to load control data DMA map, error = %d\n", error); goto fail_3; } /* * Zero the newly allocated memory. */ memset(sc->sc_zedata, 0, sizeof(struct ze_cdata)); /* * Create the transmit descriptor DMA maps. */ for (i = 0; error == 0 && i < TXDESCS; i++) { error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &sc->sc_xmtmap[i]); } if (error) { aprint_error(": unable to create tx DMA map %d, error = %d\n", i, error); goto fail_4; } /* * Create receive buffer DMA maps. 
*/ for (i = 0; error == 0 && i < RXDESCS; i++) { error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]); } if (error) { aprint_error(": unable to create rx DMA map %d, error = %d\n", i, error); goto fail_5; } /* * Pre-allocate the receive buffers. */ for (i = 0; error == 0 && i < RXDESCS; i++) { error = ze_add_rxbuf(sc, i); } if (error) { aprint_error( ": unable to allocate or map rx buffer %d, error = %d\n", i, error); goto fail_6; } /* For vmstat -i */ evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, device_xname(sc->sc_dev), "intr"); evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr"); evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr"); evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain"); evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr"); evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR, &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr"); /* * Create ring loops of the buffer chains. * This is only done once. */ sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr; rp = sc->sc_zedata->zc_recv; rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW; rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA; rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv; tp = sc->sc_zedata->zc_xmit; tp[TXDESCS].ze_tdr = ZE_TDR_OW; tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA; tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit; if (zereset(sc)) return; strcpy(ifp->if_xname, device_xname(sc->sc_dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = zestart; ifp->if_ioctl = zeioctl; ifp->if_watchdog = zetimeout; IFQ_SET_READY(&ifp->if_snd); /* * Attach the interface. 
*/ if_attach(ifp); ether_ifattach(ifp, sc->sc_enaddr); aprint_normal("\n"); aprint_normal_dev(sc->sc_dev, "hardware address %s\n", ether_sprintf(sc->sc_enaddr)); return; /* * Free any resources we've allocated during the failed attach * attempt. Do this in reverse order and fall through. */ fail_6: for (i = 0; i < RXDESCS; i++) { if (sc->sc_rxmbuf[i] != NULL) { bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]); m_freem(sc->sc_rxmbuf[i]); } } fail_5: for (i = 0; i < RXDESCS; i++) { if (sc->sc_xmtmap[i] != NULL) bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]); } fail_4: for (i = 0; i < TXDESCS; i++) { if (sc->sc_rcvmap[i] != NULL) bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]); } bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap); fail_3: bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap); fail_2: bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata, sizeof(struct ze_cdata)); fail_1: bus_dmamem_free(sc->sc_dmat, &seg, rseg); fail_0: return; }