/*
 * Scan [start, start+count) of physical memory for the MP floating
 * pointer structure.  On a hit, the scratch mapping is dropped and a
 * fresh mapping of just the structure is returned via 'map'; returns
 * NULL (0) if no valid structure is found.
 */
const void *
mpbios_search(device_t self, paddr_t start, int count, struct mp_map *map)
{
	struct mp_map t;
	int i, len;
	const struct mpbios_fps *m;
	/* last offset at which a whole mpbios_fps still fits */
	int end = count - sizeof(*m);
	const uint8_t *base = mpbios_map (start, count, &t);

	if (mp_verbose)
		aprint_verbose_dev(self,
		    "scanning 0x%jx to 0x%jx for MP signature\n",
		    (uintmax_t)start, (uintmax_t)(start+count-sizeof(*m)));

	/* The signature is specified to be on a 16-byte-aligned...
	 * actually scanned on 4-byte steps here; length field is in
	 * 16-byte paragraphs, hence the << 4. */
	for (i = 0; i <= end; i += 4) {
		m = (const struct mpbios_fps *)&base[i];
		if ((m->signature == MP_FP_SIG) &&
		    ((len = m->length << 4) != 0) &&
		    /* structure must checksum to zero to be valid */
		    mpbios_cksum(m, (m->length << 4)) == 0) {
			/* release the scratch map before remapping the hit */
			mpbios_unmap (&t);
			return mpbios_map (start+i, len, map);
		}
	}
	mpbios_unmap(&t);
	return 0;
}
/*
 * Attach the Allwinner TV encoder (TVE) / VGA output.  Maps the device
 * registers, selects and enables the PLL feeding the configured TCON,
 * ungates the AHB clock, then brings up the encoder and reads EDID.
 */
static void
awin_tve_attach(device_t parent, device_t self, void *aux)
{
	struct awin_tve_softc *sc = device_private(self);
	struct awinio_attach_args * const aio = aux;
	const struct awin_locators * const loc = &aio->aio_loc;
	prop_dictionary_t cfg = device_properties(self);
	int8_t tcon_unit = -1;

	sc->sc_dev = self;
	sc->sc_bst = aio->aio_core_bst;

	bus_space_subregion(sc->sc_bst, aio->aio_core_bsh,
	    loc->loc_offset, loc->loc_size, &sc->sc_bsh);

	/* TCON unit may be overridden via a "tcon_unit" device property. */
	if (prop_dictionary_get_int8(cfg, "tcon_unit", &tcon_unit)) {
		sc->sc_tcon_unit = tcon_unit;
	} else {
		sc->sc_tcon_unit = 0;	/* default value */
	}

	/* Enable whichever PLL clocks the selected TCON. */
	sc->sc_tcon_pll = awin_tcon_get_clk_pll(sc->sc_tcon_unit);
	switch (sc->sc_tcon_pll) {
	case 3:
		awin_pll3_enable();
		break;
	case 7:
		awin_pll7_enable();
		break;
	default:
		panic("awin_tve pll");
	}

	/* for now assume we're always at 0 */
	awin_reg_set_clear(aio->aio_core_bst, aio->aio_ccm_bsh,
	    AWIN_AHB_GATING1_REG, AWIN_AHB_GATING1_TVE0, 0);

	aprint_naive("\n");
	aprint_normal(": TV Encoder / VGA output\n");

	/* Only report the TCON binding when it came from a property. */
	if (tcon_unit >= 0) {
		aprint_verbose_dev(self, ": using TCON%d, pll%d\n",
		    sc->sc_tcon_unit, sc->sc_tcon_pll);
	}

	sc->sc_i2c_blklen = 16;

#if 0
	sc->sc_ih = intr_establish(loc->loc_intr, IPL_SCHED, IST_LEVEL,
	    awin_tve_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt %d\n",
		    loc->loc_intr);
		return;
	}
	aprint_normal_dev(self, "interrupting on irq %d\n", loc->loc_intr);
#endif

	awin_tve_i2c_init(sc);
	awin_tve_enable(sc);
	awin_tve_read_edid(sc);
}
/*
 * todr(9) "get time" hook for the CUDA ADB controller.  Issues a
 * READ_RTC pseudo-command and waits (via sc_todev wakeups) for the
 * interrupt path to fill in sc->sc_tod.  Implausible values are
 * retried; a timeout returns EIO.
 */
static int
cuda_todr_get(todr_chip_handle_t tch, struct timeval *tvp)
{
	struct cuda_softc *sc = tch->cookie;
	int cnt = 0;
	uint8_t cmd[] = { CUDA_PSEUDO, CMD_READ_RTC};

	sc->sc_tod = 0;
	while (sc->sc_tod == 0) {
		cuda_send(sc, 0, 2, cmd);

		/*
		 * Poll for up to 10 short sleeps for the reply.
		 * NOTE(review): cnt is not reset for the retry pass
		 * below, so a "garbage" retry gets no additional wait
		 * budget — confirm this is intentional.
		 */
		while ((sc->sc_tod == 0) && (cnt < 10)) {
			tsleep(&sc->sc_todev, 0, "todr", 10);
			cnt++;
		}

		if (sc->sc_tod == 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read a sane RTC value\n");
			return EIO;
		}
		/* Reject values outside the plausible epoch window. */
		if ((sc->sc_tod > 0xf0000000UL) ||
		    (sc->sc_tod < DIFF19041970)) {
			/* huh? try again */
			sc->sc_tod = 0;
			aprint_verbose_dev(sc->sc_dev,
			    "got garbage reading RTC, trying again\n");
		}
	}

	/* Convert from the 1904-based Mac epoch to the Unix epoch. */
	tvp->tv_sec = sc->sc_tod - DIFF19041970;
	/* Fix: print seconds in decimal, not octal (PRIo64 -> PRId64). */
	DPRINTF("tod: %" PRId64 "\n", tvp->tv_sec);
	tvp->tv_usec = 0;
	return 0;
}
/*
 * pmf(9) resume hook for the Xen virtual console: (re)bind the console
 * event channel and install its interrupt handler.
 */
static bool
xencons_resume(device_t dev, const pmf_qual_t *qual)
{
	int evtch;

	evtch = -1;
	if (!xendomain_is_dom0()) {
		/* domU: the console channel comes from the start-info page. */
		evtch = xen_start_info.console_evtchn;
		event_set_handler(evtch, xencons_handler,
		    xencons_console_device, IPL_TTY, "xencons");
	} else if (cold) {
		/* dom0 console resume is required only during first start-up */
		evtch = bind_virq_to_evtch(VIRQ_CONSOLE);
		event_set_handler(evtch, xencons_intr,
		    xencons_console_device, IPL_TTY, "xencons");
	}

	/* Nothing bound (warm dom0 resume): nothing to enable. */
	if (evtch == -1)
		return true;

	aprint_verbose_dev(dev, "using event channel %d\n", evtch);
	hypervisor_enable_event(evtch);
	return true;
}
/*
 * Detach the 3G modem: detach the child ucom(4) instance and tear down
 * the interrupt pipe/buffer.  Returns 0 even if the child could not be
 * detached (the failure is only reported).
 */
static int
u3g_detach(device_t self, int flags)
{
	struct u3g_softc *sc = device_private(self);
	int rv;

	if (sc->sc_dying)
		return 0;

	pmf_device_deregister(self);

	if (sc->sc_ucom != NULL) {
		rv = config_detach(sc->sc_ucom, flags);
		if (rv != 0) {
			/* Fix: message was missing its trailing newline. */
			aprint_verbose_dev(self, "Can't deallocate "
			    "port (%d)\n", rv);
		}
	}

	/* Abort before close so pending transfers are cancelled. */
	if (sc->sc_intr_pipe != NULL) {
		(void) usbd_abort_pipe(sc->sc_intr_pipe);
		(void) usbd_close_pipe(sc->sc_intr_pipe);
		sc->sc_intr_pipe = NULL;
	}
	if (sc->sc_intr_buff != NULL) {
		free(sc->sc_intr_buff, M_USBDEV);
		sc->sc_intr_buff = NULL;
	}

	return (0);
}
/*
 * Incoming-connection hook for the interrupt (input) L2CAP channel.
 * Accepts the connection only from the configured remote device and
 * only while we are waiting for the interrupt channel; returns the new
 * PCB, or NULL to refuse.
 */
static void *
bthidev_int_newconn(void *arg, struct sockaddr_bt *laddr, struct sockaddr_bt *raddr)
{
	struct bthidev_softc *sc = arg;

	/* Ignore connections from anyone but our configured peer. */
	if (bdaddr_same(&raddr->bt_bdaddr, &sc->sc_raddr) == 0)
		return NULL;

	/*
	 * Refuse when an outgoing connect is in progress, we are not in
	 * the WAIT_INT state, the control channel is missing, or an
	 * interrupt channel already exists.
	 */
	if ((sc->sc_flags & BTHID_CONNECTING)
	    || sc->sc_state != BTHID_WAIT_INT
	    || sc->sc_ctl == NULL
	    || sc->sc_int != NULL) {
		aprint_verbose_dev(sc->sc_dev,
		    "reject int newconn %s%s%s%s\n",
		    (sc->sc_flags & BTHID_CONNECTING) ? " (CONNECTING)" : "",
		    (sc->sc_state == BTHID_WAIT_INT) ? " (WAITING)": "",
		    (sc->sc_ctl == NULL) ? " (NO CONTROL)" : "",
		    (sc->sc_int != NULL) ? " (GOT INTERRUPT)" : "");
		return NULL;
	}

	l2cap_attach_pcb(&sc->sc_int, &bthidev_int_proto, sc);
	return sc->sc_int;
}
/*
 * Attach the i.MX51 Clock Control Module: map the CCM and DPLL register
 * windows, cache the PLL frequencies, and report the derived clocks.
 */
static void
imxccm_attach(device_t parent, device_t self, void *aux)
{
	struct imxccm_softc * const sc = device_private(self);
	struct axi_attach_args *aa = aux;
	bus_space_tag_t iot = aa->aa_iot;

	/* Publish the softc for the imx51_get_clock() helpers. */
	ccm_softc = sc;

	sc->sc_dev = self;
	sc->sc_iot = iot;

	if (bus_space_map(iot, aa->aa_addr, CCMC_SIZE, 0, &sc->sc_ioh)) {
		aprint_error(": can't map registers\n");
		return;
	}

	/* DPLLs are numbered from 1; sc_pll[] is zero-based. */
	for (u_int i=1; i <= IMX51_N_DPLLS; ++i) {
		if (bus_space_map(iot, DPLL_BASE(i), DPLL_SIZE, 0,
		    &sc->sc_pll[i-1].pll_ioh)) {
			aprint_error(": can't map pll registers\n");
			return;
		}
	}

	aprint_normal(": Clock control module\n");
	aprint_naive("\n");

	/* Prime the PLL frequency cache before querying clocks. */
	imx51_get_pll_freq(1);
	imx51_get_pll_freq(2);
	imx51_get_pll_freq(3);

	aprint_verbose_dev(self, "CPU clock=%d, UART clock=%d\n",
	    imx51_get_clock(IMX51CLK_ARM_ROOT),
	    imx51_get_clock(IMX51CLK_UART_CLK_ROOT));
	aprint_verbose_dev(self,
	    "PLL1 clock=%d, PLL2 clock=%d, PLL3 clock=%d\n",
	    imx51_get_clock(IMX51CLK_PLL1),
	    imx51_get_clock(IMX51CLK_PLL2),
	    imx51_get_clock(IMX51CLK_PLL3));
	aprint_verbose_dev(self,
	    "mainbus clock=%d, ahb clock=%d ipg clock=%d perclk=%d\n",
	    imx51_get_clock(IMX51CLK_MAIN_BUS_CLK),
	    imx51_get_clock(IMX51CLK_AHB_CLK_ROOT),
	    imx51_get_clock(IMX51CLK_IPG_CLK_ROOT),
	    imx51_get_clock(IMX51CLK_PERCLK_ROOT));
	aprint_verbose_dev(self, "ESDHC1 clock=%d\n",
	    imx51_get_clock(IMX51CLK_ESDHC1_CLK_ROOT));
}
/*
 * Map the Intel i31244 (Artisea) SATA controller.  If the programming
 * interface is 0 the controller is in DPA mode and handled separately;
 * otherwise it is set up as a conventional bus-master IDE device.
 */
static void
artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	interface = PCI_INTERFACE(pa->pa_class);

	/* interface == 0: native DPA mode, different register layout. */
	if (interface == 0) {
		artisea_chip_map_dpa (sc, pa);
		return;
	}

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
#ifdef PCIIDE_I31244_DISABLEDMA
	/* Revision 0 parts have broken DMA; fall back to PIO. */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_31244 &&
	    PCI_REVISION(pa->pa_class) == 0) {
		aprint_verbose(" but disabled due to rev. 0");
		sc->sc_dma_ok = 0;
	} else
#endif
		pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	/*
	 * XXX Configure LEDs to show activity.
	 */

	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
	}
}
/*
 * Detect known AMD CPU errata on the current CPU.  Looks up the CPU
 * revision from CPUID, runs each erratum's applicability set and test
 * action, and warns (once, system-wide) if any erratum is present.
 */
void
x86_errata(void)
{
	struct cpu_info *ci;
	uint32_t descs[4];
	errata_t *e, *ex;
	cpurev_t rev;
	int i, j, upgrade;
	static int again;	/* warning already printed by another CPU */

	if (cpu_vendor != CPUVENDOR_AMD)
		return;

	ci = curcpu();

	/* Extended CPUID leaf: descs[0] holds the family/model/stepping. */
	x86_cpuid(0x80000001, descs);

	/* Map the raw signature to a symbolic revision; OINK terminates. */
	for (i = 0;; i += 2) {
		if ((rev = cpurevs[i]) == OINK)
			return;
		if (cpurevs[i + 1] == descs[0])
			break;
	}

	ex = errata + sizeof(errata) / sizeof(errata[0]);
	for (upgrade = 0, e = errata; e < ex; e++) {
		if (e->e_reported)
			continue;
		/* Skip errata whose revision set excludes this CPU. */
		if (e->e_set != NULL) {
			for (j = 0; e->e_set[j] != OINK; j++)
				if (e->e_set[j] == rev)
					break;
			if (e->e_set[j] == OINK)
				continue;
		}

		aprint_debug_dev(ci->ci_dev, "testing for erratum %d\n",
		    e->e_num);

		/* No action => erratum applies unconditionally. */
		if (e->e_act == NULL)
			e->e_reported = TRUE;
		else if ((*e->e_act)(ci, e) == FALSE)
			continue;

		aprint_verbose_dev(ci->ci_dev, "erratum %d present\n",
		    e->e_num);
		upgrade = 1;
	}

	if (upgrade && !again) {
		again = 1;
		aprint_normal_dev(ci->ci_dev,
		    "WARNING: errata present, BIOS upgrade "
		    "may be\n");
		aprint_normal_dev(ci->ci_dev,
		    "WARNING: necessary to ensure reliable "
		    "operation\n");
	}
}
/*
 * Parse the controller's INIT response.  The payload is a sequence of
 * length-prefixed strings (driver version, card type, hardware ID,
 * serial number, options, protocols, profile) which are reported and
 * handed to the CAPI layer.
 */
static int
iavc_receive_init(iavc_softc_t *sc, u_int8_t *dmabuf)
{
	u_int32_t Length;
	u_int8_t *p;
	u_int8_t *cardtype, *serial, *profile, *vers, *caps, *prot;

	if (sc->sc_dma) {
		p = amcc_get_word(dmabuf, &Length);
	} else {
		Length = iavc_get_slice(sc, sc->sc_recvbuf);
		p = sc->sc_recvbuf;
	}

#if 0
	{
		int len = 0;
		printf("%s: rx_init: ", device_xname(&sc->sc_dev));
		while (len < Length) {
			printf(" %02x", p[len]);
			if (len && (len % 16) == 0)
				printf("\n");
			len++;
		}
		if (len % 16)
			printf("\n");
	}
#endif

	/* Each field: *p is the length, the string follows at p + 1. */
	vers = (p + 1);
	p += (*p + 1); /* driver version */
	cardtype = (p + 1);
	p += (*p + 1); /* card type */
	p += (*p + 1); /* hardware ID */
	serial = (p + 1);
	p += (*p + 1); /* serial number */
	caps = (p + 1);
	p += (*p + 1); /* supported options */
	prot = (p + 1);
	p += (*p + 1); /* supported protocols */
	profile = (p + 1);

	/*
	 * NOTE(review): these pointers are all p + 1 and therefore never
	 * NULL, so this condition is always true and the else branch is
	 * unreachable — confirm the intended validity check.
	 */
	if (cardtype && serial && profile) {
		/* channel count is a little-endian 16-bit field */
		int nbch = ((profile[3]<<8) | profile[2]);

		aprint_normal_dev(&sc->sc_dev,
		    "AVM %s, s/n %s, %d chans, f/w rev %s, prot %s\n",
		    cardtype, serial, nbch, vers, prot);
		aprint_verbose_dev(&sc->sc_dev, "%s\n", caps);

		capi_ll_control(&sc->sc_capi, CAPI_CTRL_PROFILE,
		    (int) profile);
	} else {
		printf("%s: no profile data in info response?\n",
		    device_xname(&sc->sc_dev));
	}

	sc->sc_blocked = 1; /* controller will send START when ready */

	return 0;
}
/*
 * Quiesce xenbus communications before suspend: mask the store event
 * channel and remove its interrupt handler.
 */
void
xb_suspend_comms(device_t dev)
{
	int chan = xen_start_info.store_evtchn;

	hypervisor_mask_event(chan);
	event_remove_handler(chan, wake_waiting, NULL);
	aprint_verbose_dev(dev, "removed event channel %d\n", chan);
}
/*
 * Connect this keyboard to a display (me != NULL) or disconnect it
 * (me == NULL).  Returns EBUSY when the keyboard is the console or is
 * already connected, ENXIO when asked to disconnect an unconnected
 * keyboard, or the error from wskbd_enable().
 */
int
wskbd_set_display(device_t dv, struct wsevsrc *me)
{
	struct wskbd_softc *sc = device_private(dv);
	device_t displaydv = me != NULL ? me->me_dispdv : NULL;
	device_t odisplaydv;
	int error;

	DPRINTF(("wskbd_set_display: %s me=%p odisp=%p disp=%p cons=%d\n",
	    device_xname(dv), me, sc->sc_base.me_dispdv, displaydv,
	    sc->sc_isconsole));

	if (sc->sc_isconsole)
		return (EBUSY);

	/* Connecting requires no current display; disconnecting requires one. */
	if (displaydv != NULL) {
		if (sc->sc_base.me_dispdv != NULL)
			return (EBUSY);
	} else {
		if (sc->sc_base.me_dispdv == NULL)
			return (ENXIO);
	}

	/*
	 * Temporarily clear me_dispdv while toggling so wskbd_enable()
	 * sees the keyboard as unconnected; restore the old display on
	 * failure, install the new one on success.
	 */
	odisplaydv = sc->sc_base.me_dispdv;
	sc->sc_base.me_dispdv = NULL;
	error = wskbd_enable(sc, displaydv != NULL);
	sc->sc_base.me_dispdv = displaydv;
	if (error) {
		sc->sc_base.me_dispdv = odisplaydv;
		return (error);
	}

	if (displaydv)
		aprint_verbose_dev(sc->sc_base.me_dv, "connecting to %s\n",
		    device_xname(displaydv));
	else
		aprint_verbose_dev(sc->sc_base.me_dv,
		    "disconnecting from %s\n",
		    device_xname(odisplaydv));

	return (0);
}
/*
 * Attach the TURBOchannel bus adapter on a VAX: map slot 0, set up the
 * attach arguments, zero and initialize the 32K-entry DMA scatter/gather
 * map, then attach the TC bus children.
 */
void
tcbus_attach(device_t parent, device_t self, void *aux)
{
	struct vsbus_attach_args * const va = aux;
	struct tcbus_softc * const sc = device_private(self);
	struct tcbus_attach_args tba;
	struct pte *pte;
	const size_t nentries = 32768;	/* SGMAP entries */
	int error;
	int i;

	error = bus_space_map(va->va_memt, KA4x_TURBO, 0x10000,
	    BUS_SPACE_MAP_LINEAR, &sc->sc_memh);
	if (error) {
		aprint_error(": failed to map TC slot 0: %d\n", error);
		return;
	}

	sc->sc_slots[0].tcs_addr = sc->sc_memh;
	sc->sc_slots[0].tcs_cookie = sc;

	tba.tba_speed = TC_SPEED_12_5_MHZ;
	tba.tba_slots = sc->sc_slots;
	tba.tba_nslots = 1;
	tba.tba_intr_evcnt = tcbus_intr_evcnt;
	tba.tba_intr_establish = tcbus_intr_establish;
	tba.tba_intr_disestablish = tcbus_intr_disestablish;
	tba.tba_get_dma_tag = tcbus_get_dma_tag;

	vax_sgmap_dmatag_init(&sc->sc_dmatag, sc, nentries);
	/* Map the hardware SGMAP page-table area. */
	pte = (struct pte *) vax_map_physmem(KA4x_TURBOMAPS,
	    nentries * sizeof(pte[0]));

	/* Invalidate all SGMAP entries before use. */
	for (i = nentries; i > 0; )
		((uint32_t *) pte)[--i] = 0;

	sc->sc_dmatag._sgmap = &sc->sc_sgmap;

	/*
	 * Initialize the SGMAP.
	 */
	vax_sgmap_init(&sc->sc_dmatag, &sc->sc_sgmap, "tc_sgmap",
	    sc->sc_dmatag._wbase, sc->sc_dmatag._wsize, pte, 0);

	aprint_normal("\n");
	aprint_verbose_dev(self, "32K entry DMA SGMAP at PA 0x%x (VA %p)\n",
	    KA4x_TURBOMAPS, pte);

	tcbus_dmat = &sc->sc_dmatag;

	/* XXX: why not config_found(9)?? */
	tcattach(parent, self, &tba);
}
/*
 * Set up interrupt handling for the xenstore event channel and enable
 * delivery.  Always succeeds (returns 0).
 */
int
xb_init_comms(device_t dev)
{
	int chan = xen_start_info.store_evtchn;

	event_set_handler(chan, wake_waiting, NULL, IPL_TTY, "xenbus");
	hypervisor_enable_event(chan);
	aprint_verbose_dev(dev, "using event channel %d\n", chan);

	return 0;
}
/*
 * Map the Intel PIIX-family SATA controller: set up bus-master DMA,
 * advertise PIO/DMA/UDMA capabilities, re-enable PCI interrupt
 * delivery, and map both channels.
 */
static void
piixsata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface, cmdsts;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* Do all revisions require DMA alignment workaround? */
		sc->sc_wdcdev.dma_init = piix_dma_init;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	/* BIOS may have left interrupts disabled; clear the disable bit. */
	cmdsts = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_COMMAND_STATUS_REG);
	cmdsts &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, cmdsts);

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

	interface = PCI_INTERFACE(pa->pa_class);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
/*
 * Announce the Winbond sub-device function and attach the only one we
 * support (the SD/MMC reader); the rest are merely reported.
 */
void
wb_attach(struct wb_softc *wb)
{
	const char *what;

	switch (wb->wb_type) {
	case WB_DEVNO_SD:
		what = "SD/MMC Reader\n";
		break;
	case WB_DEVNO_MS:
		what = "Memory Stick Reader (not supported)\n";
		break;
	case WB_DEVNO_SC:
		what = "Smart Card Reader (not supported)\n";
		break;
	case WB_DEVNO_GPIO:
		what = "GPIO (not supported)\n";
		break;
	default:
		/* unknown function: nothing to report or attach */
		return;
	}

	aprint_verbose_dev(wb->wb_dev, "%s", what);

	if (wb->wb_type == WB_DEVNO_SD)
		wb_sdmmc_attach(wb);
}
/*
 * Map the SiS 180/96x SATA controller.  If the programming interface
 * is 0 (native SATA mode), fake a bus-master/PCI-native interface word
 * so the generic channel setup works.
 */
static void
sis_sata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface == 0) {
		ATADEBUG_PRINT(("sis_sata_chip_map interface == 0\n"),
		    DEBUG_PROBE);
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "Silicon Integrated Systems 180/96X SATA controller "
	    "(rev. 0x%02x)\n", PCI_REVISION(pa->pa_class));

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA | ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
/*
 * Map the Toshiba Piccolo IDE controller.  Only the first channel is
 * configured; the second lives in an "alternate PCI Configuration
 * Space" we do not know how to reach yet.
 */
static void
piccolo_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA32 | ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 5;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 3;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
	}

	sc->sc_wdcdev.sc_atac.atac_set_modes = piccolo_setup_channel;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	/*
	 * XXX one for now. We'll figure out how to talk to the second channel
	 * later, hopefully! Second interface config is via the
	 * "alternate PCI Configuration Space" whatever that is!
	 */

	interface = PCI_INTERFACE(pa->pa_class);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
/* ARGSUSED */ int imxsnvs_attach_common(device_t parent __unused, device_t self, bus_space_tag_t iot, paddr_t iobase, size_t size, int intr __unused, int flags __unused) { struct imxsnvs_softc *sc; uint32_t v1, v2; sc = device_private(self); sc->sc_dev = self; sc->sc_iot = iot; aprint_naive("\n"); aprint_normal(": Secure Non-Volatile Storage\n"); if (bus_space_map(sc->sc_iot, iobase, size, 0, &sc->sc_ioh)) { aprint_error_dev(self, "Cannot map registers\n"); return 1; } v1 = SNVS_READ(sc, SNVS_HPVIDR1); v2 = SNVS_READ(sc, SNVS_HPVIDR2); aprint_verbose_dev(self, "id=0x%llx, ver=%lld.%lld, ip_era=0x%llx, " "intg_opt=0x%llx, eco_rev=0x%llx, config_opt=0x%llx\n", __SHIFTOUT(v1, SNVS_HPVIDR1_IP_ID), __SHIFTOUT(v1, SNVS_HPVIDR1_MAJOR_REV), __SHIFTOUT(v1, SNVS_HPVIDR1_MINOR_REV), __SHIFTOUT(v2, SNVS_HPVIDR2_IP_ERA), __SHIFTOUT(v2, SNVS_HPVIDR2_INTG_OPT), __SHIFTOUT(v2, SNVS_HPVIDR2_ECO_REV), __SHIFTOUT(v2, SNVS_HPVIDR2_CONFIG_OPT)); if (imxsnvs_rtc_enable(sc) != 0) { aprint_error_dev(self, "cannot enable RTC\n"); return 1; } sc->sc_todr.todr_gettime = imxsnvs_gettime; sc->sc_todr.todr_settime = imxsnvs_settime; sc->sc_todr.cookie = sc; todr_attach(&sc->sc_todr); return 0; }
/*
 * start connecting to our device
 *
 * Creates the control-channel L2CAP PCB, applies the link mode,
 * binds to our local address and initiates the connection to the
 * remote control PSM.  Returns 0 and moves to BTHID_WAIT_CTL on
 * success, or the l2cap error.
 */
static int
bthidev_connect(struct bthidev_softc *sc)
{
	struct sockaddr_bt sa;
	int err;

	/* Announce retries (but not the first attempt). */
	if (sc->sc_attempts++ > 0)
		aprint_verbose_dev(sc->sc_dev, "connect (#%d)\n",
		    sc->sc_attempts);

	memset(&sa, 0, sizeof(sa));
	sa.bt_len = sizeof(sa);
	sa.bt_family = AF_BLUETOOTH;

	err = l2cap_attach_pcb(&sc->sc_ctl, &bthidev_ctl_proto, sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "l2cap_attach failed (%d)\n",
		    err);
		return err;
	}

	err = l2cap_setopt(sc->sc_ctl, &sc->sc_mode);
	if (err) {
		aprint_error_dev(sc->sc_dev, "l2cap_setopt failed (%d)\n",
		    err);
		return err;
	}

	/* Bind to our local controller address first... */
	bdaddr_copy(&sa.bt_bdaddr, &sc->sc_laddr);
	err = l2cap_bind_pcb(sc->sc_ctl, &sa);
	if (err) {
		aprint_error_dev(sc->sc_dev, "l2cap_bind_pcb failed (%d)\n",
		    err);
		return err;
	}

	/* ...then connect to the remote device's control PSM. */
	sa.bt_psm = sc->sc_ctlpsm;
	bdaddr_copy(&sa.bt_bdaddr, &sc->sc_raddr);
	err = l2cap_connect_pcb(sc->sc_ctl, &sa);
	if (err) {
		aprint_error_dev(sc->sc_dev, "l2cap_connect_pcb failed (%d)\n",
		    err);
		return err;
	}

	sc->sc_state = BTHID_WAIT_CTL;
	return 0;
}
/*
 * pmf(9) suspend hook for the Xen virtual console: mask the console
 * event channel and remove its handler.  The dom0 console is left
 * untouched.
 */
static bool
xencons_suspend(device_t dev, const pmf_qual_t *qual)
{
	int chan;

	/* dom0 console should not be suspended */
	if (xendomain_is_dom0())
		return true;

	chan = xen_start_info.console_evtchn;
	hypervisor_mask_event(chan);
	if (event_remove_handler(chan, xencons_handler,
	    xencons_console_device) != 0) {
		aprint_error_dev(dev,
		    "can't remove handler: xencons_handler\n");
	}
	aprint_verbose_dev(dev, "removed event channel %d\n", chan);

	return true;
}
/*
 * Detach the 3G modem: detach each child ucom(4) port, then (debug
 * builds) tear down the interrupt pipe.  Returns the first child
 * detach error, or 0.
 */
static int
u3g_detach(device_t self, int flags)
{
	struct u3g_softc *sc = device_private(self);
	int rv = 0;
	int i;

	if (sc->sc_pseudodev)
		return 0;

	pmf_device_deregister(self);

	for (i = 0; i < sc->numports; i++) {
		if (sc->sc_ucom[i]) {
			rv = config_detach(sc->sc_ucom[i], flags);
			if (rv != 0) {
				/* Fix: typo "deallocat" + missing newline. */
				aprint_verbose_dev(self,
				    "Can't deallocate port %d\n", i);
				return rv;
			}
		}
	}

#ifdef U3G_DEBUG
	if (sc->sc_intr_pipe != NULL) {
		/* Abort first so any pending transfer is cancelled. */
		int err = usbd_abort_pipe(sc->sc_intr_pipe);
		if (err)
			aprint_error_dev(self,
			    "abort interrupt pipe failed: %s\n",
			    usbd_errstr(err));
		err = usbd_close_pipe(sc->sc_intr_pipe);
		if (err)
			aprint_error_dev(self,
			    "close interrupt pipe failed: %s\n",
			    usbd_errstr(err));
		free(sc->sc_intr_buf, M_USBDEV);
		sc->sc_intr_pipe = NULL;
	}
#endif

	return 0;
}
/*
 * Attach the Xen virtual console: allocate and wire up the tty, and if
 * this instance is the system console, hook it into cn_tab and bring
 * up its event channel via the resume path.
 */
void
xencons_attach(device_t parent, device_t self, void *aux)
{
	struct xencons_softc *sc = device_private(self);

	aprint_normal(": Xen Virtual Console Driver\n");

	sc->sc_dev = self;
	sc->sc_tty = tty_alloc();
	tty_attach(sc->sc_tty);
	sc->sc_tty->t_oproc = xencons_start;
	sc->sc_tty->t_param = xencons_param;

	if (xencons_isconsole) {
		int maj;

		/* Locate the major number. */
		maj = cdevsw_lookup_major(&xencons_cdevsw);

		/* There can be only one, but it can have any unit number. */
		cn_tab->cn_dev = makedev(maj, device_unit(self));

		aprint_verbose_dev(self, "console major %d, unit %d\n",
		    maj, device_unit(self));

		sc->sc_tty->t_dev = cn_tab->cn_dev;

#ifdef DDB
		/* Set db_max_line to avoid paging. */
		db_max_line = 0x7fffffff;
#endif

		xencons_console_device = sc;
		/* Reuse the resume path to bind the console event channel. */
		xencons_resume(self, PMF_Q_NONE);
	}
	sc->polling = 0;

	if (!pmf_device_register(self, xencons_suspend, xencons_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
/*
 * Attach the ISA math coprocessor.  The probe stashed the detected
 * reporting style in ia_aux; configure interrupt-style, exception-16
 * style, or bail out for broken/absent FPUs.
 */
void
npx_isa_attach(device_t parent, device_t self, void *aux)
{
	struct npx_softc *sc = device_private(self);
	struct isa_attach_args *ia = aux;

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_dev = self;
	sc->sc_type = (u_long) ia->ia_aux;

	switch (sc->sc_type) {
	case NPX_INTERRUPT:
		/* Legacy IRQ13 error reporting via I/O port 0xf0. */
		sc->sc_iot = ia->ia_iot;
		if (bus_space_map(sc->sc_iot, 0xf0, 16, 0, &sc->sc_ioh))
			panic("%s: unable to map I/O space", __func__);
		/* Clear CR0_NE so errors come in as interrupts. */
		lcr0(rcr0() & ~CR0_NE);
		sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq[0].ir_irq,
		    IST_EDGE, IPL_NONE, (int (*)(void *))npxintr, 0);
		break;
	case NPX_EXCEPTION:
		/*FALLTHROUGH*/
	case NPX_CPUID:
		aprint_verbose_dev(sc->sc_dev, "%s using exception 16\n",
		    sc->sc_type == NPX_CPUID ? "reported by CPUID;" : "");
		sc->sc_type = NPX_EXCEPTION;
		break;
	case NPX_BROKEN:
		aprint_error_dev(sc->sc_dev,
		    "error reporting broken; not using\n");
		sc->sc_type = NPX_NONE;
		return;
	case NPX_NONE:
		return;
	}

	npxattach(sc);
}
/*
 * Map the STPC IDE controller: bus-master DMA (no UDMA), PIO mode 4,
 * multiword DMA mode 2, and set up both channels.
 */
static void
stpc_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;	/* no UDMA */
	sc->sc_wdcdev.sc_atac.atac_set_modes = stpc_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
/*
 * Map the AMD Geode CS5535/CS5536 IDE controller.  Single channel;
 * supports PIO 4, MW DMA 2 and UDMA 4.
 */
static void
gcscide_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_set_modes = gcscide_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;	/* single channel */

	interface = PCI_INTERFACE(pa->pa_class);

	wdc_allocate_regs(&sc->sc_wdcdev);

	if (pciide_chansetup(sc, 0, interface) == 0)
		return;

	pciide_mapchan(pa, &sc->pciide_channels[0], interface,
	    &cmdsize, &ctlsize, pciide_pci_intr);
}
/*
 * Attach the C&T 82C710 "universal peripheral controller": dump its
 * configuration registers and attach whichever sub-devices (FDC, IDE,
 * parallel, serial) the config bits say are enabled.
 */
static void
upc1_attach(struct upc_softc *sc)
{
	u_int8_t cr[16];
	int i;

	aprint_normal(": 82C710\n");

	/* Dump configuration */
	for (i = 0; i < 16; i++)
		cr[i] = upc1_read_config(sc, i);
	/*
	 * NOTE(review): &sc->sc_dev assumes sc_dev is an embedded
	 * struct device, not a device_t handle — confirm against the
	 * softc definition.
	 */
	aprint_verbose_dev(&sc->sc_dev, "config state");
	for (i = 0; i < 16; i++)
		aprint_verbose(" %02x", cr[i]);
	aprint_verbose("\n");

	/* FDC */
	if (cr[UPC1_CFGADDR_CRC] & UPC1_CRC_FDCEN)
		upc_found(sc, "fdc", UPC_PORT_FDCBASE, 2, &sc->sc_fintr);
	/* IDE */
	if (cr[UPC1_CFGADDR_CRC] & UPC1_CRC_IDEEN)
		upc_found2(sc, "wdc", UPC_PORT_IDECMDBASE, 8,
		    UPC_PORT_IDECTLBASE, 2, &sc->sc_wintr);
	/* Parallel */
	if (cr[UPC1_CFGADDR_CR0] & UPC1_CR0_PEN)
		upc_found(sc, "lpt",
		    cr[UPC1_CFGADDR_PARBASE] << UPC1_PARBASE_SHIFT,
		    LPT_NPORTS, &sc->sc_pintr);
	/* UART */
	if (cr[UPC1_CFGADDR_CR0] & UPC1_CR0_SEN)
		upc_found(sc, "com",
		    cr[UPC1_CFGADDR_UARTBASE] << UPC1_UARTBASE_SHIFT,
		    COM_NPORTS, &sc->sc_irq4);
	/* Mouse */
	/* XXX not yet supported */
}
/*
 * Keyboard and mouse share the interrupt
 * so don't install interrupt handler twice.
 */
static void
pckbc_js_intr_establish(struct pckbc_softc *sc, pckbport_slot_t slot)
{
	struct pckbc_js_softc *jsc = (struct pckbc_js_softc *)sc;
	void *res;

	/* Already set up by the other slot: nothing to do. */
	if (jsc->jsc_establised) {
#ifdef DEBUG
		aprint_verbose_dev(sc->sc_dv,
		    "%s slot shares interrupt (already established)\n",
		    pckbc_slot_names[slot]);
#endif
		return;
	}
	/*
	 * We cannot choose the device-class interrupt level freely,
	 * so we defer processing to a soft interrupt.
	 */
	jsc->jsc_int_cookie = softint_establish(SOFTINT_SERIAL,
	    pckbcintr_soft, &jsc->jsc_pckbc);
	if (jsc->jsc_int_cookie == NULL) {
		aprint_error_dev(sc->sc_dv,
		    "unable to establish %s soft interrupt\n",
		    pckbc_slot_names[slot]);
		return;
	}
	res = bus_intr_establish(sc->id->t_iot, jsc->jsc_intr,
	    IPL_SERIAL, jsc_pckbdintr, jsc);
	if (res == NULL)
		aprint_error_dev(sc->sc_dv,
		    "unable to establish %s slot interrupt\n",
		    pckbc_slot_names[slot]);
	else
		jsc->jsc_establised = 1;
}
/*
 * Print some information about the controller
 *
 * Fetches the port count and the six version strings, prints them,
 * then walks the drive summary and reports size/model for each
 * present port.  Parameter buffers from twe_param_get() are owned by
 * us and freed here.
 */
static void
twe_describe_controller(struct twe_softc *sc)
{
	struct twe_param *p[6];
	int i, rv = 0;
	uint32_t dsize;
	uint8_t ports;

	ports = 0;

	/* get the port count */
	rv |= twe_param_get_1(sc, TWE_PARAM_CONTROLLER,
		TWE_PARAM_CONTROLLER_PortCount, &ports);

	/* get version strings */
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_Mon,
		16, NULL, &p[0]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_FW,
		16, NULL, &p[1]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_BIOS,
		16, NULL, &p[2]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCB,
		8, NULL, &p[3]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_ATA,
		8, NULL, &p[4]);
	rv |= twe_param_get(sc, TWE_PARAM_VERSION, TWE_PARAM_VERSION_PCI,
		8, NULL, &p[5]);

	if (rv) {
		/* some error occurred */
		aprint_error_dev(sc->sc_dev,
		    "failed to fetch version information\n");
		return;
	}

	aprint_normal_dev(sc->sc_dev,
	    "%d ports, Firmware %.16s, BIOS %.16s\n",
	    ports, p[1]->tp_data, p[2]->tp_data);

	aprint_verbose_dev(sc->sc_dev,
	    "Monitor %.16s, PCB %.8s, Achip %.8s, Pchip %.8s\n",
	    p[0]->tp_data, p[3]->tp_data, p[4]->tp_data, p[5]->tp_data);

	free(p[0], M_DEVBUF);
	free(p[1], M_DEVBUF);
	free(p[2], M_DEVBUF);
	free(p[3], M_DEVBUF);
	free(p[4], M_DEVBUF);
	free(p[5], M_DEVBUF);

	rv = twe_param_get(sc, TWE_PARAM_DRIVESUMMARY,
	    TWE_PARAM_DRIVESUMMARY_Status, 16, NULL, &p[0]);
	if (rv) {
		aprint_error_dev(sc->sc_dev,
		    "failed to get drive status summary\n");
		return;
	}
	for (i = 0; i < ports; i++) {
		if (p[0]->tp_data[i] != TWE_PARAM_DRIVESTATUS_Present)
			continue;
		rv = twe_param_get_4(sc, TWE_PARAM_DRIVEINFO + i,
		    TWE_PARAM_DRIVEINFO_Size, &dsize);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "unable to get drive size for port %d\n", i);
			continue;
		}
		rv = twe_param_get(sc, TWE_PARAM_DRIVEINFO + i,
		    TWE_PARAM_DRIVEINFO_Model, 40, NULL, &p[1]);
		if (rv) {
			aprint_error_dev(sc->sc_dev,
			    "unable to get drive model for port %d\n", i);
			continue;
		}
		/* dsize / 2048: presumably 512-byte sectors -> MB; confirm */
		aprint_verbose_dev(sc->sc_dev, "port %d: %.40s %d MB\n",
		    i, p[1]->tp_data, dsize / 2048);
		free(p[1], M_DEVBUF);
	}
	free(p[0], M_DEVBUF);
}
/*
 * Attach a PCI AHCI SATA controller: map the ABAR register window,
 * establish the interrupt, select 32-/64-bit DMA based on capability
 * and quirks, and hand off to the generic AHCI attachment.
 */
static void
ahci_pci_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct ahci_pci_softc *psc = device_private(self);
	struct ahci_softc *sc = &psc->ah_sc;
	const char *intrstr;
	bool ahci_cap_64bit;
	bool ahci_bad_64bit;
	pci_intr_handle_t intrhandle;

	sc->sc_atac.atac_dev = self;

	if (pci_mapreg_map(pa, AHCI_PCI_ABAR,
	    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &sc->sc_ahcit, &sc->sc_ahcih, NULL, &sc->sc_ahcis) != 0) {
		aprint_error_dev(self, "can't map ahci registers\n");
		return;
	}
	psc->sc_pc = pa->pa_pc;
	psc->sc_pcitag = pa->pa_tag;

	pci_aprint_devinfo(pa, "AHCI disk controller");

	if (pci_intr_map(pa, &intrhandle) != 0) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, intrhandle);
	psc->sc_ih = pci_intr_establish(pa->pa_pc, intrhandle,
	    IPL_BIO, ahci_intr, sc);
	if (psc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n",
	    intrstr ? intrstr : "unknown interrupt");

	sc->sc_dmat = pa->pa_dmat;

	sc->sc_ahci_quirks = ahci_pci_has_quirk(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id));

	/*
	 * Use 64-bit DMA only if the controller advertises it and the
	 * device is not known to have broken 64-bit support.  The
	 * message reports "unavailable" when sc_dmat was left at the
	 * 32-bit tag because of the quirk.
	 */
	ahci_cap_64bit = (AHCI_READ(sc, AHCI_CAP) & AHCI_CAP_64BIT) != 0;
	ahci_bad_64bit = ((sc->sc_ahci_quirks & AHCI_PCI_QUIRK_BAD64) != 0);

	if (pci_dma64_available(pa) && ahci_cap_64bit) {
		if (!ahci_bad_64bit)
			sc->sc_dmat = pa->pa_dmat64;
		aprint_verbose_dev(self, "64-bit DMA%s\n",
		    (sc->sc_dmat == pa->pa_dmat) ? " unavailable" : "");
	}

	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID) {
		AHCIDEBUG_PRINT(("%s: RAID mode\n", AHCINAME(sc)),
		    DEBUG_PROBE);
		sc->sc_atac_capflags = ATAC_CAP_RAID;
	} else {
		AHCIDEBUG_PRINT(("%s: SATA mode\n", AHCINAME(sc)),
		    DEBUG_PROBE);
	}

	ahci_attach(sc);

	if (!pmf_device_register(self, NULL, ahci_pci_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}