STATIC void cardslotattach(struct device *parent, struct device *self, void *aux) { struct cardslot_softc *sc = (struct cardslot_softc *)self; struct cardslot_attach_args *caa = aux; struct cbslot_attach_args *cba = caa->caa_cb_attach; struct pcmciabus_attach_args *pa = caa->caa_16_attach; struct cardbus_softc *csc; struct pcmcia_softc *psc; sc->sc_slot = sc->sc_dev.dv_unit; sc->sc_cb_softc = NULL; sc->sc_16_softc = NULL; SIMPLEQ_INIT(&sc->sc_events); sc->sc_th_enable = 0; printf(" slot %d flags %x\n", sc->sc_slot, sc->sc_dev.dv_cfdata->cf_flags); DPRINTF(("%s attaching CardBus bus...\n", sc->sc_dev.dv_xname)); if (cba != NULL) { if ((csc = (void *)config_found(self, cba, cardslot_cb_print)) != NULL) { /* cardbus found */ DPRINTF(("cardslotattach: found cardbus on %s\n", sc->sc_dev.dv_xname)); sc->sc_cb_softc = csc; } } if (pa != NULL) { if ((psc = (void *)config_found_sm(self, pa, cardslot_16_print, cardslot_16_submatch)) != NULL) { /* pcmcia 16-bit bus found */ DPRINTF(("cardslotattach: found 16-bit pcmcia bus\n")); sc->sc_16_softc = psc; /* XXX: dirty. This code should be removed * to achieve MI */ caa->caa_ph->pcmcia = (struct device *)psc; } } if (csc != NULL || psc != NULL) kthread_create_deferred(create_slot_manager, (void *)sc); if (csc && (csc->sc_cf->cardbus_ctrl)(csc->sc_cc, CARDBUS_CD)) { DPRINTF(("cardslotattach: CardBus card found\n")); /* attach deferred */ cardslot_event_throw(sc, CARDSLOT_EVENT_INSERTION_CB); } if (psc && (psc->pct->card_detect)(psc->pch)) { DPRINTF(("cardbusattach: 16-bit card found\n")); /* attach deferred */ cardslot_event_throw(sc, CARDSLOT_EVENT_INSERTION_16); } }
/*
 * Common attachment code for PXA PCMCIA controllers.  Maps the memory
 * controller, programs MECR, then sets up each socket: board-specific
 * hook, pcmcia bus attachment, GPIO/interrupt wiring, and a deferred
 * per-socket event thread.
 */
void
pxapcic_attach(struct pxapcic_softc *sc,
    void (*socket_setup_hook)(struct pxapcic_socket *))
{
	struct pcmciabus_attach_args paa;
	struct pxapcic_socket *so;
	int i;

	printf(": %d slot%s\n", sc->sc_nslots, sc->sc_nslots==1 ? "" : "s");

	if (bus_space_map(sc->sc_iot, PXA2X0_MEMCTL_BASE, PXA2X0_MEMCTL_SIZE,
	    0, &sc->sc_memctl_ioh)) {
		printf("%s: failed to map MEMCTL\n", sc->sc_dev.dv_xname);
		return;
	}

	/* Clear CIT (card present) and set NOS correctly. */
	bus_space_write_4(sc->sc_iot, sc->sc_memctl_ioh, MEMCTL_MECR,
	    sc->sc_nslots == 2 ? MECR_NOS : 0);

	/* zaurus: configure slot 1 first to make internal drive be wd0. */
	for (i = sc->sc_nslots-1; i >= 0; i--) {
		so = &sc->sc_socket[i];
		so->sc = sc;
		so->socket = i;
		so->flags = 0;

		/* Let the board-specific driver fill in its hooks. */
		socket_setup_hook(so);

		paa.paa_busname = "pcmcia";
		paa.pct = (pcmcia_chipset_tag_t)&pxapcic_pcmcia_functions;
		paa.pch = (pcmcia_chipset_handle_t)so;
		paa.iobase = 0;
		paa.iosize = 0x4000000;

		so->pcmcia = config_found_sm(&sc->sc_dev, &paa,
		    pxapcic_print, pxapcic_submatch);

		pxa2x0_gpio_set_function(sc->sc_irqpin[i], GPIO_IN);
		pxa2x0_gpio_set_function(sc->sc_irqcfpin[i], GPIO_IN);

		/* Card slot interrupt */
		so->irq = pxa2x0_gpio_intr_establish(sc->sc_irqcfpin[i],
		    IST_EDGE_BOTH, IPL_BIO /* XXX */, pxapcic_intr,
		    so, sc->sc_dev.dv_xname);

		/* GPIO pin for interrupt */
		so->irqpin = sc->sc_irqpin[i];

#ifdef DO_CONFIG_PENDING
		config_pending_incr();
#endif
		/* Card-detect/event handling runs in its own thread. */
		kthread_create_deferred(pxapcic_create_event_thread, so);
	}
}
/*
 * One-time setup of the wskbd hotkey machinery: initialize the event
 * queue lock and indices and defer creation of the hotkey thread.
 * Subsequent calls are no-ops.
 */
void
wskbd_hotkey_init(void)
{
	if (wskbd_hotkey_initted != 0)
		return;

	simple_lock_init(&queue_lock);
	queue_head = queue_tail = 0;
	kthread_create_deferred(init_hotkey_thread, NULL);
	wskbd_hotkey_initted = 1;
}
/*
 * Attach the USB function (device-side) controller: pick the bus
 * speed from the reported USB revision, establish the soft interrupt,
 * attach the function driver and defer creation of the task thread.
 */
void
usbf_attach(struct device *parent, struct device *self, void *aux)
{
	struct usbf_softc *sc = (struct usbf_softc *)self;
	int usbrev;
	int speed;
	usbf_status err;

	/* Continue to set up the bus struct. */
	sc->sc_bus = aux;
	sc->sc_bus->usbfctl = sc;

	usbrev = sc->sc_bus->usbrev;
	printf(": USB revision %s", usbrev_str[usbrev]);
	switch (usbrev) {
	case USBREV_2_0:
		speed = USB_SPEED_HIGH;
		break;
	case USBREV_1_1:
	case USBREV_1_0:
		speed = USB_SPEED_FULL;
		break;
	default:
		/* Unknown revision: mark the controller dead and bail. */
		printf(", not supported\n");
		sc->sc_dying = 1;
		return;
	}
	printf("\n");

	/* Initialize the usbf struct. */
	TAILQ_INIT(&sc->sc_tskq);

	/* Establish the software interrupt. */
	if (usbf_softintr_establish(sc->sc_bus)) {
		printf("%s: can't establish softintr\n", DEVNAME(sc));
		sc->sc_dying = 1;
		return;
	}

	/* Attach the function driver. */
	err = usbf_new_device(self, sc->sc_bus, 0, speed, 0, &sc->sc_port);
	if (err) {
		printf("%s: usbf_new_device failed, %s\n", DEVNAME(sc),
		    usbf_errstr(err));
		sc->sc_dying = 1;
		return;
	}

	/* Create a process context for asynchronous tasks. */
	config_pending_incr();
	kthread_create_deferred(usbf_create_thread, sc);
}
/*
 * Common PXA2x0 APM attachment: map the power manager, clock manager,
 * memory controller and GPIO register banks, defer creation of the
 * APM thread, and clear any pending reset status flags.
 */
void
pxa2x0_apm_attach_sub(struct pxa2x0_apm_softc *sc)
{
	sc->sc_iot = &pxa2x0_bs_tag;

	if (bus_space_map(sc->sc_iot, PXA2X0_POWMAN_BASE, PXA2X0_POWMAN_SIZE,
	    0, &sc->sc_pm_ioh)) {
		printf("pxa2x0_apm_attach_sub: failed to map POWMAN\n");
		return;
	}

	rw_init(&sc->sc_lock, "apmlk");
	kthread_create_deferred(apm_thread_create, sc);
	printf("\n");

	/* NOTE: the following maps go into file-scope handles. */
	if (bus_space_map(sc->sc_iot, PXA2X0_CLKMAN_BASE, PXA2X0_CLKMAN_SIZE,
	    0, &pxa2x0_clkman_ioh)) {
		printf("%s: failed to map CLKMAN\n", sc->sc_dev.dv_xname);
		return;
	}

	if (bus_space_map(sc->sc_iot, PXA2X0_MEMCTL_BASE, PXA2X0_MEMCTL_SIZE,
	    0, &pxa2x0_memctl_ioh)) {
		printf("%s: failed to map MEMCTL\n", sc->sc_dev.dv_xname);
		return;
	}
	sc->sc_memctl_ioh = pxa2x0_memctl_ioh;

	if (bus_space_map(sc->sc_iot, PXA2X0_GPIO_BASE, PXA2X0_GPIO_SIZE,
	    0, &pxa2x0_gpio_ioh)) {
		printf("%s: can't map GPIO\n", sc->sc_dev.dv_xname);
		return;
	}

	/* Clear all reset status flags. */
	bus_space_write_4(sc->sc_iot, sc->sc_pm_ioh, POWMAN_RCSR,
	    RCSR_GPR | RCSR_SMR | RCSR_WDR | RCSR_HWR);
}
struct taskq * taskq_create(const char *name, unsigned int nthreads, int ipl, unsigned int flags) { struct taskq *tq; tq = malloc(sizeof(*tq), M_DEVBUF, M_WAITOK); if (tq == NULL) return (NULL); tq->tq_state = TQ_S_CREATED; tq->tq_running = 0; tq->tq_nthreads = nthreads; tq->tq_name = name; tq->tq_flags = flags; mtx_init(&tq->tq_mtx, ipl); TAILQ_INIT(&tq->tq_worklist); /* try to create a thread to guarantee that tasks will be serviced */ kthread_create_deferred(taskq_create_thread, tq); return (tq); }
/*
 * Bootstrap the system task queues: defer creation of the worker
 * threads for systq and systqmp until kthreads can be started.
 */
void
taskq_init(void)
{
	kthread_create_deferred(taskq_create_thread, systq);
	kthread_create_deferred(taskq_create_thread, systqmp);
}
/*
 * uvm_init: bootstrap the entire VM system in a strict step-by-step
 * order.  Called exactly once, early in kernel startup; malloc only
 * becomes usable partway through (after step 6).
 */
void
uvm_init()
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */
	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;

	/*
	 * step 2: init the page sub-system.  this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks).  available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system.  allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */
	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures.  this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module.  the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * step 6: init the kernel memory allocator.  after this call the
	 * kernel memory allocator (malloc) can be used.
	 */
	uvm_km_page_init();
	kmeminit();
#if !defined(__HAVE_PMAP_DIRECT)
	/* need a thread to refill the km page pool on these pmaps */
	kthread_create_deferred(uvm_km_createthread, NULL);
#endif

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init anonymous memory system
	 */
	amap_init();		/* init amap module */

	/*
	 * the VM system is now up!  now that malloc is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */
	uvm_page_rehash();
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * reserve some unmapped space for malloc/pool use after free usage
	 */
#ifdef DEADBEEF0
	kvm_start = trunc_page(DEADBEEF0) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x\n", DEADBEEF0);
#endif
#ifdef DEADBEEF1
	kvm_start = trunc_page(DEADBEEF1) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x\n", DEADBEEF1);
#endif
	/*
	 * init anonymous memory systems
	 */
	uvm_anon_init();
}
/*
 * Attach the TS102 PCMCIA controller: map its registers, read and
 * adjust the PROM "ranges" property, establish the interrupt, defer
 * the event thread, and set up both card slots.
 */
void
tslot_attach(struct device *parent, struct device *self, void *args)
{
	struct confargs *ca = args;
	struct tslot_softc *sc = (struct tslot_softc *)self;
	struct romaux *ra;
	struct rom_range ranges[TS102_NUM_RANGES], *range;
	struct tslot_data *td;
	volatile u_int8_t *regs;
	int node, nranges, slot, rnum;

	ra = &ca->ca_ra;
	node = ra->ra_node;
	regs = mapiodev(&ra->ra_reg[0], 0, ra->ra_len);

	/*
	 * Find memory ranges
	 */
	nranges = getproplen(node, "ranges") / sizeof(struct rom_range);
	if (nranges < TS102_NUM_RANGES) {
		printf(": expected %d memory ranges, got %d\n",
		    TS102_NUM_RANGES, nranges);
		return;
	}
	getprop(node, "ranges", ranges, sizeof ranges);

	/*
	 * Ranges being relative to this sbus slot, turn them into absolute
	 * addresses.
	 */
	for (rnum = 0; rnum < TS102_NUM_RANGES; rnum++) {
		ranges[rnum].poffset -= TS102_OFFSET_REGISTERS;
	}

	sc->sc_ih.ih_fun = tslot_intr;
	sc->sc_ih.ih_arg = sc;
	intr_establish(ra->ra_intr[0].int_pri, &sc->sc_ih, -1, self->dv_xname);
	printf(" pri %d", ra->ra_intr[0].int_pri);

	printf(": %d slots\n", TS102_NUM_SLOTS);

	/*
	 * Setup asynchronous event handler
	 */
	sc->sc_events = 0;
	kthread_create_deferred(tslot_create_event_thread, sc);

	sc->sc_pct = (pcmcia_chipset_tag_t)&tslot_functions;

	/*
	 * Setup slots
	 */
	for (slot = 0; slot < TS102_NUM_SLOTS; slot++) {
		td = &sc->sc_slot[slot];
		/* map each address range of this slot */
		for (rnum = 0; rnum < TS102_RANGE_CNT; rnum++) {
			range = ranges + (slot * TS102_RANGE_CNT + rnum);
			td->td_rr = ra->ra_reg[0];
			td->td_rr.rr_iospace = range->pspace;
			td->td_rr.rr_paddr = (void *)
			    ((u_int32_t)td->td_rr.rr_paddr + range->poffset);
			td->td_space[rnum] = (vaddr_t)mapiodev(&td->td_rr, 0,
			    TS102_ARBITRARY_MAP_SIZE);
		}
		td->td_parent = sc;
		/* per-slot register window within the shared mapping */
		td->td_regs = regs +
		    slot * (TS102_REG_CARD_B_INT - TS102_REG_CARD_A_INT);
		td->td_slot = slot;

		SET_TAG_LITTLE_ENDIAN(&td->td_rr);

		tslot_reset(td, TS102_ARBITRARY_MAP_SIZE);
	}
}
/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
wi_usb_attach(struct device *parent, struct device *self, void *aux)
{
	struct wi_usb_softc *sc = (struct wi_usb_softc *)self;
	struct usb_attach_arg *uaa = aux;
	/* int s; */
	struct usbd_device *dev = uaa->device;
	struct usbd_interface *iface;
	usbd_status err;
	usb_interface_descriptor_t *id;
	usb_endpoint_descriptor_t *ed;
	int i;

	DPRINTFN(5,(" : wi_usb_attach: sc=%p", sc));

	err = usbd_set_config_no(dev, WI_USB_CONFIG_NO, 1);
	if (err) {
		printf("%s: setting config no failed\n",
		    sc->wi_usb_dev.dv_xname);
		return;
	}

	/* XXX - any tasks? */

	err = usbd_device2interface_handle(dev, WI_USB_IFACE_IDX, &iface);
	if (err) {
		printf("%s: getting interface handle failed\n",
		    sc->wi_usb_dev.dv_xname);
		return;
	}

	/* XXX - flags? */

	sc->wi_usb_udev = dev;
	sc->wi_usb_iface = iface;
	sc->wi_usb_product = uaa->product;
	sc->wi_usb_vendor = uaa->vendor;

	/* back-pointer so the wi(4) core can find the USB glue */
	sc->sc_wi.wi_usb_cdata = sc;
	sc->sc_wi.wi_flags |= WI_FLAGS_BUS_USB;

	sc->wi_lock = 0;
	sc->wi_lockwait = 0;
	sc->wi_resetonce = 0;

	id = usbd_get_interface_descriptor(iface);

	/* Find endpoints: bulk in/out for data, interrupt in for events. */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			printf("%s: couldn't get endpoint descriptor %d\n",
			    sc->wi_usb_dev.dv_xname, i);
			return;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->wi_usb_ed[WI_USB_ENDPT_RX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->wi_usb_ed[WI_USB_ENDPT_TX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->wi_usb_ed[WI_USB_ENDPT_INTR] = ed->bEndpointAddress;
		}
	}

	sc->wi_usb_nummem = 0;

	/* attach wi device */

	if (wi_usb_rx_list_init(sc)) {
		printf("%s: rx list init failed\n",
		    sc->wi_usb_dev.dv_xname);
		return;
	}
	if (wi_usb_tx_list_init(sc)) {
		printf("%s: tx list init failed\n",
		    sc->wi_usb_dev.dv_xname);
		return;
	}
	if (wi_usb_open_pipes(sc)){
		printf("%s: open pipes failed\n",
		    sc->wi_usb_dev.dv_xname);
		return;
	}

	sc->wi_usb_attached = 1;

	/* the rest of the attach runs in a kernel thread */
	kthread_create_deferred(wi_usb_start_thread, sc);
}
void apmattach(struct device *parent, struct device *self, void *aux) { struct bios_attach_args *ba = aux; bios_apminfo_t *ap = ba->ba_apmp; struct apm_softc *sc = (void *)self; struct apmregs regs; u_int cbase, clen, l; bus_space_handle_t ch16, ch32, dh; apm_flags = ap->apm_detail; /* * set up GDT descriptors for APM */ if (apm_flags & APM_32BIT_SUPPORTED) { /* truncate segments' limits to a page */ ap->apm_code_len -= (ap->apm_code32_base + ap->apm_code_len + 1) & 0xfff; ap->apm_code16_len -= (ap->apm_code16_base + ap->apm_code16_len + 1) & 0xfff; ap->apm_data_len -= (ap->apm_data_base + ap->apm_data_len + 1) & 0xfff; /* adjust version */ if ((sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK) && (apm_flags & APM_VERMASK) != (sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK)) apm_flags = (apm_flags & ~APM_VERMASK) | (sc->sc_dev.dv_cfdata->cf_flags & APM_VERMASK); if (sc->sc_dev.dv_cfdata->cf_flags & APM_NOCLI) { extern int apm_cli; /* from apmcall.S */ apm_cli = 0; } if (sc->sc_dev.dv_cfdata->cf_flags & APM_BEBATT) sc->be_batt = 1; apm_ep.seg = GSEL(GAPM32CODE_SEL,SEL_KPL); apm_ep.entry = ap->apm_entry; cbase = min(ap->apm_code32_base, ap->apm_code16_base); clen = max(ap->apm_code32_base + ap->apm_code_len, ap->apm_code16_base + ap->apm_code16_len) - cbase; if ((cbase <= ap->apm_data_base && cbase + clen >= ap->apm_data_base) || (ap->apm_data_base <= cbase && ap->apm_data_base + ap->apm_data_len >= cbase)) { l = max(ap->apm_data_base + ap->apm_data_len + 1, cbase + clen + 1) - min(ap->apm_data_base, cbase); bus_space_map(ba->ba_memt, min(ap->apm_data_base, cbase), l, 1, &dh); ch16 = dh; if (ap->apm_data_base < cbase) ch16 += cbase - ap->apm_data_base; else dh += ap->apm_data_base - cbase; } else { bus_space_map(ba->ba_memt, cbase, clen + 1, 1, &ch16); bus_space_map(ba->ba_memt, ap->apm_data_base, ap->apm_data_len + 1, 1, &dh); } ch32 = ch16; if (ap->apm_code16_base == cbase) ch32 += ap->apm_code32_base - cbase; else ch16 += ap->apm_code16_base - cbase; 
setgdt(GAPM32CODE_SEL, (void *)ch32, ap->apm_code_len, SDT_MEMERA, SEL_KPL, 1, 0); setgdt(GAPM16CODE_SEL, (void *)ch16, ap->apm_code16_len, SDT_MEMERA, SEL_KPL, 0, 0); setgdt(GAPMDATA_SEL, (void *)dh, ap->apm_data_len, SDT_MEMRWA, SEL_KPL, 1, 0); DPRINTF((": flags %x code 32:%x/%x[%x] 16:%x/%x[%x] " "data %x/%x/%x ep %x (%x:%x)\n%s", apm_flags, ap->apm_code32_base, ch32, ap->apm_code_len, ap->apm_code16_base, ch16, ap->apm_code16_len, ap->apm_data_base, dh, ap->apm_data_len, ap->apm_entry, apm_ep.seg, ap->apm_entry+ch32, sc->sc_dev.dv_xname)); apm_set_ver(sc); if (apm_flags & APM_BIOS_PM_DISABLED) apm_powmgt_enable(1); /* Engage cooperative power management on all devices (v1.1) */ apm_powmgt_engage(1, APM_DEV_ALLDEVS); bzero(®s, sizeof(regs)); if (apm_get_powstat(®s) != 0) apm_perror("get power status", ®s); apm_cpu_busy(); rw_init(&sc->sc_lock, "apmlk"); /* * Do a check once, ignoring any errors. This avoids * gratuitous APM disconnects on laptops where the first * event in the queue (after a boot) is non-recognizable. * The IBM ThinkPad 770Z is one of those. */ apm_periodic_check(sc); if (apm_periodic_check(sc) == -1) { apm_disconnect(sc); /* Failed, nuke APM idle loop */ cpu_idle_enter_fcn = NULL; cpu_idle_cycle_fcn = NULL; cpu_idle_leave_fcn = NULL; } else { kthread_create_deferred(apm_thread_create, sc); /* Setup APM idle loop */ if (apm_flags & APM_IDLE_SLOWS) { cpu_idle_enter_fcn = apm_cpu_slow; cpu_idle_cycle_fcn = NULL; cpu_idle_leave_fcn = apm_cpu_busy; } else { cpu_idle_enter_fcn = NULL; cpu_idle_cycle_fcn = apm_cpu_idle; cpu_idle_leave_fcn = NULL; } /* All is well, let the rest of the world know */ acpiapm_open = apmopen; acpiapm_close = apmclose; acpiapm_ioctl = apmioctl; acpiapm_kqfilter = apmkqfilter; apm_attached = 1; } } else { setgdt(GAPM32CODE_SEL, NULL, 0, 0, 0, 0, 0); setgdt(GAPM16CODE_SEL, NULL, 0, 0, 0, 0, 0); setgdt(GAPMDATA_SEL, NULL, 0, 0, 0, 0, 0); } }
/*
 * Attach the Xserve front-panel lights (i2s) device: map the register
 * and DMA banks, allocate DBDMA descriptors and a DMA buffer, wait for
 * the i2s clock to stop, hook up the interrupt and defer the light
 * thread.  All failure paths unwind via the goto cleanup chain below.
 */
void
xlights_attach(struct device *parent, struct device *self, void *aux)
{
	struct xlights_softc *sc = (struct xlights_softc *)self;
	struct confargs *ca = aux;
	int nseg, error, intr[6];
	u_int32_t reg[4];
	int type;

	sc->sc_node = OF_child(ca->ca_node);

	OF_getprop(sc->sc_node, "reg", reg, sizeof(reg));
	ca->ca_reg[0] += ca->ca_baseaddr;
	ca->ca_reg[2] += ca->ca_baseaddr;

	if ((sc->sc_reg = mapiodev(ca->ca_reg[0], ca->ca_reg[1])) == NULL) {
		printf(": cannot map registers\n");
		return;
	}
	sc->sc_dmat = ca->ca_dmat;

	if ((sc->sc_dma = mapiodev(ca->ca_reg[2], ca->ca_reg[3])) == NULL) {
		printf(": cannot map DMA registers\n");
		goto nodma;
	}

	if ((sc->sc_dbdma = dbdma_alloc(sc->sc_dmat, BL_DBDMA_CMDS)) == NULL) {
		printf(": cannot alloc DMA descriptors\n");
		goto nodbdma;
	}
	sc->sc_dmacmd = sc->sc_dbdma->d_addr;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, BL_BUFSZ, 0, 0,
	    sc->sc_bufseg, 1, &nseg, BUS_DMA_NOWAIT))) {
		printf(": cannot allocate DMA mem (%d)\n", error);
		goto nodmamem;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, sc->sc_bufseg, nseg,
	    BL_BUFSZ, (caddr_t *)&sc->sc_buf, BUS_DMA_NOWAIT))) {
		printf(": cannot map DMA mem (%d)\n", error);
		goto nodmamap;
	}
	sc->sc_bufpos = sc->sc_buf;

	if ((error = bus_dmamap_create(sc->sc_dmat, BL_BUFSZ, 1, BL_BUFSZ, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_bufmap))) {
		printf(": cannot create DMA map (%d)\n", error);
		goto nodmacreate;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_bufmap, sc->sc_buf,
	    BL_BUFSZ, NULL, BUS_DMA_NOWAIT))) {
		printf(": cannot load DMA map (%d)\n", error);
		goto nodmaload;
	}

	/* XXX: Should probably extract this from the clock data
	 * property of the soundchip node */
	sc->sc_freq = 16384;

	OF_getprop(sc->sc_node, "interrupts", intr, sizeof(intr));
	/* output interrupt */
	sc->sc_intr = intr[2];
	type = intr[3] ? IST_LEVEL : IST_EDGE;

	printf(": irq %d\n", sc->sc_intr);

	/* stop the i2s clock and wait (up to ~1ms) for it to settle */
	macobio_enable(I2SClockOffset, I2S0EN);
	out32rb(sc->sc_reg + I2S_INT, I2S_INT_CLKSTOPPEND);
	macobio_disable(I2SClockOffset, I2S0CLKEN);
	/* error doubles as the poll counter; reset to 0 on success */
	for (error = 0; error < 1000; error++) {
		if (in32rb(sc->sc_reg + I2S_INT) & I2S_INT_CLKSTOPPEND) {
			error = 0;
			break;
		}
		delay(1);
	}
	if (error) {
		printf("%s: i2s timeout\n", sc->sc_dev.dv_xname);
		goto nodmaload;
	}

	/* NOTE(review): "intr[3] ? IST_LEVEL : type" always equals type,
	 * which was computed from intr[3] above; redundant but harmless. */
	mac_intr_establish(parent, sc->sc_intr, intr[3] ? IST_LEVEL : type,
	    IPL_AUDIO, xlights_intr, sc, sc->sc_dev.dv_xname);

	out32rb(sc->sc_reg + I2S_FORMAT, CLKSRC_VS);
	macobio_enable(I2SClockOffset, I2S0CLKEN);

	kthread_create_deferred(xlights_deferred, sc);
	timeout_set(&sc->sc_tmo, xlights_timeout, sc);
	return;
nodmaload:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_bufmap);
nodmacreate:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_buf, BL_BUFSZ);
nodmamap:
	bus_dmamem_free(sc->sc_dmat, sc->sc_bufseg, nseg);
nodmamem:
	dbdma_free(sc->sc_dbdma);
nodbdma:
	unmapiodev((void *)sc->sc_dma, ca->ca_reg[3]);
nodma:
	unmapiodev(sc->sc_reg, ca->ca_reg[1]);
}