/*
 * dt_attach --
 *	Autoconf attach routine for the desktop-bus controller hanging off
 *	the IOASIC.  Allocates the pool of DT_BUF_CNT message buffers,
 *	establishes the soft interrupt used for message dispatch and the
 *	IOASIC hardware interrupt, then probes for the keyboard and mouse
 *	children at their fixed bus addresses.
 */
void
dt_attach(device_t parent, device_t self, void *aux)
{
	struct ioasicdev_attach_args *d;
	struct dt_attach_args dta;
	struct dt_softc *sc;
	struct dt_msg *msg;
	int i;

	d = aux;
	sc = device_private(self);
	sc->sc_dev = self;

	dt_cninit();

	/* One contiguous allocation, carved into DT_BUF_CNT messages below. */
	msg = malloc(sizeof(*msg) * DT_BUF_CNT, M_DEVBUF, M_NOWAIT);
	if (msg == NULL) {
		printf("%s: memory exhausted\n", device_xname(self));
		return;
	}

	sc->sc_sih = softint_establish(SOFTINT_SERIAL, dt_dispatch, sc);
	if (sc->sc_sih == NULL) {
		printf("%s: memory exhausted\n", device_xname(self));
		free(msg, M_DEVBUF);
		/*
		 * Fix: the original fell through here after freeing the
		 * message pool, then inserted the freed buffers onto
		 * sc_free and hooked up the interrupt (use-after-free).
		 * Abort the attach instead.
		 */
		return;
	}

	SIMPLEQ_INIT(&sc->sc_queue);
	SLIST_INIT(&sc->sc_free);
	for (i = 0; i < DT_BUF_CNT; i++, msg++)
		SLIST_INSERT_HEAD(&sc->sc_free, msg, chain.slist);

	ioasic_intr_establish(parent, d->iada_cookie, TC_IPL_TTY, dt_intr, sc);
	printf("\n");

	/* Attach the keyboard and mouse children at their fixed addresses. */
	dta.dta_addr = DT_ADDR_KBD;
	config_found(self, &dta, dt_print);
	dta.dta_addr = DT_ADDR_MOUSE;
	config_found(self, &dta, dt_print);
}
void le_ioasic_attach(struct device *parent, struct device *self, void *aux) { struct le_ioasic_softc *sc = (void *)self; struct ioasicdev_attach_args *d = aux; struct am7990_softc *le = &sc->sc_am7990; bus_space_tag_t ioasic_bst; bus_space_handle_t ioasic_bsh; bus_dma_tag_t dmat; bus_dma_segment_t seg; tc_addr_t tca; u_int32_t ssr; int rseg; caddr_t le_iomem; ioasic_bst = ((struct ioasic_softc *)parent)->sc_bst; ioasic_bsh = ((struct ioasic_softc *)parent)->sc_bsh; dmat = sc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat; /* * Allocate a DMA area for the chip. */ if (bus_dmamem_alloc(dmat, LE_IOASIC_MEMSIZE, LE_IOASIC_MEMALIGN, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { printf("can't allocate DMA area for LANCE\n"); return; } if (bus_dmamem_map(dmat, &seg, rseg, LE_IOASIC_MEMSIZE, &le_iomem, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) { printf("can't map DMA area for LANCE\n"); bus_dmamem_free(dmat, &seg, rseg); return; } /* * Create and load the DMA map for the DMA area. */ if (bus_dmamap_create(dmat, LE_IOASIC_MEMSIZE, 1, LE_IOASIC_MEMSIZE, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) { printf("can't create DMA map\n"); goto bad; } if (bus_dmamap_load(dmat, sc->sc_dmamap, le_iomem, LE_IOASIC_MEMSIZE, NULL, BUS_DMA_NOWAIT)) { printf("can't load DMA map\n"); goto bad; } /* * Bind 128KB buffer with IOASIC DMA. 
*/ tca = IOASIC_DMA_ADDR(sc->sc_dmamap->dm_segs[0].ds_addr); bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_LANCE_DMAPTR, tca); ssr = bus_space_read_4(ioasic_bst, ioasic_bsh, IOASIC_CSR); ssr |= IOASIC_CSR_DMAEN_LANCE; bus_space_write_4(ioasic_bst, ioasic_bsh, IOASIC_CSR, ssr); sc->sc_r1 = (struct lereg1 *) TC_DENSE_TO_SPARSE(TC_PHYS_TO_UNCACHED(d->iada_addr)); le->sc_mem = (void *)TC_PHYS_TO_UNCACHED(le_iomem); le->sc_copytodesc = le_ioasic_copytobuf_gap2; le->sc_copyfromdesc = le_ioasic_copyfrombuf_gap2; le->sc_copytobuf = le_ioasic_copytobuf_gap16; le->sc_copyfrombuf = le_ioasic_copyfrombuf_gap16; le->sc_zerobuf = le_ioasic_zerobuf_gap16; dec_le_common_attach(&sc->sc_am7990, (u_char *)((struct ioasic_softc *)parent)->sc_base + IOASIC_SLOT_2_START); ioasic_intr_establish(parent, d->iada_cookie, IPL_NET, am7990_intr, sc, self->dv_xname); return; bad: bus_dmamem_unmap(dmat, le_iomem, LE_IOASIC_MEMSIZE); bus_dmamem_free(dmat, &seg, rseg); }