/*
 * Attach an AMD Am7990 (LANCE) ISA Ethernet adapter: distinguish PnP
 * from legacy boards, map the register window, set up ISA DMA and the
 * interrupt, allocate the 24-bit-addressable buffer memory, hook the
 * lance(4)/am7990 core up, and read the station address from the board.
 * Returns 0 on success or an errno; all resources are unwound in
 * reverse order through the fail_* label chain on error.
 */
static int
le_isa_attach(device_t dev)
{
	struct le_isa_softc *lesc;
	struct lance_softc *sc;
	bus_size_t macstart, rap, rdp;
	int error, i, macstride;

	lesc = device_get_softc(dev);
	sc = &lesc->sc_am7990.lsc;

	lesc->sc_rrid = 0;
	switch (ISA_PNP_PROBE(device_get_parent(dev), dev, le_isa_ids)) {
	case 0:
		/* PnP board: standard PCnet register layout, MAC at offset 0. */
		lesc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
		    &lesc->sc_rrid, RF_ACTIVE);
		rap = PCNET_RAP;
		rdp = PCNET_RDP;
		macstart = 0;
		macstride = 1;
		break;
	case ENOENT:
		/* No PnP ID: walk the table of known legacy boards. */
		for (i = 0; i < sizeof(le_isa_params) /
		    sizeof(le_isa_params[0]); i++) {
			if (le_isa_probe_legacy(dev, &le_isa_params[i]) == 0) {
				lesc->sc_rres = bus_alloc_resource(dev,
				    SYS_RES_IOPORT, &lesc->sc_rrid, 0, ~0,
				    le_isa_params[i].iosize, RF_ACTIVE);
				rap = le_isa_params[i].rap;
				rdp = le_isa_params[i].rdp;
				macstart = le_isa_params[i].macstart;
				macstride = le_isa_params[i].macstride;
				goto found;
			}
		}
		/* FALLTHROUGH */
	case ENXIO:
	default:
		device_printf(dev, "cannot determine chip\n");
		error = ENXIO;
		goto fail_mtx;
	}

 found:
	if (lesc->sc_rres == NULL) {
		device_printf(dev, "cannot allocate registers\n");
		error = ENXIO;
		goto fail_mtx;
	}
	lesc->sc_regt = rman_get_bustag(lesc->sc_rres);
	lesc->sc_regh = rman_get_bushandle(lesc->sc_rres);
	lesc->sc_rap = rap;
	lesc->sc_rdp = rdp;

	lesc->sc_drid = 0;
	if ((lesc->sc_dres = bus_alloc_resource_any(dev, SYS_RES_DRQ,
	    &lesc->sc_drid, RF_ACTIVE)) == NULL) {
		device_printf(dev, "cannot allocate DMA channel\n");
		error = ENXIO;
		goto fail_rres;
	}

	lesc->sc_irid = 0;
	if ((lesc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &lesc->sc_irid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "cannot allocate interrupt\n");
		error = ENXIO;
		goto fail_dres;
	}

	/* Parent DMA tag restricted to 24-bit addresses for ISA DMA. */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_24BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_WAITOK,		/* flags */
	    &lesc->sc_pdmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate parent DMA tag\n");
		goto fail_ires;
	}

	sc->sc_memsize = LE_ISA_MEMSIZE;
	/*
	 * For Am79C90, Am79C961 and Am79C961A the init block must be 2-byte
	 * aligned and the ring descriptors must be 8-byte aligned.
	 */
	error = bus_dma_tag_create(
	    lesc->sc_pdmat,		/* parent */
	    8, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_24BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->sc_memsize,		/* maxsize */
	    1,				/* nsegments */
	    sc->sc_memsize,		/* maxsegsize */
	    BUS_DMA_WAITOK,		/* flags */
	    &lesc->sc_dmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate buffer DMA tag\n");
		goto fail_pdtag;
	}

	error = bus_dmamem_alloc(lesc->sc_dmat, (void **)&sc->sc_mem,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &lesc->sc_dmam);
	if (error != 0) {
		device_printf(dev, "cannot allocate DMA buffer memory\n");
		goto fail_dtag;
	}

	/* le_isa_dma_callback stores the bus address into sc_addr. */
	sc->sc_addr = 0;
	error = bus_dmamap_load(lesc->sc_dmat, lesc->sc_dmam, sc->sc_mem,
	    sc->sc_memsize, le_isa_dma_callback, sc, 0);
	if (error != 0 || sc->sc_addr == 0) {
		device_printf(dev, "cannot load DMA buffer map\n");
		goto fail_dmem;
	}

	/* Put the ISA DMA channel into cascade mode for busmastering. */
	isa_dmacascade(rman_get_start(lesc->sc_dres));

	sc->sc_flags = 0;
	sc->sc_conf3 = 0;

	/*
	 * Extract the physical MAC address from the ROM.
	 */
	for (i = 0; i < sizeof(sc->sc_enaddr); i++)
		sc->sc_enaddr[i] = bus_space_read_1(lesc->sc_regt,
		    lesc->sc_regh, macstart + i * macstride);

	/* Contiguous buffer copy routines from the lance(4) core. */
	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;

	sc->sc_rdcsr = le_isa_rdcsr;
	sc->sc_wrcsr = le_isa_wrcsr;
	sc->sc_hwreset = NULL;
	sc->sc_hwinit = NULL;
	sc->sc_hwintr = NULL;
	sc->sc_nocarrier = NULL;
	sc->sc_mediachange = NULL;
	sc->sc_mediastatus = NULL;
	sc->sc_supmedia = NULL;

	error = am7990_config(&lesc->sc_am7990, device_get_name(dev),
	    device_get_unit(dev));
	if (error != 0) {
		device_printf(dev, "cannot attach Am7990\n");
		goto fail_dmap;
	}

	error = bus_setup_intr(dev, lesc->sc_ires, INTR_MPSAFE,
	    am7990_intr, sc, &lesc->sc_ih, sc->ifp->if_serializer);
	if (error != 0) {
		device_printf(dev, "cannot set up interrupt\n");
		goto fail_am7990;
	}

	sc->ifp->if_cpuid = ithread_cpuid(rman_get_start(lesc->sc_ires));
	KKASSERT(sc->ifp->if_cpuid >= 0 && sc->ifp->if_cpuid < ncpus);

	return (0);

	/* Error unwind: release in strict reverse order of acquisition. */
 fail_am7990:
	am7990_detach(&lesc->sc_am7990);
 fail_dmap:
	bus_dmamap_unload(lesc->sc_dmat, lesc->sc_dmam);
 fail_dmem:
	bus_dmamem_free(lesc->sc_dmat, sc->sc_mem, lesc->sc_dmam);
 fail_dtag:
	bus_dma_tag_destroy(lesc->sc_dmat);
 fail_pdtag:
	bus_dma_tag_destroy(lesc->sc_pdmat);
 fail_ires:
	bus_release_resource(dev, SYS_RES_IRQ, lesc->sc_irid, lesc->sc_ires);
 fail_dres:
	bus_release_resource(dev, SYS_RES_DRQ, lesc->sc_drid, lesc->sc_dres);
 fail_rres:
	bus_release_resource(dev, SYS_RES_IOPORT, lesc->sc_rrid,
	    lesc->sc_rres);
 fail_mtx:
	return (error);
}
/*
 * Read one byte through the TPM legacy interface: latch the register
 * index via the address port (offset 0), then fetch the value from
 * the data port (offset 1).
 */
static inline u_int8_t
tpm_legacy_in(bus_space_tag_t iot, bus_space_handle_t ioh, int reg)
{
	u_int8_t val;

	bus_space_write_1(iot, ioh, 0, reg);
	val = bus_space_read_1(iot, ioh, 1);
	return (val);
}
/*---------------------------------------------------------------------------*
 *	isic_probe_s016 - probe for Teles S0/16 and compatibles
 *
 *	Validates the supplied i/o port and shared-memory window against
 *	the addresses the board can actually decode, checks the three
 *	signature bytes, sets up the ISAC/HSCX access routines and
 *	allocates the interrupt.  Returns 0 on success, ENXIO otherwise;
 *	isic_detach_common() releases partial allocations on failure.
 *---------------------------------------------------------------------------*/
int
isic_probe_s016(device_t dev)
{
	/*
	 * device_get_unit() returns an int; keep it as int so the "%d"
	 * conversions below match their argument type (printing a size_t
	 * through %d is undefined behavior per ISO C).
	 */
	int unit = device_get_unit(dev);	/* get unit */
	struct l1_softc *sc = 0;		/* softc */
	void *ih = 0;				/* dummy intr cookie */
	u_int8_t b0, b1, b2;			/* for signature */
	bus_space_tag_t t;			/* bus things */
	bus_space_handle_t h;

	/* check max unit range */
	if(unit >= ISIC_MAXUNIT)
	{
		printf("isic%d: Error, unit %d >= ISIC_MAXUNIT for Teles S0/16!\n",
		    unit, unit);
		return(ENXIO);
	}

	sc = &l1_sc[unit];	/* get pointer to softc */
	sc->sc_unit = unit;	/* set unit */

	/* see if an io base was supplied */
	if(!(sc->sc_resources.io_base[0] = bus_alloc_resource(dev,
	    SYS_RES_IOPORT, &sc->sc_resources.io_rid[0],
	    0ul, ~0ul, 1, RF_ACTIVE)))
	{
		printf("isic%d: Could not allocate i/o port for Teles S0/16.\n",
		    unit);
		return(ENXIO);
	}

	sc->sc_port = rman_get_start(sc->sc_resources.io_base[0]);

	/*
	 * check if the provided io port is valid
	 */
	switch(sc->sc_port)
	{
		case 0xd80:
		case 0xe80:
		case 0xf80:
			break;

		default:
			printf("isic%d: Error, invalid iobase 0x%x specified for Teles S0/16!\n",
			    unit, sc->sc_port);
			isic_detach_common(dev);
			return(ENXIO);
			break;
	}

	/* allocate memory resource */
	if(!(sc->sc_resources.mem = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &sc->sc_resources.mem_rid, 0ul, ~0ul,
	    TELES_S016_MEMSIZE, RF_ACTIVE)))
	{
		printf("isic%d: Could not allocate memory for Teles S0/16.\n",
		    unit);
		isic_detach_common(dev);
		return(ENXIO);
	}

	/*
	 * get virtual addr.
	 */
	sc->sc_vmem_addr = rman_get_virtual(sc->sc_resources.mem);

	/*
	 * check for valid addresses (the board only decodes these
	 * 8 KB-aligned windows in the c0000-de000 range)
	 */
	switch(kvtop(sc->sc_vmem_addr))
	{
		case 0xc0000:
		case 0xc2000:
		case 0xc4000:
		case 0xc6000:
		case 0xc8000:
		case 0xca000:
		case 0xcc000:
		case 0xce000:
		case 0xd0000:
		case 0xd2000:
		case 0xd4000:
		case 0xd6000:
		case 0xd8000:
		case 0xda000:
		case 0xdc000:
		case 0xde000:
			break;

		default:
			printf("isic%d: Error, invalid memory address 0x%lx for Teles S0/16!\n",
			    unit, kvtop(sc->sc_vmem_addr));
			isic_detach_common(dev);
			return(ENXIO);
			break;
	}

	/* setup ISAC access routines */
	sc->clearirq = NULL;
	sc->readreg = tels016_read_reg;
	sc->writereg = tels016_write_reg;
	sc->readfifo = tels016_read_fifo;
	sc->writefifo = tels016_write_fifo;

	/* setup card type */
	sc->sc_cardtyp = CARD_TYPEP_16;

	/* setup IOM bus type */
	sc->sc_bustyp = BUS_TYPE_IOM1;

	sc->sc_ipac = 0;
	sc->sc_bfifolen = HSCX_FIFO_LEN;

	/* setup ISAC base addr, though we don't really need it */
	ISAC_BASE = (caddr_t)((sc->sc_vmem_addr) + 0x100);

	/* setup HSCX base addr */
	HSCX_A_BASE = (caddr_t)((sc->sc_vmem_addr) + 0x180);
	HSCX_B_BASE = (caddr_t)((sc->sc_vmem_addr) + 0x1c0);

	t = rman_get_bustag(sc->sc_resources.io_base[0]);
	h = rman_get_bushandle(sc->sc_resources.io_base[0]);

	/* get signature bytes */
	b0 = bus_space_read_1(t, h, 0);
	b1 = bus_space_read_1(t, h, 1);
	b2 = bus_space_read_1(t, h, 2);

	/* check signature bytes */
	if(b0 != 0x51)
	{
		printf("isic%d: Error, signature 1 0x%x != 0x51 for Teles S0/16!\n",
		    unit, b0);
		isic_detach_common(dev);
		return(ENXIO);
	}

	if(b1 != 0x93)
	{
		printf("isic%d: Error, signature 2 0x%x != 0x93 for Teles S0/16!\n",
		    unit, b1);
		isic_detach_common(dev);
		return(ENXIO);
	}

	if((b2 != 0x1e) && (b2 != 0x1f))
	{
		printf("isic%d: Error, signature 3 0x%x != 0x1e or 0x1f for Teles S0/16!\n",
		    unit, b2);
		isic_detach_common(dev);
		return(ENXIO);
	}

	/* get our irq */
	if(!(sc->sc_resources.irq = bus_alloc_resource(dev, SYS_RES_IRQ,
	    &sc->sc_resources.irq_rid, 0ul, ~0ul, 1, RF_ACTIVE)))
	{
		printf("isic%d: Could not allocate irq for Teles S0/16.\n",
		    unit);
		isic_detach_common(dev);
		return ENXIO;
	}

	/* register interrupt routine */
	bus_setup_intr(dev, sc->sc_resources.irq, INTR_TYPE_NET,
	    (void(*)(void *))(isicintr), sc, &ih);

	/* get the irq number */
	sc->sc_irq = rman_get_start(sc->sc_resources.irq);

	/* check IRQ validity */
	if((intr_no[sc->sc_irq]) == 1)
	{
		printf("isic%d: Error, invalid IRQ [%d] specified for Teles S0/16!\n",
		    unit, sc->sc_irq);
		isic_detach_common(dev);
		return(ENXIO);
	}

	return (0);
}
/*
 * Attach an AMD PCnet PCI Ethernet adapter: enable busmastering and
 * port I/O decoding, map BAR 0, allocate the interrupt and DMA
 * resources, select the media table by PCI device ID, read the MAC
 * address, and hook up the am79900 core.  Returns 0 or an errno;
 * resources are unwound in reverse order via the fail_* labels.
 */
static int
le_pci_attach(device_t dev)
{
	struct le_pci_softc *lesc;
	struct lance_softc *sc;
	int error, i;

	lesc = device_get_softc(dev);
	sc = &lesc->sc_am79900.lsc;

	pci_enable_busmaster(dev);
	pci_enable_io(dev, PCIM_CMD_PORTEN);

	lesc->sc_rrid = PCIR_BAR(0);
	lesc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
	    &lesc->sc_rrid, RF_ACTIVE);
	if (lesc->sc_rres == NULL) {
		device_printf(dev, "cannot allocate registers\n");
		error = ENXIO;
		goto fail_mtx;
	}
	lesc->sc_regt = rman_get_bustag(lesc->sc_rres);
	lesc->sc_regh = rman_get_bushandle(lesc->sc_rres);

	lesc->sc_irid = 0;
	if ((lesc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &lesc->sc_irid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "cannot allocate interrupt\n");
		error = ENXIO;
		goto fail_rres;
	}

	/* Parent DMA tag: the chip can address 32 bits on PCI. */
	error = bus_dma_tag_create(
	    NULL,			/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_WAITOK,		/* flags */
	    &lesc->sc_pdmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate parent DMA tag\n");
		goto fail_ires;
	}

	sc->sc_memsize = PCNET_MEMSIZE;
	/*
	 * For Am79C970A, Am79C971 and Am79C978 the init block must be 2-byte
	 * aligned and the ring descriptors must be 16-byte aligned when using
	 * a 32-bit software style.
	 */
	error = bus_dma_tag_create(
	    lesc->sc_pdmat,		/* parent */
	    16, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->sc_memsize,		/* maxsize */
	    1,				/* nsegments */
	    sc->sc_memsize,		/* maxsegsize */
	    BUS_DMA_WAITOK,		/* flags */
	    &lesc->sc_dmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate buffer DMA tag\n");
		goto fail_pdtag;
	}

	error = bus_dmamem_alloc(lesc->sc_dmat, (void **)&sc->sc_mem,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT, &lesc->sc_dmam);
	if (error != 0) {
		device_printf(dev, "cannot allocate DMA buffer memory\n");
		goto fail_dtag;
	}

	/* le_pci_dma_callback stores the bus address into sc_addr. */
	sc->sc_addr = 0;
	error = bus_dmamap_load(lesc->sc_dmat, lesc->sc_dmam, sc->sc_mem,
	    sc->sc_memsize, le_pci_dma_callback, sc, 0);
	if (error != 0 || sc->sc_addr == 0) {
		device_printf(dev, "cannot load DMA buffer map\n");
		goto fail_dmem;
	}

	sc->sc_flags = LE_BSWAP;
	sc->sc_conf3 = 0;

	/* Select the supported media set by PCI device ID. */
	sc->sc_mediastatus = NULL;
	switch (pci_get_device(dev)) {
	case AMD_PCNET_HOME:
		sc->sc_mediachange = le_pci_mediachange;
		sc->sc_supmedia = le_home_supmedia;
		sc->sc_nsupmedia = sizeof(le_home_supmedia) / sizeof(int);
		sc->sc_defaultmedia = le_home_supmedia[0];
		break;
	default:
		sc->sc_mediachange = le_pci_mediachange;
		sc->sc_supmedia = le_pci_supmedia;
		sc->sc_nsupmedia = sizeof(le_pci_supmedia) / sizeof(int);
		sc->sc_defaultmedia = le_pci_supmedia[0];
	}

	/*
	 * Extract the physical MAC address from the ROM.
	 */
	for (i = 0; i < sizeof(sc->sc_enaddr); i++)
		sc->sc_enaddr[i] =
		    bus_space_read_1(lesc->sc_regt, lesc->sc_regh, i);

	/* Contiguous buffer copy routines from the lance(4) core. */
	sc->sc_copytodesc = lance_copytobuf_contig;
	sc->sc_copyfromdesc = lance_copyfrombuf_contig;
	sc->sc_copytobuf = lance_copytobuf_contig;
	sc->sc_copyfrombuf = lance_copyfrombuf_contig;
	sc->sc_zerobuf = lance_zerobuf_contig;

	sc->sc_rdcsr = le_pci_rdcsr;
	sc->sc_wrcsr = le_pci_wrcsr;
	sc->sc_hwreset = le_pci_hwreset;
	sc->sc_hwinit = NULL;
	sc->sc_hwintr = NULL;
	sc->sc_nocarrier = NULL;

	error = am79900_config(&lesc->sc_am79900, device_get_name(dev),
	    device_get_unit(dev));
	if (error != 0) {
		device_printf(dev, "cannot attach Am79900\n");
		goto fail_dmap;
	}

	error = bus_setup_intr(dev, lesc->sc_ires, INTR_MPSAFE,
	    am79900_intr, sc, &lesc->sc_ih, sc->ifp->if_serializer);
	if (error != 0) {
		device_printf(dev, "cannot set up interrupt\n");
		goto fail_am79900;
	}

	sc->ifp->if_cpuid = ithread_cpuid(rman_get_start(lesc->sc_ires));
	KKASSERT(sc->ifp->if_cpuid >= 0 && sc->ifp->if_cpuid < ncpus);

	return (0);

	/* Error unwind: release in strict reverse order of acquisition. */
 fail_am79900:
	am79900_detach(&lesc->sc_am79900);
 fail_dmap:
	bus_dmamap_unload(lesc->sc_dmat, lesc->sc_dmam);
 fail_dmem:
	bus_dmamem_free(lesc->sc_dmat, sc->sc_mem, lesc->sc_dmam);
 fail_dtag:
	bus_dma_tag_destroy(lesc->sc_dmat);
 fail_pdtag:
	bus_dma_tag_destroy(lesc->sc_pdmat);
 fail_ires:
	bus_release_resource(dev, SYS_RES_IRQ, lesc->sc_irid, lesc->sc_ires);
 fail_rres:
	bus_release_resource(dev, SYS_RES_IOPORT, lesc->sc_rrid,
	    lesc->sc_rres);
 fail_mtx:
	return (error);
}
/*
 * Bus read (single) operations.
 *
 * Pass-through tag: forward the 1-byte read to the underlying bus
 * space tag stored in bs_base.
 */
u_int8_t
bs_through_bs_r_1(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t offset)
{
	u_int8_t data;

	data = bus_space_read_1(t->bs_base, bsh, offset);
	return (data);
}
/*
 * Calibrate card (some boards are overclocked and need scaling).
 *
 * Records a fixed amount of audio with interrupts disabled, times how
 * long one DMA block takes, derives the true AC'97 link rate from the
 * byte count and elapsed time, and stores the (rounded) result in
 * sc_ac97_clock.  On timeout or bad buffer lookup the clock is left at
 * the nominal 48000 Hz set below.
 */
static void
auich_calibrate(struct auich_softc *sc)
{
	struct timeval t1, t2;
	uint8_t ociv, nciv;
	uint64_t wait_us;
	uint32_t actual_48k_rate, bytes, ac97rate;
	void *temp_buffer;
	struct auich_dma *p;
	u_int rate;

	/*
	 * Grab audio from input for fixed interval and compare how
	 * much we actually get with what we expect.  Interval needs
	 * to be sufficiently short that no interrupts are
	 * generated.
	 */

	/* Force the codec to a known state first. */
	sc->codec_if->vtbl->set_clock(sc->codec_if, 48000);
	rate = sc->sc_ac97_clock = 48000;
	sc->codec_if->vtbl->set_rate(sc->codec_if, AC97_REG_PCM_LR_ADC_RATE,
	    &rate);

	/* Setup a buffer */
	bytes = 64000;
	temp_buffer = auich_allocm(sc, AUMODE_RECORD, bytes, M_DEVBUF,
	    M_WAITOK);

	/* Find the DMA record that backs the buffer we just allocated. */
	for (p = sc->sc_dmas; p && KERNADDR(p) != temp_buffer; p = p->next)
		continue;
	if (p == NULL) {
		printf("auich_calibrate: bad address %p\n", temp_buffer);
		return;
	}
	sc->pcmi.dmalist[0].base = DMAADDR(p);
	sc->pcmi.dmalist[0].len = (bytes >> sc->sc_sample_shift);

	/*
	 * our data format is stereo, 16 bit so each sample is 4 bytes.
	 * assuming we get 48000 samples per second, we get 192000 bytes/sec.
	 * we're going to start recording with interrupts disabled and measure
	 * the time taken for one block to complete.  we know the block size,
	 * we know the time in microseconds, we calculate the sample rate:
	 *
	 * actual_rate [bps] = bytes / (time [s] * 4)
	 * actual_rate [bps] = (bytes * 1000000) / (time [us] * 4)
	 * actual_rate [Hz] = (bytes * 250000) / time [us]
	 */

	/* prepare: remember the current index so we can detect movement */
	ociv = bus_space_read_1(sc->iot, sc->aud_ioh, ICH_PCMI + ICH_CIV);
	bus_space_write_4(sc->iot, sc->aud_ioh, ICH_PCMI + ICH_BDBAR,
	    sc->sc_cddma + ICH_PCMI_OFF(0));
	/* (0 - 1) & mask == highest valid index value */
	bus_space_write_1(sc->iot, sc->aud_ioh, ICH_PCMI + ICH_LVI,
	    (0 - 1) & ICH_LVI_MASK);

	/* start */
	microtime(&t1);
	bus_space_write_1(sc->iot, sc->aud_ioh, ICH_PCMI + ICH_CTRL, ICH_RPBM);

	/* wait: poll CIV until it advances, with a ~1 s escape hatch */
	nciv = ociv;
	do {
		microtime(&t2);
		if (t2.tv_sec - t1.tv_sec > 1)
			break;
		nciv = bus_space_read_1(sc->iot, sc->aud_ioh,
		    ICH_PCMI + ICH_CIV);
	} while (nciv == ociv);
	microtime(&t2);

	/* stop */
	bus_space_write_1(sc->iot, sc->aud_ioh, ICH_PCMI + ICH_CTRL, 0);

	/* reset */
	DELAY(100);
	bus_space_write_1(sc->iot, sc->aud_ioh, ICH_PCMI + ICH_CTRL, ICH_RR);

	/* turn time delta into us */
	wait_us = ((t2.tv_sec - t1.tv_sec) * 1000000) + t2.tv_usec -
	    t1.tv_usec;

	auich_freem(sc, temp_buffer, M_DEVBUF);

	/* CIV never moved: the poll loop above hit the 1 s timeout. */
	if (nciv == ociv) {
		printf("%s: ac97 link rate calibration timed out after %"
		    PRIu64 " us\n", device_xname(sc->sc_dev), wait_us);
		return;
	}

	actual_48k_rate = (bytes * UINT64_C(250000)) / wait_us;

	/* Round to the nearest kHz; very low readings fall back to 48k. */
	if (actual_48k_rate < 50000)
		ac97rate = 48000;
	else
		ac97rate = ((actual_48k_rate + 500) / 1000) * 1000;

	printf("%s: measured ac97 link rate at %d Hz",
	    device_xname(sc->sc_dev), actual_48k_rate);
	if (ac97rate != actual_48k_rate)
		printf(", will use %d Hz", ac97rate);
	printf("\n");

	sc->sc_ac97_clock = ac97rate;
}
/*
 * Read the whole on-board EEPROM into 'data' (SNEC_EEPROM_SIZE bytes)
 * by bit-banging the serial EEPROM protocol through the SNEC_CTRLB
 * control register: assert chip select, clock out a start/read command
 * and a 6-bit word address, then clock in two data bytes per iteration.
 * Timing between edges is enforced with DELAY(SNEC_EEP_DELAY).
 */
void
snc_nec16_read_eeprom(bus_space_tag_t iot, bus_space_handle_t ioh,
    u_int8_t *data)
{
	u_int8_t n, val, bit;

	/* Read bytes from EEPROM; two bytes per an iteration. */
	for (n = 0; n < SNEC_EEPROM_SIZE / 2; n++) {
		/* select SNECR_EEP */
		bus_space_write_1(iot, ioh, SNEC_ADDR, SNECR_EEP);
		bus_space_write_1(iot, ioh, SNEC_CTRLB, 0x00);
		DELAY(SNEC_EEP_DELAY);

		/* Start EEPROM access: fixed preamble clocked on SK edges. */
		bus_space_write_1(iot, ioh, SNEC_CTRLB, SNECR_EEP_CS);
		DELAY(SNEC_EEP_DELAY);
		bus_space_write_1(iot, ioh, SNEC_CTRLB,
		    SNECR_EEP_CS | SNECR_EEP_SK);
		DELAY(SNEC_EEP_DELAY);
		bus_space_write_1(iot, ioh, SNEC_CTRLB,
		    SNECR_EEP_CS | SNECR_EEP_DI);
		DELAY(SNEC_EEP_DELAY);
		bus_space_write_1(iot, ioh, SNEC_CTRLB,
		    SNECR_EEP_CS | SNECR_EEP_SK | SNECR_EEP_DI);
		DELAY(SNEC_EEP_DELAY);
		bus_space_write_1(iot, ioh, SNEC_CTRLB,
		    SNECR_EEP_CS | SNECR_EEP_DI);
		DELAY(SNEC_EEP_DELAY);
		bus_space_write_1(iot, ioh, SNEC_CTRLB,
		    SNECR_EEP_CS | SNECR_EEP_SK | SNECR_EEP_DI);
		DELAY(SNEC_EEP_DELAY);
		bus_space_write_1(iot, ioh, SNEC_CTRLB, SNECR_EEP_CS);
		DELAY(SNEC_EEP_DELAY);
		bus_space_write_1(iot, ioh, SNEC_CTRLB,
		    SNECR_EEP_CS | SNECR_EEP_SK);
		DELAY(SNEC_EEP_DELAY);

		/*
		 * Pass the iteration count to the chip (6-bit word
		 * address, MSB first; each bit is presented on DI and
		 * latched with an SK pulse).
		 */
		for (bit = 0x20; bit != 0x00; bit >>= 1) {
			bus_space_write_1(iot, ioh, SNEC_CTRLB,
			    SNECR_EEP_CS | ((n & bit) ? SNECR_EEP_DI : 0x00));
			DELAY(SNEC_EEP_DELAY);
			bus_space_write_1(iot, ioh, SNEC_CTRLB,
			    SNECR_EEP_CS | SNECR_EEP_SK |
			    ((n & bit) ? SNECR_EEP_DI : 0x00));
			DELAY(SNEC_EEP_DELAY);
		}

		bus_space_write_1(iot, ioh, SNEC_CTRLB, SNECR_EEP_CS);
		(void) bus_space_read_1(iot, ioh, SNEC_CTRLB);	/* ACK */
		DELAY(SNEC_EEP_DELAY);

		/* Read a byte (MSB first, sampled on DO after each clock). */
		val = 0;
		for (bit = 0x80; bit != 0x00; bit >>= 1) {
			bus_space_write_1(iot, ioh, SNEC_CTRLB,
			    SNECR_EEP_CS | SNECR_EEP_SK);
			DELAY(SNEC_EEP_DELAY);
			bus_space_write_1(iot, ioh, SNEC_CTRLB, SNECR_EEP_CS);
			if (bus_space_read_1(iot, ioh, SNEC_CTRLB) &
			    SNECR_EEP_DO)
				val |= bit;
		}
		*data++ = val;

		/* Read one more byte. */
		val = 0;
		for (bit = 0x80; bit != 0x00; bit >>= 1) {
			bus_space_write_1(iot, ioh, SNEC_CTRLB,
			    SNECR_EEP_CS | SNECR_EEP_SK);
			DELAY(SNEC_EEP_DELAY);
			bus_space_write_1(iot, ioh, SNEC_CTRLB, SNECR_EEP_CS);
			if (bus_space_read_1(iot, ioh, SNEC_CTRLB) &
			    SNECR_EEP_DO)
				val |= bit;
		}
		*data++ = val;

		/* Deassert chip select between words. */
		bus_space_write_1(iot, ioh, SNEC_CTRLB, 0x00);
		DELAY(SNEC_EEP_DELAY);
	}

#ifdef SNCDEBUG
	/* Report what we got. */
	data -= SNEC_EEPROM_SIZE;
	log(LOG_INFO, "%s: EEPROM:"
	    " %02x%02x%02x%02x %02x%02x%02x%02x -"
	    " %02x%02x%02x%02x %02x%02x%02x%02x -"
	    " %02x%02x%02x%02x %02x%02x%02x%02x -"
	    " %02x%02x%02x%02x %02x%02x%02x%02x\n",
	    "snc_nec16_read_eeprom",
	    data[ 0], data[ 1], data[ 2], data[ 3],
	    data[ 4], data[ 5], data[ 6], data[ 7],
	    data[ 8], data[ 9], data[10], data[11],
	    data[12], data[13], data[14], data[15],
	    data[16], data[17], data[18], data[19],
	    data[20], data[21], data[22], data[23],
	    data[24], data[25], data[26], data[27],
	    data[28], data[29], data[30], data[31]);
#endif
}
/* Fetch one byte from the I2C controller register at offset 'off'. */
static __inline uint8_t
i2c_read_reg(struct i2c_softc *sc, bus_size_t off)
{
	uint8_t reg_val;

	reg_val = bus_space_read_1(sc->bst, sc->bsh, off);
	return (reg_val);
}
/*
 * NDIS HAL byte port input.  The "port" pointer actually encodes an
 * x86 I/O port number, so it is converted back to a bus_size_t offset
 * and read through the I/O space tag with a zero base handle.
 */
__stdcall static uint8_t
hal_readport_uchar(uint8_t *port)
{
	bus_size_t ioport;

	ioport = (bus_size_t)port;
	return (bus_space_read_1(NDIS_BUS_SPACE_IO, 0x0, ioport));
}
static int dektec_write (struct cdev *cdev, struct uio *uio, int ioflag) { int error = 0; struct dektec_sc *sc = cdev->si_drv1; mtx_lock (&sc->dektec_mtx); while (uio->uio_resid > 0) { struct plx_dma_buffer *dma_buffer = &sc->tx_buffer; int amount = MIN (TX_BUFFER_SIZE, uio->uio_resid); error = bus_dmamap_load (sc->buffer_dma_tag, sc->tx_buffer.buffer_dmamap, sc->tx_buffer.buffer, amount, buffer_dmamap_cb, dma_buffer, BUS_DMA_NOWAIT); if (error) goto bus_dmamap_load; for (int i = 0; i < sc->tx_buffer.segment_count; i++) { struct plx_dma_desc *desc = (struct plx_dma_desc *) &sc->tx_buffer.desc_list[i]; desc->local_addr = sc->tx_base + DTA1XX_TX_REG_FIFO_FIRST; desc->next_desc &= ~PCI905X_DMADPR_DIROFTFR; } error = uiomove (sc->tx_buffer.buffer, amount, uio); if (error) goto uiomove; bus_dmamap_sync (sc->desc_dma_tag, sc->tx_buffer.desc_dmamap, BUS_DMASYNC_PREREAD); bus_dmamap_sync (sc->buffer_dma_tag, sc->tx_buffer.buffer_dmamap, BUS_DMASYNC_PREREAD); mtx_lock_spin (&sc->tx_buffer.buffer_mtx); /* FIXME check for DMA_COMPLETE */ sc->tx_buffer.flags &= ~DMA_COMPLETE; sc->tx_buffer.flags |= DMA_BUSY; mtx_unlock_spin (&sc->tx_buffer.buffer_mtx); if (sc->legacy_plx) { /* DMA0 is used for writing */ bus_space_write_4 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA0_DESC_PTR, sc->tx_buffer.desc_ds_addr | PCI905X_DMADPR_DESCLOC_PCI); bus_space_read_4 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA0_DESC_PTR); bus_space_write_1 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA0_COMMAND_STAT, PCI905X_DMACSR_ENABLE | PCI905X_DMACSR_START); bus_space_read_1 (sc->plx_base_bt, sc->plx_base_bh, PCI905X_DMA0_COMMAND_STAT); } else { /* DMA1 is used for writing */ bus_space_write_4 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base1 + REG_DMA_DESC, sc->tx_buffer.desc_ds_addr | PCI905X_DMADPR_DESCLOC_PCI); bus_space_read_4 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base1 + REG_DMA_DESC); bus_space_write_1 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base1 + REG_CMD_STAT, PCI905X_DMACSR_ENABLE | 
PCI905X_DMACSR_START); bus_space_read_1 (sc->dta_base_bt, sc->dta_base_bh, sc->dma_base1 + REG_CMD_STAT); } mtx_lock (&sc->tx_buffer.event_mtx); while ((sc->tx_buffer.flags & DMA_BUSY) == DMA_BUSY) { if ((error = cv_timedwait (&sc->tx_buffer.event_cv, &sc->tx_buffer.event_mtx, RX_TIMEOUT))) break; } mtx_unlock (&sc->tx_buffer.event_mtx); unload_tx_buffer (sc); } goto done; uiomove: unload_tx_buffer (sc); bus_dmamap_load: done: mtx_unlock (&sc->dektec_mtx); return error; }
/*
 * Byte read of an EHCI operational register.  Registers start at
 * offset 0x100, and the byte lanes within each 32-bit word are
 * swapped (3 - (o & 3)) before the access.
 */
static uint8_t
ehci_bs_r_1(void *t, bus_space_handle_t h, bus_size_t o)
{
	bus_size_t off;

	off = 0x100 + (o &~ 3) + (3 - (o & 3));
	return bus_space_read_1((bus_space_tag_t)t, h, off);
}
/*
 * Board interrupt handler: service the periodic interrupt (pulsing the
 * watchdog on the 145/2145 models), acknowledge completed DMA
 * transfers on whichever engine pair this board variant uses (legacy
 * PLX: DMA0=TX/DMA1=RX; otherwise DMA1=TX/DMA0=RX), flip the buffer
 * flags from BUSY to COMPLETE, re-arm the RX/TX status interrupts,
 * and wake selectors plus the deferred-work task.
 */
static void
dektec_intr(void *parameter)
{
	struct dektec_sc *sc = parameter;
	uint32_t status;

	/* Periodic interrupt: ack it and feed the watchdog if present. */
	if (dta1xx_gen_status_reg_get_per_int(sc->dta_base_bt,
	    sc->dta_base_bh, sc->gen_base)) {
		dta1xx_gen_status_reg_clr_per_int(sc->dta_base_bt,
		    sc->dta_base_bh, sc->gen_base);
		if (sc->model == BOARD_MODEL_145 ||
		    sc->model == BOARD_MODEL_2145)
			dta1xx_gen_pulse_watchdog(sc->dta_base_bt,
			    sc->dta_base_bh, sc->gen_base);
	}

	/* FIXME use PCI905X_INTCSR_DMA0_INTACT / PCI905X_INTCSR_DMA1_INTACT */
	if (sc->legacy_plx) {
		/* DMA0 is used for writing */
		status = bus_space_read_1(sc->plx_base_bt, sc->plx_base_bh,
		    PCI905X_DMA0_COMMAND_STAT);
		if ((status & PCI905X_DMACSR_DONE) == PCI905X_DMACSR_DONE) {
			if ((sc->tx_buffer.flags & DMA_BUSY) == DMA_BUSY) {
				/* Clear the interrupt, then read back to
				 * flush the posted write. */
				bus_space_write_1(sc->plx_base_bt,
				    sc->plx_base_bh,
				    PCI905X_DMA0_COMMAND_STAT,
				    PCI905X_DMACSR_ENABLE |
				    PCI905X_DMACSR_CLEARINT);
				bus_space_read_1(sc->plx_base_bt,
				    sc->plx_base_bh,
				    PCI905X_DMA0_COMMAND_STAT);
				mtx_lock_spin(&sc->tx_buffer.buffer_mtx);
				sc->tx_buffer.flags &= ~DMA_BUSY;
				sc->tx_buffer.flags |= DMA_COMPLETE;
				mtx_unlock_spin(&sc->tx_buffer.buffer_mtx);
			}
		}

		/* DMA1 is used for reading */
		status = bus_space_read_1(sc->plx_base_bt, sc->plx_base_bh,
		    PCI905X_DMA1_COMMAND_STAT);
		if ((status & PCI905X_DMACSR_DONE) == PCI905X_DMACSR_DONE) {
			if ((sc->rx_buffer.flags & DMA_BUSY) == DMA_BUSY) {
				bus_space_write_1(sc->plx_base_bt,
				    sc->plx_base_bh,
				    PCI905X_DMA1_COMMAND_STAT,
				    PCI905X_DMACSR_ENABLE |
				    PCI905X_DMACSR_CLEARINT);
				bus_space_read_1(sc->plx_base_bt,
				    sc->plx_base_bh,
				    PCI905X_DMA1_COMMAND_STAT);
				mtx_lock_spin(&sc->rx_buffer.buffer_mtx);
				sc->rx_buffer.flags &= ~DMA_BUSY;
				sc->rx_buffer.flags |= DMA_COMPLETE;
				mtx_unlock_spin(&sc->rx_buffer.buffer_mtx);
			}
		}
	} else {
		/* DMA1 is used for writing */
		status = bus_space_read_1(sc->dta_base_bt, sc->dta_base_bh,
		    sc->dma_base1 + REG_CMD_STAT);
		/* PCI905X_DMACSR_DONE */
		if ((status & DTA1XX_DMACSR_INTACT) != 0) {
			bus_space_write_1(sc->dta_base_bt, sc->dta_base_bh,
			    sc->dma_base1 + REG_CMD_STAT,
			    PCI905X_DMACSR_ENABLE | PCI905X_DMACSR_CLEARINT);
			bus_space_read_1(sc->dta_base_bt, sc->dta_base_bh,
			    sc->dma_base1 + REG_CMD_STAT);
			mtx_lock_spin(&sc->tx_buffer.buffer_mtx);
			sc->tx_buffer.flags &= ~DMA_BUSY;
			sc->tx_buffer.flags |= DMA_COMPLETE;
			mtx_unlock_spin(&sc->tx_buffer.buffer_mtx);
		}

		/* DMA0 is used for reading */
		status = bus_space_read_1(sc->dta_base_bt, sc->dta_base_bh,
		    sc->dma_base0 + REG_CMD_STAT);
		if ((status & DTA1XX_DMACSR_INTACT) != 0) {
			bus_space_write_1(sc->dta_base_bt, sc->dta_base_bh,
			    sc->dma_base0 + REG_CMD_STAT,
			    PCI905X_DMACSR_ENABLE | PCI905X_DMACSR_CLEARINT);
			bus_space_read_1(sc->dta_base_bt, sc->dta_base_bh,
			    sc->dma_base0 + REG_CMD_STAT);
			mtx_lock_spin(&sc->rx_buffer.buffer_mtx);
			sc->rx_buffer.flags &= ~DMA_BUSY;
			sc->rx_buffer.flags |= DMA_COMPLETE;
			mtx_unlock_spin(&sc->rx_buffer.buffer_mtx);
		}
	}

	/* Acknowledge/re-arm all RX and TX status interrupt sources. */
	dta1xx_rx_set_rx_status_reg(sc->dta_base_bt, sc->dta_base_bh,
	    sc->rx_base, DTA1XX_RXSTAT_PERINT | DTA1XX_RXSTAT_OVFINT |
	    DTA1XX_RXSTAT_SYNCINT | DTA1XX_RXSTAT_THRINT |
	    DTA1XX_RXSTAT_RATEOVFINT);
	dta1xx_tx_set_tx_status_reg(sc->dta_base_bt, sc->dta_base_bh,
	    sc->tx_base, DTA1XX_TXSTAT_PERINT | DTA1XX_TXSTAT_UFLINT |
	    DTA1XX_TXSTAT_SYNCINT | DTA1XX_TXSTAT_THRINT |
	    DTA1XX_TXSTAT_SHORTINT);

	if (SEL_WAITING(&sc->selinfo))
		selwakeup(&sc->selinfo);
	taskqueue_enqueue(taskqueue_swi, &sc->task);
}
/*
 * Autoconf activate/deactivate hook.  Deactivation is refused (EBUSY)
 * for the console (and kgdb) port; otherwise the optional power-down
 * callback is invoked.
 */
int
com_activate(struct device *self, int act)
{
	struct com_softc *sc = (struct com_softc *)self;
	int s, rv = 0;

	s = spltty();
	switch (act) {
	case DVACT_ACTIVATE:
		break;

	case DVACT_DEACTIVATE:
#ifdef KGDB
		if (sc->sc_hwflags & (COM_HW_CONSOLE|COM_HW_KGDB)) {
#else
		if (sc->sc_hwflags & COM_HW_CONSOLE) {
#endif /* KGDB */
			rv = EBUSY;
			break;
		}

		if (sc->disable != NULL && sc->enabled != 0) {
			(*sc->disable)(sc);
			sc->enabled = 0;
		}
		break;
	}
	splx(s);
	return (rv);
}

/*
 * Open the serial device.  On first open this performs full hardware
 * initialization: tty defaults, UART wake-up per chip type, FIFO
 * enable/drain, modem-control and interrupt-enable setup, and initial
 * carrier-state detection.  Handles the cua (callout) vs tty (callin)
 * device split, including blocking for carrier on the tty side.
 */
int
comopen(dev_t dev, int flag, int mode, struct proc *p)
{
	int unit = DEVUNIT(dev);
	struct com_softc *sc;
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	struct tty *tp;
	int s;
	int error = 0;

	if (unit >= com_cd.cd_ndevs)
		return ENXIO;
	sc = com_cd.cd_devs[unit];
	if (!sc)
		return ENXIO;

#ifdef KGDB
	/*
	 * If this is the kgdb port, no other use is permitted.
	 */
	if (ISSET(sc->sc_hwflags, COM_HW_KGDB))
		return (EBUSY);
#endif /* KGDB */

	s = spltty();
	if (!sc->sc_tty) {
		tp = sc->sc_tty = ttymalloc(1000000);
	} else
		tp = sc->sc_tty;
	splx(s);

	tp->t_oproc = comstart;
	tp->t_param = comparam;
	tp->t_dev = dev;

	if (!ISSET(tp->t_state, TS_ISOPEN)) {
		/* First open: establish tty defaults and init the UART. */
		SET(tp->t_state, TS_WOPEN);
		ttychars(tp);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
#ifdef COM_CONSOLE
		if (ISSET(sc->sc_hwflags, COM_HW_CONSOLE)) {
			/* Console keeps its configured flags and speed. */
			tp->t_cflag = comconscflag;
			tp->t_ispeed = tp->t_ospeed = comconsrate;
		} else
#endif
		{
			tp->t_cflag = TTYDEF_CFLAG;
			tp->t_ispeed = tp->t_ospeed = comdefaultrate;
		}
		if (ISSET(sc->sc_swflags, COM_SW_CLOCAL))
			SET(tp->t_cflag, CLOCAL);
		if (ISSET(sc->sc_swflags, COM_SW_CRTSCTS))
			SET(tp->t_cflag, CRTSCTS);
		if (ISSET(sc->sc_swflags, COM_SW_MDMBUF))
			SET(tp->t_cflag, MDMBUF);
		tp->t_lflag = TTYDEF_LFLAG;

		s = spltty();

		sc->sc_initialize = 1;
		comparam(tp, &tp->t_termios);
		ttsetwater(tp);

		/* Reset the software input ring and its watermarks. */
		sc->sc_ibufp = sc->sc_ibuf = sc->sc_ibufs[0];
		sc->sc_ibufhigh = sc->sc_ibuf + COM_IHIGHWATER;
		sc->sc_ibufend = sc->sc_ibuf + COM_IBUFSIZE;

		iot = sc->sc_iot;
		ioh = sc->sc_ioh;

		/*
		 * Wake up the sleepy heads.
		 */
		switch (sc->sc_uarttype) {
		case COM_UART_ST16650:
		case COM_UART_ST16650V2:
			bus_space_write_1(iot, ioh, com_lcr, LCR_EFR);
			bus_space_write_1(iot, ioh, com_efr, EFR_ECB);
			bus_space_write_1(iot, ioh, com_ier, 0);
			bus_space_write_1(iot, ioh, com_efr, 0);
			bus_space_write_1(iot, ioh, com_lcr, 0);
			break;
		case COM_UART_TI16750:
			bus_space_write_1(iot, ioh, com_ier, 0);
			break;
		case COM_UART_PXA2X0:
			bus_space_write_1(iot, ioh, com_ier, IER_EUART);
			break;
		}

		if (ISSET(sc->sc_hwflags, COM_HW_FIFO)) {
			u_int8_t fifo = FIFO_ENABLE|FIFO_RCV_RST|FIFO_XMT_RST;
			u_int8_t lcr;

			if (tp->t_ispeed <= 1200)
				fifo |= FIFO_TRIGGER_1;
			else if (tp->t_ispeed <= 38400)
				fifo |= FIFO_TRIGGER_4;
			else
				fifo |= FIFO_TRIGGER_8;
			if (sc->sc_uarttype == COM_UART_TI16750) {
				/* 64-byte FIFO mode needs DLAB set. */
				fifo |= FIFO_ENABLE_64BYTE;
				lcr = bus_space_read_1(iot, ioh, com_lcr);
				bus_space_write_1(iot, ioh, com_lcr,
				    lcr | LCR_DLAB);
			}

			/*
			 * (Re)enable and drain FIFOs.
			 *
			 * Certain SMC chips cause problems if the FIFOs are
			 * enabled while input is ready.  Turn off the FIFO
			 * if necessary to clear the input.  Test the input
			 * ready bit after enabling the FIFOs to handle races
			 * between enabling and fresh input.
			 *
			 * Set the FIFO threshold based on the receive speed.
			 */
			for (;;) {
				bus_space_write_1(iot, ioh, com_fifo, 0);
				delay(100);
				(void) bus_space_read_1(iot, ioh, com_data);
				bus_space_write_1(iot, ioh, com_fifo, fifo |
				    FIFO_RCV_RST | FIFO_XMT_RST);
				delay(100);
				if(!ISSET(bus_space_read_1(iot, ioh,
				    com_lsr), LSR_RXRDY))
					break;
			}
			if (sc->sc_uarttype == COM_UART_TI16750)
				bus_space_write_1(iot, ioh, com_lcr, lcr);
		}

		/* Flush any pending I/O. */
		while (ISSET(bus_space_read_1(iot, ioh, com_lsr), LSR_RXRDY))
			(void) bus_space_read_1(iot, ioh, com_data);

		/* You turn me on, baby! */
		sc->sc_mcr = MCR_DTR | MCR_RTS;
		if (!ISSET(sc->sc_hwflags, COM_HW_NOIEN))
			SET(sc->sc_mcr, MCR_IENABLE);
		bus_space_write_1(iot, ioh, com_mcr, sc->sc_mcr);
		sc->sc_ier = IER_ERXRDY | IER_ERLS | IER_EMSC;
#ifdef COM_PXA2X0
		if (sc->sc_uarttype == COM_UART_PXA2X0)
			sc->sc_ier |= IER_EUART | IER_ERXTOUT;
#endif
		bus_space_write_1(iot, ioh, com_ier, sc->sc_ier);

		/* Sample the modem status to establish the carrier state. */
		sc->sc_msr = bus_space_read_1(iot, ioh, com_msr);
		if (ISSET(sc->sc_swflags, COM_SW_SOFTCAR) || DEVCUA(dev) ||
		    ISSET(sc->sc_msr, MSR_DCD) || ISSET(tp->t_cflag, MDMBUF))
			SET(tp->t_state, TS_CARR_ON);
		else
			CLR(tp->t_state, TS_CARR_ON);
#ifdef COM_PXA2X0
		if (sc->sc_uarttype == COM_UART_PXA2X0 &&
		    ISSET(sc->sc_hwflags, COM_HW_SIR)) {
			bus_space_write_1(iot, ioh, com_isr, ISR_RECV);
#ifdef __zaurus__
			scoop_set_irled(1);
#endif
		}
#endif
	} else if (ISSET(tp->t_state, TS_XCLUDE) && suser(p, 0) != 0)
		return EBUSY;
	else
		s = spltty();

	if (DEVCUA(dev)) {
		if (ISSET(tp->t_state, TS_ISOPEN)) {
			/* Ah, but someone already is dialed in... */
			splx(s);
			return EBUSY;
		}
		sc->sc_cua = 1;		/* We go into CUA mode. */
	} else {
		/* tty (not cua) device; wait for carrier if necessary. */
		if (ISSET(flag, O_NONBLOCK)) {
			if (sc->sc_cua) {
				/* Opening TTY non-blocking... but the CUA is busy. */
				splx(s);
				return EBUSY;
			}
		} else {
			while (sc->sc_cua ||
			    (!ISSET(tp->t_cflag, CLOCAL) &&
				!ISSET(tp->t_state, TS_CARR_ON))) {
				SET(tp->t_state, TS_WOPEN);
				error = ttysleep(tp, &tp->t_rawq,
				    TTIPRI | PCATCH, ttopen, 0);
				/*
				 * If TS_WOPEN has been reset, that means the cua device
				 * has been closed.  We don't want to fail in that case,
				 * so just go around again.
				 */
				if (error && ISSET(tp->t_state, TS_WOPEN)) {
					CLR(tp->t_state, TS_WOPEN);
					if (!sc->sc_cua &&
					    !ISSET(tp->t_state, TS_ISOPEN))
						compwroff(sc);
					splx(s);
					return error;
				}
			}
		}
	}
	splx(s);

	return (*linesw[tp->t_line].l_open)(dev, tp, p);
}
/*
 * Grovel the STI ROM image.
 *
 * Enables the PCI expansion ROM, maps it whole, iterates over the ROM
 * images looking for an STI image whose PCI data structure matches
 * this CPU architecture, reads the STI region BAR assignments from
 * the selected image, then remaps only the STI ROM proper into
 * spc->sc_romh.  The ROM decode is kept disabled except around actual
 * accesses (enable/disable pairs), since many cards cannot decode the
 * ROM and their regular BARs at the same time.  Returns 0 or an errno.
 */
int
sti_check_rom(struct sti_pci_softc *spc, struct pci_attach_args *pa)
{
	struct sti_softc *sc = &spc->sc_base;
	pcireg_t address, mask;
	bus_space_handle_t romh;
	bus_size_t romsize, subsize, stiromsize;
	bus_addr_t selected, offs, suboffs;
	u_int32_t tmp;
	int i;
	int rc;

	/* sort of inline sti_pci_enable_rom(sc) */
	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	/* Write all-ones (size probe) to learn the ROM BAR size mask. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, ~PCI_ROM_ENABLE);
	mask = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
	address |= PCI_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, address);
	sc->sc_flags |= STI_ROM_ENABLED;

	/*
	 * Map the complete ROM for now.
	 */
	romsize = PCI_ROM_SIZE(mask);
	rc = bus_space_map(pa->pa_memt, PCI_ROM_ADDR(address), romsize,
	    0, &romh);
	sti_pci_disable_rom(sc);
	if (rc != 0) {
		printf("%s: can't map PCI ROM (%d)\n",
		    sc->sc_dev.dv_xname, rc);
		goto fail2;
	}

	/*
	 * Iterate over the ROM images, pick the best candidate.
	 */
	selected = (bus_addr_t)-1;
	for (offs = 0; offs < romsize; offs += subsize) {
		sti_pci_enable_rom(sc);
		/*
		 * Check for a valid ROM header.
		 */
		tmp = bus_space_read_4(pa->pa_memt, romh, offs + 0);
		tmp = letoh32(tmp);
		if (tmp != 0x55aa0000) {
			sti_pci_disable_rom(sc);
			if (offs == 0) {
				/* Only complain for the first image. */
				printf("%s: invalid PCI ROM header signature"
				    " (%08x)\n", sc->sc_dev.dv_xname, tmp);
				rc = EINVAL;
			}
			break;
		}

		/*
		 * Check ROM type.
		 */
		tmp = bus_space_read_4(pa->pa_memt, romh, offs + 4);
		tmp = letoh32(tmp);
		if (tmp != 0x00000001) {	/* 1 == STI ROM */
			sti_pci_disable_rom(sc);
			if (offs == 0) {
				printf("%s: invalid PCI ROM type (%08x)\n",
				    sc->sc_dev.dv_xname, tmp);
				rc = EINVAL;
			}
			break;
		}

		/* Image size is stored in units of 512 bytes. */
		subsize = (bus_addr_t)bus_space_read_2(pa->pa_memt, romh,
		    offs + 0x0c);
		subsize <<= 9;

#ifdef STIDEBUG
		sti_pci_disable_rom(sc);
		printf("ROM offset %08lx size %08lx type %08x",
		    offs, subsize, tmp);
		sti_pci_enable_rom(sc);
#endif

		/*
		 * Check for a valid ROM data structure.
		 * We do not need it except to know what architecture the ROM
		 * code is for.
		 */
		suboffs = offs + (bus_addr_t)bus_space_read_2(pa->pa_memt,
		    romh, offs + 0x18);
		tmp = bus_space_read_4(pa->pa_memt, romh, suboffs + 0);
		tmp = letoh32(tmp);
		if (tmp != 0x50434952) {	/* PCIR */
			sti_pci_disable_rom(sc);
			if (offs == 0) {
				printf("%s: invalid PCI data signature"
				    " (%08x)\n", sc->sc_dev.dv_xname, tmp);
				rc = EINVAL;
			} else {
#ifdef STIDEBUG
				printf(" invalid PCI data signature %08x\n",
				    tmp);
#endif
				continue;
			}
		}

		/* Code type byte selects the microcode architecture. */
		tmp = bus_space_read_1(pa->pa_memt, romh, suboffs + 0x14);
		sti_pci_disable_rom(sc);

#ifdef STIDEBUG
		printf(" code %02x", tmp);
#endif

		switch (tmp) {
#ifdef __hppa__
		case 0x10:
			if (selected == (bus_addr_t)-1)
				selected = offs;
			break;
#endif
#ifdef __i386__
		case 0x00:
			if (selected == (bus_addr_t)-1)
				selected = offs;
			break;
#endif
		default:
#ifdef STIDEBUG
			printf(" (wrong architecture)");
#endif
			break;
		}

#ifdef STIDEBUG
		if (selected == offs)
			printf(" -> SELECTED");
		printf("\n");
#endif
	}

	if (selected == (bus_addr_t)-1) {
		if (rc == 0) {
			printf("%s: found no ROM with correct microcode"
			    " architecture\n", sc->sc_dev.dv_xname);
			rc = ENOEXEC;
		}
		goto fail;
	}

	/*
	 * Read the STI region BAR assignments.
	 */
	sti_pci_enable_rom(sc);
	offs = selected + (bus_addr_t)bus_space_read_2(pa->pa_memt, romh,
	    selected + 0x0e);
	for (i = 0; i < STI_REGION_MAX; i++) {
		rc = sti_readbar(sc, pa, i,
		    bus_space_read_1(pa->pa_memt, romh, offs + i));
		if (rc != 0)
			goto fail;
	}

	/*
	 * Find out where the STI ROM itself lies, and its size.
	 */
	offs = selected + (bus_addr_t)bus_space_read_4(pa->pa_memt, romh,
	    selected + 0x08);
	stiromsize = (bus_addr_t)bus_space_read_4(pa->pa_memt, romh,
	    offs + 0x18);
	stiromsize = letoh32(stiromsize);
	sti_pci_disable_rom(sc);

	/*
	 * Replace our mapping with a smaller mapping of only the area
	 * we are interested in.
	 */
	bus_space_unmap(pa->pa_memt, romh, romsize);
	rc = bus_space_map(pa->pa_memt, PCI_ROM_ADDR(address) + offs,
	    stiromsize, 0, &spc->sc_romh);
	if (rc != 0) {
		printf("%s: can't map STI ROM (%d)\n",
		    sc->sc_dev.dv_xname, rc);
		goto fail2;
	}

	return (0);

fail:
	bus_space_unmap(pa->pa_memt, romh, romsize);
fail2:
	sti_pci_disable_rom(sc);

	return (rc);
}
/*
 * Read one byte from the CUDA controller's mapped register window
 * at the given byte offset.
 */
static inline uint8_t
cuda_read_reg(struct cuda_softc *sc, int offset)
{
	uint8_t val;

	val = bus_space_read_1(sc->sc_memt, sc->sc_memh, offset);
	return val;
}
/*
 * Hardware interrupt handler for the sxiuart serial port.
 *
 * Drains received characters into the software input buffer (as
 * (data, LSR) byte pairs), records modem-status changes, and restarts
 * output when the transmitter becomes ready.  Returns 1 if the
 * interrupt was serviced, 0 if it was not ours (or no tty yet).
 */
int
sxiuart_intr(void *arg)
{
	struct sxiuart_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct tty *tp;
	uint32_t cnt;
	uint8_t c, iir, lsr, msr, delta;
	uint8_t *p;

	iir = bus_space_read_1(iot, ioh, SXIUART_IIR);
	/* "Busy detect" condition: reading USR clears it; nothing else to do. */
	if ((iir & IIR_IMASK) == IIR_BUSY) {
		(void)bus_space_read_1(iot, ioh, SXIUART_USR);
		return (0);
	}
	if (ISSET(iir, IIR_NOPEND))
		return (0);

	if (sc->sc_tty == NULL)
		return (0);
	tp = sc->sc_tty;

	cnt = 0;
loop:
	lsr = bus_space_read_1(iot, ioh, SXIUART_LSR);
	if (ISSET(lsr, LSR_RXRDY)) {
		if (cnt == 0) {
			/*
			 * First byte of this burst: start at the current
			 * fill pointer and schedule the soft interrupt
			 * that will push the data up to the tty layer.
			 */
			p = sc->sc_ibufp;
			softintr_schedule(sc->sc_si);
		}
		cnt++;

		c = bus_space_read_1(iot, ioh, SXIUART_RBR);
		if (ISSET(lsr, LSR_BI)) {
#if defined(DDB)
			/* A break on the console drops into the debugger. */
			if (ISSET(sc->sc_hwflags, COM_HW_CONSOLE)) {
				if (db_console)
					Debugger();
				goto loop;
			}
#endif
			c = 0;
		}
		if (p >= sc->sc_ibufend) {
			/* Input buffer overflow: count it, report once/min. */
			sc->sc_floods++;
			if (sc->sc_errors++ == 0)
				timeout_add_sec(&sc->sc_diag_tmo, 60);
		} else {
			/* Store each character as a (data, LSR) pair. */
			*p++ = c;
			*p++ = lsr;
			if (p == sc->sc_ibufhigh &&
			    ISSET(tp->t_cflag, CRTSCTS)) {
				/* XXX */
				/* High-water mark: drop RTS to throttle. */
				CLR(sc->sc_mcr, MCR_RTS);
				bus_space_write_1(iot, ioh, SXIUART_MCR,
				    sc->sc_mcr);
			}
		}
		goto loop;
	} else if (cnt > 0)
		sc->sc_ibufp = p;

	msr = bus_space_read_1(iot, ioh, SXIUART_MSR);
	if (msr != sc->sc_msr) {
		delta = msr ^ sc->sc_msr;

		ttytstamp(tp, sc->sc_msr & MSR_CTS, msr & MSR_CTS,
		    sc->sc_msr & MSR_DCD, msr & MSR_DCD);

		sc->sc_msr = msr;
		if (ISSET(delta, MSR_DCD)) {
			/* Carrier transition: let the line discipline decide;
			 * if it says hang up, drop DTR. */
			if (!ISSET(sc->sc_swflags, COM_SW_SOFTCAR) &&
			    (*linesw[tp->t_line].l_modem)(tp,
			    ISSET(msr, MSR_DCD)) == 0) {
				CLR(sc->sc_mcr, sc->sc_dtr);
				bus_space_write_1(iot, ioh, SXIUART_MCR,
				    sc->sc_mcr);
			}
		}
		if (ISSET(delta & msr, MSR_CTS) &&
		    ISSET(tp->t_cflag, CRTSCTS))
			/* CTS asserted with hw flow control: kick output. */
			(*linesw[tp->t_line].l_start)(tp);
	}

	/* Transmitter idle: clear busy state and restart output. */
	if (ISSET(tp->t_state, TS_BUSY) && ISSET(lsr, LSR_TXRDY)) {
		CLR(tp->t_state, TS_BUSY | TS_FLUSH);
		if (sc->sc_halt > 0)
			wakeup(&tp->t_outq);
		(*linesw[tp->t_line].l_start)(tp);
	}

	/* Keep servicing until the chip reports no pending interrupts. */
	iir = bus_space_read_1(iot, ioh, SXIUART_IIR);
	if (ISSET(iir, IIR_NOPEND))
		goto done;
	cnt = 0;
	goto loop;
done:
	return (1);
}
/*
 * Probe for a supported board.
 *
 * Maps the candidate I/O range, verifies the EATA signature via PIO,
 * reads the controller's configuration block, and determines the DMA
 * channel.  (The `bad:' failure label lives past the end of this
 * chunk.)
 */
static int
dpt_isa_probe(struct isa_attach_args *ia, int iobase)
{
	struct eata_cfg ec;
	bus_space_handle_t ioh;
	bus_space_tag_t iot;
	int i, j, stat, irq, drq;
	u_int16_t *p;

	iot = ia->ia_iot;

	if (bus_space_map(iot, iobase, DPT_ISA_IOSIZE, 0, &ioh) != 0)
		return(0);

	/*
	 * Assuming the DPT BIOS reset the board, we shouldn't need to
	 * re-do it here.  The tests below should weed out non-EATA devices
	 * before we start poking any registers.
	 */
	for (i = 1000; i; i--) {
		if ((bus_space_read_1(iot, ioh, HA_STATUS) &
		    HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}
	if (i == 0)
		goto bad;

	/*
	 * Accept only the "ready + seek complete" status combinations;
	 * while the status is unexpected, check for the 'DPT' signature
	 * in the error registers before giving up.
	 */
	while((((stat = bus_space_read_1(iot, ioh, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_isa_wait(ioh, iot, HA_ST_BUSY, 0)))
		/* RAID drives still spinning up? */
		if (bus_space_read_1(iot, ioh, HA_ERROR) != 'D' ||
		    bus_space_read_1(iot, ioh, HA_ERROR + 1) != 'P' ||
		    bus_space_read_1(iot, ioh, HA_ERROR + 2) != 'T')
			goto bad;

	/*
	 * At this point we can be confident that we are dealing with a DPT
	 * HBA.  Issue the read-config command and wait for the data to
	 * appear.  XXX We shouldn't be doing this with PIO, but it makes it
	 * a lot easier as no DMA setup is required.
	 */
	bus_space_write_1(iot, ioh, HA_COMMAND, CP_PIO_GETCFG);
	memset(&ec, 0, sizeof(ec));
	/* Words to read up to and including the ec_cfglen field. */
	i = ((uintptr_t)&((struct eata_cfg *)0)->ec_cfglen +
	    sizeof(ec.ec_cfglen)) >> 1;
	p = (u_int16_t *)&ec;

	if (dpt_isa_wait(ioh, iot, 0xFF, HA_ST_DATA_RDY))
		goto bad;

	/* Begin reading */
	while (i--)
		*p++ = bus_space_read_stream_2(iot, ioh, HA_DATA);

	/* Clamp the advertised config length to what the struct can hold. */
	if ((i = ec.ec_cfglen) > (sizeof(struct eata_cfg)
	    - (uintptr_t)(&(((struct eata_cfg *)0L)->ec_cfglen))
	    - sizeof(ec.ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - (uintptr_t)(&(((struct eata_cfg *)0L)->ec_cfglen))
		    - sizeof(ec.ec_cfglen);

	/* j = total bytes consumed so far (header + payload). */
	j = i + (uintptr_t)(&(((struct eata_cfg *)0L)->ec_cfglen)) +
	    sizeof(ec.ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = bus_space_read_stream_2(iot, ioh, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		bus_space_read_stream_2(iot, ioh, HA_DATA);

	/* Puke if we don't like the returned configuration data. */
	if ((bus_space_read_1(iot, ioh, HA_STATUS) & HA_ST_ERROR) != 0 ||
	    memcmp(ec.ec_eatasig, "EATA", 4) != 0 ||
	    (ec.ec_feat0 & (EC_F0_HBA_VALID | EC_F0_DMA_SUPPORTED)) !=
	    (EC_F0_HBA_VALID | EC_F0_DMA_SUPPORTED))
		goto bad;

	/*
	 * Which DMA channel to use: if it was hardwired in the kernel
	 * configuration, use that value.  If the HBA told us, use that
	 * value.  Otherwise, puke.
	 */
	if ((drq = ia->ia_drq[0].ir_drq) == ISA_UNKNOWN_DRQ) {
		int dmanum = ((ec.ec_feat1 & EC_F1_DMA_NUM_MASK) >>
		    EC_F1_DMA_NUM_SHIFT);

		if ((ec.ec_feat0 & EC_F0_DMA_NUM_VALID) == 0 || dmanum > 3)
			goto bad;
		/* Map the 2-bit DMA index to an ISA DRQ number. */
		drq = "\0\7\6\5"[dmanum];
	}
/*
 * Attach an ISA PnP NE2000-family Ethernet card: set up register and
 * ASIC bus-space handles, re-detect the chip type (to print it and to
 * hook RTL8019-specific media routines), do the generic NE2000 attach,
 * and establish the interrupt handler.
 */
static void
ne_isapnp_attach(struct device *parent, struct device *self, void *aux)
{
	struct ne_isapnp_softc * const isc = (struct ne_isapnp_softc *)self;
	struct ne2000_softc * const nsc = &isc->sc_ne2000;
	struct dp8390_softc * const dsc = &nsc->sc_dp8390;
	struct isa_attach_args * const ipa = aux;
	bus_space_tag_t nict;
	bus_space_handle_t nich;
	bus_space_tag_t asict;
	bus_space_handle_t asich;
	const char *typestr;
	int netype;

	nict = ipa->ia_iot;
	nich = ipa->ipa_io[0].h;
	asict = nict;

	/* The ASIC registers live at a fixed offset within the NIC window. */
	if (bus_space_subregion(nict, nich, NE2000_ASIC_OFFSET,
	    NE2000_ASIC_NPORTS, &asich)) {
		printf("%s: can't subregion i/o space\n",
		    dsc->sc_dev.dv_xname);
		return;
	}

	dsc->sc_regt = nict;
	dsc->sc_regh = nich;

	nsc->sc_asict = asict;
	nsc->sc_asich = asich;

	/*
	 * Detect it again, so we can print some information about the
	 * interface.
	 */
	netype = ne2000_detect(nsc);
	switch (netype) {
	case NE2000_TYPE_NE1000:
		typestr = "NE1000";
		break;

	case NE2000_TYPE_NE2000:
		typestr = "NE2000";
		/*
		 * Check for a Realtek 8019.
		 */
		bus_space_write_1(nict, nich, ED_P0_CR,
		    ED_CR_PAGE_0 | ED_CR_STP);
		if (bus_space_read_1(nict, nich, NERTL_RTL0_8019ID0) ==
		    RTL0_8019ID0 &&
		    bus_space_read_1(nict, nich, NERTL_RTL0_8019ID1) ==
		    RTL0_8019ID1) {
			/* RTL8019: use its media selection routines. */
			typestr = "NE2000 (RTL8019)";
			dsc->sc_mediachange = rtl80x9_mediachange;
			dsc->sc_mediastatus = rtl80x9_mediastatus;
			dsc->init_card = rtl80x9_init_card;
			dsc->sc_media_init = rtl80x9_media_init;
		}
		break;

	default:
		printf(": where did the card go?!\n");
		return;
	}

	printf(": %s", typestr);

	/* This interface is always enabled. */
	dsc->sc_enabled = 1;

	/*
	 * Do generic NE2000 attach.  This will read the station address
	 * from the EEPROM.
	 */
	ne2000_attach(nsc, NULL);

	/* Establish the interrupt handler. */
	isc->sc_ih = isa_intr_establish(ipa->ia_ic, ipa->ipa_irq[0].num,
	    IST_EDGE, IPL_NET, dp8390_intr, dsc, dsc->sc_dev.dv_xname);
	if (isc->sc_ih == NULL)
		printf(": couldn't establish interrupt handler\n");
}
/*
 * Probe for an AT keyboard controller (i8042) on the ISA bus.
 *
 * Normalizes the I/O port resources into two single-port resources
 * (command/data at 0x60 and status at 0x64), optionally verifies the
 * controller is present by polling the status register, then hands off
 * to atkbdc_probe_unit().  Returns 0 on success, ENXIO otherwise.
 */
static int
atkbdc_isa_probe(device_t dev)
{
	struct resource	*port0;
	struct resource	*port1;
	u_long		start;
	u_long		count;
	int		error;
	int		rid;
#if defined(__i386__) || defined(__amd64__)
	bus_space_tag_t	tag;
	bus_space_handle_t ioh1;
	volatile int	i;
	register_t	flags;
#endif

	/* check PnP IDs */
	if (ISA_PNP_PROBE(device_get_parent(dev), dev, atkbdc_ids) == ENXIO)
		return ENXIO;

	device_set_desc(dev, "Keyboard controller (i8042)");

	/*
	 * Adjust I/O port resources.
	 * The AT keyboard controller uses two ports (a command/data port
	 * 0x60 and a status port 0x64), which may be given to us in
	 * one resource (0x60 through 0x64) or as two separate resources
	 * (0x60 and 0x64). Some brain-damaged ACPI BIOS has reversed
	 * command/data port and status port. Furthermore, /boot/device.hints
	 * may contain just one port, 0x60. We shall adjust resource settings
	 * so that these two ports are available as two separate resources
	 * in correct order.
	 */
	device_quiet(dev);
	rid = 0;
	if (bus_get_resource(dev, SYS_RES_IOPORT, rid, &start, &count) != 0)
		return ENXIO;
	/* Reversed ordering: rebase on the command/data port. */
	if (start == IO_KBD + KBD_STATUS_PORT) {
		start = IO_KBD;
		count++;
	}
	if (count > 1)	/* adjust the count and/or start port */
		bus_set_resource(dev, SYS_RES_IOPORT, rid, start, 1);
	port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (port0 == NULL)
		return ENXIO;
	rid = 1;
	/* If no second resource is defined, synthesize the status port. */
	if (bus_get_resource(dev, SYS_RES_IOPORT, rid, NULL, NULL) != 0)
		bus_set_resource(dev, SYS_RES_IOPORT, 1,
		    start + KBD_STATUS_PORT, 1);
	port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (port1 == NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, 0, port0);
		return ENXIO;
	}

#if defined(__i386__) || defined(__amd64__)
	/*
	 * Check if we really have AT keyboard controller. Poll status
	 * register until we get "all clear" indication. If no such
	 * indication comes, it probably means that there is no AT
	 * keyboard controller present. Give up in such case. Check relies
	 * on the fact that reading from non-existing in/out port returns
	 * 0xff on i386. May or may not be true on other platforms.
	 */
	tag = rman_get_bustag(port0);
	ioh1 = rman_get_bushandle(port1);
	flags = intr_disable();
	/* Wait for the input-buffer-full bit (0x2) to clear. */
	for (i = 0; i != 65535; i++) {
		if ((bus_space_read_1(tag, ioh1, 0) & 0x2) == 0)
			break;
	}
	intr_restore(flags);
	if (i == 65535) {
		bus_release_resource(dev, SYS_RES_IOPORT, 0, port0);
		bus_release_resource(dev, SYS_RES_IOPORT, 1, port1);
		if (bootverbose)
			device_printf(dev,
			    "AT keyboard controller not found\n");
		return ENXIO;
	}
#endif

	device_verbose(dev);

	error = atkbdc_probe_unit(device_get_unit(dev), port0, port1);
	if (error == 0)
		bus_generic_probe(dev);

	/* The resources are reallocated at attach time. */
	bus_release_resource(dev, SYS_RES_IOPORT, 0, port0);
	bus_release_resource(dev, SYS_RES_IOPORT, 1, port1);

	return error;
}
/*
 * Interrupt/poll handler for an ATAPI transfer in progress.
 *
 * Decodes the current protocol phase from the interrupt-reason and
 * status registers and advances the transfer state machine: sends the
 * packet command, moves data by PIO/busmastering-PIO/DMA, or completes
 * the transfer.  Returns 1 if the interrupt was handled, 0 if the IRQ
 * was not for us.  `irq' is 0 when called in polled mode.
 */
static int
wdc_atapi_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
{
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct wdc_regs *wdr = &wdc->regs[chp->ch_channel];
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	int len, phase, i, retries=0;
	int ire;
#if NATA_DMA
	int error;
#endif
#if NATA_DMA || NATA_PIOBM
	int dma_flags = 0;
#endif
	void *cmd;

	ATADEBUG_PRINT(("wdc_atapi_intr %s:%d:%d\n",
	    device_xname(atac->atac_dev), chp->ch_channel, drvp->drive),
	    DEBUG_INTR);

	/* Is it not a transfer, but a control operation? */
	if (drvp->state < READY) {
		printf("%s:%d:%d: bad state %d in wdc_atapi_intr\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, drvp->state);
		panic("wdc_atapi_intr: bad state");
	}
	/*
	 * If we missed an interrupt in a PIO transfer, reset and restart.
	 * Don't try to continue transfer, we may have missed cycles.
	 */
	if ((xfer->c_flags & (C_TIMEOU | C_DMA)) == C_TIMEOU) {
		sc_xfer->error = XS_TIMEOUT;
		wdc_atapi_reset(chp, xfer);
		return 1;
	}
#if NATA_PIOBM
	/* Transfer-done interrupt for busmastering PIO operation */
	if ((xfer->c_flags & C_PIOBM) &&
	    (chp->ch_flags & ATACH_PIOBM_WAIT)) {
		chp->ch_flags &= ~ATACH_PIOBM_WAIT;

		/* restore transfer length */
		len = xfer->c_bcount;
		if (xfer->c_lenoff < 0)
			len += xfer->c_lenoff;

		if (sc_xfer->xs_control & XS_CTL_DATA_IN)
			goto end_piobm_datain;
		else
			goto end_piobm_dataout;
	}
#endif
	/* Ack interrupt done in wdc_wait_for_unbusy */
	if (wdc->select)
		wdc->select(chp, xfer->c_drive);
	/* Select the drive before touching any other register. */
	bus_space_write_1(wdr->cmd_iot, wdr->cmd_iohs[wd_sdh], 0,
	    WDSD_IBM | (xfer->c_drive << 4));
	if (wdc_wait_for_unbusy(chp,
	    (irq == 0) ? sc_xfer->timeout : 0, AT_POLL) == WDCWAIT_TOUT) {
		if (irq && (xfer->c_flags & C_TIMEOU) == 0)
			return 0; /* IRQ was not for us */
		printf("%s:%d:%d: device timeout, c_bcount=%d, c_skip=%d\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, xfer->c_bcount, xfer->c_skip);
#if NATA_DMA
		if (xfer->c_flags & C_DMA) {
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
		}
#endif
		sc_xfer->error = XS_TIMEOUT;
		wdc_atapi_reset(chp, xfer);
		return 1;
	}
	if (wdc->irqack)
		wdc->irqack(chp);

#if NATA_DMA
	/*
	 * If we missed an IRQ and were using DMA, flag it as a DMA error
	 * and reset device.
	 */
	if ((xfer->c_flags & C_TIMEOU) && (xfer->c_flags & C_DMA)) {
		ata_dmaerr(drvp, (xfer->c_flags & C_POLL) ? AT_POLL : 0);
		sc_xfer->error = XS_RESET;
		wdc_atapi_reset(chp, xfer);
		return (1);
	}
#endif
	/*
	 * if the request sense command was aborted, report the short sense
	 * previously recorded, else continue normal processing
	 */

#if NATA_DMA || NATA_PIOBM
	if (xfer->c_flags & (C_DMA | C_PIOBM))
		dma_flags = (sc_xfer->xs_control & XS_CTL_DATA_IN)
		    ? WDC_DMA_READ : 0;
#endif
again:
	/* Byte count the device wants to move, from the cylinder regs. */
	len = bus_space_read_1(wdr->cmd_iot, wdr->cmd_iohs[wd_cyl_lo], 0) +
	    256 * bus_space_read_1(wdr->cmd_iot, wdr->cmd_iohs[wd_cyl_hi], 0);
	ire = bus_space_read_1(wdr->cmd_iot, wdr->cmd_iohs[wd_ireason], 0);
	phase = (ire & (WDCI_CMD | WDCI_IN)) | (chp->ch_status & WDCS_DRQ);
	ATADEBUG_PRINT(("wdc_atapi_intr: c_bcount %d len %d st 0x%x err 0x%x "
	    "ire 0x%x :", xfer->c_bcount,
	    len, chp->ch_status, chp->ch_error, ire), DEBUG_INTR);

	switch (phase) {
	case PHASE_CMDOUT:
		cmd = sc_xfer->cmd;
		ATADEBUG_PRINT(("PHASE_CMDOUT\n"), DEBUG_INTR);
#if NATA_DMA
		/* Init the DMA channel if necessary */
		if (xfer->c_flags & C_DMA) {
			error = (*wdc->dma_init)(wdc->dma_arg,
			    chp->ch_channel, xfer->c_drive,
			    xfer->c_databuf, xfer->c_bcount, dma_flags);
			if (error) {
				if (error == EINVAL) {
					/*
					 * We can't do DMA on this transfer
					 * for some reason. Fall back to
					 * PIO.
					 */
					xfer->c_flags &= ~C_DMA;
					error = 0;
				} else {
					sc_xfer->error = XS_DRIVER_STUFFUP;
					break;
				}
			}
		}
#endif

		/* send packet command */
		/* Commands are 12 or 16 bytes long. It's 32-bit aligned */
		wdc->dataout_pio(chp, drvp->drive_flags, cmd,
		    sc_xfer->cmdlen);

#if NATA_DMA
		/* Start the DMA channel if necessary */
		if (xfer->c_flags & C_DMA) {
			(*wdc->dma_start)(wdc->dma_arg,
			    chp->ch_channel, xfer->c_drive);
			chp->ch_flags |= ATACH_DMA_WAIT;
		}
#endif

		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) {
			chp->ch_flags |= ATACH_IRQ_WAIT;
		}
		return 1;

	case PHASE_DATAOUT:
		/* write data */
		ATADEBUG_PRINT(("PHASE_DATAOUT\n"), DEBUG_INTR);
#if NATA_DMA
		/* A data-out phase is only legal for non-DMA writes. */
		if ((sc_xfer->xs_control & XS_CTL_DATA_OUT) == 0 ||
		    (xfer->c_flags & C_DMA) != 0) {
			printf("wdc_atapi_intr: bad data phase DATAOUT\n");
			if (xfer->c_flags & C_DMA) {
				ata_dmaerr(drvp,
				    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			}
			sc_xfer->error = XS_TIMEOUT;
			wdc_atapi_reset(chp, xfer);
			return 1;
		}
#endif
		/* c_lenoff > 0 means the device asked for more than we have. */
		xfer->c_lenoff = len - xfer->c_bcount;
		if (xfer->c_bcount < len) {
			printf("wdc_atapi_intr: warning: write only "
			    "%d of %d requested bytes\n",
			    xfer->c_bcount, len);
			len = xfer->c_bcount;
		}

#if NATA_PIOBM
		if (xfer->c_flags & C_PIOBM) {
			/* start the busmastering PIO */
			(*wdc->piobm_start)(wdc->dma_arg,
			    chp->ch_channel, xfer->c_drive,
			    xfer->c_skip, len, WDC_PIOBM_XFER_IRQ);
			chp->ch_flags |= ATACH_DMA_WAIT | ATACH_IRQ_WAIT |
			    ATACH_PIOBM_WAIT;
			return 1;
		}
#endif
		wdc->dataout_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, len);

#if NATA_PIOBM
 end_piobm_dataout:
#endif
		/* Pad the transfer with zero words if the device wants more. */
		for (i = xfer->c_lenoff; i > 0; i -= 2)
			bus_space_write_2(wdr->cmd_iot,
			    wdr->cmd_iohs[wd_data], 0, 0);

		xfer->c_skip += len;
		xfer->c_bcount -= len;
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) {
			chp->ch_flags |= ATACH_IRQ_WAIT;
		}
		return 1;

	case PHASE_DATAIN:
		/* Read data */
		ATADEBUG_PRINT(("PHASE_DATAIN\n"), DEBUG_INTR);
#if NATA_DMA
		/* A data-in phase is only legal for non-DMA reads. */
		if ((sc_xfer->xs_control & XS_CTL_DATA_IN) == 0 ||
		    (xfer->c_flags & C_DMA) != 0) {
			printf("wdc_atapi_intr: bad data phase DATAIN\n");
			if (xfer->c_flags & C_DMA) {
				ata_dmaerr(drvp,
				    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			}
			sc_xfer->error = XS_TIMEOUT;
			wdc_atapi_reset(chp, xfer);
			return 1;
		}
#endif
		xfer->c_lenoff = len - xfer->c_bcount;
		if (xfer->c_bcount < len) {
			printf("wdc_atapi_intr: warning: reading only "
			    "%d of %d bytes\n", xfer->c_bcount, len);
			len = xfer->c_bcount;
		}

#if NATA_PIOBM
		if (xfer->c_flags & C_PIOBM) {
			/* start the busmastering PIO */
			(*wdc->piobm_start)(wdc->dma_arg,
			    chp->ch_channel, xfer->c_drive,
			    xfer->c_skip, len, WDC_PIOBM_XFER_IRQ);
			chp->ch_flags |= ATACH_DMA_WAIT | ATACH_IRQ_WAIT |
			    ATACH_PIOBM_WAIT;
			return 1;
		}
#endif
		wdc->datain_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, len);

#if NATA_PIOBM
 end_piobm_datain:
#endif
		/* Discard any excess bytes the device wants to send. */
		if (xfer->c_lenoff > 0)
			wdcbit_bucket(chp, xfer->c_lenoff);

		xfer->c_skip += len;
		xfer->c_bcount -= len;
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0) {
			chp->ch_flags |= ATACH_IRQ_WAIT;
		}
		return 1;

	case PHASE_ABORTED:
	case PHASE_COMPLETED:
		ATADEBUG_PRINT(("PHASE_COMPLETED\n"), DEBUG_INTR);
#if NATA_DMA
		if (xfer->c_flags & C_DMA) {
			xfer->c_bcount -= sc_xfer->datalen;
		}
#endif
		sc_xfer->resid = xfer->c_bcount;
		wdc_atapi_phase_complete(xfer);
		return(1);

	default:
		/* Unknown phase: re-read status/error a few times before
		 * declaring the device confused. */
		if (++retries<500) {
			DELAY(100);
			chp->ch_status = bus_space_read_1(wdr->cmd_iot,
			    wdr->cmd_iohs[wd_status], 0);
			chp->ch_error = bus_space_read_1(wdr->cmd_iot,
			    wdr->cmd_iohs[wd_error], 0);
			goto again;
		}
		printf("wdc_atapi_intr: unknown phase 0x%x\n", phase);
		if (chp->ch_status & WDCS_ERR) {
			sc_xfer->error = XS_SHORTSENSE;
			sc_xfer->sense.atapi_sense = chp->ch_error;
		} else {
#if NATA_DMA
			if (xfer->c_flags & C_DMA) {
				ata_dmaerr(drvp,
				    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			}
#endif
			sc_xfer->error = XS_RESET;
			wdc_atapi_reset(chp, xfer);
			return (1);
		}
	}

	ATADEBUG_PRINT(("wdc_atapi_intr: wdc_atapi_done() (end), error 0x%x "
	    "sense 0x%x\n", sc_xfer->error, sc_xfer->sense.atapi_sense),
	    DEBUG_INTR);
	wdc_atapi_done(chp, xfer);
	return (1);
}
/*
 * Attach an Intel/ICP Vortex GDT RAID controller on the PCI bus.
 *
 * Maps the controller's dual-ported memory (DPMEM), performs the
 * firmware handshake over the shared interface area (DEINIT followed
 * by a protocol-version check and a BIOS "special command"), installs
 * the MPR access method pointers, creates the parent DMA tag, and
 * finally initializes, attaches and wires up the interrupt handler.
 * Returns 0 on success or an errno value.
 */
static int
iir_pci_attach(device_t dev)
{
	struct gdt_softc	*gdt;
	struct resource		*io = NULL, *irq = NULL;
	int			retries, rid, error = 0;
	void			*ih;
	u_int8_t		protocol;

	/* map DPMEM */
	rid = PCI_DPMEM;
	io = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (io == NULL) {
		device_printf(dev, "can't allocate register resources\n");
		error = ENOMEM;
		goto err;
	}

	/* get IRQ */
	rid = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "can't find IRQ value\n");
		error = ENOMEM;
		goto err;
	}

	gdt = device_get_softc(dev);
	gdt->sc_devnode = dev;
	gdt->sc_init_level = 0;
	gdt->sc_dpmemt = rman_get_bustag(io);
	gdt->sc_dpmemh = rman_get_bushandle(io);
	gdt->sc_dpmembase = rman_get_start(io);
	gdt->sc_hanum = device_get_unit(dev);
	gdt->sc_bus = pci_get_bus(dev);
	gdt->sc_slot = pci_get_slot(dev);
	gdt->sc_vendor = pci_get_vendor(dev);
	gdt->sc_device = pci_get_device(dev);
	gdt->sc_subdevice = pci_get_subdevice(dev);
	gdt->sc_class = GDT_MPR;
/* no FC ctr.
	if (gdt->sc_device >= GDT_PCI_PRODUCT_FC)
		gdt->sc_class |= GDT_FC;
*/

	/* initialize RP controller */
	/* check and reset interface area */
	/* Write a magic value and read it back to verify DPMEM access. */
	bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC,
	    htole32(GDT_MPR_MAGIC));
	if (bus_space_read_4(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_IC) !=
	    htole32(GDT_MPR_MAGIC)) {
		printf("cannot access DPMEM at 0x%jx (shadowed?)\n",
		    (uintmax_t)gdt->sc_dpmembase);
		error = ENXIO;
		goto err;
	}
	bus_space_set_region_4(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_I960_SZ,
	    htole32(0), GDT_MPR_SZ >> 2);

	/* Disable everything */
	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_EDOOR_EN,
	    bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_EDOOR_EN) | 4);
	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_EDOOR,
	    0xff);
	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_STATUS, 0);
	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_CMD_INDEX, 0);

	/* Post the DPMEM base and ring the local doorbell (DEINIT). */
	bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_INFO, htole32(gdt->sc_dpmembase));
	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_CMD_INDX, 0xff);
	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_LDOOR, 1);

	DELAY(20);
	retries = GDT_RETRIES;
	/* Wait for the firmware to acknowledge with status 0xff. */
	while (bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_STATUS) != 0xff) {
		if (--retries == 0) {
			printf("DEINIT failed\n");
			error = ENXIO;
			goto err;
		}
		DELAY(1);
	}

	protocol = (uint8_t)le32toh(bus_space_read_4(gdt->sc_dpmemt,
	    gdt->sc_dpmemh, GDT_MPR_IC + GDT_S_INFO));
	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_STATUS, 0);
	if (protocol != GDT_PROTOCOL_VERSION) {
		printf("unsupported protocol %d\n", protocol);
		error = ENXIO;
		goto err;
	}

	/* special command to controller BIOS */
	bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_INFO, htole32(0));
	bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_INFO + sizeof (u_int32_t), htole32(0));
	bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_INFO + 2 * sizeof (u_int32_t), htole32(1));
	bus_space_write_4(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_INFO + 3 * sizeof (u_int32_t), htole32(0));
	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_CMD_INDX, 0xfe);
	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh, GDT_MPR_LDOOR, 1);

	DELAY(20);
	retries = GDT_RETRIES;
	/* Wait for acknowledgement of the special command (0xfe). */
	while (bus_space_read_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_STATUS) != 0xfe) {
		if (--retries == 0) {
			printf("initialization error\n");
			error = ENXIO;
			goto err;
		}
		DELAY(1);
	}

	bus_space_write_1(gdt->sc_dpmemt, gdt->sc_dpmemh,
	    GDT_MPR_IC + GDT_S_STATUS, 0);

	/* Hook up the MPR-class access methods. */
	gdt->sc_ic_all_size = GDT_MPR_SZ;
	gdt->sc_copy_cmd = gdt_mpr_copy_cmd;
	gdt->sc_get_status = gdt_mpr_get_status;
	gdt->sc_intr = gdt_mpr_intr;
	gdt->sc_release_event = gdt_mpr_release_event;
	gdt->sc_set_sema0 = gdt_mpr_set_sema0;
	gdt->sc_test_busy = gdt_mpr_test_busy;

	/* Allocate a dmatag representing the capabilities of this attachment */
	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev),
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
	    /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
	    /*nsegments*/GDT_MAXSG,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
	    /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
	    &gdt->sc_parent_dmat) != 0) {
		error = ENXIO;
		goto err;
	}
	gdt->sc_init_level++;

	if (iir_init(gdt) != 0) {
		iir_free(gdt);
		error = ENXIO;
		goto err;
	}

	/* Register with the XPT */
	iir_attach(gdt);

	/* associate interrupt handler */
	if (bus_setup_intr( dev, irq, INTR_TYPE_CAM, NULL, iir_intr,
	    gdt, &ih )) {
		device_printf(dev, "Unable to register interrupt handler\n");
		error = ENXIO;
		goto err;
	}

	gdt_pci_enable_intr(gdt);
	return (0);

err:
	if (irq)
		bus_release_resource( dev, SYS_RES_IRQ, 0, irq );
/*
	if (io)
		bus_release_resource( dev, SYS_RES_MEMORY, rid, io );
*/
	return (error);
}
/*
 * Attach the Sun Enterprise Exx00 clock board.
 *
 * Allocates the memory resources for the board's register banks (the
 * CLKVER bank is optional), decodes the chassis slot count from the
 * status register, and creates the front-panel LED device.  Returns
 * 0 on success, ENXIO on failure.
 */
static int
clkbrd_attach(device_t dev)
{
	struct clkbrd_softc *sc;
	int i, slots;
	uint8_t r;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	for (i = CLKBRD_CF; i <= CLKBRD_CLKVER; i++) {
		sc->sc_rid[i] = i;
		sc->sc_res[i] = bus_alloc_resource_any(sc->sc_dev,
		    SYS_RES_MEMORY, &sc->sc_rid[i], RF_ACTIVE);
		if (sc->sc_res[i] == NULL) {
			/* Only the CLKVER bank is allowed to be absent. */
			if (i != CLKBRD_CLKVER) {
				device_printf(sc->sc_dev,
				    "could not allocate resource %d\n", i);
				goto fail;
			}
			continue;
		}
		sc->sc_bt[i] = rman_get_bustag(sc->sc_res[i]);
		sc->sc_bh[i] = rman_get_bushandle(sc->sc_res[i]);
		if (i == CLKBRD_CLKVER)
			sc->sc_flags |= CLKBRD_HAS_CLKVER;
	}

	/* Decode the slot count; default to 4 if no case matches. */
	slots = 4;
	r = bus_space_read_1(sc->sc_bt[CLKBRD_CLK], sc->sc_bh[CLKBRD_CLK],
	    CLK_STS1);
	switch (r & CLK_STS1_SLOTS_MASK) {
	case CLK_STS1_SLOTS_16:
		slots = 16;
		break;
	case CLK_STS1_SLOTS_8:
		slots = 8;
		break;
	case CLK_STS1_SLOTS_4:
		/* The CLKVER register distinguishes the 5-slot variant. */
		if (sc->sc_flags & CLKBRD_HAS_CLKVER) {
			r = bus_space_read_1(sc->sc_bt[CLKBRD_CLKVER],
			    sc->sc_bh[CLKBRD_CLKVER], CLKVER_SLOTS);
			if (r != 0 &&
			    (r & CLKVER_SLOTS_MASK) == CLKVER_SLOTS_PLUS)
				slots = 5;
		}
	}

	device_printf(sc->sc_dev,
	    "Sun Enterprise Exx00 machine: %d slots\n", slots);

	/* Cache the control register for the LED callback. */
	sc->sc_clk_ctrl = bus_space_read_1(sc->sc_bt[CLKBRD_CLK],
	    sc->sc_bh[CLKBRD_CLK], CLK_CTRL);
	sc->sc_led_dev = led_create(clkbrd_led_func, sc, "clockboard");

	return (0);

fail:
	clkbrd_free_resources(sc);

	return (ENXIO);
}
/*
 * Execute an I2C operation on the Intel ICH SMBus controller.
 *
 * Supports only stop-terminated operations with at most a one-byte
 * command and a two-byte data payload (BYTE/BDATA/WDATA SMBus
 * commands).  Completion is handled by polling when I2C_F_POLL is set
 * (or during cold boot), otherwise by interrupt + tsleep.  Returns 0
 * on success, 1 on any failure.
 */
int
ichiic_i2c_exec(void *cookie, i2c_op_t op, i2c_addr_t addr,
    const void *cmdbuf, size_t cmdlen, void *buf, size_t len, int flags)
{
	struct ichiic_softc *sc = cookie;
	u_int8_t *b;
	u_int8_t ctl, st;
	int retries;

	DPRINTF(("%s: exec: op %d, addr 0x%02x, cmdlen %d, len %d, "
	    "flags 0x%02x\n", sc->sc_dev.dv_xname, op, addr, cmdlen,
	    len, flags));

	/* Wait for bus to be idle */
	for (retries = 100; retries > 0; retries--) {
		st = bus_space_read_1(sc->sc_iot, sc->sc_ioh, ICH_SMB_HS);
		if (!(st & ICH_SMB_HS_BUSY))
			break;
		DELAY(ICHIIC_DELAY);
	}
	DPRINTF(("%s: exec: st 0x%b\n", sc->sc_dev.dv_xname, st,
	    ICH_SMB_HS_BITS));
	if (st & ICH_SMB_HS_BUSY)
		return (1);

	/* Interrupts are unavailable during cold boot; force polling. */
	if (cold || sc->sc_poll)
		flags |= I2C_F_POLL;

	/* Reject operations the SMBus command set cannot express. */
	if (!I2C_OP_STOP_P(op) || cmdlen > 1 || len > 2)
		return (1);

	/* Setup transfer */
	sc->sc_i2c_xfer.op = op;
	sc->sc_i2c_xfer.buf = buf;
	sc->sc_i2c_xfer.len = len;
	sc->sc_i2c_xfer.flags = flags;
	sc->sc_i2c_xfer.error = 0;

	/* Set slave address and transfer direction */
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, ICH_SMB_TXSLVA,
	    ICH_SMB_TXSLVA_ADDR(addr) |
	    (I2C_OP_READ_P(op) ? ICH_SMB_TXSLVA_READ : 0));

	b = (void *)cmdbuf;
	if (cmdlen > 0)
		/* Set command byte */
		bus_space_write_1(sc->sc_iot, sc->sc_ioh,
		    ICH_SMB_HCMD, b[0]);

	if (I2C_OP_WRITE_P(op)) {
		/* Write data */
		b = buf;
		if (len > 0)
			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			    ICH_SMB_HD0, b[0]);
		if (len > 1)
			bus_space_write_1(sc->sc_iot, sc->sc_ioh,
			    ICH_SMB_HD1, b[1]);
	}

	/* Set SMBus command */
	if (len == 0)
		ctl = ICH_SMB_HC_CMD_BYTE;
	else if (len == 1)
		ctl = ICH_SMB_HC_CMD_BDATA;
	else if (len == 2)
		ctl = ICH_SMB_HC_CMD_WDATA;
	else
		/* Unreachable: len > 2 was rejected above. */
		panic("%s: unexpected len %zd", __func__, len);

	if ((flags & I2C_F_POLL) == 0)
		ctl |= ICH_SMB_HC_INTREN;

	/* Start transaction */
	ctl |= ICH_SMB_HC_START;
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, ICH_SMB_HC, ctl);

	if (flags & I2C_F_POLL) {
		/* Poll for completion */
		DELAY(ICHIIC_DELAY);
		for (retries = 1000; retries > 0; retries--) {
			st = bus_space_read_1(sc->sc_iot, sc->sc_ioh,
			    ICH_SMB_HS);
			if ((st & ICH_SMB_HS_BUSY) == 0)
				break;
			DELAY(ICHIIC_DELAY);
		}
		if (st & ICH_SMB_HS_BUSY)
			goto timeout;
		/* Run the interrupt handler by hand to finish the xfer. */
		ichiic_intr(sc);
	} else {
		/* Wait for interrupt */
		if (tsleep(sc, PRIBIO, "ichiic", ICHIIC_TIMEOUT * hz))
			goto timeout;
	}

	if (sc->sc_i2c_xfer.error)
		return (1);

	return (0);

timeout:
	/*
	 * Transfer timeout. Kill the transaction and clear status bits.
	 */
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, ICH_SMB_HC,
	    ICH_SMB_HC_KILL);
	DELAY(ICHIIC_DELAY);
	st = bus_space_read_1(sc->sc_iot, sc->sc_ioh, ICH_SMB_HS);
	if ((st & ICH_SMB_HS_FAILED) == 0)
		printf("%s: abort failed, status 0x%b\n",
		    sc->sc_dev.dv_xname, st, ICH_SMB_HS_BITS);
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, ICH_SMB_HS, st);
	return (1);
}
/*
 * Direct read of a codec register.  Register offsets are scaled by
 * four, i.e. the registers are spaced four bytes apart on the bus.
 */
uint8_t
vsaudio_codec_dread(struct vsaudio_softc *sc, int reg)
{
	int off;

	off = reg << 2;
	return bus_space_read_1(sc->sc_bt, sc->sc_bh, off);
}
/*
 * Read one byte from on-board I/O space at the given offset.
 * Returns 0xff if the obio bus instance has not been attached yet.
 */
uint8_t
obio_read_1(int offset)
{
	if (obio0 != NULL)
		return bus_space_read_1(obio0->sc_tag, obio0->sc_bh, offset);
	return 0xff;
}
/*
 * Interrupt dispatch.
 *
 * Reads the QEC global status, extracts the 4-bit status nibble for
 * this channel, then reads the per-channel status and dispatches to
 * the error, transmit and receive service routines.  Returns nonzero
 * if the interrupt was handled.
 */
int
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	bus_space_tag_t t = sc->sc_bustag;
	uint32_t qecstat, qestat;
	int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		char bits[64];
		int i;
		bus_space_tag_t t1 = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		snprintb(bits, sizeof(bits), QE_CR_STAT_BITS, qestat);
		printf("qe%d: intr: qestat=%s\n", sc->sc_channel, bits);

		/* Dump the MACE register file for diagnosis. */
		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf("  m[%d]=%x,",
			    i, bus_space_read_1(t1, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug) {
			char bits[64];
			snprintb(bits, sizeof(bits), QE_CR_STAT_BITS,
			    qestat);
			printf("qe%d: eint: qestat=%s\n",
			    sc->sc_channel, bits);
		}
#endif
		r |= qe_eint(sc, qestat);
		/* qe_eint() returning -1 means the channel was reset. */
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}
/*
 * Generic Address Structure (GAS) I/O helper: read or write `len'
 * bytes at `address' in the given ACPI address space, using accesses
 * of `access_size' bytes.  `iodir' selects ACPI_IOREAD or ACPI_IOWRITE
 * and `buffer' is the source/destination.  (This chunk ends inside the
 * GAS_PCI_CFG_SPACE case; the remainder of the switch lies beyond it.)
 */
int
acpi_gasio(struct acpi_softc *sc, int iodir, int iospace, uint64_t address,
    int access_size, int len, void *buffer)
{
	u_int8_t *pb;
	bus_space_handle_t ioh;
	struct acpi_mem_map mh;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	bus_addr_t ioaddr;
	int reg, idx, ival, sval;

	dnprintf(50, "gasio: %.2x 0x%.8llx %s\n",
	    iospace, address, (iodir == ACPI_IOWRITE) ? "write" : "read");

	pb = (u_int8_t *)buffer;
	switch (iospace) {
	case GAS_SYSTEM_MEMORY:
		/* copy to/from system memory */
		acpi_map(address, len, &mh);
		if (iodir == ACPI_IOREAD)
			memcpy(buffer, mh.va, len);
		else
			memcpy(mh.va, buffer, len);
		acpi_unmap(&mh);
		break;

	case GAS_SYSTEM_IOSPACE:
		/* read/write from I/O registers */
		ioaddr = address;
		if (acpi_bus_space_map(sc->sc_iot, ioaddr, len, 0,
		    &ioh) != 0) {
			printf("unable to map iospace\n");
			return (-1);
		}
		/* Step through the range one access_size unit at a time. */
		for (reg = 0; reg < len; reg += access_size) {
			if (iodir == ACPI_IOREAD) {
				switch (access_size) {
				case 1:
					*(uint8_t *)(pb+reg) =
					    bus_space_read_1(
					    sc->sc_iot, ioh, reg);
					dnprintf(80, "os_in8(%llx) = %x\n",
					    reg+address,
					    *(uint8_t *)(pb+reg));
					break;
				case 2:
					*(uint16_t *)(pb+reg) =
					    bus_space_read_2(
					    sc->sc_iot, ioh, reg);
					dnprintf(80, "os_in16(%llx) = %x\n",
					    reg+address,
					    *(uint16_t *)(pb+reg));
					break;
				case 4:
					*(uint32_t *)(pb+reg) =
					    bus_space_read_4(
					    sc->sc_iot, ioh, reg);
					break;
				default:
					printf("rdio: invalid size %d\n",
					    access_size);
					break;
				}
			} else {
				switch (access_size) {
				case 1:
					bus_space_write_1(sc->sc_iot, ioh,
					    reg, *(uint8_t *)(pb+reg));
					dnprintf(80, "os_out8(%llx,%x)\n",
					    reg+address,
					    *(uint8_t *)(pb+reg));
					break;
				case 2:
					bus_space_write_2(sc->sc_iot, ioh,
					    reg, *(uint16_t *)(pb+reg));
					dnprintf(80, "os_out16(%llx,%x)\n",
					    reg+address,
					    *(uint16_t *)(pb+reg));
					break;
				case 4:
					bus_space_write_4(sc->sc_iot, ioh,
					    reg, *(uint32_t *)(pb+reg));
					break;
				default:
					printf("wrio: invalid size %d\n",
					    access_size);
					break;
				}
			}

			/* During autoconf some devices are still gathering
			 * information. Delay here to give them an opportunity
			 * to finish. During runtime we simply need to ignore
			 * transient values.
			 */
			if (cold)
				delay(10000);
		}
		acpi_bus_space_unmap(sc->sc_iot, ioh, len, &ioaddr);
		break;

	case GAS_PCI_CFG_SPACE:
		/* format of address:
		 * bits 00..15 = register
		 * bits 16..31 = function
		 * bits 32..47 = device
		 * bits 48..63 = bus
		 */
		pc = NULL;
		tag = pci_make_tag(pc, ACPI_PCI_BUS(address),
		    ACPI_PCI_DEV(address), ACPI_PCI_FN(address));

		/* XXX: This is ugly. read-modify-write does a byte at a time */
		reg = ACPI_PCI_REG(address);
		for (idx = reg; idx < reg+len; idx++) {
			/* Read the containing dword, patch one byte. */
			ival = pci_conf_read(pc, tag, idx & ~0x3);
			if (iodir == ACPI_IOREAD) {
				*pb = ival >> (8 * (idx & 0x3));
			} else {
				sval = *pb;
				ival &= ~(0xFF << (8* (idx & 0x3)));
				ival |= sval << (8* (idx & 0x3));
				pci_conf_write(pc, tag, idx & ~0x3, ival);
			}
			pb++;
		}
/*
 * (Re)initialize a QE channel: stop the channel, set up the
 * descriptor rings, program the QEC per-channel registers and the
 * MACE chip (interrupt masks, FIFO configuration, station address,
 * address filter), and mark the interface running.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	uint32_t qecaddr;
	uint8_t *ea;
	int s;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS,
	    (uint32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS,
	    (uint32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* This channel's slice of the QEC local buffer memory. */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}
/*
 * ed_zbus_attach: attach a Zorro-bus DP8390 Ethernet board (Hydra or
 * ASDG LanRover, distinguished by the Zorro manufacturer id).
 *
 * Maps the NIC register window and the 16K SRAM packet buffer, reads
 * the station address from the board PROM, fills in the dp8390 softc
 * (register map, DCR/CR defaults, buffer-access overrides), runs the
 * generic dp8390 configuration, and finally hooks up the level-2
 * interrupt handler.
 *
 * Fixes vs. previous revision:
 *  - the buffer-map failure path now unmaps the already-mapped
 *    register space instead of leaking the mapping;
 *  - the dp8390_config failure path unmaps the register space with
 *    the same size it was mapped with (0x20, not 0x10).
 */
void ed_zbus_attach(device_t parent, device_t self, void *aux)
{
	struct ed_zbus_softc *zsc = device_private(self);
	struct dp8390_softc *sc = &zsc->sc_dp8390;
	struct zbus_args *zap = aux;
	bus_space_handle_t promh;
	bus_addr_t memaddr, promaddr, regaddr;
	int i;

	zsc->sc_bst.base = (bus_addr_t)zap->va;
	zsc->sc_bst.absm = &amiga_bus_stride_1;

	/* Board-specific offsets of registers, SRAM buffer, and PROM. */
	if (zap->manid == HYDRA_MANID) {
		regaddr = HYDRA_REGADDR;
		memaddr = HYDRA_MEMADDR;
		promaddr = HYDRA_PROMADDR;
	} else {
		regaddr = ASDG_REGADDR;
		memaddr = ASDG_MEMADDR;
		promaddr = ASDG_PROMADDR;
	}

	sc->sc_dev = self;
	sc->sc_regt = &zsc->sc_bst;
	sc->sc_buft = &zsc->sc_bst;

	if (bus_space_map(sc->sc_regt, regaddr, 0x20, 0, &sc->sc_regh)) {
		aprint_error_dev(self, "can't map i/o space\n");
		return;
	}
	if (bus_space_map(sc->sc_buft, memaddr, ED_ZBUS_MEMSIZE, 0,
	    &sc->sc_bufh)) {
		aprint_error_dev(self, "can't map buffer space\n");
		/* Release the register mapping taken above. */
		bus_space_unmap(sc->sc_regt, sc->sc_regh, 0x20);
		return;
	}

	/* SRAM buffer size is always 16K */
	sc->mem_start = 0;
	sc->mem_size = ED_ZBUS_MEMSIZE;

	/*
	 * Read the ethernet address from the PROM.
	 * Interrupts must be inactive when reading the PROM, as the
	 * interrupt line is shared with one of its address lines.
	 */
	NIC_PUT(sc->sc_regt, sc->sc_regh, ED_P0_IMR, 0x00);
	NIC_PUT(sc->sc_regt, sc->sc_regh, ED_P0_ISR, 0xff);
	if (bus_space_map(&zsc->sc_bst, promaddr, ETHER_ADDR_LEN * 2, 0,
	    &promh) == 0) {
		/* Address bytes sit at every other PROM location. */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			sc->sc_enaddr[i] =
			    bus_space_read_1(&zsc->sc_bst, promh, i * 2);
		bus_space_unmap(&zsc->sc_bst, promh, ETHER_ADDR_LEN * 2);
	}

	/* Initialize sc_reg_map[]. Registers have stride 2 on the bus. */
	for (i = 0; i < 16; i++)
		sc->sc_reg_map[i] = i << 1;

	/*
	 * Set 2 word FIFO threshold, no auto-init Remote DMA,
	 * byte order 68k, word-wide DMA xfers.
	 */
	sc->dcr_reg = ED_DCR_FT0 | ED_DCR_WTS | ED_DCR_LS | ED_DCR_BOS;

	/* Remote DMA abort .*/
	sc->cr_proto = ED_CR_RD2;

	/*
	 * Override all functions which deal with the buffer, because
	 * this implementation only allows 16-bit buffer accesses.
	 */
	sc->test_mem = ed_zbus_test_mem;
	sc->read_hdr = ed_zbus_read_hdr;
	sc->ring_copy = ed_zbus_ring_copy;
	sc->write_mbuf = ed_zbus_write_mbuf;

	sc->sc_flags = device_cfdata(self)->cf_flags;
	sc->is790 = 0;
	sc->sc_media_init = dp8390_media_init;
	sc->sc_enabled = 1;

	/* Do generic DS8390/WD83C690 config. */
	if (dp8390_config(sc)) {
		bus_space_unmap(sc->sc_buft, sc->sc_bufh, ED_ZBUS_MEMSIZE);
		/* Unmap with the same size we mapped (0x20). */
		bus_space_unmap(sc->sc_regt, sc->sc_regh, 0x20);
		return;
	}

	/* establish level 2 interrupt handler */
	zsc->sc_isr.isr_intr = dp8390_intr;
	zsc->sc_isr.isr_arg = sc;
	zsc->sc_isr.isr_ipl = 2;
	add_isr(&zsc->sc_isr);
}
/*
 * sociic_read: fetch a single byte from the I2C controller register
 * at offset "addr" through the softc's bus space mapping.
 */
uint8_t sociic_read(struct sociic_softc *sc, bus_addr_t addr)
{
	uint8_t val;

	val = bus_space_read_1(sc->sc_iot, sc->sc_ioh, addr);
	return val;
}