static void
lehwreset(struct lance_softc *sc)
{
	struct le_softc *lesc = (struct le_softc *)sc;
	struct lsi64854_softc *dma = lesc->sc_dma;
	uint32_t csr;
	u_int aui_bit;

	/*
	 * Reset DMA channel.
	 */
	csr = L64854_GCSR(dma);
	aui_bit = csr & E_TP_AUI;
	DMA_RESET(dma);

	/* Write bits 24-31 of Lance address */
	bus_space_write_4(dma->sc_bustag, dma->sc_regs, L64854_REG_ENBAR,
	    lesc->sc_laddr & 0xff000000);

	DMA_ENINTR(dma);

	/*
	 * Disable E-cache invalidates on chip writes.
	 * Retain previous cable selection bit.
	 */
	csr = L64854_GCSR(dma);
	csr |= (E_DSBL_WR_INVAL | aui_bit);
	L64854_SCSR(dma, csr);
	delay(20000);	/* must not touch le for 20ms */
}
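/* Stop any DMA in progress by clearing the enable bit in the DMA CSR. */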
static void
esp_dma_stop(struct ncr53c9x_softc *sc)
{
	struct esp_softc *esc = (struct esp_softc *)sc;

	L64854_SCSR(esc->sc_dma, L64854_GCSR(esc->sc_dma) & ~D_EN_DMA);
}
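/*
 * Handle a no-carrier condition reported by the LANCE core, possibly
 * switching between the UTP and AUI ports.
 */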
static void
le_dma_nocarrier(struct lance_softc *sc)
{
	struct le_dma_softc *lesc = (struct le_dma_softc *)sc;

	/*
	 * Check if the user has requested a certain cable type, and
	 * if so, honor that request.
	 */
	if (L64854_GCSR(lesc->sc_dma) & E_TP_AUI) {
		switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
		case IFM_10_5:
		case IFM_AUTO:
			if_printf(sc->sc_ifp, "lost carrier on UTP port, "
			    "switching to AUI port\n");
			le_dma_setaui(sc);
		}
	} else {
		switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
		case IFM_10_T:
		case IFM_AUTO:
			if_printf(sc->sc_ifp, "lost carrier on AUI port, "
			    "switching to UTP port\n");
			le_dma_setutp(sc);
		}
	}
}
static void
lenocarrier(struct lance_softc *sc)
{
	struct le_softc *lesc = (struct le_softc *)sc;

	/*
	 * Check if the user has requested a certain cable type, and
	 * if so, honor that request.
	 */
	printf("%s: lost carrier on ", device_xname(sc->sc_dev));
	if (L64854_GCSR(lesc->sc_dma) & E_TP_AUI) {
		printf("UTP port");
		switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
		case IFM_10_5:
		case IFM_AUTO:
			printf(", switching to AUI port");
			lesetaui(sc);
		}
	} else {
		printf("AUI port");
		switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
		case IFM_10_T:
		case IFM_AUTO:
			printf(", switching to UTP port");
			lesetutp(sc);
		}
	}
	printf("\n");
}
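/* Select the AUI port by clearing the TP/AUI selection bit in the ledma CSR. */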
static void
le_dma_setaui(struct lance_softc *sc)
{
	struct lsi64854_softc *dma = ((struct le_dma_softc *)sc)->sc_dma;

	L64854_SCSR(dma, L64854_GCSR(dma) & ~E_TP_AUI);
	DELAY(20000);	/* We must not touch the LANCE chip for 20ms. */
}
/*
 * Finish attaching this DMA device.
 * Front-end must fill in these fields:
 *	sc_bustag
 *	sc_dmatag
 *	sc_regs
 *	sc_burst
 *	sc_channel (one of SCSI, ENET, PP)
 *	sc_client (one of SCSI, ENET, PP `soft_c' pointers)
 */
void
lsi64854_attach(struct lsi64854_softc *sc)
{
	uint32_t csr;

	/* Indirect functions */
	switch (sc->sc_channel) {
	case L64854_CHANNEL_SCSI:
		sc->intr = lsi64854_scsi_intr;
		sc->setup = lsi64854_setup;
		break;
	case L64854_CHANNEL_ENET:
		sc->intr = lsi64854_enet_intr;
		break;
	case L64854_CHANNEL_PP:
		sc->setup = lsi64854_setup_pp;
		break;
	default:
		aprint_error(": unknown channel");
	}
	sc->reset = lsi64854_reset;

	/* Allocate a dmamap */
	if (bus_dmamap_create(sc->sc_dmatag, MAX_DMA_SZ, 1, MAX_DMA_SZ,
	    0, BUS_DMA_WAITOK, &sc->sc_dmamap) != 0) {
		aprint_error(": DMA map create failed\n");
		return;
	}

	csr = L64854_GCSR(sc);
	sc->sc_rev = csr & L64854_DEVID;
	if (sc->sc_rev == DMAREV_HME) {
		return;
	}
	aprint_normal(": DMA rev ");
	switch (sc->sc_rev) {
	case DMAREV_0:
		aprint_normal("0");
		break;
	case DMAREV_ESC:
		aprint_normal("esc");
		break;
	case DMAREV_1:
		aprint_normal("1");
		break;
	case DMAREV_PLUS:
		aprint_normal("1+");
		break;
	case DMAREV_2:
		aprint_normal("2");
		break;
	default:
		aprint_normal("unknown (0x%x)", sc->sc_rev);
	}

	DPRINTF(LDB_ANY, (", burst 0x%x, csr 0x%x", sc->sc_burst, csr));
	aprint_normal("\n");
}
/*
 * Parallel port DMA interrupt.
 */
int
lsi64854_pp_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	char bits[64];
	int ret, trans, resid = 0;
	uint32_t csr;

	csr = L64854_GCSR(sc);

	DPRINTF(LDB_PP, ("%s: pp intr: addr 0x%x, csr %s\n",
	    device_xname(sc->sc_dev),
	    bus_space_read_4(sc->sc_bustag, sc->sc_regs, L64854_REG_ADDR),
	    bitmask_snprintf(csr, PDMACSR_BITS, bits, sizeof(bits))));

	if (csr & (P_ERR_PEND|P_SLAVE_ERR)) {
		resid = bus_space_read_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_CNT);
		printf("%s: pp error: resid %d csr=%s\n",
		    device_xname(sc->sc_dev), resid,
		    bitmask_snprintf(csr, PDMACSR_BITS, bits, sizeof(bits)));
		csr &= ~P_EN_DMA;	/* Stop DMA */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= P_INVALIDATE|P_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		return 1;
	}

	ret = (csr & P_INT_PEND) != 0;

	if (sc->sc_active != 0) {
		DMA_DRAIN(sc, 0);
		resid = bus_space_read_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_CNT);
	}

	/* DMA has stopped */
	csr &= ~D_EN_DMA;
	L64854_SCSR(sc, csr);
	sc->sc_active = 0;

	trans = sc->sc_dmasize - resid;
	if (trans < 0) {			/* transferred < 0 ? */
		trans = sc->sc_dmasize;
	}
	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr += trans;

	if (sc->sc_dmamap->dm_nsegs > 0) {
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0,
		    sc->sc_dmasize,
		    (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);
	}

	return ret != 0;
}
/*
 * Parallel port DMA interrupt
 */
static int
lsi64854_pp_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	bus_dma_tag_t dmat;
	bus_dmamap_t dmam;
	size_t dmasize;
	int ret, trans, resid = 0;
	uint32_t csr;

	csr = L64854_GCSR(sc);

	DPRINTF(LDB_PP, ("%s: addr 0x%x, csr %b\n", __func__,
	    bus_read_4(sc->sc_res, L64854_REG_ADDR), csr, PDMACSR_BITS));

	if ((csr & (P_ERR_PEND | P_SLAVE_ERR)) != 0) {
		resid = bus_read_4(sc->sc_res, L64854_REG_CNT);
		device_printf(sc->sc_dev, "error: resid %d csr=%b\n", resid,
		    csr, PDMACSR_BITS);
		csr &= ~P_EN_DMA;	/* Stop DMA. */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= P_INVALIDATE | P_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		return (-1);
	}

	ret = (csr & P_INT_PEND) != 0;

	if (sc->sc_active != 0) {
		DMA_DRAIN(sc, 0);
		resid = bus_read_4(sc->sc_res, L64854_REG_CNT);
	}

	/* DMA has stopped */
	csr &= ~D_EN_DMA;
	L64854_SCSR(sc, csr);
	sc->sc_active = 0;

	dmasize = sc->sc_dmasize;
	trans = dmasize - resid;
	if (trans < 0)				/* transferred < 0? */
		trans = dmasize;
	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr = (char *)*sc->sc_dmaaddr + trans;

	if (dmasize != 0) {
		dmat = sc->sc_buffer_dmat;
		dmam = sc->sc_dmamap;
		bus_dmamap_sync(dmat, dmam, (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, dmam);
	}

	return (ret != 0);
}
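/*
 * Switch the ledma channel to the AUI connector; the LANCE must not be
 * touched for 20ms afterwards.
 */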
void
lesetaui(struct lance_softc *sc)
{
	struct lsi64854_softc *dma = ((struct le_softc *)sc)->sc_dma;
	uint32_t csr;

	csr = L64854_GCSR(dma);
	csr &= ~E_TP_AUI;
	L64854_SCSR(dma, csr);
	delay(20000);	/* must not touch le for 20ms */
}
/*
 * Setup a DMA transfer.
 */
static int
lsi64854_setup_pp(struct lsi64854_softc *sc, void **addr, size_t *len,
    int datain, size_t *dmasize)
{
	int error;
	uint32_t csr;

	DMA_FLUSH(sc, 0);

	sc->sc_dmaaddr = addr;
	sc->sc_dmalen = len;
	sc->sc_datain = datain;

	DPRINTF(LDB_PP, ("%s: pp start %ld@%p,%d\n", __func__,
	    (long)*sc->sc_dmalen, *sc->sc_dmaaddr, datain != 0 ? 1 : 0));

	KASSERT(*dmasize <= sc->sc_maxdmasize,
	    ("%s: transfer size %ld too large", __func__, (long)*dmasize));

	sc->sc_dmasize = *dmasize;

	DPRINTF(LDB_PP, ("%s: dmasize=%ld\n", __func__, (long)*dmasize));

	/* Load the transfer buffer and program the DMA address. */
	if (*dmasize != 0) {
		error = bus_dmamap_load(sc->sc_buffer_dmat, sc->sc_dmamap,
		    *sc->sc_dmaaddr, *dmasize, lsi64854_map_pp, sc,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			return (error);
	}

	/* Setup the DMA control register. */
	csr = L64854_GCSR(sc);
	csr &= ~L64854_BURST_SIZE;
	if (sc->sc_burst == 32)
		csr |= L64854_BURST_32;
	else if (sc->sc_burst == 16)
		csr |= L64854_BURST_16;
	else
		csr |= L64854_BURST_0;
	csr |= P_EN_DMA | P_INT_EN | P_EN_CNT;
#if 0
	/* This bit is read-only in PP csr register. */
	if (datain != 0)
		csr |= P_WRITE;
	else
		csr &= ~P_WRITE;
#endif
	L64854_SCSR(sc, csr);

	return (0);
}
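/* Report the currently active media (10baseT or AUI) from the ledma CSR. */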
void
lemediastatus(struct lance_softc *sc, struct ifmediareq *ifmr)
{
	struct lsi64854_softc *dma = ((struct le_softc *)sc)->sc_dma;

	/*
	 * Notify the world which media we're currently using.
	 */
	if (L64854_GCSR(dma) & E_TP_AUI)
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
	else
		ifmr->ifm_active = IFM_ETHER|IFM_10_5;
}
/*
 * Pseudo (chained) interrupt to le driver to handle DMA errors.
 */
int
lsi64854_enet_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	char bits[64];
	uint32_t csr;
	static int dodrain = 0;
	int rv;

	csr = L64854_GCSR(sc);

	/* If the DMA logic shows an interrupt, claim it */
	rv = ((csr & E_INT_PEND) != 0) ? 1 : 0;

	if (csr & (E_ERR_PEND|E_SLAVE_ERR)) {
		printf("%s: error: csr=%s\n", device_xname(sc->sc_dev),
		    bitmask_snprintf(csr, EDMACSR_BITS, bits, sizeof(bits)));
		csr &= ~L64854_EN_DMA;	/* Stop DMA */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= E_INVALIDATE|E_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		DMA_RESET(sc);
		dodrain = 1;
		return 1;
	}

	if (dodrain) {	/* XXX - is this necessary with D_DSBL_WRINVAL on? */
		int i = 10;

		csr |= E_DRAIN;
		L64854_SCSR(sc, csr);
		while (i-- > 0 && (L64854_GCSR(sc) & D_DRAINING))
			delay(1);
	}

	return rv | (*sc->sc_intrchain)(sc->sc_intrchainarg);
}
/*
 * Pseudo (chained) interrupt to le(4) driver to handle DMA errors
 */
static int
lsi64854_enet_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	uint32_t csr;
	int i, rv;

	csr = L64854_GCSR(sc);

	/* If the DMA logic shows an interrupt, claim it */
	rv = ((csr & E_INT_PEND) != 0) ? 1 : 0;

	if (csr & (E_ERR_PEND | E_SLAVE_ERR)) {
		device_printf(sc->sc_dev, "error: csr=%b\n", csr,
		    EDMACSR_BITS);
		csr &= ~L64854_EN_DMA;	/* Stop DMA. */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= E_INVALIDATE | E_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		/* Will be drained with the LE_C0_IDON interrupt. */
		sc->sc_dodrain = 1;
		return (-1);
	}

	/* XXX - is this necessary with E_DSBL_WR_INVAL on? */
	if (sc->sc_dodrain) {
		i = 10;
		csr |= E_DRAIN;
		L64854_SCSR(sc, csr);
		while (i-- > 0 && (L64854_GCSR(sc) & E_DRAINING))
			DELAY(1);
		sc->sc_dodrain = 0;
	}

	return (rv);
}
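/* Undo the buffer DMA tag and map set up by lsi64854_attach(). */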
int
lsi64854_detach(struct lsi64854_softc *sc)
{

	if (sc->setup != NULL) {
		bus_dmamap_sync(sc->sc_buffer_dmat, sc->sc_dmamap,
		    (L64854_GCSR(sc) & L64854_WRITE) != 0 ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		bus_dmamap_unload(sc->sc_buffer_dmat, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_buffer_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_buffer_dmat);
	}

	return (0);
}
/*
 * Pseudo (chained) interrupt from the esp driver to kick the
 * current running DMA transfer.  Called from ncr53c9x_intr()
 * for now.
 *
 * return 1 if it was a DMA continue.
 */
static int
lsi64854_scsi_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	struct ncr53c9x_softc *nsc = sc->sc_client;
	bus_dma_tag_t dmat;
	bus_dmamap_t dmam;
	size_t dmasize;
	int lxfer, resid, trans;
	uint32_t csr;

	csr = L64854_GCSR(sc);

	DPRINTF(LDB_SCSI, ("%s: addr 0x%x, csr %b\n", __func__,
	    bus_read_4(sc->sc_res, L64854_REG_ADDR), csr, DDMACSR_BITS));

	if (csr & (D_ERR_PEND | D_SLAVE_ERR)) {
		device_printf(sc->sc_dev, "error: csr=%b\n", csr,
		    DDMACSR_BITS);
		csr &= ~D_EN_DMA;	/* Stop DMA. */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= D_INVALIDATE | D_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		return (-1);
	}

	/* This is an "assertion" :) */
	if (sc->sc_active == 0)
		panic("%s: DMA wasn't active", __func__);

	DMA_DRAIN(sc, 0);

	/* DMA has stopped */
	csr &= ~D_EN_DMA;
	L64854_SCSR(sc, csr);
	sc->sc_active = 0;

	dmasize = sc->sc_dmasize;
	if (dmasize == 0) {
		/* A "Transfer Pad" operation completed. */
		DPRINTF(LDB_SCSI, ("%s: discarded %d bytes (tcl=%d, "
		    "tcm=%d)\n", __func__,
		    NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8),
		    NCR_READ_REG(nsc, NCR_TCL),
		    NCR_READ_REG(nsc, NCR_TCM)));
		return (0);
	}

	resid = 0;
	/*
	 * If a transfer onto the SCSI bus gets interrupted by the device
	 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
	 * as residual since the NCR53C9X counter registers get decremented
	 * as bytes are clocked into the FIFO.
	 */
	if ((csr & D_WRITE) == 0 &&
	    (resid = (NCR_READ_REG(nsc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
		DPRINTF(LDB_SCSI, ("%s: empty esp FIFO of %d ", __func__,
		    resid));
		if (nsc->sc_rev == NCR_VARIANT_FAS366 &&
		    (NCR_READ_REG(nsc, NCR_CFG3) & NCRFASCFG3_EWIDE))
			resid <<= 1;
	}

	if ((nsc->sc_espstat & NCRSTAT_TC) == 0) {
		lxfer = nsc->sc_features & NCR_F_LARGEXFER;
		/*
		 * "Terminal count" is off, so read the residue
		 * out of the NCR53C9X counter registers.
		 */
		resid += (NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8) |
		    (lxfer != 0 ? (NCR_READ_REG(nsc, NCR_TCH) << 16) : 0));

		if (resid == 0 && dmasize == 65536 && lxfer == 0)
			/* A transfer of 64k is encoded as TCL=TCM=0. */
			resid = 65536;
	}

	trans = dmasize - resid;
	if (trans < 0) {			/* transferred < 0? */
#if 0
		/*
		 * This situation can happen in perfectly normal operation
		 * if the ESP is reselected while using DMA to select
		 * another target.  As such, don't print the warning.
		 */
		device_printf(sc->sc_dev, "xfer (%d) > req (%d)\n", trans,
		    dmasize);
#endif
		trans = dmasize;
	}

	DPRINTF(LDB_SCSI, ("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
	    __func__, NCR_READ_REG(nsc, NCR_TCL),
	    NCR_READ_REG(nsc, NCR_TCM),
	    (nsc->sc_features & NCR_F_LARGEXFER) != 0 ?
	    NCR_READ_REG(nsc, NCR_TCH) : 0, trans, resid));

	if (dmasize != 0) {
		dmat = sc->sc_buffer_dmat;
		dmam = sc->sc_dmamap;
		bus_dmamap_sync(dmat, dmam, (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, dmam);
	}

	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr = (char *)*sc->sc_dmaaddr + trans;

#if 0	/* this is not normal operation just yet */
	if (*sc->sc_dmalen == 0 || nsc->sc_phase != nsc->sc_prevphase)
		return (0);

	/* and again */
	dma_start(sc, sc->sc_dmaaddr, sc->sc_dmalen, DMACSR(sc) & D_WRITE);
	return (1);
#endif
	return (0);
}
/*
 * setup a DMA transfer
 */
static int
lsi64854_setup(struct lsi64854_softc *sc, void **addr, size_t *len,
    int datain, size_t *dmasize)
{
	long bcnt;
	int error;
	uint32_t csr;

	DMA_FLUSH(sc, 0);

#if 0
	DMACSR(sc) &= ~D_INT_EN;
#endif

	sc->sc_dmaaddr = addr;
	sc->sc_dmalen = len;
	sc->sc_datain = datain;

	KASSERT(*dmasize <= sc->sc_maxdmasize,
	    ("%s: transfer size %ld too large", __func__, (long)*dmasize));

	sc->sc_dmasize = *dmasize;

	DPRINTF(LDB_ANY, ("%s: dmasize=%ld\n", __func__, (long)*dmasize));

	/*
	 * XXX what length?
	 */
	if (sc->sc_rev == DMAREV_HME) {
		L64854_SCSR(sc, sc->sc_dmactl | L64854_RESET);
		L64854_SCSR(sc, sc->sc_dmactl);

		bus_write_4(sc->sc_res, L64854_REG_CNT, *dmasize);
	}

	/*
	 * Load the transfer buffer and program the DMA address.
	 * Note that the NCR53C9x core can't handle EINPROGRESS so we set
	 * BUS_DMA_NOWAIT.
	 */
	if (*dmasize != 0) {
		error = bus_dmamap_load(sc->sc_buffer_dmat, sc->sc_dmamap,
		    *sc->sc_dmaaddr, *dmasize, lsi64854_map_scsi, sc,
		    BUS_DMA_NOWAIT);
		if (error != 0)
			return (error);
	}

	if (sc->sc_rev == DMAREV_ESC) {
		/* DMA ESC chip bug work-around */
		bcnt = *dmasize;
		if (((bcnt + (long)*sc->sc_dmaaddr) & PAGE_MASK_8K) != 0)
			bcnt = roundup(bcnt, PAGE_SIZE_8K);
		bus_write_4(sc->sc_res, L64854_REG_CNT, bcnt);
	}

	/* Setup the DMA control register. */
	csr = L64854_GCSR(sc);

	if (datain != 0)
		csr |= L64854_WRITE;
	else
		csr &= ~L64854_WRITE;
	csr |= L64854_INT_EN;

	if (sc->sc_rev == DMAREV_HME)
		csr |= (D_DSBL_SCSI_DRN | D_EN_DMA);

	L64854_SCSR(sc, csr);

	return (0);
}
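/*
 * Reset the DMA engine: flush and unload any pending transfer, pulse the
 * reset bit, then reprogram interrupt enable and burst size for this chip
 * revision.
 */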
static void
lsi64854_reset(struct lsi64854_softc *sc)
{
	bus_dma_tag_t dmat;
	bus_dmamap_t dmam;
	uint32_t csr;

	DMA_FLUSH(sc, 1);
	csr = L64854_GCSR(sc);

	DPRINTF(LDB_ANY, ("%s: csr 0x%x\n", __func__, csr));

	if (sc->sc_dmasize != 0) {
		dmat = sc->sc_buffer_dmat;
		dmam = sc->sc_dmamap;
		bus_dmamap_sync(dmat, dmam, (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		bus_dmamap_unload(dmat, dmam);
	}

	if (sc->sc_rev == DMAREV_HME)
		L64854_SCSR(sc, csr | D_HW_RESET_FAS366);

	csr |= L64854_RESET;		/* reset DMA */
	L64854_SCSR(sc, csr);
	DELAY(200);			/* > 10 Sbus clocks(?) */

	/*DMAWAIT1(sc); why was this here? */
	csr = L64854_GCSR(sc);
	csr &= ~L64854_RESET;		/* de-assert reset line */
	L64854_SCSR(sc, csr);
	DELAY(5);			/* allow a few ticks to settle */

	csr = L64854_GCSR(sc);
	csr |= L64854_INT_EN;		/* enable interrupts */
	if (sc->sc_rev > DMAREV_1 && sc->sc_channel == L64854_CHANNEL_SCSI) {
		if (sc->sc_rev == DMAREV_HME)
			csr |= D_TWO_CYCLE;
		else
			csr |= D_FASTER;
	}

	/* Set burst */
	switch (sc->sc_rev) {
	case DMAREV_HME:
	case DMAREV_2:
		csr &= ~L64854_BURST_SIZE;
		if (sc->sc_burst == 32)
			csr |= L64854_BURST_32;
		else if (sc->sc_burst == 16)
			csr |= L64854_BURST_16;
		else
			csr |= L64854_BURST_0;
		break;
	case DMAREV_ESC:
		csr |= D_ESC_AUTODRAIN;	/* Auto-drain */
		if (sc->sc_burst == 32)
			csr &= ~D_ESC_BURST;
		else
			csr |= D_ESC_BURST;
		break;
	default:
		break;
	}
	L64854_SCSR(sc, csr);

	if (sc->sc_rev == DMAREV_HME) {
		bus_write_4(sc->sc_res, L64854_REG_ADDR, 0);
		sc->sc_dmactl = csr;
	}
	sc->sc_active = 0;

	DPRINTF(LDB_ANY, ("%s: done, csr 0x%x\n", __func__, csr));
}
/*
 * Finish attaching this DMA device.
 * Front-end must fill in these fields:
 *	sc_res
 *	sc_burst
 *	sc_channel (one of SCSI, ENET, PP)
 *	sc_client (one of SCSI, ENET, PP `soft_c' pointers)
 */
int
lsi64854_attach(struct lsi64854_softc *sc)
{
	bus_dma_lock_t *lockfunc;
	struct ncr53c9x_softc *nsc;
	void *lockfuncarg;
	uint32_t csr;
	int error;

	lockfunc = NULL;
	lockfuncarg = NULL;
	sc->sc_maxdmasize = MAX_DMA_SZ;

	switch (sc->sc_channel) {
	case L64854_CHANNEL_SCSI:
		nsc = sc->sc_client;
		if (NCR_LOCK_INITIALIZED(nsc) == 0) {
			device_printf(sc->sc_dev, "mutex not initialized\n");
			return (ENXIO);
		}
		lockfunc = busdma_lock_mutex;
		lockfuncarg = &nsc->sc_lock;
		sc->sc_maxdmasize = nsc->sc_maxxfer;
		sc->intr = lsi64854_scsi_intr;
		sc->setup = lsi64854_setup;
		break;
	case L64854_CHANNEL_ENET:
		sc->intr = lsi64854_enet_intr;
		break;
	case L64854_CHANNEL_PP:
		sc->intr = lsi64854_pp_intr;
		sc->setup = lsi64854_setup_pp;
		break;
	default:
		device_printf(sc->sc_dev, "unknown channel\n");
	}
	sc->reset = lsi64854_reset;

	if (sc->setup != NULL) {
		error = bus_dma_tag_create(
		    sc->sc_parent_dmat,		/* parent */
		    1, BOUNDARY,		/* alignment, boundary */
		    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    sc->sc_maxdmasize,		/* maxsize */
		    1,				/* nsegments */
		    sc->sc_maxdmasize,		/* maxsegsize */
		    BUS_DMA_ALLOCNOW,		/* flags */
		    lockfunc, lockfuncarg,	/* lockfunc, lockfuncarg */
		    &sc->sc_buffer_dmat);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "cannot allocate buffer DMA tag\n");
			return (error);
		}

		error = bus_dmamap_create(sc->sc_buffer_dmat, 0,
		    &sc->sc_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev, "DMA map create failed\n");
			bus_dma_tag_destroy(sc->sc_buffer_dmat);
			return (error);
		}
	}

	csr = L64854_GCSR(sc);
	sc->sc_rev = csr & L64854_DEVID;
	if (sc->sc_rev == DMAREV_HME)
		return (0);
	device_printf(sc->sc_dev, "DMA rev. ");
	switch (sc->sc_rev) {
	case DMAREV_0:
		printf("0");
		break;
	case DMAREV_ESC:
		printf("ESC");
		break;
	case DMAREV_1:
		printf("1");
		break;
	case DMAREV_PLUS:
		printf("1+");
		break;
	case DMAREV_2:
		printf("2");
		break;
	default:
		printf("unknown (0x%x)", sc->sc_rev);
	}

	DPRINTF(LDB_ANY, (", burst 0x%x, csr 0x%x", sc->sc_burst, csr));
	printf("\n");

	return (0);
}
/*
 * setup a DMA transfer
 */
int
lsi64854_setup(struct lsi64854_softc *sc, uint8_t **addr, size_t *len,
    int datain, size_t *dmasize)
{
	uint32_t csr;

	DMA_FLUSH(sc, 0);

#if 0
	DMACSR(sc) &= ~D_INT_EN;
#endif
	sc->sc_dmaaddr = addr;
	sc->sc_dmalen = len;

	/*
	 * the rules say we cannot transfer more than the limit
	 * of this DMA chip (64k for old and 16Mb for new),
	 * and we cannot cross a 16Mb boundary.
	 */
	*dmasize = sc->sc_dmasize =
	    min(*dmasize, DMAMAX((size_t)*sc->sc_dmaaddr));

	DPRINTF(LDB_ANY, ("%s: dmasize = %ld\n", __func__,
	    (long)sc->sc_dmasize));

	/*
	 * XXX what length?
	 */
	if (sc->sc_rev == DMAREV_HME) {
		L64854_SCSR(sc, sc->sc_dmactl | L64854_RESET);
		L64854_SCSR(sc, sc->sc_dmactl);

		bus_space_write_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_CNT, *dmasize);
	}

	/* Program the DMA address */
	if (sc->sc_dmasize) {
		sc->sc_dvmaaddr = *sc->sc_dmaaddr;
		if (bus_dmamap_load(sc->sc_dmatag, sc->sc_dmamap,
		    *sc->sc_dmaaddr, sc->sc_dmasize,
		    NULL /* kernel address */,
		    BUS_DMA_NOWAIT | BUS_DMA_STREAMING))
			panic("%s: cannot allocate DVMA address",
			    device_xname(sc->sc_dev));
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0,
		    sc->sc_dmasize,
		    datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		bus_space_write_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_ADDR, sc->sc_dmamap->dm_segs[0].ds_addr);
	}

	if (sc->sc_rev == DMAREV_ESC) {
		/* DMA ESC chip bug work-around */
		long bcnt = sc->sc_dmasize;
		long eaddr = bcnt + (long)*sc->sc_dmaaddr;

		if ((eaddr & PGOFSET) != 0)
			bcnt = roundup(bcnt, PAGE_SIZE);
		bus_space_write_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_CNT, bcnt);
	}

	/* Setup DMA control register */
	csr = L64854_GCSR(sc);

	if (datain)
		csr |= L64854_WRITE;
	else
		csr &= ~L64854_WRITE;
	csr |= L64854_INT_EN;

	if (sc->sc_rev == DMAREV_HME) {
		csr |= (D_DSBL_SCSI_DRN | D_EN_DMA);
	}

	L64854_SCSR(sc, csr);

	return 0;
}
/*
 * Pseudo (chained) interrupt from the esp driver to kick the
 * current running DMA transfer.  Called from ncr53c9x_intr()
 * for now.
 *
 * return 1 if it was a DMA continue.
 */
int
lsi64854_scsi_intr(void *arg)
{
	struct lsi64854_softc *sc = arg;
	struct ncr53c9x_softc *nsc = sc->sc_client;
	char bits[64];
	int trans, resid;
	uint32_t csr;

	csr = L64854_GCSR(sc);

	DPRINTF(LDB_SCSI, ("%s: %s: addr 0x%x, csr %s\n",
	    device_xname(sc->sc_dev), __func__,
	    bus_space_read_4(sc->sc_bustag, sc->sc_regs, L64854_REG_ADDR),
	    bitmask_snprintf(csr, DDMACSR_BITS, bits, sizeof(bits))));

	if (csr & (D_ERR_PEND|D_SLAVE_ERR)) {
		printf("%s: error: csr=%s\n", device_xname(sc->sc_dev),
		    bitmask_snprintf(csr, DDMACSR_BITS, bits, sizeof(bits)));
		csr &= ~D_EN_DMA;	/* Stop DMA */
		/* Invalidate the queue; SLAVE_ERR bit is write-to-clear */
		csr |= D_INVALIDATE|D_SLAVE_ERR;
		L64854_SCSR(sc, csr);
		return -1;
	}

	/* This is an "assertion" :) */
	if (sc->sc_active == 0)
		panic("%s: DMA wasn't active", __func__);

	DMA_DRAIN(sc, 0);

	/* DMA has stopped */
	csr &= ~D_EN_DMA;
	L64854_SCSR(sc, csr);
	sc->sc_active = 0;

	if (sc->sc_dmasize == 0) {
		/* A "Transfer Pad" operation completed */
		DPRINTF(LDB_SCSI,
		    ("%s: discarded %d bytes (tcl=%d, tcm=%d)\n", __func__,
		    NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8),
		    NCR_READ_REG(nsc, NCR_TCL),
		    NCR_READ_REG(nsc, NCR_TCM)));
		return 0;
	}

	resid = 0;
	/*
	 * If a transfer onto the SCSI bus gets interrupted by the device
	 * (e.g. for a SAVEPOINTER message), the data in the FIFO counts
	 * as residual since the NCR53C9X counter registers get decremented
	 * as bytes are clocked into the FIFO.
	 */
	if (!(csr & D_WRITE) &&
	    (resid = (NCR_READ_REG(nsc, NCR_FFLAG) & NCRFIFO_FF)) != 0) {
		DPRINTF(LDB_SCSI, ("%s: empty esp FIFO of %d ", __func__,
		    resid));
		if (nsc->sc_rev == NCR_VARIANT_FAS366 &&
		    (NCR_READ_REG(nsc, NCR_CFG3) & NCRFASCFG3_EWIDE))
			resid <<= 1;
	}

	if ((nsc->sc_espstat & NCRSTAT_TC) == 0) {
		/*
		 * `Terminal count' is off, so read the residue
		 * out of the NCR53C9X counter registers.
		 */
		resid += (NCR_READ_REG(nsc, NCR_TCL) |
		    (NCR_READ_REG(nsc, NCR_TCM) << 8) |
		    ((nsc->sc_cfg2 & NCRCFG2_FE) ?
		    (NCR_READ_REG(nsc, NCR_TCH) << 16) : 0));

		if (resid == 0 && sc->sc_dmasize == 65536 &&
		    (nsc->sc_cfg2 & NCRCFG2_FE) == 0)
			/* A transfer of 64K is encoded as `TCL=TCM=0' */
			resid = 65536;
	}

	trans = sc->sc_dmasize - resid;
	if (trans < 0) {			/* transferred < 0 ? */
#if 0
		/*
		 * This situation can happen in perfectly normal operation
		 * if the ESP is reselected while using DMA to select
		 * another target.  As such, don't print the warning.
		 */
		printf("%s: xfer (%d) > req (%d)\n",
		    device_xname(sc->sc_dev), trans, sc->sc_dmasize);
#endif
		trans = sc->sc_dmasize;
	}

	DPRINTF(LDB_SCSI, ("%s: tcl=%d, tcm=%d, tch=%d; trans=%d, resid=%d\n",
	    __func__,
	    NCR_READ_REG(nsc, NCR_TCL),
	    NCR_READ_REG(nsc, NCR_TCM),
	    (nsc->sc_cfg2 & NCRCFG2_FE) ?
	    NCR_READ_REG(nsc, NCR_TCH) : 0,
	    trans, resid));

	if (sc->sc_dmamap->dm_nsegs > 0) {
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0,
		    sc->sc_dmasize,
		    (csr & D_WRITE) != 0 ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);
	}

	*sc->sc_dmalen -= trans;
	*sc->sc_dmaaddr += trans;

#if 0	/* this is not normal operation just yet */
	if (*sc->sc_dmalen == 0 || nsc->sc_phase != nsc->sc_prevphase)
		return 0;

	/* and again */
	dma_start(sc, sc->sc_dmaaddr, sc->sc_dmalen, DMACSR(sc) & D_WRITE);
	return 1;
#endif
	return 0;
}
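/*
 * Reset the DMA engine and reprogram interrupt enable and burst size for
 * the detected chip revision.
 */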
void
lsi64854_reset(struct lsi64854_softc *sc)
{
	uint32_t csr;

	DMA_FLUSH(sc, 1);
	csr = L64854_GCSR(sc);

	DPRINTF(LDB_ANY, ("%s: csr 0x%x\n", __func__, csr));

	/*
	 * XXX is sync needed?
	 */
	if (sc->sc_dmamap->dm_nsegs > 0)
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dmamap);

	if (sc->sc_rev == DMAREV_HME)
		L64854_SCSR(sc, csr | D_HW_RESET_FAS366);

	csr |= L64854_RESET;		/* reset DMA */
	L64854_SCSR(sc, csr);
	DELAY(200);			/* > 10 Sbus clocks(?) */

	/*DMAWAIT1(sc); why was this here? */
	csr = L64854_GCSR(sc);
	csr &= ~L64854_RESET;		/* de-assert reset line */
	L64854_SCSR(sc, csr);
	DELAY(5);			/* allow a few ticks to settle */

	csr = L64854_GCSR(sc);
	csr |= L64854_INT_EN;		/* enable interrupts */
	if (sc->sc_rev > DMAREV_1 && sc->sc_channel == L64854_CHANNEL_SCSI) {
		if (sc->sc_rev == DMAREV_HME)
			csr |= D_TWO_CYCLE;
		else
			csr |= D_FASTER;
	}

	/* Set burst */
	switch (sc->sc_rev) {
	case DMAREV_HME:
	case DMAREV_2:
		csr &= ~L64854_BURST_SIZE;
		if (sc->sc_burst == 32) {
			csr |= L64854_BURST_32;
		} else if (sc->sc_burst == 16) {
			csr |= L64854_BURST_16;
		} else {
			csr |= L64854_BURST_0;
		}
		break;
	case DMAREV_ESC:
		csr |= D_ESC_AUTODRAIN;	/* Auto-drain */
		if (sc->sc_burst == 32) {
			csr &= ~D_ESC_BURST;
		} else {
			csr |= D_ESC_BURST;
		}
		break;
	default:
		break;
	}
	L64854_SCSR(sc, csr);

	if (sc->sc_rev == DMAREV_HME) {
		bus_space_write_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_ADDR, 0);
		sc->sc_dmactl = csr;
	}
	sc->sc_active = 0;

	DPRINTF(LDB_ANY, ("%s: done, csr 0x%x\n", __func__, csr));
}
/*
 * setup a DMA transfer
 */
int
lsi64854_setup_pp(struct lsi64854_softc *sc, uint8_t **addr, size_t *len,
    int datain, size_t *dmasize)
{
	uint32_t csr;

	DMA_FLUSH(sc, 0);

	sc->sc_dmaaddr = addr;
	sc->sc_dmalen = len;

	DPRINTF(LDB_PP, ("%s: pp start %ld@%p,%d\n",
	    device_xname(sc->sc_dev),
	    (long)*sc->sc_dmalen, *sc->sc_dmaaddr, datain ? 1 : 0));

	/*
	 * the rules say we cannot transfer more than the limit
	 * of this DMA chip (64k for old and 16Mb for new),
	 * and we cannot cross a 16Mb boundary.
	 */
	*dmasize = sc->sc_dmasize =
	    min(*dmasize, DMAMAX((size_t)*sc->sc_dmaaddr));

	DPRINTF(LDB_PP, ("%s: dmasize = %ld\n", __func__,
	    (long)sc->sc_dmasize));

	/* Program the DMA address */
	if (sc->sc_dmasize) {
		sc->sc_dvmaaddr = *sc->sc_dmaaddr;
		if (bus_dmamap_load(sc->sc_dmatag, sc->sc_dmamap,
		    *sc->sc_dmaaddr, sc->sc_dmasize,
		    NULL /* kernel address */,
		    BUS_DMA_NOWAIT/*|BUS_DMA_COHERENT*/))
			panic("%s: pp cannot allocate DVMA address",
			    device_xname(sc->sc_dev));
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_dmamap, 0,
		    sc->sc_dmasize,
		    datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
		bus_space_write_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_ADDR, sc->sc_dmamap->dm_segs[0].ds_addr);

		bus_space_write_4(sc->sc_bustag, sc->sc_regs,
		    L64854_REG_CNT, sc->sc_dmasize);
	}

	/* Setup DMA control register */
	csr = L64854_GCSR(sc);
	csr &= ~L64854_BURST_SIZE;
	if (sc->sc_burst == 32) {
		csr |= L64854_BURST_32;
	} else if (sc->sc_burst == 16) {
		csr |= L64854_BURST_16;
	} else {
		csr |= L64854_BURST_0;
	}
	csr |= P_EN_DMA|P_INT_EN|P_EN_CNT;
#if 0
	/* This bit is read-only in PP csr register */
	if (datain)
		csr |= P_WRITE;
	else
		csr &= ~P_WRITE;
#endif
	L64854_SCSR(sc, csr);

	return 0;
}
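/*
 * Attach the SBus `dma'/`espdma'/`ledma' controller: allocate its register
 * resource, determine the channel type from the node name, create the
 * parent DMA tag and add the child device.
 */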
static int
dma_attach(device_t dev)
{
	struct dma_softc *dsc;
	struct lsi64854_softc *lsc;
	struct dma_devinfo *ddi;
	device_t cdev;
	const char *name;
	char *cabletype;
	uint32_t csr;
	phandle_t child, node;
	int error, i;

	dsc = device_get_softc(dev);
	lsc = &dsc->sc_lsi64854;

	name = ofw_bus_get_name(dev);
	node = ofw_bus_get_node(dev);
	dsc->sc_ign = sbus_get_ign(dev);
	dsc->sc_slot = sbus_get_slot(dev);

	i = 0;
	lsc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
	    RF_ACTIVE);
	if (lsc->sc_res == NULL) {
		device_printf(dev, "cannot allocate resources\n");
		return (ENXIO);
	}

	if (strcmp(name, "espdma") == 0 || strcmp(name, "dma") == 0)
		lsc->sc_channel = L64854_CHANNEL_SCSI;
	else if (strcmp(name, "ledma") == 0) {
		/*
		 * Check to see which cable type is currently active and
		 * set the appropriate bit in the ledma csr so that it
		 * gets used.  If we didn't netboot, the PROM won't have
		 * the "cable-selection" property; default to TP and then
		 * the user can change it via a "media" option to ifconfig.
		 */
		csr = L64854_GCSR(lsc);
		if ((OF_getprop_alloc(node, "cable-selection", 1,
		    (void **)&cabletype)) == -1) {
			/* assume TP if nothing there */
			csr |= E_TP_AUI;
		} else {
			if (strcmp(cabletype, "aui") == 0)
				csr &= ~E_TP_AUI;
			else
				csr |= E_TP_AUI;
			free(cabletype, M_OFWPROP);
		}
		L64854_SCSR(lsc, csr);
		DELAY(20000);	/* manual says we need a 20ms delay */
		lsc->sc_channel = L64854_CHANNEL_ENET;
	} else {
		device_printf(dev, "unsupported DMA channel\n");
		error = ENXIO;
		goto fail_lres;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* no locking */
	    &lsc->sc_parent_dmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate parent DMA tag\n");
		goto fail_lres;
	}

	i = sbus_get_burstsz(dev);
	lsc->sc_burst = (i & SBUS_BURST_32) ? 32 :
	    (i & SBUS_BURST_16) ? 16 : 0;
	lsc->sc_dev = dev;

	/* Attach children. */
	i = 0;
	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		if ((ddi = dma_setup_dinfo(dev, dsc, child)) == NULL)
			continue;
		if (i != 0) {
			device_printf(dev,
			    "<%s>: only one child per DMA channel supported\n",
			    ddi->ddi_obdinfo.obd_name);
			dma_destroy_dinfo(ddi);
			continue;
		}
		if ((cdev = device_add_child(dev, NULL, -1)) == NULL) {
			device_printf(dev, "<%s>: device_add_child failed\n",
			    ddi->ddi_obdinfo.obd_name);
			dma_destroy_dinfo(ddi);
			continue;
		}
		device_set_ivars(cdev, ddi);
		i++;
	}

	return (bus_generic_attach(dev));

fail_lres:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(lsc->sc_res),
	    lsc->sc_res);

	return (error);
}
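/*
 * SBus front-end attach: map the controller registers, determine burst
 * sizes from the PROM, select the ledma cable type and attach children.
 */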
void
dmaattach_sbus(device_t parent, device_t self, void *aux)
{
	struct dma_softc *dsc = device_private(self);
	struct lsi64854_softc *sc = &dsc->sc_lsi64854;
	struct sbus_attach_args *sa = aux;
	struct sbus_softc *sbsc = device_private(parent);
	bus_space_tag_t sbt;
	int sbusburst, burst;
	int node;

	node = sa->sa_node;

	sc->sc_dev = self;
	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;

	/* Map registers */
	if (sa->sa_npromvaddrs) {
		sbus_promaddr_to_handle(sa->sa_bustag, sa->sa_promvaddrs[0],
		    &sc->sc_regs);
	} else {
		if (sbus_bus_map(sa->sa_bustag, sa->sa_slot, sa->sa_offset,
		    sa->sa_size, 0, &sc->sc_regs) != 0) {
			aprint_error(": cannot map registers\n");
			return;
		}
	}

	/*
	 * Get transfer burst size from PROM and plug it into the
	 * controller registers.  This is needed on the Sun4m; do
	 * others need it too?
	 */
	sbusburst = sbsc->sc_burst;
	if (sbusburst == 0)
		sbusburst = SBUS_BURST_32 - 1;	/* 1->16 */

	burst = prom_getpropint(node, "burst-sizes", -1);
	if (burst == -1)
		/* take SBus burst sizes */
		burst = sbusburst;

	/* Clamp at parent's burst sizes */
	burst &= sbusburst;
	sc->sc_burst = (burst & SBUS_BURST_32) ? 32 :
	    (burst & SBUS_BURST_16) ? 16 : 0;

	if (device_is_a(self, "ledma")) {
		char *cabletype;
		uint32_t csr;
		/*
		 * Check to see which cable type is currently active and
		 * set the appropriate bit in the ledma csr so that it
		 * gets used.  If we didn't netboot, the PROM won't have
		 * the "cable-selection" property; default to TP and then
		 * the user can change it via a "media" option to ifconfig.
		 */
		cabletype = prom_getpropstring(node, "cable-selection");
		csr = L64854_GCSR(sc);
		if (strcmp(cabletype, "tpe") == 0) {
			csr |= E_TP_AUI;
		} else if (strcmp(cabletype, "aui") == 0) {
			csr &= ~E_TP_AUI;
		} else {
			/* assume TP if nothing there */
			csr |= E_TP_AUI;
		}
		L64854_SCSR(sc, csr);
		delay(20000);	/* manual says we need a 20ms delay */
		sc->sc_channel = L64854_CHANNEL_ENET;
	} else {
		sc->sc_channel = L64854_CHANNEL_SCSI;
	}

	sbus_establish(&dsc->sc_sd, self);

	if ((sbt = bus_space_tag_alloc(sc->sc_bustag, dsc)) == NULL) {
		aprint_error(": out of memory\n");
		return;
	}
	sbt->sparc_intr_establish = dmabus_intr_establish;

	lsi64854_attach(sc);

	/* Attach children */
	for (node = firstchild(sa->sa_node); node;
	    node = nextsibling(node)) {
		struct sbus_attach_args sax;

		sbus_setup_attach_args(sbsc, sbt, sc->sc_dmatag, node, &sax);
		(void)config_found(self, (void *)&sax, dmaprint_sbus);
		sbus_destroy_attach_args(&sax);
	}
}