/*
 * Reset the transmit DMA engine.
 *
 * Sequence matters: the engine is first suspended (XC_SE) and polled until
 * it reports disabled/idle/stopped, and only then fully disabled, because
 * the engine does not stop until any in-flight transfer completes.
 *
 * @param di  DMA channel state; di->regs must point at live core registers.
 */
void
dma_txreset(dma_info_t *di)
{
	uint32 status;

	DMA_TRACE(("%s: dma_txreset\n", di->name));

	/* address PR8249/PR7577 issue */
	/* suspend tx DMA first */
	W_REG(&di->regs->xmtcontrol, XC_SE);
	/* poll until the engine reaches a quiescent state (up to 10ms) */
	SPINWAIT((status = (R_REG(&di->regs->xmtstatus) & XS_XS_MASK)) != XS_XS_DISABLED &&
	         status != XS_XS_IDLE && status != XS_XS_STOPPED,
	         10000);

	/* PR2414 WAR: DMA engines are not disabled until transfer finishes */
	W_REG(&di->regs->xmtcontrol, 0);
	SPINWAIT((status = (R_REG(&di->regs->xmtstatus) & XS_XS_MASK)) != XS_XS_DISABLED,
	         10000);

	if (status != XS_XS_DISABLED) {
		DMA_ERROR(("%s: dma_txreset: dma cannot be stopped\n", di->name));
	}

	/* wait for the last transaction to complete */
	OSL_DELAY(300);
}
/*
 * Reclaim and free every receive packet still posted to the rx ring.
 * Passes forceall = TRUE to dma_getnextrxp() so packets are taken back
 * regardless of hardware completion state.
 */
void
dma_rxreclaim(dma_info_t *di)
{
	void *pkt;

	DMA_TRACE(("%s: dma_rxreclaim\n", di->name));

	for (pkt = dma_getnextrxp(di, TRUE); pkt != NULL; pkt = dma_getnextrxp(di, TRUE))
		PKTFREE(di->drv, pkt, FALSE);
}
/*
 * Reclaim and free transmitted packets from the tx ring.
 *
 * @param forceall  when TRUE, reclaim every posted packet (e.g. at reset);
 *                  when FALSE, only packets the hardware has completed.
 */
void
dma_txreclaim(dma_info_t *di, bool forceall)
{
	void *pkt;

	DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));

	for (;;) {
		pkt = dma_getnexttxp(di, forceall);
		if (pkt == NULL)
			break;
		PKTFREE(di->drv, pkt, TRUE);
	}
}
/*
 * Initialize the receive channel: reset software ring indexes, zero the
 * descriptor ring, enable the engine, then program the ring base address.
 * The engine is enabled before the base address is written — preserved as-is;
 * NOTE(review): presumably safe because no descriptors are posted yet.
 */
void
dma_rxinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxinit\n", di->name));

	di->rxin = di->rxout = 0;

	/* clear rx descriptor ring */
	BZERO_SM((void*)di->rxd, (di->nrxd * sizeof (dmadd_t)));

	dma_rxenable(di);

	/* point the chip at the ring's physical address, adjusted by ddoffset */
	W_REG(&di->regs->rcvaddr, ((uint32)di->rxdpa + di->ddoffset));
}
/*
 * Initialize the transmit channel: reset software ring indexes and the
 * flow-control count, zero the descriptor ring, enable the engine, then
 * program the ring base address.
 */
void
dma_txinit(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txinit\n", di->name));

	di->txin = di->txout = 0;
	/* one descriptor is always kept unused to distinguish full from empty */
	di->txavail = di->ntxd - 1;

	/* clear tx descriptor ring */
	BZERO_SM((void*)di->txd, (di->ntxd * sizeof (dmadd_t)));

	W_REG(&di->regs->xmtcontrol, XC_XE);
	/* point the chip at the ring's physical address, adjusted by ddoffset */
	W_REG(&di->regs->xmtaddr, ((uint32)di->txdpa + di->ddoffset));
}
/*
 * Reset the receive DMA engine: disable it and poll (up to 10ms) until the
 * hardware reports the disabled state, logging an error if it never does.
 */
void
dma_rxreset(dma_info_t *di)
{
	uint32 status;

	DMA_TRACE(("%s: dma_rxreset\n", di->name));

	/* PR2414 WAR: DMA engines are not disabled until transfer finishes */
	W_REG(&di->regs->rcvcontrol, 0);
	SPINWAIT((status = (R_REG(&di->regs->rcvstatus) & RS_RS_MASK)) != RS_RS_DISABLED,
	         10000);

	if (status != RS_RS_DISABLED) {
		DMA_ERROR(("%s: dma_rxreset: dma cannot be stopped\n", di->name));
	}
}
/* !! may be called with core in reset */
/*
 * Free all resources owned by a dma_info_t: descriptor rings (32- or 64-bit
 * variant), packet pointer vectors, DMA mapping handle vectors, and finally
 * the structure itself. Callers must have reclaimed all packets first.
 *
 * The ring free address/physical address are backed up by txdalign/rxdalign
 * (byte counts) to undo the alignment rounding applied at allocation time.
 */
static void
_dma_detach(dma_info_t *di)
{
	if (di == NULL)
		return;

	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings */
	if (DMA64_ENAB(di)) {
		if (di->txd64)
			DMA_FREE_CONSISTENT(di->osh,
			                    ((int8*)(uintptr)di->txd64 - di->txdalign),
			                    di->txdalloc,
			                    (di->txdpa - di->txdalign), &di->tx_dmah);
		if (di->rxd64)
			DMA_FREE_CONSISTENT(di->osh,
			                    ((int8*)(uintptr)di->rxd64 - di->rxdalign),
			                    di->rxdalloc,
			                    (di->rxdpa - di->rxdalign), &di->rx_dmah);
	} else {
		if (di->txd32)
			DMA_FREE_CONSISTENT(di->osh,
			                    ((int8*)(uintptr)di->txd32 - di->txdalign),
			                    di->txdalloc,
			                    (di->txdpa - di->txdalign), &di->tx_dmah);
		if (di->rxd32)
			DMA_FREE_CONSISTENT(di->osh,
			                    ((int8*)(uintptr)di->rxd32 - di->rxdalign),
			                    di->rxdalloc,
			                    (di->rxdpa - di->rxdalign), &di->rx_dmah);
	}

	/* free packet pointer vectors */
	if (di->txp)
		MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
	if (di->rxp)
		MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));

	/* free tx packet DMA handles */
	if (di->txp_dmah)
		MFREE(di->osh, (void *)di->txp_dmah, di->ntxd * sizeof(osldma_t **));

	/* free rx packet DMA handles */
	if (di->rxp_dmah)
		MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(osldma_t **));

	/* free our private info structure */
	MFREE(di->osh, (void *)di, sizeof(dma_info_t));
}
/*
 * Reclaim next completed txd (txds if using chained buffers) and
 * return associated packet.
 * If 'force' is true, reclaim txd(s) and return associated packet
 * regardless of the value of the hardware "curr" pointer.
 */
/*
 * Walks descriptors from txin toward either txout (forceall) or the
 * hardware current-descriptor index, unmapping each buffer and clearing
 * its descriptor, and stops after the first descriptor that carries a
 * packet pointer (one packet may span several descriptors).
 * Returns the reclaimed packet, or NULL when nothing is reclaimable.
 */
void*
dma_getnexttxp(dma_info_t *di, bool forceall)
{
	uint start, end, i;
	void *txp;

	DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));

	txp = NULL;

	/* if forcing, dma engine must be disabled */
	ASSERT(!forceall || !dma_txenabled(di));

	start = di->txin;
	if (forceall)
		end = di->txout;
	else
		/* hardware reports current descriptor as a byte offset; convert to index */
		end = B2I(R_REG(&di->regs->xmtstatus) & XS_CD_MASK);

	/* PR4738 - xmt disable/re-enable does not clear CURR */
	if ((start == 0) && (end > di->txout))
		goto bogus;

	for (i = start; i != end && !txp; i = NEXTTXD(i)) {
		/* unmap using the address/length stored in the descriptor itself */
		DMA_UNMAP(di->dev,
		          (BUS_SWAP32(R_SM(&di->txd[i].addr)) - di->dataoffset),
		          (BUS_SWAP32(R_SM(&di->txd[i].ctrl)) & CTRL_BC_MASK),
		          DMA_TX, di->txp[i]);
		W_SM(&di->txd[i].addr, 0);
		/* loop ends once a non-NULL packet pointer is found */
		txp = di->txp[i];
		di->txp[i] = NULL;
	}

	di->txin = i;

	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (txp);

bogus:
	/* DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall)); */
	return (NULL);
}
/*
 * Free the descriptor rings and the private dma_info_t.
 * Callers must have reclaimed all tx/rx packets first.
 *
 * Fix: txdalign/rxdalign are byte counts (computed in dma_attach as the
 * distance from the raw allocation to the aligned ring), so the original
 * `di->txd - di->txdalign` — pointer arithmetic on a dmadd_t* — scaled the
 * offset by sizeof(dmadd_t) and freed the wrong address. Back up with
 * integer arithmetic instead, as the 64-bit-capable _dma_detach does.
 */
void
dma_detach(dma_info_t *di)
{
	if (di == NULL)
		return;

	DMA_TRACE(("%s: dma_detach\n", di->name));

	/* shouldn't be here if descriptors are unreclaimed */
	ASSERT(di->txin == di->txout);
	ASSERT(di->rxin == di->rxout);

	/* free dma descriptor rings (undo the alignment offset in bytes) */
	if (di->txd)
		DMA_FREE_CONSISTENT(di->dev, (void *)((uintptr)di->txd - di->txdalign),
		                    (DMAMAXRINGSZ + DMARINGALIGN), di->txdpa);
	if (di->rxd)
		DMA_FREE_CONSISTENT(di->dev, (void *)((uintptr)di->rxd - di->rxdalign),
		                    (DMAMAXRINGSZ + DMARINGALIGN), di->rxdpa);

	/* free our private info structure */
	MFREE((void*)di, sizeof (dma_info_t));
}
/* returns a pointer to the next frame received, or NULL if there are no more */
/*
 * The DMA engine writes the frame length as a little-endian u16 at the
 * start of each receive buffer; the usable frame begins at rxoffset.
 * Frames too large for one buffer are dropped, and the remaining buffers
 * they span are consumed and freed on subsequent iterations.
 */
void*
dma_rx(dma_info_t *di)
{
	void *pkt;
	uint framelen;
	int resid = 0;

	while ((pkt = dma_getnextrxp(di, FALSE)) != NULL) {
		/* still draining the tail of a previously dropped giant frame */
		if (resid > 0) {
			resid -= di->rxbufsize;
			if (resid < 0)
				resid = 0;
			PKTFREE(di->drv, pkt, FALSE);
			continue;
		}

		framelen = ltoh16(*(uint16*)(PKTDATA(di->drv, pkt)));
		DMA_TRACE(("%s: dma_rx len %d\n", di->name, framelen));

		/* bad frame length check */
		if (framelen > (di->rxbufsize - di->rxoffset)) {
			DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, framelen));
			if (framelen > 0)
				resid = framelen - (di->rxbufsize - di->rxoffset);
			PKTFREE(di->drv, pkt, FALSE);
			di->hnddma.rxgiants++;
			continue;
		}

		/* set actual length */
		PKTSETLEN(di->drv, pkt, (di->rxoffset + framelen));
		break;
	}

	return (pkt);
}
/* post receive buffers */
/*
 * Top up the receive ring to nrxpost descriptors: allocate a packet per
 * missing slot, DMA-map it, write the descriptor, then advance the chip's
 * lastdscr pointer once at the end. Stops early (without error return) if
 * packet allocation fails.
 */
void
dma_rxfill(dma_info_t *di)
{
	void *p;
	uint rxin, rxout;
	uint ctrl;
	uint n;
	uint i;
	uint32 pa;
	uint rxbufsize;

	/*
	 * Determine how many receive buffers we're lacking
	 * from the full complement, allocate, initialize,
	 * and post them, then update the chip rx lastdscr.
	 */
	rxin = di->rxin;
	rxout = di->rxout;
	rxbufsize = di->rxbufsize;

	n = di->nrxpost - NRXDACTIVE(rxin, rxout);

	DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));

	for (i = 0; i < n; i++) {
		if ((p = PKTGET(di->drv, rxbufsize, FALSE)) == NULL) {
			DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
			di->hnddma.rxnobuf++;
			break;
		}

		/* PR3263 & PR3387 & PR4642 war: rxh.len=0 means dma writes not complete */
		/* zero written through the uncached view so dma_rx can poll it */
		*(uint32*)(OSL_UNCACHED(PKTDATA(di->drv, p))) = 0;

		pa = (uint32) DMA_MAP(di->dev, PKTDATA(di->drv, p), rxbufsize, DMA_RX, p);
		ASSERT(ISALIGNED(pa, 4));

		/* save the free packet pointer */
#if 0
		ASSERT(di->rxp[rxout] == NULL);
#endif
		di->rxp[rxout] = p;

		/* paranoia */
		ASSERT(R_SM(&di->rxd[rxout].addr) == 0);

		/* prep the descriptor control value */
		ctrl = rxbufsize;
		/* last ring entry must wrap back to the start */
		if (rxout == (di->nrxd - 1))
			ctrl |= CTRL_EOT;

		/* init the rx descriptor */
		W_SM(&di->rxd[rxout].ctrl, BUS_SWAP32(ctrl));
		W_SM(&di->rxd[rxout].addr, BUS_SWAP32(pa + di->dataoffset));
		DMA_TRACE(("%s: dma_rxfill: ctrl %08x dataoffset: %08x\n",
		           di->name, BUS_SWAP32(ctrl), BUS_SWAP32(pa + di->dataoffset)));

		rxout = NEXTRXD(rxout);
	}

	di->rxout = rxout;

	/* update the chip lastdscr pointer */
	W_REG(&di->regs->rcvptr, I2B(rxout));
}
/*
 * Enable transmit FIFO loopback mode (XC_LE) — tx data is looped back to
 * the receive path. Single read-modify-write of xmtcontrol.
 */
void
dma_fifoloopbackenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
	OR_REG(&di->regs->xmtcontrol, XC_LE);
}
/*
 * Resume a suspended transmit engine by clearing the suspend bit (XC_SE).
 * Counterpart of dma_txsuspend().
 */
void
dma_txresume(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txresume\n", di->name));
	AND_REG(&di->regs->xmtcontrol, ~XC_SE);
}
/*
 * Suspend the transmit engine by setting the suspend bit (XC_SE).
 * Does not wait for the engine to drain; see dma_txreset() for that.
 */
void
dma_txsuspend(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_txsuspend\n", di->name));
	OR_REG(&di->regs->xmtcontrol, XC_SE);
}
/*
 * Allocate and initialize a DMA channel (32-bit descriptor variant).
 * Allocates the private state plus aligned tx/rx descriptor rings and
 * records all tunables. Returns an opaque handle (dma_info_t *) on
 * success, NULL on failure; on failure any partial allocations are
 * released via dma_detach().
 *
 * @param ddoffset    offset added to descriptor-ring physical addresses
 *                    before they are programmed into the chip
 * @param dataoffset  offset added to data-buffer physical addresses
 * @param msg_level   optional caller-owned message level; falls back to
 *                    the module-global dma_msg_level when NULL
 */
void*
dma_attach(void *drv, void *dev, char *name, dmaregs_t *regs, uint ntxd, uint nrxd,
           uint rxbufsize, uint nrxpost, uint rxoffset, uint ddoffset, uint dataoffset,
           uint *msg_level)
{
	dma_info_t *di;
	void *va;

	ASSERT(ntxd <= MAXDD);
	ASSERT(nrxd <= MAXDD);

	/* allocate private info structure */
	if ((di = MALLOC(sizeof (dma_info_t))) == NULL)
		return (NULL);
	bzero((char*)di, sizeof (dma_info_t));

	/* set message level */
	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	DMA_TRACE(("%s: dma_attach: drv 0x%x dev 0x%x regs 0x%x ntxd %d nrxd %d rxbufsize %d nrxpost %d rxoffset %d ddoffset 0x%x dataoffset 0x%x\n", name, (uint)drv, (uint)dev, (uint)regs, ntxd, nrxd, rxbufsize, nrxpost, rxoffset, ddoffset, dataoffset));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->drv = drv;
	di->dev = dev;
	di->regs = regs;

	/* allocate transmit descriptor ring */
	if (ntxd) {
		/* over-allocate so the ring can be rounded up to DMARINGALIGN;
		 * txdalign records the byte offset so detach can free the
		 * original (unaligned) allocation
		 */
		if ((va = DMA_ALLOC_CONSISTENT(dev, (DMAMAXRINGSZ + DMARINGALIGN), &di->txdpa)) == NULL)
			goto fail;
		di->txd = (dmadd_t*) ROUNDUP(va, DMARINGALIGN);
		di->txdalign = ((uint)di->txd - (uint)va);
		di->txdpa = (void*) ((uint)di->txdpa + di->txdalign);
		ASSERT(ISALIGNED(di->txd, DMARINGALIGN));
	}

	/* allocate receive descriptor ring */
	if (nrxd) {
		if ((va = DMA_ALLOC_CONSISTENT(dev, (DMAMAXRINGSZ + DMARINGALIGN), &di->rxdpa)) == NULL)
			goto fail;
		di->rxd = (dmadd_t*) ROUNDUP(va, DMARINGALIGN);
		di->rxdalign = ((uint)di->rxd - (uint)va);
		di->rxdpa = (void*) ((uint)di->rxdpa + di->rxdalign);
		ASSERT(ISALIGNED(di->rxd, DMARINGALIGN));
	}

	/* save tunables */
	di->ntxd = ntxd;
	di->nrxd = nrxd;
	di->rxbufsize = rxbufsize;
	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;
	di->ddoffset = ddoffset;
	di->dataoffset = dataoffset;

	return ((void*)di);

fail:
	dma_detach((void*)di);
	return (NULL);
}
/*
 * Allocate and initialize a DMA channel (SB-core, 32- or 64-bit capable).
 *
 * Detects 64-bit DMA capability from the core flags, validates arguments,
 * sets up register pointers, computes PCI/PCIE address offsets, and
 * allocates the packet pointer vectors, descriptor rings, and DMA handle
 * vectors. Returns the public hnddma_t handle, or NULL on failure (all
 * partial allocations are released via _dma_detach).
 *
 * Fix: removed a dead, empty `if` that re-read sb_coreflagshi() for the
 * SBTMH_DMA64 bit already captured in di->dma64 and contained only a
 * commented-out printk.
 */
hnddma_t *
dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
           uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset,
           uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
		return (NULL);
	}
	bzero((char *)di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb is no longer supported */
	ASSERT(sbh != NULL);

	di->dma64 = ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);

#ifndef BCMDMA64
	if (di->dma64) {
		DMA_ERROR(("dma_attach: driver doesn't have the capability to support "
		           "64 bits DMA\n"));
		goto fail;
	}
#endif

	/* check arguments */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));
	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (di->dma64) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *)dmaregstx;
		di->d64rxregs = (dma64regs_t *)dmaregsrx;

		di->dma64align = D64RINGALIGN;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relax the alignment requirement */
			di->dma64align = D64RINGALIGN / 2;
		}
	} else {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *)dmaregstx;
		di->d32rxregs = (dma32regs_t *)dmaregsrx;
	}

	DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
	           "rxoffset %d dmaregstx %p dmaregsrx %p\n",
	           name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd, rxbufsize,
	           nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->osh = osh;
	di->sbh = sbh;

	/* save tunables */
	di->ntxd = ntxd;
	di->nrxd = nrxd;

	/* the actual dma size doesn't include the extra headroom */
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
	else
		di->rxbufsize = rxbufsize;

	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     for old chips w/o sb, use zero
	 *     for new chips w sb,
	 *         PCI/PCIE: they map silicon backplace address to zero based memory, need offset
	 *         Other bus: use zero
	 *         SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sbh->bustype == PCI_BUS) {
		if ((sbh->buscoretype == SB_PCIE) && di->dma64) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SB_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			di->ddoffsetlow = SB_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif

	di->addrext = _dma_isaddrext(di);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		if ((di->txp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->txp, size);
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		if ((di->rxp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
			           di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->rxp, size);
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	/* without address extension, ring physical addresses must fit the PCI window */
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
		           di->name, di->txdpa));
		goto fail;
	}
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
		DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
		           di->name, di->rxdpa));
		goto fail;
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	           "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
	           di->dataoffsethigh, di->addrext));

	/* allocate tx packet pointer vector and DMA mapping vectors */
	if (ntxd) {
		size = ntxd * sizeof(osldma_t **);
		if ((di->txp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char *)di->txp_dmah, size);
	} else
		di->txp_dmah = NULL;

	/* allocate rx packet pointer vector and DMA mapping vectors */
	if (nrxd) {
		size = nrxd * sizeof(osldma_t **);
		if ((di->rxp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char *)di->rxp_dmah, size);
	} else
		di->rxp_dmah = NULL;

	/* initialize opsvec of function pointers */
	di->hnddma.di_fn = DMA64_ENAB(di) ? dma64proc : dma32proc;

	return ((hnddma_t *)di);

fail:
	_dma_detach(di);
	return (NULL);
}
/*
 * Enable the receive engine, programming the configured rx header offset
 * (RC_RO field) and the enable bit in a single register write.
 */
void
dma_rxenable(dma_info_t *di)
{
	DMA_TRACE(("%s: dma_rxenable\n", di->name));
	W_REG(&di->regs->rcvcontrol, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
}
/*
 * Allocate and initialize a DMA channel (DMA64-only, Linux variant).
 *
 * Sets up register pointers and the dma64proc ops vector, computes PCI
 * address offsets, applies per-core address-extension workarounds,
 * determines descriptor alignment, and allocates packet pointer vectors,
 * descriptor rings, and (optionally) scatter-gather mapping vectors.
 * Returns the public hnddma_pub handle, or NULL on failure; partial
 * allocations are released via _dma_detach().
 *
 * @param rxextheadroom  extra rx headroom; -1 selects BCMEXTRAHDROOM
 */
struct hnddma_pub *dma_attach(char *name, si_t *sih, void *dmaregstx,
                              void *dmaregsrx, uint ntxd, uint nrxd,
                              uint rxbufsize, int rxextheadroom,
                              uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
#ifdef BCMDBG
		printk(KERN_ERR "dma_attach: out of memory\n");
#endif
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg pointer */
	di->d64txregs = (dma64regs_t *) dmaregstx;
	di->d64rxregs = (dma64regs_t *) dmaregsrx;
	di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;

	/* Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): for backwards compatibility both
	 * Rx Overflow Continue and Parity are DISABLED here.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
	           "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
	           "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
	           di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
	           rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->pbus = ((struct si_info *)sih)->pbus;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom */
	di->rxextrahdrroom = (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplace address to zero based memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sih->bustype == PCI_BUS) {
		/* pcie with DMA64 */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((ai_coreid(sih) == SDIOD_CORE_ID)
	    && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((ai_coreid(sih) == I2S_CORE_ID) &&
		 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptors need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relax alignment reqmnt */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
		}
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
	          di->aligndesc_4k, di->dmadesc_align));

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
			goto fail;
		}
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
			goto fail;
		}
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	/* without address extension, ring physical addresses must fit the PCI window */
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n",
			           di->name, (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n",
			           di->name, (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
	           "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh,
	           di->dataoffsetlow, di->dataoffsethigh, di->addrext));

	/* allocate DMA mapping vectors */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			di->txp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->txp_dmah == NULL)
				goto fail;
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->rxp_dmah == NULL)
				goto fail;
		}
	}

	return (struct hnddma_pub *) di;

fail:
	_dma_detach(di);
	return NULL;
}
/*
 * Just like above except go through the extra effort of splitting
 * buffers that cross 4Kbyte boundaries into multiple tx descriptors.
 */
/*
 * Posts the packet chain p0 to the tx ring and kicks the chip.
 * Returns 0 on success; on descriptor exhaustion frees the entire chain,
 * zeroes txavail, bumps the txnobuf counter and returns -1.
 *
 * Flag placement: SOF on the first descriptor of the first buffer,
 * IOC|EOF on the descriptor covering the end of the last buffer,
 * EOT on whichever descriptor occupies the last ring slot.
 */
int
dma_tx(dma_info_t *di, void *p0, uint32 coreflags)
{
	void *p, *next;
	uchar *data;
	uint plen, len;
	uchar *page, *start, *end;
	uint txout;
	uint32 ctrl;
	uint32 pa;

	DMA_TRACE(("%s: dma_tx\n", di->name));

	txout = di->txout;
	ctrl = 0;

	/*
	 * Walk the chain of packet buffers
	 * splitting those that cross 4 Kbyte boundaries
	 * allocating and initializing transmit descriptor entries.
	 */
	for (p = p0; p; p = next) {
		data = PKTDATA(di->drv, p);
		plen = PKTLEN(di->drv, p);
		next = PKTNEXT(di->drv, p);

		/* PR988 - skip zero length buffers */
		if (plen == 0)
			continue;

		/* one descriptor per 4K page the buffer touches */
		for (page = (uchar*)PAGEBASE(data);
		     page <= (uchar*)PAGEBASE(data + plen - 1);
		     page += PAGESZ) {

			/* return nonzero if out of tx descriptors */
			if (NEXTTXD(txout) == di->txin)
				goto outoftxd;

			/* first fragment starts at data; last ends at data+plen */
			start = (page == (uchar*)PAGEBASE(data))? data: page;
			end = (page == (uchar*)PAGEBASE(data + plen))? (data + plen): (page + PAGESZ);
			len = end - start;

			/* build the descriptor control value */
			ctrl = len & CTRL_BC_MASK;

			/* PR3697: Descriptor flags are not ignored for descriptors where SOF is clear */
			ctrl |= coreflags;

			if ((p == p0) && (start == data))
				ctrl |= CTRL_SOF;
			if ((next == NULL) && (end == (data + plen)))
				ctrl |= (CTRL_IOC | CTRL_EOF);
			if (txout == (di->ntxd - 1))
				ctrl |= CTRL_EOT;

			/* get physical address of buffer start */
			pa = (uint32) DMA_MAP(di->dev, start, len, DMA_TX, p);

			/* init the tx descriptor */
			W_SM(&di->txd[txout].ctrl, BUS_SWAP32(ctrl));
			W_SM(&di->txd[txout].addr, BUS_SWAP32(pa + di->dataoffset));

			ASSERT(di->txp[txout] == NULL);

			txout = NEXTTXD(txout);
		}
	}

	/* if last txd eof not set, fix it */
	if (!(ctrl & CTRL_EOF))
		W_SM(&di->txd[PREVTXD(txout)].ctrl, BUS_SWAP32(ctrl | CTRL_IOC | CTRL_EOF));

	/* save the packet */
	/* chain head is stored at the first descriptor index it occupies */
	di->txp[di->txout] = p0;

	/* bump the tx descriptor index */
	di->txout = txout;

	/* kick the chip */
	W_REG(&di->regs->xmtptr, I2B(txout));

	/* tx flow control */
	di->txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;

	return (0);

outoftxd:
	DMA_ERROR(("%s: dma_tx: out of txds\n", di->name));
	PKTFREE(di->drv, p0, TRUE);
	di->txavail = 0;
	di->hnddma.txnobuf++;
	return (-1);
}