/*
 * dma_attach - allocate and initialize the per-engine DMA state (DMA64,
 * AI-interconnect variant).
 *
 * name:          caller's name, copied (bounded by MAXNAMEL) into di->name
 *                for use in log/trace messages
 * sih:           backplane handle; queried for core sflags, core id/rev and
 *                bus type
 * dmaregstx/rx:  addresses of the tx/rx dma64 register blocks (stored as-is)
 * ntxd/nrxd:     tx/rx descriptor counts; 0 skips that direction's
 *                allocations entirely
 * rxbufsize:     rx buffer size including extra headroom (headroom is
 *                subtracted below before storing di->rxbufsize)
 * rxextheadroom: extra rx headroom; -1 selects the default BCMEXTRAHDROOM
 * nrxpost:       rx post tunable, saved verbatim into di->nrxpost
 * rxoffset:      rx offset tunable, saved verbatim into di->rxoffset
 * msg_level:     optional message-level override; falls back to the global
 *                dma_msg_level when NULL
 *
 * Returns a pointer to the public struct hnddma_pub embedded in the private
 * dma_info_t, or NULL on failure; on the failure path _dma_detach() releases
 * everything acquired so far.
 */
struct hnddma_pub *dma_attach(char *name, si_t *sih,
			      void *dmaregstx, void *dmaregsrx,
			      uint ntxd, uint nrxd,
			      uint rxbufsize, int rxextheadroom,
			      uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure (GFP_ATOMIC: presumably callable
	 * from atomic context — TODO confirm against callers) */
	di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
	if (di == NULL) {
#ifdef BCMDBG
		printk(KERN_ERR "dma_attach: out of memory\n");
#endif
		return NULL;
	}

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* record whether the core advertises 64-bit DMA capability */
	di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);

	/* init dma reg pointer — this variant only wires up the 64-bit
	 * register blocks and the dma64 ops vector */
	di->d64txregs = (dma64regs_t *) dmaregstx;
	di->d64rxregs = (dma64regs_t *) dmaregsrx;
	di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;

	/* Default flags (which can be changed by the driver calling
	 * dma_ctrlflags before enable): for backwards compatibility both
	 * Rx Overflow Continue and Parity are DISABLED here.
	 */
	di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
				    0);

	DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
		   "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
		   "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
		   di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
		   rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name; explicit termination
	 * because strncpy does not guarantee a trailing NUL */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL - 1] = '\0';

	di->pbus = ((struct si_info *)sih)->pbus;

	/* save tunables */
	di->ntxd = (u16) ntxd;
	di->nrxd = (u16) nrxd;

	/* the actual dma size doesn't include the extra headroom.
	 * NOTE(review): the guard compares against BCMEXTRAHDROOM but
	 * subtracts di->rxextrahdrroom, which may differ when the caller
	 * passes an explicit rxextheadroom — confirm this is intended. */
	di->rxextrahdrroom = (rxextheadroom == -1) ?
	    BCMEXTRAHDROOM : rxextheadroom;
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
	else
		di->rxbufsize = (u16) rxbufsize;

	di->nrxpost = (u16) nrxpost;
	di->rxoffset = (u8) rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     PCI/PCIE: they map silicon backplace address to zero based
	 *               memory, need offset
	 *     Other bus: use zero
	 *     SI_BUS BIGENDIAN kludge: use sdram swapped region for data
	 *               buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset — only the high 32 bits carry the PCIe
	 * backplane offset; the low half stays zero in this variant */
	if (sih->bustype == PCI_BUS) {
		/* pcie with DMA64 */
		di->ddoffsetlow = 0;
		di->ddoffsethigh = SI_PCIE_DMA_H32;
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}
#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
#endif				/* defined(__mips__) && defined(IL_BIGENDIAN) */
	/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
	if ((ai_coreid(sih) == SDIOD_CORE_ID)
	    && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
		di->addrext = 0;
	else if ((ai_coreid(sih) == I2S_CORE_ID) &&
		 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
		di->addrext = 0;
	else
		di->addrext = _dma_isaddrext(di);

	/* does the descriptors need to be aligned and if yes, on 4K/8K or not */
	di->aligndesc_4k = _dma_descriptor_align(di);
	if (di->aligndesc_4k) {
		di->dmadesc_align = D64RINGALIGN_BITS;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relax alignment reqmnt */
			di->dmadesc_align = D64RINGALIGN_BITS - 1;
		}
	} else
		di->dmadesc_align = 4;	/* 16 byte alignment */

	DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
		  di->aligndesc_4k, di->dmadesc_align));

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		di->txp = kzalloc(size, GFP_ATOMIC);
		if (di->txp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory\n",
				   di->name));
			goto fail;
		}
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		di->rxp = kzalloc(size, GFP_ATOMIC);
		if (di->rxp == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory\n",
				   di->name));
			goto fail;
		}
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	/* NOTE(review): ddoffsetlow is assigned only 0 above, so this guard
	 * never fires in this variant — looks like it was kept from the
	 * SB_PCI_DMA (DMA32) code path; confirm whether it is still needed. */
	if ((di->ddoffsetlow != 0) && !di->addrext) {
		if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
			goto fail;
		}
		if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
			DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
			goto fail;
		}
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
		   "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh,
		   di->dataoffsetlow, di->dataoffsethigh, di->addrext));

	/* allocate DMA mapping vectors (scatter-gather bookkeeping, only
	 * when the build enables it) */
	if (DMASGLIST_ENAB) {
		if (ntxd) {
			size = ntxd * sizeof(hnddma_seg_map_t);
			di->txp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->txp_dmah == NULL)
				goto fail;
		}

		if (nrxd) {
			size = nrxd * sizeof(hnddma_seg_map_t);
			di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
			if (di->rxp_dmah == NULL)
				goto fail;
		}
	}

	return (struct hnddma_pub *) di;

 fail:
	/* single cleanup point: releases di and every vector/ring
	 * allocated before the failure */
	_dma_detach(di);
	return NULL;
}
/*
 * dma_attach - allocate and initialize the per-engine DMA state (legacy
 * SB-interconnect variant supporting both DMA32 and DMA64 engines).
 *
 * osh:          OS layer handle passed to MALLOC/MALLOCED and saved in di
 * name:         caller's name, copied (bounded by MAXNAMEL) for log messages
 * sbh:          Sonics backplane handle; queried for core flags and bus type
 * dmaregstx/rx: addresses of the tx/rx register blocks; interpreted as
 *               dma64regs_t or dma32regs_t depending on the core's DMA64 flag
 * ntxd/nrxd:    tx/rx descriptor counts (asserted powers of two); 0 skips
 *               that direction's allocations
 * rxbufsize:    rx buffer size including the BCMEXTRAHDROOM headroom
 * nrxpost:      rx post tunable, saved verbatim into di->nrxpost
 * rxoffset:     rx offset tunable, saved verbatim into di->rxoffset
 * msg_level:    optional message-level override; falls back to the global
 *               dma_msg_level when NULL
 *
 * Returns the public hnddma_t embedded in the private dma_info_t, or NULL
 * on failure; the fail path releases everything via _dma_detach().
 */
hnddma_t *
dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx,
	   void *dmaregsrx, uint ntxd, uint nrxd, uint rxbufsize,
	   uint nrxpost, uint rxoffset, uint *msg_level)
{
	dma_info_t *di;
	uint size;

	/* allocate private info structure */
	if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
		return (NULL);
	}
	/* MALLOC does not zero; clear so unset fields read as 0/NULL */
	bzero((char *)di, sizeof(dma_info_t));

	di->msg_level = msg_level ? msg_level : &dma_msg_level;

	/* old chips w/o sb is no longer supported */
	ASSERT(sbh != NULL);

	/* detect whether this core's DMA engine is the 64-bit flavor */
	di->dma64 = ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);

#ifndef BCMDMA64
	/* hardware is DMA64 but the driver was built without 64-bit DMA
	 * support — refuse to attach rather than misprogram the engine */
	if (di->dma64) {
		DMA_ERROR(("dma_attach: driver doesn't have the capability to support "
			   "64 bits DMA\n"));
		goto fail;
	}
#endif

	/* NOTE(review): dead block — same sb_coreflagshi() test as above with
	 * only a commented-out printk inside; candidate for removal */
	if ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64) {
		//printk("DMA64 supported!\n");
	}

	/* check arguments (ring sizes must be powers of two; a disabled
	 * direction must not pass a register block) */
	ASSERT(ISPOWEROF2(ntxd));
	ASSERT(ISPOWEROF2(nrxd));
	if (nrxd == 0)
		ASSERT(dmaregsrx == NULL);
	if (ntxd == 0)
		ASSERT(dmaregstx == NULL);

	/* init dma reg pointer */
	if (di->dma64) {
		ASSERT(ntxd <= D64MAXDD);
		ASSERT(nrxd <= D64MAXDD);
		di->d64txregs = (dma64regs_t *)dmaregstx;
		di->d64rxregs = (dma64regs_t *)dmaregsrx;

		di->dma64align = D64RINGALIGN;
		if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
			/* for smaller dd table, HW relax the alignment requirement */
			di->dma64align = D64RINGALIGN / 2;
		}
	} else {
		ASSERT(ntxd <= D32MAXDD);
		ASSERT(nrxd <= D32MAXDD);
		di->d32txregs = (dma32regs_t *)dmaregstx;
		di->d32rxregs = (dma32regs_t *)dmaregsrx;
	}

	DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
		   "rxoffset %d dmaregstx %p dmaregsrx %p\n",
		   name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd,
		   rxbufsize, nrxpost, rxoffset, dmaregstx, dmaregsrx));

	/* make a private copy of our callers name; explicit termination
	 * because strncpy does not guarantee a trailing NUL */
	strncpy(di->name, name, MAXNAMEL);
	di->name[MAXNAMEL-1] = '\0';

	di->osh = osh;
	di->sbh = sbh;

	/* save tunables */
	di->ntxd = ntxd;
	di->nrxd = nrxd;

	/* the actual dma size doesn't include the extra headroom */
	if (rxbufsize > BCMEXTRAHDROOM)
		di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
	else
		di->rxbufsize = rxbufsize;

	di->nrxpost = nrxpost;
	di->rxoffset = rxoffset;

	/*
	 * figure out the DMA physical address offset for dd and data
	 *     for old chips w/o sb, use zero
	 *     for new chips w sb,
	 *       PCI/PCIE: they map silicon backplace address to zero based
	 *                 memory, need offset
	 *       Other bus: use zero
	 *       SB_BUS BIGENDIAN kludge: use sdram swapped region for data
	 *                 buffer, not descriptor
	 */
	di->ddoffsetlow = 0;
	di->dataoffsetlow = 0;
	/* for pci bus, add offset */
	if (sbh->bustype == PCI_BUS) {
		if ((sbh->buscoretype == SB_PCIE) && di->dma64) {
			/* pcie with DMA64 */
			di->ddoffsetlow = 0;
			di->ddoffsethigh = SB_PCIE_DMA_H32;
		} else {
			/* pci(DMA32/DMA64) or pcie with DMA32 */
			di->ddoffsetlow = SB_PCI_DMA;
			di->ddoffsethigh = 0;
		}
		/* data buffers use the same translation as descriptors */
		di->dataoffsetlow = di->ddoffsetlow;
		di->dataoffsethigh = di->ddoffsethigh;
	}

#if defined(__mips__) && defined(IL_BIGENDIAN)
	di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
#endif

	di->addrext = _dma_isaddrext(di);

	/* allocate tx packet pointer vector */
	if (ntxd) {
		size = ntxd * sizeof(void *);
		if ((di->txp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
				   di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->txp, size);
	}

	/* allocate rx packet pointer vector */
	if (nrxd) {
		size = nrxd * sizeof(void *);
		if ((di->rxp = MALLOC(osh, size)) == NULL) {
			DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
				   di->name, MALLOCED(osh)));
			goto fail;
		}
		bzero((char *)di->rxp, size);
	}

	/* allocate transmit descriptor ring, only need ntxd descriptors
	 * but it must be aligned */
	if (ntxd) {
		if (!_dma_alloc(di, DMA_TX))
			goto fail;
	}

	/* allocate receive descriptor ring, only need nrxd descriptors
	 * but it must be aligned */
	if (nrxd) {
		if (!_dma_alloc(di, DMA_RX))
			goto fail;
	}

	/* without address extension the descriptor rings must sit within the
	 * PCI DMA window; bail out if _dma_alloc landed them above it */
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) &&
	    !di->addrext) {
		DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
			   di->name, di->txdpa));
		goto fail;
	}
	if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) &&
	    !di->addrext) {
		DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
			   di->name, di->rxdpa));
		goto fail;
	}

	DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
		   "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh,
		   di->dataoffsetlow, di->dataoffsethigh, di->addrext));

	/* allocate tx packet pointer vector and DMA mapping vectors.
	 * NOTE(review): sizeof(osldma_t **) is a pointer-to-pointer size;
	 * sizeof(osldma_t *) was probably meant — same size on common ABIs,
	 * so behavior is unchanged, but worth confirming/tidying. */
	if (ntxd) {
		size = ntxd * sizeof(osldma_t **);
		if ((di->txp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char*)di->txp_dmah, size);
	} else
		di->txp_dmah = NULL;

	/* allocate rx packet pointer vector and DMA mapping vectors */
	if (nrxd) {
		size = nrxd * sizeof(osldma_t **);
		if ((di->rxp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
			goto fail;
		bzero((char*)di->rxp_dmah, size);
	} else
		di->rxp_dmah = NULL;

	/* initialize opsvec of function pointers */
	di->hnddma.di_fn = DMA64_ENAB(di) ? dma64proc : dma32proc;

	return ((hnddma_t *)di);

fail:
	/* single cleanup point: releases di and every vector/ring
	 * allocated before the failure */
	_dma_detach(di);
	return (NULL);
}